diff --git a/.gitattributes b/.gitattributes
index 21d8eca96754804c6487d640534b05ed5f83dad4..0cd58331b2a989b68be4ec5676383437fca8687b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -34,6 +34,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.so filter=lfs diff=lfs merge=lfs -text
-build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd filter=lfs diff=lfs merge=lfs -text
-media/benches.gif filter=lfs diff=lfs merge=lfs -text
-media/benches.mp4 filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index 9f52c61476792e17c42362b1b3aae4a751fd6cbe..6c2e823d2c11f8bd341796712252d0beceb93bfd 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,10 @@
---
tags:
- - kernels
+- kernel
---
+
## Activation
-Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
-
-Kernel source: https://github.com/huggingface/kernels-community/tree/main/activation
-
-### Performance
-
-
-
-
-
-
-
-
-
-
-
-
-
+Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
\ No newline at end of file
diff --git a/activation/activation_kernels.cu b/activation/activation_kernels.cu
new file mode 100644
index 0000000000000000000000000000000000000000..55e6596797010403c8f2d8cc4d2ebbcae1c75d7e
--- /dev/null
+++ b/activation/activation_kernels.cu
@@ -0,0 +1,225 @@
+#include <ATen/cuda/CUDAContext.h>
+#include <torch/all.h>
+#include <c10/cuda/CUDAGuard.h>
+
+#include <cmath>
+
+#include "cuda_compat.h"
+#include "dispatch_utils.h"
+
+namespace vllm {
+
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&),
+          bool act_first>
+__device__ __forceinline__ scalar_t compute(const scalar_t& x,
+ const scalar_t& y) {
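+  // act_first selects the gating order: ACT_FN(x) * y (as in silu_and_mul)
+  // when true, x * ACT_FN(y) (as in mul_and_silu) when false.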
+ return act_first ? ACT_FN(x) * y : x * ACT_FN(y);
+}
+// Activation and gating kernel template.
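+// One thread block per token: x occupies the first d elements of an input row
+// and y the second d, so a row of width 2 * d yields an output row of width d.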
+
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&),
+          bool act_first>
+__global__ void act_and_mul_kernel(
+ scalar_t* __restrict__ out, // [..., d]
+ const scalar_t* __restrict__ input, // [..., 2, d]
+ const int d) {
+ const int64_t token_idx = blockIdx.x;
+ for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
+ const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
+ const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
+    out[token_idx * d + idx] = compute<scalar_t, ACT_FN, act_first>(x, y);
+ }
+}
+
+template <typename T>
+__device__ __forceinline__ T silu_kernel(const T& x) {
+ // x * sigmoid(x)
+ return (T)(((float)x) / (1.0f + expf((float)-x)));
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_kernel(const T& x) {
+ // Equivalent to PyTorch GELU with 'none' approximation.
+ // Refer to:
+ // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L36-L38
+ const float f = (float)x;
+ constexpr float ALPHA = M_SQRT1_2;
+ return (T)(f * 0.5f * (1.0f + ::erf(f * ALPHA)));
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_tanh_kernel(const T& x) {
+ // Equivalent to PyTorch GELU with 'tanh' approximation.
+ // Refer to:
+ // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L25-L30
+ const float f = (float)x;
+ constexpr float BETA = M_SQRT2 * M_2_SQRTPI * 0.5f;
+ constexpr float KAPPA = 0.044715;
+ float x_cube = f * f * f;
+ float inner = BETA * (f + KAPPA * x_cube);
+ return (T)(0.5f * f * (1.0f + ::tanhf(inner)));
+}
+
+} // namespace vllm
+
+// Launch activation and gating kernel.
+// Use ACT_FIRST (bool) indicating whether to apply the activation function
+// first.
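+// Launch config: one thread block per token (grid = num_tokens), with
+// min(d, 1024) threads per block; each thread strides over the hidden size d.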
+#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL, ACT_FIRST) \
+ int d = input.size(-1) / 2; \
+ int64_t num_tokens = input.numel() / input.size(-1); \
+ dim3 grid(num_tokens); \
+ dim3 block(std::min(d, 1024)); \
+ if (num_tokens == 0) { \
+ return; \
+ } \
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \
+ const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \
+ VLLM_DISPATCH_FLOATING_TYPES( \
+ input.scalar_type(), "act_and_mul_kernel", [&] { \
+        vllm::act_and_mul_kernel<scalar_t, KERNEL<scalar_t>, ACT_FIRST> \
+            <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(), \
+                                         input.data_ptr<scalar_t>(), d); \
+ });
+
+void silu_and_mul(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel, true);
+}
+
+void mul_and_silu(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ // The difference between mul_and_silu and silu_and_mul is that mul_and_silu
+ // applies the silu to the latter half of the input.
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel, false);
+}
+
+void gelu_and_mul(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel, true);
+}
+
+void gelu_tanh_and_mul(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel, true);
+}
+
+namespace vllm {
+
+template <typename T>
+__device__ __forceinline__ T fatrelu_kernel(const T& x, const float threshold) {
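+  // FATReLU: keep values strictly greater than `threshold`, zero the rest.
+  // The threshold is forwarded from the fatrelu_and_mul op argument.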
+ const float f = (float)x;
+ return (T)(f > threshold ? f : 0.0f);
+}
+
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&, const float)>
+__global__ void act_and_mul_kernel_with_param(
+ scalar_t* __restrict__ out, const scalar_t* __restrict__ input, const int d,
+ const float param) {
+ const int64_t token_idx = blockIdx.x;
+ for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
+ const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
+ const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
+ out[token_idx * d + idx] = ACT_FN(x, param) * y;
+ }
+}
+
+} // namespace vllm
+
+#define LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(KERNEL, PARAM) \
+ int d = input.size(-1) / 2; \
+ int64_t num_tokens = input.numel() / input.size(-1); \
+ dim3 grid(num_tokens); \
+ dim3 block(std::min(d, 1024)); \
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \
+ const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \
+ VLLM_DISPATCH_FLOATING_TYPES( \
+ input.scalar_type(), "act_and_mul_kernel_with_param", [&] { \
+        vllm::act_and_mul_kernel_with_param<scalar_t, KERNEL<scalar_t>> \
+            <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(), \
+                                         input.data_ptr<scalar_t>(), d, \
+                                         PARAM); \
+ });
+
+void fatrelu_and_mul(torch::Tensor& out, // [..., d],
+ torch::Tensor& input, // [..., 2 * d]
+ double threshold) {
+ LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(vllm::fatrelu_kernel, threshold);
+}
+
+namespace vllm {
+
+// Element-wise activation kernel template.
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)>
+__global__ void activation_kernel(
+ scalar_t* __restrict__ out, // [..., d]
+ const scalar_t* __restrict__ input, // [..., d]
+ const int d) {
+ const int64_t token_idx = blockIdx.x;
+ for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
+ const scalar_t x = VLLM_LDG(&input[token_idx * d + idx]);
+ out[token_idx * d + idx] = ACT_FN(x);
+ }
+}
+
+} // namespace vllm
+
+// Launch element-wise activation kernel.
+#define LAUNCH_ACTIVATION_KERNEL(KERNEL) \
+ int d = input.size(-1); \
+ int64_t num_tokens = input.numel() / d; \
+ dim3 grid(num_tokens); \
+ dim3 block(std::min(d, 1024)); \
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \
+ const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \
+ VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "activation_kernel", [&] { \
+    vllm::activation_kernel<scalar_t, KERNEL<scalar_t>> \
+        <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(), \
+                                     input.data_ptr<scalar_t>(), d); \
+ });
+
+namespace vllm {
+
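+// gelu_new and gelu_fast below are tanh-based GELU approximations; unlike
+// gelu_tanh_kernel above, some intermediate products are kept in the input
+// dtype T rather than float.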
+template <typename T>
+__device__ __forceinline__ T gelu_new_kernel(const T& x) {
+ const float x3 = (float)(x * x * x);
+ const T t = (T)tanhf((T)(0.79788456f * (float)(x + (T)(0.044715f * x3))));
+ return ((T)0.5) * x * (((T)1.0) + t);
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_fast_kernel(const T& x) {
+ const float f = (float)x;
+ const T t =
+ (T)tanhf(((T)(f * 0.79788456f)) * (((T)1.0) + (T)(0.044715f * f) * x));
+ return ((T)0.5) * x * (((T)1.0) + t);
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_quick_kernel(const T& x) {
+ // x * sigmoid(1.702 * x)
+ return (T)(((float)x) / (1.0f + expf(-1.702f * (float)x)));
+}
+
+} // namespace vllm
+
+void gelu_new(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., d]
+{
+ LAUNCH_ACTIVATION_KERNEL(vllm::gelu_new_kernel);
+}
+
+void gelu_fast(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., d]
+{
+ LAUNCH_ACTIVATION_KERNEL(vllm::gelu_fast_kernel);
+}
+
+void gelu_quick(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., d]
+{
+ LAUNCH_ACTIVATION_KERNEL(vllm::gelu_quick_kernel);
+}
diff --git a/activation/cuda_compat.h b/activation/cuda_compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..affa051c759512f2816c51ce25e35ee80f960f5e
--- /dev/null
+++ b/activation/cuda_compat.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#ifdef USE_ROCM
+  #include <hip/hip_runtime.h>
+#endif
+
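+// GFX9 (Vega/CDNA) wavefronts are 64 lanes wide; other targets use 32 lanes.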
+#if defined(USE_ROCM) && defined(__GFX9__)
+ #define WARP_SIZE 64
+#else
+ #define WARP_SIZE 32
+#endif
+
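+// __ldg loads through the read-only data cache on CUDA; ROCm has no direct
+// equivalent, so VLLM_LDG falls back to a plain dereference there.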
+#ifndef USE_ROCM
+ #define VLLM_LDG(arg) __ldg(arg)
+#else
+ #define VLLM_LDG(arg) *(arg)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_SHFL_XOR_SYNC(var, lane_mask) \
+ __shfl_xor_sync(uint32_t(-1), var, lane_mask)
+ #define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \
+ __shfl_xor_sync(uint32_t(-1), var, lane_mask, width)
+#else
+ #define VLLM_SHFL_XOR_SYNC(var, lane_mask) __shfl_xor(var, lane_mask)
+ #define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \
+ __shfl_xor(var, lane_mask, width)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane)
+#else
+ #define VLLM_SHFL_SYNC(var, src_lane) __shfl(var, src_lane)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_SHFL_DOWN_SYNC(var, lane_delta) \
+ __shfl_down_sync(uint32_t(-1), var, lane_delta)
+#else
+ #define VLLM_SHFL_DOWN_SYNC(var, lane_delta) __shfl_down(var, lane_delta)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
+ cudaFuncSetAttribute(FUNC, cudaFuncAttributeMaxDynamicSharedMemorySize, VAL)
+#else
+ #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
+ hipFuncSetAttribute(FUNC, hipFuncAttributeMaxDynamicSharedMemorySize, VAL)
+#endif
diff --git a/activation/dispatch_utils.h b/activation/dispatch_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7b75c48373f68e9025020eea507415fb9405e2e
--- /dev/null
+++ b/activation/dispatch_utils.h
@@ -0,0 +1,83 @@
+/*
+ * Adapted from
+ * https://github.com/pytorch/pytorch/blob/v2.0.1/aten/src/ATen/Dispatch.h
+ */
+#pragma once
+
+#include <torch/all.h>
+
+// Need a special dispatch case macro since we will nest the FP8 dispatch.
+// Instead of the usual 'scalar_t', this names the dispatched type 'fp8_t'.
+#define AT_DISPATCH_FP8_CASE(enum_type, ...) \
+ AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, fp8_t, __VA_ARGS__)
+
+#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
+
+#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
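+
+// Example usage (illustrative; `my_kernel` is hypothetical): the lambda body
+// is stamped out once per dtype with `scalar_t` bound to float, half, or
+// bfloat16, matching how activation_kernels.cu launches its kernels:
+//
+//   VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "my_kernel", [&] {
+//     my_kernel<scalar_t><<<grid, block, 0, stream>>>(
+//         out.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), d);
+//   });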
+
+// ROCm devices might use either fn or fnuz, so set up dispatch table for both.
+// A host-based check at runtime will create a preferred FP8 type for ROCm
+// such that the correct kernel is dispatched.
+#ifdef USE_ROCM
+ #define VLLM_DISPATCH_CASE_FP8_TYPES(...) \
+ AT_DISPATCH_FP8_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__) \
+ AT_DISPATCH_FP8_CASE(at::ScalarType::Float8_e4m3fnuz, __VA_ARGS__)
+
+ #define VLLM_DISPATCH_CASE_QUANT_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fnuz, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__)
+#else
+ #define VLLM_DISPATCH_CASE_FP8_TYPES(...) \
+ AT_DISPATCH_FP8_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)
+
+ #define VLLM_DISPATCH_CASE_QUANT_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__)
+#endif
+
+// When using this dispatch macro, the type is 'fp8_t' not 'scalar_t'.
+// See AT_DISPATCH_FP8_CASE above.
+#define VLLM_DISPATCH_FP8_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FP8_TYPES(__VA_ARGS__))
+
+#define VLLM_DISPATCH_QUANT_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_QUANT_TYPES(__VA_ARGS__))
+
+#define VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__)
+
+#define VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, \
+ VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(__VA_ARGS__))
+
+#define VLLM_DISPATCH_CASE_INTEGRAL_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__)
+
+#define VLLM_DISPATCH_CASE_INTEGRAL_AND_UNSIGNED_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::UInt16, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::UInt32, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::UInt64, __VA_ARGS__)
+
+#define VLLM_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))
+
+#define VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH( \
+ TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_AND_UNSIGNED_TYPES(__VA_ARGS__))
diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
deleted file mode 100644
index 0522bbfdbf8749d671faa8fe91b169dbd2cafa8d..0000000000000000000000000000000000000000
--- a/benchmarks/benchmark.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from kernels.benchmarks import SiluAndMulBenchmark
-
-
-class SiluWorkloads(SiluAndMulBenchmark):
- pass
diff --git a/build.toml b/build.toml
new file mode 100644
index 0000000000000000000000000000000000000000..0108f3f8b5b4e6a626b926dc4f91df50bf9a707e
--- /dev/null
+++ b/build.toml
@@ -0,0 +1,18 @@
+[general]
+name = "activation"
+universal = false
+
+[torch]
+src = [
+ "torch-ext/torch_binding.cpp",
+ "torch-ext/torch_binding.h",
+]
+
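+# CUDA kernel sources below; the Torch extension bindings they are built
+# against live under torch-ext/ (listed in [torch] above).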
+[kernel.activation]
+backend = "cuda"
+depends = ["torch"]
+src = [
+ "activation/activation_kernels.cu",
+ "activation/cuda_compat.h",
+ "activation/dispatch_utils.h",
+]
diff --git a/build/torch210-cu128-x86_64-windows/activation/__init__.py b/build/torch210-cu128-x86_64-windows/activation/__init__.py
deleted file mode 100644
index 379e245ef7515d04bfe4e680e2549fcf8790cc15..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd b/build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd
deleted file mode 100644
index ca0f54ee392befa7fa8a084bcc730e416a912f23..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d741006dd4fe8a85ed461fa3727d4d9f1b438083d2f1075ae54650bbdd2dc179
-size 2463744
diff --git a/build/torch210-cu128-x86_64-windows/activation/_ops.py b/build/torch210-cu128-x86_64-windows/activation/_ops.py
deleted file mode 100644
index 110a36d47839efd80d8d58e5cce311e50d684990..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_e1b4b08
-ops = torch.ops._activation_e1b4b08
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_e1b4b08::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cu128-x86_64-windows/activation/layers.py b/build/torch210-cu128-x86_64-windows/activation/layers.py
deleted file mode 100644
index 3dbfa19f89f2514b94e7b35d528a1e76ec4da7a3..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cu128-x86_64-windows/metadata.json b/build/torch210-cu128-x86_64-windows/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so b/build/torch210-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index 56710735c8054926018bdc657c86acf38357f2ea..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5aaa5c4a7e3fe6967d10f5bc25c899507a57f2fd941c9aff02bd9ded610d9542
-size 3126824
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_ops.py b/build/torch210-cxx11-cu126-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/layers.py b/build/torch210-cxx11-cu126-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/metadata.json b/build/torch210-cxx11-cu126-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so b/build/torch210-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index 2b154e7fcc3fa0cfaa0080d434d825559e56d1a1..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0e4d411f1093b8e4ec62529e9eb94550fc4ff8efc073e20eaedd7ea587885390
-size 4406608
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_ops.py b/build/torch210-cxx11-cu128-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/layers.py b/build/torch210-cxx11-cu128-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/metadata.json b/build/torch210-cxx11-cu128-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so b/build/torch210-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index d78e3b3eb68814951f17530588ae4abd731837eb..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f843d365b4eadc5e5b587becaa8ba581a0e0007adf7f1fba59442dd8acf4cd42
-size 4190152
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_ops.py b/build/torch210-cxx11-cu130-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py b/build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/layers.py b/build/torch210-cxx11-cu130-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/metadata.json b/build/torch210-cxx11-cu130-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-metal-aarch64-darwin/_activation_63b875f.abi3.so b/build/torch210-metal-aarch64-darwin/_activation_63b875f.abi3.so
deleted file mode 100644
index ba3b331d8ec8fbebaa26c880f2be4824ae26de15..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:40b08339eb57c5db3a676d69eafc6d1be7cf14e71e57a544289e8922ab7c118c
-size 221272
diff --git a/build/torch210-metal-aarch64-darwin/_ops.py b/build/torch210-metal-aarch64-darwin/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-metal-aarch64-darwin/activation/__init__.py b/build/torch210-metal-aarch64-darwin/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-metal-aarch64-darwin/layers.py b/build/torch210-metal-aarch64-darwin/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-metal-aarch64-darwin/metadata.json b/build/torch210-metal-aarch64-darwin/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/__init__.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py
similarity index 76%
rename from build/torch210-cxx11-cu126-x86_64-linux/__init__.py
rename to build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch210-cxx11-cu126-x86_64-linux/__init__.py
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..c1e52a91b4fa56b4ff39c854b33497b094135599
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b6ba32ecc6fc898df3b0cebee85e9afc6881749fe58142280f051ca3332d913
+size 2546864
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0110324ade19f59f705c61d5c21912c958e92e96
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb
+ops = torch.ops._activation_be5bedb
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py
similarity index 73%
rename from build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py
rename to build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch210-metal-aarch64-darwin/__init__.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py
similarity index 76%
rename from build/torch210-metal-aarch64-darwin/__init__.py
rename to build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch210-metal-aarch64-darwin/__init__.py
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..f45a6ffcf3f11e3b24919496e213a61acb258d2a
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331dcb3900d5e47a11d3577cdbac54f15a0b6e14910239293323c1d9e4eb9f49
+size 2616928
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0110324ade19f59f705c61d5c21912c958e92e96
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb
+ops = torch.ops._activation_be5bedb
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py
similarity index 73%
rename from build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py
rename to build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/__init__.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py
similarity index 76%
rename from build/torch210-cxx11-cu130-x86_64-linux/__init__.py
rename to build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch210-cxx11-cu130-x86_64-linux/__init__.py
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..12f5777398872e7a3d93ab936e42ade8eeec3213
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ce11492b9675a44afb3b896ed80e425f2a47e29481c4aad9c4a6ac59520f011
+size 2621472
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0110324ade19f59f705c61d5c21912c958e92e96
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb
+ops = torch.ops._activation_be5bedb
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py
similarity index 73%
rename from build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py
rename to build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/__init__.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py
similarity index 76%
rename from build/torch210-cxx11-cu128-x86_64-linux/__init__.py
rename to build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch210-cxx11-cu128-x86_64-linux/__init__.py
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..056de26936949cc36baf3caa9c4212d730da81f7
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:434bd1ae43b7cbdb10d86b82da9a237ec05ef9d9fb4fc15cdc9096d3d5ed3fa7
+size 2539352
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0110324ade19f59f705c61d5c21912c958e92e96
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb
+ops = torch.ops._activation_be5bedb
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py
similarity index 73%
rename from build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py
rename to build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py
@@ -0,0 +1,57 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.mul_and_silu(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
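
For context on how the thin functional wrappers added above are meant to be called, here is a minimal usage sketch. It assumes the built package is importable as `activation` (e.g. by putting the build directory matching your torch/CUDA combination on `PYTHONPATH`) and that a CUDA device is available; these names and the import path are assumptions, not part of the diff. The gated `*_and_mul` ops take a preallocated output whose last dimension is half of the input's, while the element-wise ops use a same-shaped output.

```python
import torch
import activation  # assumed import name; load the build matching your torch/CUDA combo

x = torch.randn(8, 2 * 1024, dtype=torch.float16, device="cuda")

# Gated ops: the preallocated output has half of x's last dimension.
out = torch.empty(8, 1024, dtype=x.dtype, device=x.device)
activation.silu_and_mul(out, x)   # silu(x[..., :1024]) * x[..., 1024:]

# Element-wise ops: the output has the same shape as the input.
y = torch.empty_like(x)
activation.gelu_quick(y, x)
```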
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..c31190f8f2be87dbb5d5a9c497c68cea2258fded
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53ddfb42466bfe01feb98348f5c2d6beefd589aeb3dec4c5c36609e11a6bde4c
+size 2605136
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0110324ade19f59f705c61d5c21912c958e92e96
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb
+ops = torch.ops._activation_be5bedb
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb::{op_name}"
\ No newline at end of file
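
As a quick illustration of the `_ops.py` helper added above (a sketch only; the import path is assumed and the hash suffix is specific to this build):

```python
from activation._ops import ops, add_op_namespace_prefix  # assumed import path

# The helper just prepends the extension's op namespace to a bare op name.
assert add_op_namespace_prefix("silu_and_mul") == "_activation_be5bedb::silu_and_mul"

# `ops` is torch.ops._activation_be5bedb, backed by the bundled .abi3.so,
# so ops.silu_and_mul(out, x) dispatches straight into the CUDA kernel.
```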
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b31181ffb80509a85d729a7f7ee86fc2cf014a
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class MulAndSilu(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.mul_and_silu(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ """An activation function for GeGLU.
+
+ The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+ return: (batch_size, seq_len, d) or (num_tokens, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ """An activation function for FATReLU.
+
+ The function computes x -> FATReLU(x[:d]) * x[d:] where
+ d = x.shape[-1] // 2.
+ This is used in openbmb/MiniCPM-S-1B-sft.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
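
The module wrappers in `layers.py` above allocate the output themselves, so they can be dropped into an `nn.Module` stack directly. A minimal sketch, assuming the package imports as `activation` and a CUDA device is present:

```python
import torch
from activation.layers import SiluAndMul, GeluAndMul  # assumed import path

x = torch.randn(4, 16, 2 * 512, dtype=torch.bfloat16, device="cuda")

y = SiluAndMul()(x)   # shape (4, 16, 512): silu(x[..., :512]) * x[..., 512:]
z = GeluAndMul()(x)   # same shape contract, GELU gate instead of SiLU

# can_torch_compile = True advertises that these layers are expected to
# work inside a torch.compile'd model.
```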
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py
@@ -0,0 +1,57 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.mul_and_silu(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..516f085e9ac787a2454fb78975dbaec25d2a6576
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac7174352dea307231f308c84ca32ee001cdbcefd976de860e76501c52aae591
+size 2613776
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0110324ade19f59f705c61d5c21912c958e92e96
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb
+ops = torch.ops._activation_be5bedb
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b31181ffb80509a85d729a7f7ee86fc2cf014a
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class MulAndSilu(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.mul_and_silu(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ """An activation function for GeGLU.
+
+ The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+ return: (batch_size, seq_len, d) or (num_tokens, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ """An activation function for FATReLU.
+
+ The function computes x -> FATReLU(x[:d]) * x[d:] where
+ d = x.shape[-1] // 2.
+ This is used in openbmb/MiniCPM-S-1B-sft.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py
+++ b/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
index bbf3ad846a76e365312ad965559a177976801396..5155b241dff8af4302230c3ae23518cb41efa185 100644
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
index 47765ef8e985a500bbb3e25990387a1f1f15c767..53b5508fec27cd0ece00b9b018694ba8da40c5ba 100644
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
index de62862184381714910c79ecdf8db3ca14f8a753..7752cad4c2a06746b1a68c3637c7baef00bb5ddc 100644
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..7d5463c37b3f4a3dec8b15df1a13168019fb26e3
--- /dev/null
+++ b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aee7c6869a9e318ad81cb84460c58ca0dac2dc85f4ed739b12fe57641f766332
+size 2546984
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so
deleted file mode 100755
index c6c9665f880b574481be0f6464ac7637e732df84..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ce06ec284ecd4ac5423d3822a60cd9eeb686d0054b38d66567de73e1137b0567
-size 2773632
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py
index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..745e06b31cb5b9718d3b85236f4cc257459070d7 100644
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py
+++ b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py
@@ -1,9 +1,9 @@
import torch
-from . import _activation_beeaae6
-ops = torch.ops._activation_beeaae6
+from . import _activation_be5bedb_dirty
+ops = torch.ops._activation_be5bedb_dirty
def add_op_namespace_prefix(op_name: str):
"""
Prefix op by namespace.
"""
- return f"_activation_beeaae6::{op_name}"
\ No newline at end of file
+ return f"_activation_be5bedb_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py
+++ b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
index 29e76b5c619af9b19c5650edcfd4f63c4725d35f..4b1fcc2dcde514cab92d358380824ca24616cd0b 100644
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
index f54053b63e8c2b7598967b6ca9739ecc85d6142a..665e89cb27b58c9caff761de28b7f6574cc2140e 100644
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
index 4d4a3c1172a3a2b4c954199c9762b3251d1c468c..4602c567b14a674c4a56d0e1cf8ef073fbc50beb 100644
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..94c38d99b9593469317fe894be35b069017b493e
--- /dev/null
+++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f075a6e0d47a2d382d16291b1c5d7d1d98111e2bbc5891b14b627e3c1778b699
+size 2621536
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so
deleted file mode 100755
index e9e9102689a8ddf42f881abedcd19e137f22d5e4..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a529bd105aca5081398d63329e829b6b159570424cd654d3a9f275ca9a720e82
-size 2852200
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py
index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..745e06b31cb5b9718d3b85236f4cc257459070d7 100644
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py
+++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py
@@ -1,9 +1,9 @@
import torch
-from . import _activation_beeaae6
-ops = torch.ops._activation_beeaae6
+from . import _activation_be5bedb_dirty
+ops = torch.ops._activation_be5bedb_dirty
def add_op_namespace_prefix(op_name: str):
"""
Prefix op by namespace.
"""
- return f"_activation_beeaae6::{op_name}"
\ No newline at end of file
+ return f"_activation_be5bedb_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py
+++ b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 86ca448fc1e6e7e119172b94f978b4a88aeda3e1..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index cbcd1da77da3529c73226d8ed8decfae8b9e5436..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 4df8f1606a76b66c06d538cd25db8e894d282405..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:34bdeb9ab72686850aef0a16b225b1b956162edb2cf46cba65c5e5b92ae267ae
-size 4207000
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
index 364976ff5017b183a827c0dfcda90becfbab0e7c..fe2206ed48c6e6b877620ac3db87af6ee49ddf07 100644
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
index 008e1b91db1ae539587989af1a212f9cd38a1ae2..6a940427d39d1a12a0806315d03b02bdfed65a3d 100644
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
index d00f03a5b9a4944132d13ac0986acc2c54e0ca3c..725246ac4c8d6c4374d8250ea67f759a871b1c38 100644
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..e5c17e44367c005d1c9f8d6b391be8d49079b2fc
--- /dev/null
+++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc2406aa2fa09dd7bc1fd5e87cdcdf55edfc7e0853fad5f977e2500e08fa8899
+size 3565432
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so
deleted file mode 100755
index 6d8adc0f26f3b10cbc1b441b74bc7f49c0ebdaae..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0f2cffcb6b5b9a49f03a2df46fc2ad36765676edecb468c233e78e1f5e21e206
-size 4127872
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py
index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..745e06b31cb5b9718d3b85236f4cc257459070d7 100644
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py
+++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py
@@ -1,9 +1,9 @@
import torch
-from . import _activation_beeaae6
-ops = torch.ops._activation_beeaae6
+from . import _activation_be5bedb_dirty
+ops = torch.ops._activation_be5bedb_dirty
def add_op_namespace_prefix(op_name: str):
"""
Prefix op by namespace.
"""
- return f"_activation_beeaae6::{op_name}"
\ No newline at end of file
+ return f"_activation_be5bedb_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py
+++ b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_activation_f8d6759.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/_activation_f8d6759.abi3.so
deleted file mode 100644
index cabcacd16040aad8134b2892ea8f1f9781a9a78b..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/_activation_f8d6759.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cf76431ff46ef5bc002ce8813eeed3ae9618a15094d98ef4b164f7a10a54f0bc
-size 3121056
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/_ops.py
deleted file mode 100644
index 140c6e96b3f93ce5b359648edac4dcb2913b8324..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_f8d6759
-ops = torch.ops._activation_f8d6759
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_f8d6759::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ b/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py
@@ -1,26 +1,57 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.mul_and_silu(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
similarity index 52%
rename from build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
rename to build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
index 390f279894bed7ce9346ede4953b9ffc9e1b1808..5263d294bc5bc421b98d31436c896bbc244d0771 100644
Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb753a567265e3db8b71afceb9a4442139a6aea7
Binary files /dev/null and b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6dd25df0a6c63b7315d2c0d9f4b3894ff1626fc8
Binary files /dev/null and b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..40900ff2070ff72eb665fdd5fd78f12d3a287cd9
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c88e87951b92ea55313ef79a34d284cb2a23713d3bdafee735caa4fc955b9dcb
+size 2610616
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..745e06b31cb5b9718d3b85236f4cc257459070d7
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb_dirty
+ops = torch.ops._activation_be5bedb_dirty
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b31181ffb80509a85d729a7f7ee86fc2cf014a
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class MulAndSilu(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.mul_and_silu(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ """An activation function for GeGLU.
+
+ The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+ return: (batch_size, seq_len, d) or (num_tokens, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ """An activation function for FATReLU.
+
+ The function computes x -> FATReLU(x[:d]) * x[d:] where
+ d = x.shape[-1] // 2.
+ This is used in openbmb/MiniCPM-S-1B-sft.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
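The module-style wrappers in the new `layers.py` allocate the output themselves. A hedged example for `SiluAndMul` — the import path, shapes, and tolerances are illustrative assumptions, and a CUDA device is required:

```python
import torch
from activation.layers import SiluAndMul  # assumed import path for the built package

act = SiluAndMul()
d = 256
x = torch.randn(8, 2 * d, device="cuda", dtype=torch.float16)  # gate and up packed along the last dim
y = act(x)  # silu(x[..., :d]) * x[..., d:]
assert y.shape == (8, d)

# Sanity check against plain PyTorch, up to numerical tolerance.
ref = torch.nn.functional.silu(x[..., :d]) * x[..., d:]
torch.testing.assert_close(y, ref, rtol=1e-2, atol=1e-2)
```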
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/layers.py b/build/torch28-cxx11-cu126-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/metadata.json b/build/torch28-cxx11-cu126-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_activation_f8d6759.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/_activation_f8d6759.abi3.so
deleted file mode 100644
index 564e3aa415dbcea5a132bfb14301b4900373fb58..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/_activation_f8d6759.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2dc0a42d5ebcae09615265a3635bb90d33c76d9179fcfcec17fb2fc5cb16b7f5
-size 4400792
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/_ops.py
deleted file mode 100644
index 140c6e96b3f93ce5b359648edac4dcb2913b8324..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_f8d6759
-ops = torch.ops._activation_f8d6759
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_f8d6759::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ b/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py
@@ -1,26 +1,57 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.silu_and_mul(out, x)
+    return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.mul_and_silu(out, x)
+    return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_and_mul(out, x)
+    return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_tanh_and_mul(out, x)
+    return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+    ops.fatrelu_and_mul(out, x, threshold)
+    return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_fast(out, x)
+    return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_new(out, x)
+    return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_quick(out, x)
+    return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
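The function-level API above mirrors the layers but lets the caller own the output buffer; note that `mul_and_silu` is defined in this `__init__.py` yet not listed in `__all__`, so it is only reachable by explicit attribute access. A usage sketch under the same assumptions as before (package importable as `activation`, CUDA device available):

```python
import torch
import activation  # assumed: the built package directory is on sys.path

d = 512
x = torch.randn(16, 2 * d, device="cuda", dtype=torch.bfloat16)
out = torch.empty(16, d, device="cuda", dtype=x.dtype)

result = activation.gelu_tanh_and_mul(out, x)  # fills `out` in place and returns it
assert result is out
```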
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
similarity index 52%
rename from build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
rename to build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
index 60dc82724c779cfa41bd9b8dcf39c036e2a50109..aedb284c8147a243ebfc99ec94000b62ae672077 100644
Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ae3e6d861e600db32e9024ae7db059642f35a3f
Binary files /dev/null and b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..51baab3cf4e592a2b8bed4cea0e9228a559b399d
Binary files /dev/null and b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..8b1ece63bdec0e63013816dae6bce9a87068f88e
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf784c7ab178c476fc6268efe820b1948c7c5b8f049c046c851b03067da5dd59
+size 3558616
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..745e06b31cb5b9718d3b85236f4cc257459070d7
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb_dirty
+ops = torch.ops._activation_be5bedb_dirty
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b31181ffb80509a85d729a7f7ee86fc2cf014a
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class MulAndSilu(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.mul_and_silu(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ """An activation function for GeGLU.
+
+ The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+ return: (batch_size, seq_len, d) or (num_tokens, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ """An activation function for FATReLU.
+
+ The function computes x -> FATReLU(x[:d]) * x[d:] where
+ d = x.shape[-1] // 2.
+ This is used in openbmb/MiniCPM-S-1B-sft.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
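One behavioural difference against the top-level `layers.py` deleted just below: the old forward methods made `x` contiguous before dispatching, whereas this new `activation/layers.py` passes it straight to the op. A caller holding a sliced or transposed view may want to restore that guard itself; a hypothetical wrapper sketch (class name and import path are illustrative):

```python
import torch
from activation.layers import GeluAndMul  # assumed import path

class ContiguousGeluAndMul(torch.nn.Module):
    """Hypothetical wrapper restoring the contiguity guard of the old layers.py."""

    def __init__(self) -> None:
        super().__init__()
        self.inner = GeluAndMul()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if not x.is_contiguous():
            x = x.contiguous()  # the kernel indexes the packed [..., 2 * d] layout directly
        return self.inner(x)
```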
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/layers.py b/build/torch28-cxx11-cu128-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/metadata.json b/build/torch28-cxx11-cu128-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py
+++ b/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py
@@ -30,20 +30,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +47,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
index e53c600baf751d47e3c75f0ea262aaa74cbaa2a0..8086d0297290c7f425c6040e160ad015337ce607 100644
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
index cfe526dc3c92a5c7b1a46084e58d4448fc74b15b..813a3ce81dbdc975cfd9ca5809f4d4c16e51d410 100644
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
index 878be1d140d35a1a92eb1b870cd3ccc0bbb65128..7bc821f8f3a247087893f6c0fdaa1592d0b84aee 100644
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..18c50172a17d5c0376245a740b75508692c01696
--- /dev/null
+++ b/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f6a161283a05672bb3ed442990cf8a5ce553625bb482cd31ce514e07cfcf0a
+size 3684504
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 485825618d1d0c2e93123fe5197999883b59b748..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3af83bae80c8641200010ba586e5a2cac271fa4fcd344e3532ea7d5094fd7c17
-size 4275744
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0f883290f823dd4b9ad1432d6644d25bcd3a4acf 100644
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py
+++ b/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py
@@ -1,9 +1,9 @@
import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
+from . import _activation_0c3eb4e_dirty
+ops = torch.ops._activation_0c3eb4e_dirty
def add_op_namespace_prefix(op_name: str):
"""
Prefix op by namespace.
"""
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
+ return f"_activation_0c3eb4e_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..45b31181ffb80509a85d729a7f7ee86fc2cf014a 100644
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py
+++ b/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py
@@ -23,57 +23,6 @@ class SiluAndMul(nn.Module):
ops.silu_and_mul(out, x)
return out
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
class MulAndSilu(nn.Module):
"""An activation function for SwiGLU.
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_activation_f8d6759.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/_activation_f8d6759.abi3.so
deleted file mode 100644
index ff0d1df159bdd317b6293331073a9aab2d4bd06c..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/_activation_f8d6759.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:48d0f9b82abd2e6d7154889814140b789e2d4452aac1296d921c9a2d4ab19e91
-size 4438672
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/_ops.py
deleted file mode 100644
index 140c6e96b3f93ce5b359648edac4dcb2913b8324..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_f8d6759
-ops = torch.ops._activation_f8d6759
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_f8d6759::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357 100644
--- a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py
+++ b/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py
@@ -1,26 +1,57 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.silu_and_mul(out, x)
+    return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.mul_and_silu(out, x)
+    return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_and_mul(out, x)
+    return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_tanh_and_mul(out, x)
+    return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+    ops.fatrelu_and_mul(out, x, threshold)
+    return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_fast(out, x)
+    return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_new(out, x)
+    return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+    ops.gelu_quick(out, x)
+    return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
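Every layer class in these builds sets `can_torch_compile = True`. Assuming the registered ops also ship the meta/fake support needed for tracing (not visible in this diff), they can sit inside a compiled module; a heavily hedged sketch:

```python
import torch
from activation.layers import QuickGELU  # assumed import path

mod = QuickGELU()
compiled = torch.compile(mod, fullgraph=False)  # fullgraph=False: a missing fake kernel just graph-breaks
y = compiled(torch.randn(2, 64, device="cuda", dtype=torch.float16))
```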
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
similarity index 52%
rename from build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
rename to build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
index 4d338b4d5170fa0130189f67e65562998f8f42be..01d30fced2b5392d0f6f4e6454cbe7d782a14daa 100644
Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75b0e5f83e10b053d8584f2607d9a9f3009d45dc
Binary files /dev/null and b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6ed035d206ae523160771021be45010f234687e
Binary files /dev/null and b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..33fb245664d9daef5b07440b390db2c19ef404f1
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e7cca3169eea8cbd67c61706d102548e49aadc936f8c2943efef3e7c4c0ee0d
+size 3592400
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..745e06b31cb5b9718d3b85236f4cc257459070d7
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_be5bedb_dirty
+ops = torch.ops._activation_be5bedb_dirty
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_be5bedb_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b31181ffb80509a85d729a7f7ee86fc2cf014a
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class MulAndSilu(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.mul_and_silu(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ """An activation function for GeGLU.
+
+ The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+ return: (batch_size, seq_len, d) or (num_tokens, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ """An activation function for FATReLU.
+
+ The function computes x -> FATReLU(x[:d]) * x[d:] where
+ d = x.shape[-1] // 2.
+ This is used in openbmb/MiniCPM-S-1B-sft.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
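`FatreluAndMul` is the one gated activation here that carries state: the threshold passed to `__init__` is forwarded to the op on every call, and FATReLU zeroes inputs that do not exceed it before gating. A short sketch with illustrative values:

```python
import torch
from activation.layers import FatreluAndMul  # assumed import path

act = FatreluAndMul(threshold=0.05)
d = 128
x = torch.randn(4, 2 * d, device="cuda", dtype=torch.float16)
y = act(x)  # FATReLU(x[..., :d]) * x[..., d:]
assert y.shape == (4, d)
```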
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/layers.py b/build/torch28-cxx11-cu129-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/metadata.json b/build/torch28-cxx11-cu129-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 48cda67561066b31e84ee5ecebcf0ef61e1ad322..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index 0082ca0b0e28577622a3e430602fabe010369318..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 41c75640cfdc7eeff3d57f4a6d403f7e7f10b8d8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f9c24e0eb75a09a9fc19e7096276d560226f198617291681c1a18e94002a629e
-size 2963480
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so b/build/torch29-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index 3bb70b2a77f8c7dd8f0125e896cfca9359138ff9..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c84b682f2dd4437835661f57f031d96865871f6f4ab25f5651d4f577acee1326
-size 3121128
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/layers.py b/build/torch29-cxx11-cu126-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/metadata.json b/build/torch29-cxx11-cu126-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index be7ffd679d4afbc36ea076dbc57e3162a60bd409..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index e50041e74611417f4e4037e568a9e041780a5e32..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index dc83e4989904884309410757826ec095ea0fdfe4..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:08ee3dfa4d481eaf44ac3c11a0843598c05950f779dba66abd468fecb7839b32
-size 4208760
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so b/build/torch29-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index e2e49fb0c5b136351663cc36a368639afff8a47c..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3afed8f50b04121c408e2b7fc8f4920015ba696b97e54be8e165cbbdd7039d6b
-size 4400864
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/layers.py b/build/torch29-cxx11-cu128-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/metadata.json b/build/torch29-cxx11-cu128-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 21696c8710d6b717d92ebd34545a9ac97cc44942..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 1856969205a3825653d4be5e4c267a9585ff6594..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index a8e0f48d49bb34730201d17d0795310d829e20cb..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 02267d619c1ad4c0bb7f84b243e5456c6bf7c798..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:73748b54059552f5983322f7dedc36ed349b38ad6fb9318301bb4965b1fe49aa
-size 4094968
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so b/build/torch29-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index fcd78de80c8ca89e556f4f7255428b9dbbfaaf2d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6bc5534a57cbe83a6ebc51a13bce94eab7c06ce5b4d41eb4e9db83f77ae64902
-size 4180240
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/layers.py b/build/torch29-cxx11-cu130-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/metadata.json b/build/torch29-cxx11-cu130-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-metal-aarch64-darwin/__init__.py b/build/torch29-metal-aarch64-darwin/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-metal-aarch64-darwin/_activation_63b875f.abi3.so b/build/torch29-metal-aarch64-darwin/_activation_63b875f.abi3.so
deleted file mode 100644
index 986b7947b413077b8d8acf3967a52ee556212268..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:884e887217a67931f5a59b3c39487acb754ff51282adb6b13b5db669e39cb12e
-size 220504
diff --git a/build/torch29-metal-aarch64-darwin/_ops.py b/build/torch29-metal-aarch64-darwin/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-metal-aarch64-darwin/activation/__init__.py b/build/torch29-metal-aarch64-darwin/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-metal-aarch64-darwin/layers.py b/build/torch29-metal-aarch64-darwin/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-metal-aarch64-darwin/metadata.json b/build/torch29-metal-aarch64-darwin/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 0000000000000000000000000000000000000000..4fa3e9a2e465daa852b90bc10e0c14b442b53b12
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,168 @@
+{
+ "nodes": {
+ "flake-compat": {
+ "locked": {
+ "lastModified": 1747046372,
+ "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-compat_2": {
+ "locked": {
+ "lastModified": 1733328505,
+ "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1731533236,
+ "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "flake-utils_2": {
+ "inputs": {
+ "systems": "systems_2"
+ },
+ "locked": {
+ "lastModified": 1731533236,
+ "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "hf-nix": {
+ "inputs": {
+ "flake-compat": "flake-compat_2",
+ "flake-utils": "flake-utils_2",
+ "nixpkgs": "nixpkgs"
+ },
+ "locked": {
+ "lastModified": 1747919133,
+ "narHash": "sha256-VvF1naQOvv7yulQ5/cDiaxkNxlh1Y84QMZnderv1szk=",
+ "owner": "huggingface",
+ "repo": "hf-nix",
+ "rev": "9c71e026d6c7c8588ef85a5f7c77f57d598e038c",
+ "type": "github"
+ },
+ "original": {
+ "owner": "huggingface",
+ "repo": "hf-nix",
+ "type": "github"
+ }
+ },
+ "kernel-builder": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "flake-utils": "flake-utils",
+ "hf-nix": "hf-nix",
+ "nixpkgs": [
+ "kernel-builder",
+ "hf-nix",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1748620233,
+ "narHash": "sha256-VULm9HgGXvo3pyfsPy3SOhoqgkuqbGSaSemvzNUbdIU=",
+ "owner": "huggingface",
+ "repo": "kernel-builder",
+ "rev": "da3340e5b3cbb6086600420f4814b033395788d1",
+ "type": "github"
+ },
+ "original": {
+ "owner": "huggingface",
+ "repo": "kernel-builder",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1747820358,
+ "narHash": "sha256-fTqsZsUX6M3yeEvgyQvXcbGmT2CaRVyVwsi8eK29Oj4=",
+ "owner": "danieldk",
+ "repo": "nixpkgs",
+ "rev": "d3c1681180717528068082103bf323147de6ab0b",
+ "type": "github"
+ },
+ "original": {
+ "owner": "danieldk",
+ "ref": "cudatoolkit-12.9-kernel-builder",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "kernel-builder": "kernel-builder"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ },
+ "systems_2": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 0000000000000000000000000000000000000000..54ac44c0698d43fb86a123430f5e9d2e9bcda8ea
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,17 @@
+{
+ description = "Flake for activation kernels";
+
+ inputs = {
+ kernel-builder.url = "github:huggingface/kernel-builder";
+ };
+
+ outputs =
+ {
+ self,
+ kernel-builder,
+ }:
+ kernel-builder.lib.genFlakeOutputs {
+ path = ./.;
+ rev = self.shortRev or self.dirtyShortRev or self.lastModifiedDate;
+ };
+}
diff --git a/media/benches.gif b/media/benches.gif
deleted file mode 100644
index 10667fe862b382eb21f5e81757be71a0d42f4b59..0000000000000000000000000000000000000000
--- a/media/benches.gif
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:47790b5e6098edf91310ec8de8f28f4781be3a2e954242fc5aa1d8f5560d217d
-size 2839393
diff --git a/media/benches.mp4 b/media/benches.mp4
deleted file mode 100644
index 7c6e8a999069242522d6cd5d98936dbe42f7077e..0000000000000000000000000000000000000000
--- a/media/benches.mp4
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8431422e8080a993672ca1440a1be6e9cde1737caec96a1a39dbc4df9fa41d29
-size 209572
diff --git a/media/benches_dark_animation.svg b/media/benches_dark_animation.svg
deleted file mode 100644
index 9f5d85ce8e4bda25a90d8a366c1cd18a7151269d..0000000000000000000000000000000000000000
--- a/media/benches_dark_animation.svg
+++ /dev/null
@@ -1,42 +0,0 @@
-
\ No newline at end of file
diff --git a/media/benches_dark_latency.svg b/media/benches_dark_latency.svg
deleted file mode 100644
index 54b01c10c89fade7e75cdec06c390c86b043b50a..0000000000000000000000000000000000000000
--- a/media/benches_dark_latency.svg
+++ /dev/null
@@ -1,2011 +0,0 @@
-
-
-
diff --git a/media/benches_dark_throughput.svg b/media/benches_dark_throughput.svg
deleted file mode 100644
index 46ca3b5f9d4af3646774948357e1897025bbdcf3..0000000000000000000000000000000000000000
--- a/media/benches_dark_throughput.svg
+++ /dev/null
@@ -1,2254 +0,0 @@
-
-
-
diff --git a/media/benches_latency.png b/media/benches_latency.png
deleted file mode 100644
index 23c0190661b9727df8205f133a0d39494becf109..0000000000000000000000000000000000000000
Binary files a/media/benches_latency.png and /dev/null differ
diff --git a/media/benches_light_animation.svg b/media/benches_light_animation.svg
deleted file mode 100644
index ebb9aa177a36416141a3f9a39b162ed8f5dd3bef..0000000000000000000000000000000000000000
--- a/media/benches_light_animation.svg
+++ /dev/null
@@ -1,42 +0,0 @@
-
\ No newline at end of file
diff --git a/media/benches_light_latency.svg b/media/benches_light_latency.svg
deleted file mode 100644
index 5aff08793189b1741313da91846bf1c468457bf6..0000000000000000000000000000000000000000
--- a/media/benches_light_latency.svg
+++ /dev/null
@@ -1,2011 +0,0 @@
-
-
-
diff --git a/media/benches_light_throughput.svg b/media/benches_light_throughput.svg
deleted file mode 100644
index 363d8b120646ed613e20aef3a377f53fa86d0ebc..0000000000000000000000000000000000000000
--- a/media/benches_light_throughput.svg
+++ /dev/null
@@ -1,2254 +0,0 @@
-
-
-
diff --git a/media/benches_throughput.png b/media/benches_throughput.png
deleted file mode 100644
index 5b2a4a7ca27e84fe600a6552dbecf234f3a9d0ee..0000000000000000000000000000000000000000
Binary files a/media/benches_throughput.png and /dev/null differ
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/kernels/__init__.py b/tests/kernels/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/kernels/allclose_default.py b/tests/kernels/allclose_default.py
new file mode 100644
index 0000000000000000000000000000000000000000..80eb1eeb9fb738d70efe28d64df98b2ff7223463
--- /dev/null
+++ b/tests/kernels/allclose_default.py
@@ -0,0 +1,14 @@
+import torch
+
+# Reference default values of atol and rtol are from
+# https://github.com/pytorch/pytorch/blob/6d96beb6bec24d73ee3f080bac54d2104068f675/test/test_transformers.py#L67
+default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float: 1e-5}
+default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float: 1.3e-6}
+
+
+def get_default_atol(output) -> float:
+ return default_atol[output.dtype]
+
+
+def get_default_rtol(output) -> float:
+ return default_rtol[output.dtype]
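For reference, a minimal sketch of how these per-dtype tolerances are meant to be consumed, assuming the test packages added in this patch are importable from the repository root; the `ref`/`out` tensors are illustrative placeholders.

```python
import torch

from tests.kernels.allclose_default import get_default_atol, get_default_rtol

# Tolerances are looked up by the dtype of the tensor being checked.
ref = torch.randn(16, dtype=torch.float16)
out = ref.clone()
torch.testing.assert_close(
    out, ref, atol=get_default_atol(out), rtol=get_default_rtol(out)
)
```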
diff --git a/tests/kernels/test_activation.py b/tests/kernels/test_activation.py
new file mode 100644
index 0000000000000000000000000000000000000000..740f6837597943625d18c4d714bda3a35958c747
--- /dev/null
+++ b/tests/kernels/test_activation.py
@@ -0,0 +1,180 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+import math
+import random
+from typing import Type
+
+import activation
+import pytest
+import torch
+import torch.nn.functional as F
+
+from .utils import opcheck
+from .allclose_default import get_default_atol, get_default_rtol
+
+DTYPES = [torch.half, torch.bfloat16, torch.float]
+NUM_TOKENS = [7, 83, 2048] # Arbitrary values for testing
+D = [512, 13824] # Arbitrary values for testing
+SEEDS = [0]
+CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
+
+
+def gelu_fast(x: torch.Tensor) -> torch.Tensor:
+ return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
+
+
+def gelu_new(x: torch.Tensor) -> torch.Tensor:
+ c = math.sqrt(2.0 / math.pi)
+ return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0))))
+
+
+def gelu_quick(x: torch.Tensor) -> torch.Tensor:
+ return x * torch.sigmoid(1.702 * x)
+
+
+def fatrelu_and_mul(x: torch.Tensor, threshold: float) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ x1 = x[..., :d]
+ x2 = x[..., d:]
+ x1 = F.threshold(x1, threshold, 0.0)
+ return x1 * x2
+
+
+def silu_and_mul(x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ return F.silu(x[..., :d]) * x[..., d:]
+
+
+def mul_and_silu(x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ return x[..., :d] * F.silu(x[..., d:])
+
+
+def gelu_and_mul(x: torch.Tensor, approximate: str) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ return F.gelu(x[..., :d], approximate=approximate) * x[..., d:]
+
+
+@pytest.mark.parametrize(
+ "activation_name", ["silu_and_mul", "mul_and_silu", "gelu", "gelu_tanh", "fatrelu"]
+)
+@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
+@pytest.mark.parametrize("d", D)
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("seed", SEEDS)
+@pytest.mark.parametrize("device", CUDA_DEVICES)
+@torch.inference_mode()
+def test_act_and_mul(
+ activation_name: str,
+ num_tokens: int,
+ d: int,
+ dtype: torch.dtype,
+ seed: int,
+ device: str,
+) -> None:
+ random.seed(seed)
+ torch.manual_seed(seed)
+ torch.set_default_device(device)
+ x = torch.randn(num_tokens, 2 * d, dtype=dtype)
+ if activation_name == "silu_and_mul":
+ torch_fn = silu_and_mul
+ fn = activation.silu_and_mul
+ op = activation.ops.silu_and_mul
+ layer = activation.layers.SiluAndMul()
+ elif activation_name == "mul_and_silu":
+ torch_fn = mul_and_silu
+ fn = activation.mul_and_silu
+ op = activation.ops.mul_and_silu
+ layer = activation.layers.MulAndSilu()
+ elif activation_name == "gelu":
+ torch_fn = lambda x: gelu_and_mul(x, "none")
+ fn = activation.gelu_and_mul
+ op = activation.ops.gelu_and_mul
+ layer = activation.layers.GeluAndMul()
+ elif activation_name == "gelu_tanh":
+ torch_fn = lambda x: gelu_and_mul(x, "tanh")
+ fn = activation.gelu_tanh_and_mul
+ op = activation.ops.gelu_tanh_and_mul
+ layer = activation.layers.GeluTanhAndMul()
+ elif activation_name == "fatrelu":
+ threshold = random.uniform(0, 1)
+ torch_fn = lambda x: fatrelu_and_mul(x, threshold)
+ fn = lambda out, x: activation.fatrelu_and_mul(out, x, threshold)
+ op = activation.ops.fatrelu_and_mul
+ layer = activation.layers.FatreluAndMul(threshold)
+
+ out_shape = x.shape[:-1] + (x.shape[-1] // 2,)
+ out = torch.empty(out_shape, dtype=x.dtype, device=x.device)
+ out = fn(out, x)
+ mod_out = layer(x)
+ ref_out = torch_fn(x)
+
+ # The SiLU, GELU and FatReLU implementations are equivalent to the native
+ # PyTorch implementations, so we can do exact comparison.
+ torch.testing.assert_close(out, ref_out, atol=0.0, rtol=0.0)
+ torch.testing.assert_close(mod_out, ref_out, atol=0.0, rtol=0.0)
+
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ if activation_name == "fatrelu":
+ opcheck(op, (out, x, threshold))
+ else:
+ opcheck(op, (out, x))
+
+
+@pytest.mark.parametrize(
+ "activation_fns",
+ [
+ (
+ gelu_fast,
+ activation.gelu_fast,
+ activation.ops.gelu_fast,
+ activation.layers.FastGELU,
+ ),
+ (
+ gelu_new,
+ activation.gelu_new,
+ activation.ops.gelu_new,
+ activation.layers.NewGELU,
+ ),
+ (
+ gelu_quick,
+ activation.gelu_quick,
+ activation.ops.gelu_quick,
+ activation.layers.QuickGELU,
+ ),
+ ],
+)
+@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
+@pytest.mark.parametrize("d", D)
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("seed", SEEDS)
+@pytest.mark.parametrize("device", CUDA_DEVICES)
+@torch.inference_mode()
+def test_activation(
+ activation_fns,
+ num_tokens: int,
+ d: int,
+ dtype: torch.dtype,
+ seed: int,
+ device: str,
+) -> None:
+ torch.manual_seed(seed)
+ torch.set_default_device(device)
+ x = torch.randn(num_tokens, d, dtype=dtype)
+ torch_fn, fn, op, cls = activation_fns
+ layer = cls()
+ out = fn(torch.empty_like(x), x)
+ layer_out = layer(x)
+ ref_out = torch_fn(x)
+ torch.testing.assert_close(
+ out, ref_out, atol=get_default_atol(out), rtol=get_default_rtol(out)
+ )
+ torch.testing.assert_close(
+ out, layer_out, atol=get_default_atol(out), rtol=get_default_rtol(out)
+ )
+
+ out = torch.empty_like(x)
+ opcheck(op, (out, x))
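Outside of pytest, the exact-match check that `test_act_and_mul` performs for the gated SiLU kernel can be reproduced in a few lines. This is a sketch only, assuming a CUDA device and an importable build of the `activation` extension.

```python
import torch
import torch.nn.functional as F

import activation  # the compiled extension exercised by the tests above

x = torch.randn(8, 2 * 512, dtype=torch.float16, device="cuda")
d = x.shape[-1] // 2

# The kernel writes into a caller-allocated output buffer.
out = torch.empty(x.shape[:-1] + (d,), dtype=x.dtype, device=x.device)
activation.silu_and_mul(out, x)

# Pure-PyTorch reference, mirroring silu_and_mul() defined in the test file.
ref = F.silu(x[..., :d]) * x[..., d:]
torch.testing.assert_close(out, ref, atol=0.0, rtol=0.0)
```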
diff --git a/tests/kernels/utils.py b/tests/kernels/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d24c5babfb9690031844f262212d80274fd478c6
--- /dev/null
+++ b/tests/kernels/utils.py
@@ -0,0 +1,73 @@
+"""Kernel test utils"""
+
+import itertools
+import random
+import unittest.mock  # load the mock submodule used by unittest.mock.patch below
+from numbers import Number
+from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
+
+import pytest
+import torch
+from torch._prims_common import TensorLikeType
+
+# For now, disable "test_aot_dispatch_dynamic" since there are some
+# bugs related to this test in PyTorch 2.4.
+DEFAULT_OPCHECK_TEST_UTILS: Tuple[str, ...] = (
+ "test_schema",
+ "test_autograd_registration",
+ "test_faketensor",
+)
+
+ALL_OPCHECK_TEST_UTILS: Tuple[str, ...] = (
+ "test_schema",
+ "test_autograd_registration",
+ "test_faketensor",
+ "test_aot_dispatch_dynamic",
+)
+
+
+# Copied/modified from torch._refs.__init__.py
+def fp8_allclose(
+ a: TensorLikeType,
+ b: TensorLikeType,
+ rtol: float = 1e-05,
+ atol: float = 1e-08,
+ equal_nan: bool = False,
+) -> bool:
+    """
+    Reference implementation of torch.allclose that upcasts to float64 first,
+    so fp8 inputs can be compared without relying on fp8 support in isclose.
+    """
+ torch._refs._check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol)
+
+ return bool(
+ torch.all(
+ torch.isclose(
+ a.double(), b.double(), rtol=rtol, atol=atol, equal_nan=equal_nan
+ )
+ ).item()
+ )
+
+
+# A special version of op check that patches torch.allclose with an
+# fp8-capable implementation. Pass test_utils=DEFAULT_OPCHECK_TEST_UTILS to
+# skip the slower "test_aot_dispatch_dynamic" check.
+def opcheck(
+ op: Union[
+ torch._ops.OpOverload,
+ torch._ops.OpOverloadPacket,
+ torch._library.custom_ops.CustomOpDef,
+ ],
+ args: Tuple[Any, ...],
+ kwargs: Optional[Dict[str, Any]] = None,
+ *,
+ test_utils: Union[str, Sequence[str]] = ALL_OPCHECK_TEST_UTILS,
+ raise_exception: bool = True,
+ cond: bool = True
+) -> Dict[str, str]:
+ with unittest.mock.patch("torch.allclose", new=fp8_allclose):
+ return (
+ torch.library.opcheck(
+ op, args, kwargs, test_utils=test_utils, raise_exception=raise_exception
+ )
+ if cond
+ else {}
+ )
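As a usage sketch (assuming the compiled `activation` extension from this repo is importable, a CUDA device is available, and the import path of this utils module is as shown; all three are assumptions for illustration), a test can exercise one of the registered ops with the faster subset of checks:

import torch
import activation
from tests.kernels.utils import opcheck, DEFAULT_OPCHECK_TEST_UTILS  # illustrative path

x = torch.randn(16, 2 * 512, device="cuda", dtype=torch.float16)
out = torch.empty(16, 512, device="cuda", dtype=torch.float16)

# Restrict to the faster checks; "test_aot_dispatch_dynamic" is skipped.
# torch.allclose is patched with fp8_allclose for the duration of the check.
opcheck(activation.ops.silu_and_mul, (out, x), test_utils=DEFAULT_OPCHECK_TEST_UTILS)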
diff --git a/torch-ext/activation/__init__.py b/torch-ext/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c4f207354093c6ef83eb5d7f3a5a3b22b95d357
--- /dev/null
+++ b/torch-ext/activation/__init__.py
@@ -0,0 +1,57 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.mul_and_silu(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+    "silu_and_mul",
+    "mul_and_silu",
+    "gelu_and_mul",
+    "gelu_tanh_and_mul",
+    "fatrelu_and_mul",
+    "gelu_fast",
+    "gelu_new",
+    "gelu_quick",
+    "layers",
+]
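A minimal usage sketch of the functional API (assuming the extension has been built and a CUDA device is available, since the underlying ops are registered for CUDA only):

import torch
import activation

x = torch.randn(8, 2 * 128, device="cuda", dtype=torch.float16)
out = torch.empty(8, 128, device="cuda", dtype=torch.float16)

# Writes silu(x[..., :128]) * x[..., 128:] into `out` and also returns it.
out = activation.silu_and_mul(out, x)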
diff --git a/torch-ext/activation/layers.py b/torch-ext/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b31181ffb80509a85d729a7f7ee86fc2cf014a
--- /dev/null
+++ b/torch-ext/activation/layers.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class MulAndSilu(nn.Module):
+ """An activation function for SwiGLU.
+
+ The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.mul_and_silu(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ """An activation function for GeGLU.
+
+ The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+ Shapes:
+ x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
+ return: (batch_size, seq_len, d) or (num_tokens, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
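+    """An activation function for GeGLU with `tanh` approximation.
+
+    The function computes x -> GELU_tanh(x[:d]) * x[d:] where d = x.shape[-1] // 2.
+
+    Shapes:
+        x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+        return: (num_tokens, d) or (batch_size, seq_len, d)
+    """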
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ """An activation function for FATReLU.
+
+ The function computes x -> FATReLU(x[:d]) * x[d:] where
+ d = x.shape[-1] // 2.
+ This is used in openbmb/MiniCPM-S-1B-sft.
+
+ Shapes:
+ x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
+ return: (num_tokens, d) or (batch_size, seq_len, d)
+ """
+
+ can_torch_compile: bool = True
+
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ can_torch_compile: bool = True
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
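For context, a sketch of how one of these layers typically slots into a gated MLP block; the `SwiGLUMLP` wrapper below is hypothetical and assumes only the `SiluAndMul` layer above plus a CUDA device:

import torch
import torch.nn as nn

from activation.layers import SiluAndMul


class SwiGLUMLP(nn.Module):
    """Hypothetical gated MLP: fused gate/up projection followed by SiluAndMul."""

    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        self.gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
        self.act = SiluAndMul()
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # gate_up_proj yields [..., 2 * intermediate_size]; SiluAndMul halves the last dim.
        return self.down_proj(self.act(self.gate_up_proj(x)))


mlp = SwiGLUMLP(1024, 4096).to(device="cuda", dtype=torch.float16)
y = mlp(torch.randn(2, 16, 1024, device="cuda", dtype=torch.float16))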
diff --git a/torch-ext/torch_binding.cpp b/torch-ext/torch_binding.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..321568290bf3b5d9d0eaa2dc9a98ae8111c34859
--- /dev/null
+++ b/torch-ext/torch_binding.cpp
@@ -0,0 +1,40 @@
+#include <torch/library.h>
+
+#include "registration.h"
+#include "torch_binding.h"
+
+TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
+ // Activation ops
+ // Activation function used in SwiGLU.
+ ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()");
+ ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul);
+
+ ops.def("mul_and_silu(Tensor! out, Tensor input) -> ()");
+ ops.impl("mul_and_silu", torch::kCUDA, &mul_and_silu);
+
+ // Activation function used in GeGLU with `none` approximation.
+ ops.def("gelu_and_mul(Tensor! out, Tensor input) -> ()");
+ ops.impl("gelu_and_mul", torch::kCUDA, &gelu_and_mul);
+
+ // Activation function used in GeGLU with `tanh` approximation.
+ ops.def("gelu_tanh_and_mul(Tensor! out, Tensor input) -> ()");
+ ops.impl("gelu_tanh_and_mul", torch::kCUDA, &gelu_tanh_and_mul);
+
+ // FATReLU implementation.
+ ops.def("fatrelu_and_mul(Tensor! out, Tensor input, float threshold) -> ()");
+ ops.impl("fatrelu_and_mul", torch::kCUDA, &fatrelu_and_mul);
+
+ // GELU implementation used in GPT-2.
+ ops.def("gelu_new(Tensor! out, Tensor input) -> ()");
+ ops.impl("gelu_new", torch::kCUDA, &gelu_new);
+
+ // Approximate GELU implementation.
+ ops.def("gelu_fast(Tensor! out, Tensor input) -> ()");
+ ops.impl("gelu_fast", torch::kCUDA, &gelu_fast);
+
+ // Quick GELU implementation.
+ ops.def("gelu_quick(Tensor! out, Tensor input) -> ()");
+ ops.impl("gelu_quick", torch::kCUDA, &gelu_quick);
+}
+
+REGISTER_EXTENSION(TORCH_EXTENSION_NAME)
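The `Tensor! out` annotation in these schemas marks `out` as mutated in place, which is what lets the Python wrappers earlier in this patch fill a caller-provided buffer and simply return it. A sketch of invoking a registered op directly (the concrete `torch.ops` namespace is generated at build time, so the `activation.ops` alias is used; assumes a CUDA device):

import torch
import activation

x = torch.randn(4, 2 * 64, device="cuda", dtype=torch.float16)
out = torch.empty(4, 64, device="cuda", dtype=torch.float16)

# The schema's `float threshold` arrives at the C++ binding as a double.
activation.ops.fatrelu_and_mul(out, x, 0.5)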
diff --git a/torch-ext/torch_binding.h b/torch-ext/torch_binding.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d7e28ae62da83fb2c18131f28a2e6d37878b8f5
--- /dev/null
+++ b/torch-ext/torch_binding.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include <torch/torch.h>
+
+void silu_and_mul(torch::Tensor &out, torch::Tensor &input);
+
+void mul_and_silu(torch::Tensor& out, torch::Tensor& input);
+
+void gelu_and_mul(torch::Tensor &out, torch::Tensor &input);
+
+void gelu_tanh_and_mul(torch::Tensor &out, torch::Tensor &input);
+
+void fatrelu_and_mul(torch::Tensor &out, torch::Tensor &input,
+ double threshold);
+
+void gelu_new(torch::Tensor &out, torch::Tensor &input);
+
+void gelu_fast(torch::Tensor &out, torch::Tensor &input);
+
+void gelu_quick(torch::Tensor &out, torch::Tensor &input);