diff --git a/README.md b/README.md
index 6c2e823d2c11f8bd341796712252d0beceb93bfd..4d0561e803c34dc86855ced8fdaa54ca836ef8e2 100644
--- a/README.md
+++ b/README.md
@@ -2,9 +2,6 @@
 tags:
 - kernel
 ---
-
-![Status](https://hubwebhook.dholtz.com/shield?repo=kernels-community/activation)
-
 ## Activation
 
 Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
\ No newline at end of file
diff --git a/activation/activation_kernels.cu b/activation/activation_kernels.cu
index 55e6596797010403c8f2d8cc4d2ebbcae1c75d7e..839dc36ba4e29d34144b42b4f1ed40be03afd5eb 100644
--- a/activation/activation_kernels.cu
+++ b/activation/activation_kernels.cu
@@ -9,16 +9,8 @@
 
 namespace vllm {
 
-template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&),
-          bool act_first>
-__device__ __forceinline__ scalar_t compute(const scalar_t& x,
-                                            const scalar_t& y) {
-  return act_first ? ACT_FN(x) * y : x * ACT_FN(y);
-}
 // Activation and gating kernel template.
-
-template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&),
-          bool act_first>
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)>
 __global__ void act_and_mul_kernel(
     scalar_t* __restrict__ out,          // [..., d]
     const scalar_t* __restrict__ input,  // [..., 2, d]
@@ -27,7 +19,7 @@ __global__ void act_and_mul_kernel(
   for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
     const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
     const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
-    out[token_idx * d + idx] = compute<scalar_t, ACT_FN, act_first>(x, y);
+    out[token_idx * d + idx] = ACT_FN(x) * y;
   }
 }
 
@@ -63,21 +55,16 @@ __device__ __forceinline__ T gelu_tanh_kernel(const T& x) {
 }  // namespace vllm
 
 // Launch activation and gating kernel.
-// Use ACT_FIRST (bool) indicating whether to apply the activation function
-// first.
-#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL, ACT_FIRST)                 \
+#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL)                            \
   int d = input.size(-1) / 2;                                            \
   int64_t num_tokens = input.numel() / input.size(-1);                   \
   dim3 grid(num_tokens);                                                 \
   dim3 block(std::min(d, 1024));                                         \
-  if (num_tokens == 0) {                                                 \
-    return;                                                              \
-  }                                                                      \
   const at::cuda::OptionalCUDAGuard device_guard(device_of(input));      \
   const cudaStream_t stream = at::cuda::getCurrentCUDAStream();          \
   VLLM_DISPATCH_FLOATING_TYPES(                                          \
       input.scalar_type(), "act_and_mul_kernel", [&] {                   \
-        vllm::act_and_mul_kernel<scalar_t, KERNEL<scalar_t>, ACT_FIRST>  \
+        vllm::act_and_mul_kernel<scalar_t, KERNEL<scalar_t>>             \
            <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),        \
                                         input.data_ptr<scalar_t>(), d);  \
       });
@@ -85,27 +72,19 @@ __device__ __forceinline__ T gelu_tanh_kernel(const T& x) {
 void silu_and_mul(torch::Tensor& out,    // [..., d]
                   torch::Tensor& input)  // [..., 2 * d]
 {
-  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel, true);
-}
-
-void mul_and_silu(torch::Tensor& out,    // [..., d]
-                  torch::Tensor& input)  // [..., 2 * d]
-{
-  // The difference between mul_and_silu and silu_and_mul is that mul_and_silu
-  // applies the silu to the latter half of the input.
-  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel, false);
+  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel);
 }
 
 void gelu_and_mul(torch::Tensor& out,    // [..., d]
                   torch::Tensor& input)  // [..., 2 * d]
 {
-  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel, true);
+  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel);
 }
 
 void gelu_tanh_and_mul(torch::Tensor& out,    // [..., d]
                        torch::Tensor& input)  // [..., 2 * d]
 {
-  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel, true);
+  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel);
 }
 
 namespace vllm {
diff --git a/activation/cuda_compat.h b/activation/cuda_compat.h
index affa051c759512f2816c51ce25e35ee80f960f5e..82e55613d915a4a9dda5d73eea3601ad0ef949c5 100644
--- a/activation/cuda_compat.h
+++ b/activation/cuda_compat.h
@@ -4,10 +4,10 @@
   #include <hip/hip_runtime.h>
 #endif
 
-#if defined(USE_ROCM) && defined(__GFX9__)
-  #define WARP_SIZE 64
-#else
+#ifndef USE_ROCM
   #define WARP_SIZE 32
+#else
+  #define WARP_SIZE warpSize
 #endif
 
 #ifndef USE_ROCM
diff --git a/activation/dispatch_utils.h b/activation/dispatch_utils.h
index f7b75c48373f68e9025020eea507415fb9405e2e..a634e1c3d488676cf9beb11bc3029915e484b163 100644
--- a/activation/dispatch_utils.h
+++ b/activation/dispatch_utils.h
@@ -6,11 +6,6 @@
 
 #include <torch/all.h>
 
-// Need a special dispatch case macro since we will nest the FP8 dispatch.
-// Instead of the usual 'scalar_t', this names the dispatched type 'fp8_t'.
-#define AT_DISPATCH_FP8_CASE(enum_type, ...) \
-  AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, fp8_t, __VA_ARGS__)
-
 #define VLLM_DISPATCH_CASE_FLOATING_TYPES(...)         \
   AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
   AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)  \
@@ -19,35 +14,6 @@
 #define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
   AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
 
-// ROCm devices might use either fn or fnuz, so set up dispatch table for both.
-// A host-based check at runtime will create a preferred FP8 type for ROCm
-// such that the correct kernel is dispatched.
-#ifdef USE_ROCM
-  #define VLLM_DISPATCH_CASE_FP8_TYPES(...)                            \
-    AT_DISPATCH_FP8_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)   \
-    AT_DISPATCH_FP8_CASE(at::ScalarType::Float8_e4m3fnuz, __VA_ARGS__)
-
-  #define VLLM_DISPATCH_CASE_QUANT_TYPES(...)                      \
-    AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)   \
-    AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fnuz, __VA_ARGS__) \
-    AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__)
-#else
-  #define VLLM_DISPATCH_CASE_FP8_TYPES(...) \
-    AT_DISPATCH_FP8_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)
-
-  #define VLLM_DISPATCH_CASE_QUANT_TYPES(...)                    \
-    AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__) \
-    AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__)
-#endif
-
-// When using this dispatch macro, the type is 'fp8_t' not 'scalar_t'.
-// See AT_DISPATCH_FP8_CASE above.
-#define VLLM_DISPATCH_FP8_TYPES(TYPE, NAME, ...) \
-  AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FP8_TYPES(__VA_ARGS__))
-
-#define VLLM_DISPATCH_QUANT_TYPES(TYPE, NAME, ...) \
-  AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_QUANT_TYPES(__VA_ARGS__))
-
 #define VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(...)   \
   AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)    \
   AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)     \
@@ -65,19 +31,5 @@
   AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__)  \
   AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__)
 
-#define VLLM_DISPATCH_CASE_INTEGRAL_AND_UNSIGNED_TYPES(...)
\ - AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::UInt16, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::UInt32, __VA_ARGS__) \ - AT_DISPATCH_CASE(at::ScalarType::UInt64, __VA_ARGS__) - #define VLLM_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__)) - -#define VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES(TYPE, NAME, ...) \ - AT_DISPATCH_SWITCH( \ - TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_AND_UNSIGNED_TYPES(__VA_ARGS__)) diff --git a/build.toml b/build.toml index 0108f3f8b5b4e6a626b926dc4f91df50bf9a707e..7da9d632a70edb0699eb77f097b9b1a5ae573c48 100644 --- a/build.toml +++ b/build.toml @@ -1,18 +1,17 @@ [general] name = "activation" -universal = false [torch] src = [ - "torch-ext/torch_binding.cpp", - "torch-ext/torch_binding.h", + "torch-ext/torch_binding.cpp", + "torch-ext/torch_binding.h" ] [kernel.activation] -backend = "cuda" -depends = ["torch"] +cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ] src = [ - "activation/activation_kernels.cu", - "activation/cuda_compat.h", - "activation/dispatch_utils.h", + "activation/activation_kernels.cu", + "activation/cuda_compat.h", + "activation/dispatch_utils.h", ] +depends = [ "torch" ] diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py similarity index 71% rename from build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py rename to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py +++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..1442eb73f43c4768d2b8f9b916943743256af106 --- /dev/null +++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d50cdabfbed1df74e921ac34ff00bca0555977b14ef8082ddae7b1f30985a494 +size 2370160 diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0ee3c40cd38699ce7744be53b0531fe4fa505996 --- /dev/null +++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_o63kkyjirmkf4 +ops = torch.ops._activation_o63kkyjirmkf4 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_o63kkyjirmkf4::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py similarity index 71% rename from build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py rename to build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..e6b6260c4378717d4369cd2577a00350960e317c --- /dev/null +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bd0709ef09c8f0c18d1dc4a36c8096c59459bece61f5f5dbea95d1e73f54d44 +size 2393264 diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5eee20b4c9a70e4f59e7198a0a28c5dfa06244 --- /dev/null +++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_vrl36m2ejer54 +ops = torch.ops._activation_vrl36m2ejer54 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_vrl36m2ejer54::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py similarity index 71% rename from build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py rename to build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..2bfbc3667e483ce8441b310889b63614133f6334 --- /dev/null +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8353447f64e7d2df1a6a341d9c53bced53abef267f079923ae774170d0d57c53 +size 2427936 diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..05fa036d769ea858865c92b972c68a158899eca1 --- /dev/null +++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_va3moa75vw7c2 +ops = torch.ops._activation_va3moa75vw7c2 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_va3moa75vw7c2::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py similarity index 71% rename from build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py rename to build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..fc35d5f1bdad2b294897ace78c80c2913e8bd793 --- /dev/null +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df184a6315118d787a1bd6b435cb45f1ca7828445a1f1c0e55c57645cfbba43a +size 2362600 diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..11bf3a1e13ad4f53348d55ce0b8e727307534d67 --- /dev/null +++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_qr3gs3eckeig4 +ops = torch.ops._activation_qr3gs3eckeig4 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_qr3gs3eckeig4::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71e0b01a03416c783f2bd67fc30d7ac86aee8764 --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py @@ -0,0 +1,47 @@ +import torch + +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. 
+ try: + import _activation + + ops = torch.ops._activition + except ImportError: + raise e + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..e24099a45ad0e0ab481769c24c749947c164b07c --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccb13cfc2e45cf483e8b9f77f1760f28b48bcf185508d51b32d45bc759c4e8bb +size 2385440 diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe888c4ec71536f08f7de2d823b75d9bbac0173 --- /dev/null +++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_p7gbzt25w3zg2 +ops = torch.ops._activation_p7gbzt25w3zg2 + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_p7gbzt25w3zg2::{op_name}" \ No newline at end of file diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71e0b01a03416c783f2bd67fc30d7ac86aee8764 --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py @@ -0,0 +1,47 @@ +import torch + +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. 
+ try: + import _activation + + ops = torch.ops._activition + except ImportError: + raise e + + +def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.silu_and_mul(out, x) + return out + + +def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_and_mul(out, x) + return out + + +def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_tanh_and_mul(out, x) + return out + + +def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: + ops.fatrelu_and_mul(out, x, threshold) + return out + + +def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_fast(out, x) + return out + + +def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_new(out, x) + return out + + +def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: + ops.gelu_quick(out, x) + return out diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..dc1fd3cc2e28051b8a263a5baf287b794d741f41 --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f8048853e8cb06e8574a9a9497800d2be438f7989d79f44dcf2e0ced38a75a9 +size 2420192 diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0c022ad4f749236320938c6ae13079a445e860ba --- /dev/null +++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py @@ -0,0 +1,9 @@ +import torch +from . import _activation_jg7yaigtn7wco +ops = torch.ops._activation_jg7yaigtn7wco + +def add_op_namespace_prefix(op_name: str): + """ + Prefix op by namespace. + """ + return f"_activation_jg7yaigtn7wco::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so deleted file mode 100755 index c1e52a91b4fa56b4ff39c854b33497b094135599..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9b6ba32ecc6fc898df3b0cebee85e9afc6881749fe58142280f051ca3332d913 -size 2546864 diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..951cd024fc65dcaf31f0ce123d7c56538db90255 --- /dev/null +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cde5439e78ba0e1aaa1937d798b214b46d38cbab8e4384b93a22239fed1a4dd4 +size 2370264 diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py index 0110324ade19f59f705c61d5c21912c958e92e96..f4538ecbd1302013d2026d413f07fefa1e3ed1ba 100644 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_be5bedb -ops = torch.ops._activation_be5bedb +from . import _activation_ncisyrun7guwk +ops = torch.ops._activation_ncisyrun7guwk def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_be5bedb::{op_name}" \ No newline at end of file + return f"_activation_ncisyrun7guwk::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so deleted file mode 100755 index f45a6ffcf3f11e3b24919496e213a61acb258d2a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:331dcb3900d5e47a11d3577cdbac54f15a0b6e14910239293323c1d9e4eb9f49 -size 2616928 diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..7cc13c8b18b95f7a24b708e41f20daa15cf8e5ba --- /dev/null +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6bd20d411c51fc8729b15cab6a60c5c9185222474aa035489e1bff299d76682 +size 2428040 diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py index 0110324ade19f59f705c61d5c21912c958e92e96..fc135b9b87ed568acd3b7ae002760780202297ab 100644 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_be5bedb -ops = torch.ops._activation_be5bedb +from . import _activation_ochhfvlnc3vyc +ops = torch.ops._activation_ochhfvlnc3vyc def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_be5bedb::{op_name}" \ No newline at end of file + return f"_activation_ochhfvlnc3vyc::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so deleted file mode 100755 index 12f5777398872e7a3d93ab936e42ade8eeec3213..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1ce11492b9675a44afb3b896ed80e425f2a47e29481c4aad9c4a6ac59520f011 -size 2621472 diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..ed72afe1b709df6b64ae4daf96dfacf5397334e1 --- /dev/null +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41c18b20c2bf8c49d2d3088a9bc1aad4293df0b57eafc9b141a9e8e595fe551a +size 2436672 diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py index 0110324ade19f59f705c61d5c21912c958e92e96..03feb54a67ee96a3181145a654e9c1d3432d3c83 100644 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_be5bedb -ops = torch.ops._activation_be5bedb +from . import _activation_u6vnqubnicksq +ops = torch.ops._activation_u6vnqubnicksq def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_be5bedb::{op_name}" \ No newline at end of file + return f"_activation_u6vnqubnicksq::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..d9c4fba021babdd0966b329da77216730a212c19 --- /dev/null +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfbcd5da358cd5cb7982d19c8880cf4db6f08b46622a7a953f755ad59e4e1492 +size 2362752 diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so deleted file mode 100755 index 056de26936949cc36baf3caa9c4212d730da81f7..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_be5bedb.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:434bd1ae43b7cbdb10d86b82da9a237ec05ef9d9fb4fc15cdc9096d3d5ed3fa7 -size 2539352 diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py index 0110324ade19f59f705c61d5c21912c958e92e96..8ec67ec6be213233dc83cb83dcd9e3d8cade5a98 100644 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_be5bedb -ops = torch.ops._activation_be5bedb +from . import _activation_2vn6ty3gfqfb6 +ops = torch.ops._activation_2vn6ty3gfqfb6 def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_be5bedb::{op_name}" \ No newline at end of file + return f"_activation_2vn6ty3gfqfb6::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activition + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so deleted file mode 100755 index c31190f8f2be87dbb5d5a9c497c68cea2258fded..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_be5bedb.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:53ddfb42466bfe01feb98348f5c2d6beefd589aeb3dec4c5c36609e11a6bde4c -size 2605136 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..486879e6269e0c7d0763ac11d82a100dec1b1b91 --- /dev/null +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1bc928823117c800904bcd3492bf1a0c65a32f6d8a842dc039f55e29831ab49 +size 2420344 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py index 0110324ade19f59f705c61d5c21912c958e92e96..0f4e8d31b42ed7be77b8eaef9aa29251327009bf 100644 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_be5bedb -ops = torch.ops._activation_be5bedb +from . import _activation_myvteedxdpqc6 +ops = torch.ops._activation_myvteedxdpqc6 def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_be5bedb::{op_name}" \ No newline at end of file + return f"_activation_myvteedxdpqc6::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . 
import layers + ops = torch.ops._activation + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so deleted file mode 100755 index 516f085e9ac787a2454fb78975dbaec25d2a6576..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_be5bedb.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ac7174352dea307231f308c84ca32ee001cdbcefd976de860e76501c52aae591 -size 2613776 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..42ad3291140b013999afb683ec84c23804759e46 --- /dev/null +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:474727e434a9cd4ec984a6da7124992ead4ca0fefce9581d0fd503e36c065aed +size 2424888 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py index 0110324ade19f59f705c61d5c21912c958e92e96..c6d4e4c91a867d657f287510c40366bccef86c94 100644 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py +++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _activation_be5bedb -ops = torch.ops._activation_be5bedb +from . import _activation_rbswus6emrhm2 +ops = torch.ops._activation_rbswus6emrhm2 def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_activation_be5bedb::{op_name}" \ No newline at end of file + return f"_activation_rbswus6emrhm2::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 5155b241dff8af4302230c3ae23518cb41efa185..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 53b5508fec27cd0ece00b9b018694ba8da40c5ba..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 7752cad4c2a06746b1a68c3637c7baef00bb5ddc..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so deleted file mode 100755 index 7d5463c37b3f4a3dec8b15df1a13168019fb26e3..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aee7c6869a9e318ad81cb84460c58ca0dac2dc85f4ed739b12fe57641f766332 -size 2546984 diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py deleted file mode 100644 index 745e06b31cb5b9718d3b85236f4cc257459070d7..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_be5bedb_dirty -ops = torch.ops._activation_be5bedb_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_be5bedb_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 4b1fcc2dcde514cab92d358380824ca24616cd0b..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 665e89cb27b58c9caff761de28b7f6574cc2140e..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 4602c567b14a674c4a56d0e1cf8ef073fbc50beb..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so deleted file mode 100755 index 94c38d99b9593469317fe894be35b069017b493e..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f075a6e0d47a2d382d16291b1c5d7d1d98111e2bbc5891b14b627e3c1778b699 -size 2621536 diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py deleted file mode 100644 index 745e06b31cb5b9718d3b85236f4cc257459070d7..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_be5bedb_dirty -ops = torch.ops._activation_be5bedb_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_be5bedb_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index fe2206ed48c6e6b877620ac3db87af6ee49ddf07..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 6a940427d39d1a12a0806315d03b02bdfed65a3d..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 725246ac4c8d6c4374d8250ea67f759a871b1c38..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so deleted file mode 100755 index e5c17e44367c005d1c9f8d6b391be8d49079b2fc..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cc2406aa2fa09dd7bc1fd5e87cdcdf55edfc7e0853fad5f977e2500e08fa8899 -size 3565432 diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py deleted file mode 100644 index 745e06b31cb5b9718d3b85236f4cc257459070d7..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_be5bedb_dirty -ops = torch.ops._activation_be5bedb_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_be5bedb_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 7aaa364368efe0e765de132c08296d189a969ede..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index cc7b128cfd05527bc856b66cdaf7d33691835eae..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index e547c241f319a637fa590b09ad35c1592aacce40..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so deleted file mode 100755 index 8121d3da5057e1d53e4dee4b60de1e13285bd3e0..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:02b62f5d045f370c3fb7c0e7ef458165feb987fba186b8cb9aee55c735a82e93 -size 2699928 diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_0c3eb4e_dirty -ops = torch.ops._activation_0c3eb4e_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_0c3eb4e_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch - -from ._ops import ops - -from . import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 5263d294bc5bc421b98d31436c896bbc244d0771..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index fb753a567265e3db8b71afceb9a4442139a6aea7..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 
100644 index 6dd25df0a6c63b7315d2c0d9f4b3894ff1626fc8..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so deleted file mode 100755 index 40900ff2070ff72eb665fdd5fd78f12d3a287cd9..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c88e87951b92ea55313ef79a34d284cb2a23713d3bdafee735caa4fc955b9dcb -size 2610616 diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py deleted file mode 100644 index 745e06b31cb5b9718d3b85236f4cc257459070d7..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_be5bedb_dirty -ops = torch.ops._activation_be5bedb_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_be5bedb_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu126-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py deleted file mode 100644 index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index bdb5a121a09f628a672c404f5207f691347f83c5..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 0d8c166048d114380e068ca6448ab46ef96da034..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index ea0551b7b1c5e408b9875b62598f6f5f0b489a30..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so deleted file mode 100755 index f1d23623c037de97ee0207fe5f750d8ba9863d3c..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f7fe0a00eaf2e228f237ee3058ac9eb2c6fbc4927b1276d0f566bb05bb043b9 -size 3683080 diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_0c3eb4e_dirty -ops = torch.ops._activation_0c3eb4e_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_0c3eb4e_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch - -from ._ops import ops - -from . import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index aedb284c8147a243ebfc99ec94000b62ae672077..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 7ae3e6d861e600db32e9024ae7db059642f35a3f..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 
100644 index 51baab3cf4e592a2b8bed4cea0e9228a559b399d..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so deleted file mode 100755 index 8b1ece63bdec0e63013816dae6bce9a87068f88e..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf784c7ab178c476fc6268efe820b1948c7c5b8f049c046c851b03067da5dd59 -size 3558616 diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py deleted file mode 100644 index 745e06b31cb5b9718d3b85236f4cc257459070d7..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_be5bedb_dirty -ops = torch.ops._activation_be5bedb_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_be5bedb_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu128-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py deleted file mode 100644 index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch - -from ._ops import ops - -from . 
import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 8086d0297290c7f425c6040e160ad015337ce607..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 813a3ce81dbdc975cfd9ca5809f4d4c16e51d410..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 100644 index 7bc821f8f3a247087893f6c0fdaa1592d0b84aee..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so deleted file mode 100755 index 18c50172a17d5c0376245a740b75508692c01696..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b9f6a161283a05672bb3ed442990cf8a5ce553625bb482cd31ce514e07cfcf0a -size 3684504 diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py deleted file mode 100644 index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_0c3eb4e_dirty -ops = torch.ops._activation_0c3eb4e_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_activation_0c3eb4e_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. 
- - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py deleted file mode 100644 index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch - -from ._ops import ops - -from . import layers - - -def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.silu_and_mul(out, x) - return out - - -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - -def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_and_mul(out, x) - return out - - -def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_tanh_and_mul(out, x) - return out - - -def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None: - ops.fatrelu_and_mul(out, x, threshold) - return out - - -def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_fast(out, x) - return out - - -def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_new(out, x) - return out - - -def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: - ops.gelu_quick(out, x) - return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 01d30fced2b5392d0f6f4e6454cbe7d782a14daa..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc deleted file mode 100644 index 75b0e5f83e10b053d8584f2607d9a9f3009d45dc..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc deleted file mode 
100644 index d6ed035d206ae523160771021be45010f234687e..0000000000000000000000000000000000000000 Binary files a/build/torch28-cxx11-cu129-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so deleted file mode 100755 index 33fb245664d9daef5b07440b390db2c19ef404f1..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/_activation_be5bedb_dirty.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9e7cca3169eea8cbd67c61706d102548e49aadc936f8c2943efef3e7c4c0ee0d -size 3592400 diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py deleted file mode 100644 index 745e06b31cb5b9718d3b85236f4cc257459070d7..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _activation_be5bedb_dirty -ops = torch.ops._activation_be5bedb_dirty - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. - """ - return f"_activation_be5bedb_dirty::{op_name}" \ No newline at end of file diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/build/torch28-cxx11-cu129-x86_64-linux/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. 
- - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/flake.lock b/flake.lock deleted file mode 100644 index 4fa3e9a2e465daa852b90bc10e0c14b442b53b12..0000000000000000000000000000000000000000 --- a/flake.lock +++ /dev/null @@ -1,168 +0,0 @@ -{ - "nodes": { - "flake-compat": { - "locked": { - "lastModified": 1747046372, - "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1731533236, - "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1731533236, - "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", - "type": "github" - }, - "original": { - "owner": "numtide", - 
"repo": "flake-utils", - "type": "github" - } - }, - "hf-nix": { - "inputs": { - "flake-compat": "flake-compat_2", - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs" - }, - "locked": { - "lastModified": 1747919133, - "narHash": "sha256-VvF1naQOvv7yulQ5/cDiaxkNxlh1Y84QMZnderv1szk=", - "owner": "huggingface", - "repo": "hf-nix", - "rev": "9c71e026d6c7c8588ef85a5f7c77f57d598e038c", - "type": "github" - }, - "original": { - "owner": "huggingface", - "repo": "hf-nix", - "type": "github" - } - }, - "kernel-builder": { - "inputs": { - "flake-compat": "flake-compat", - "flake-utils": "flake-utils", - "hf-nix": "hf-nix", - "nixpkgs": [ - "kernel-builder", - "hf-nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1748620233, - "narHash": "sha256-VULm9HgGXvo3pyfsPy3SOhoqgkuqbGSaSemvzNUbdIU=", - "owner": "huggingface", - "repo": "kernel-builder", - "rev": "da3340e5b3cbb6086600420f4814b033395788d1", - "type": "github" - }, - "original": { - "owner": "huggingface", - "repo": "kernel-builder", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1747820358, - "narHash": "sha256-fTqsZsUX6M3yeEvgyQvXcbGmT2CaRVyVwsi8eK29Oj4=", - "owner": "danieldk", - "repo": "nixpkgs", - "rev": "d3c1681180717528068082103bf323147de6ab0b", - "type": "github" - }, - "original": { - "owner": "danieldk", - "ref": "cudatoolkit-12.9-kernel-builder", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "kernel-builder": "kernel-builder" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/flake.nix b/flake.nix index 54ac44c0698d43fb86a123430f5e9d2e9bcda8ea..f2e6b1f85c809079eb671a60228bbab307616f1a 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ description = "Flake for activation kernels"; inputs = { - kernel-builder.url = "github:huggingface/kernel-builder"; + kernel-builder.url = "git+ssh://git@github.com/huggingface/kernel-builder"; }; outputs = @@ -10,8 +10,5 @@ self, kernel-builder, }: - kernel-builder.lib.genFlakeOutputs { - path = ./.; - rev = self.shortRev or self.dirtyShortRev or self.lastModifiedDate; - }; + kernel-builder.lib.genFlakeOutputs ./.; } diff --git a/tests/kernels/test_activation.py b/tests/kernels/test_activation.py index 740f6837597943625d18c4d714bda3a35958c747..2f67a94f73db9ac8ed8d0c6a4b642702284ced9c 100644 --- a/tests/kernels/test_activation.py +++ b/tests/kernels/test_activation.py @@ -1,6 +1,3 @@ -# SPDX-License-Identifier: Apache-2.0 -# SPDX-FileCopyrightText: Copyright contributors to the vLLM project - import math import random from typing import Type @@ -46,19 +43,12 @@ def silu_and_mul(x: torch.Tensor) -> torch.Tensor: return F.silu(x[..., :d]) * x[..., d:] -def mul_and_silu(x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - return x[..., :d] * F.silu(x[..., d:]) - - def gelu_and_mul(x: torch.Tensor, approximate: str) -> torch.Tensor: d = 
x.shape[-1] // 2 return F.gelu(x[..., :d], approximate=approximate) * x[..., d:] -@pytest.mark.parametrize( - "activation_name", ["silu_and_mul", "mul_and_silu", "gelu", "gelu_tanh", "fatrelu"] -) +@pytest.mark.parametrize("activation_name", ["silu", "gelu", "gelu_tanh", "fatrelu"]) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("d", D) @pytest.mark.parametrize("dtype", DTYPES) @@ -77,43 +67,32 @@ def test_act_and_mul( torch.manual_seed(seed) torch.set_default_device(device) x = torch.randn(num_tokens, 2 * d, dtype=dtype) - if activation_name == "silu_and_mul": + if activation_name == "silu": torch_fn = silu_and_mul fn = activation.silu_and_mul op = activation.ops.silu_and_mul - layer = activation.layers.SiluAndMul() - elif activation_name == "mul_and_silu": - torch_fn = mul_and_silu - fn = activation.mul_and_silu - op = activation.ops.mul_and_silu - layer = activation.layers.MulAndSilu() elif activation_name == "gelu": torch_fn = lambda x: gelu_and_mul(x, "none") fn = activation.gelu_and_mul op = activation.ops.gelu_and_mul - layer = activation.layers.GeluAndMul() elif activation_name == "gelu_tanh": torch_fn = lambda x: gelu_and_mul(x, "tanh") fn = activation.gelu_tanh_and_mul op = activation.ops.gelu_tanh_and_mul - layer = activation.layers.GeluTanhAndMul() elif activation_name == "fatrelu": threshold = random.uniform(0, 1) torch_fn = lambda x: fatrelu_and_mul(x, threshold) fn = lambda out, x: activation.fatrelu_and_mul(out, x, threshold) op = activation.ops.fatrelu_and_mul - layer = activation.layers.FatreluAndMul(threshold) out_shape = x.shape[:-1] + (x.shape[-1] // 2,) out = torch.empty(out_shape, dtype=x.dtype, device=x.device) out = fn(out, x) - mod_out = layer(x) ref_out = torch_fn(x) # The SiLU, GELU and FatReLU implementations are equivalent to the native # PyTorch implementations, so we can do exact comparison. 
torch.testing.assert_close(out, ref_out, atol=0.0, rtol=0.0) - torch.testing.assert_close(mod_out, ref_out, atol=0.0, rtol=0.0) d = x.shape[-1] // 2 output_shape = x.shape[:-1] + (d,) @@ -127,24 +106,9 @@ def test_act_and_mul( @pytest.mark.parametrize( "activation_fns", [ - ( - gelu_fast, - activation.gelu_fast, - activation.ops.gelu_fast, - activation.layers.FastGELU, - ), - ( - gelu_new, - activation.gelu_new, - activation.ops.gelu_new, - activation.layers.NewGELU, - ), - ( - gelu_quick, - activation.gelu_quick, - activation.ops.gelu_quick, - activation.layers.QuickGELU, - ), + (gelu_fast, activation.gelu_fast, activation.ops.gelu_fast), + (gelu_new, activation.gelu_new, activation.ops.gelu_new), + (gelu_quick, activation.gelu_quick, activation.ops.gelu_quick), ], ) @pytest.mark.parametrize("num_tokens", NUM_TOKENS) @pytest.mark.parametrize("d", D) @pytest.mark.parametrize("dtype", DTYPES) @pytest.mark.parametrize("seed", SEEDS) @pytest.mark.parametrize("device", CUDA_DEVICES) @torch.inference_mode() def test_activation( activation_fns, num_tokens: int, d: int, dtype: torch.dtype, seed: int, device: str, ) -> None: torch.manual_seed(seed) torch.set_default_device(device) x = torch.randn(num_tokens, d, dtype=dtype) - torch_fn, fn, op, cls = activation_fns - layer = cls() + torch_fn, fn, op = activation_fns out = fn(torch.empty_like(x), x) - layer_out = layer(x) ref_out = torch_fn(x) torch.testing.assert_close( out, ref_out, atol=get_default_atol(out), rtol=get_default_rtol(out) ) - torch.testing.assert_close( - out, layer_out, atol=get_default_atol(out), rtol=get_default_rtol(out) - ) out = torch.empty_like(x) opcheck(op, (out, x)) diff --git a/torch-ext/activation/__init__.py b/torch-ext/activation/__init__.py index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..71e0b01a03416c783f2bd67fc30d7ac86aee8764 100644 --- a/torch-ext/activation/__init__.py +++ b/torch-ext/activation/__init__.py @@ -1,8 +1,15 @@ import torch -from ._ops import ops +try: + from ._ops import ops +except ImportError as e: + # Fallback for local development. + try: + import _activation -from . import layers + ops = torch.ops._activation + except ImportError: + raise e def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: @@ -10,11 +17,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: return out -def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None: - ops.mul_and_silu(out, x) - return out - - def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_and_mul(out, x) return out @@ -43,15 +45,3 @@ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None: def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None: ops.gelu_quick(out, x) return out - - -__all__ = [ - "silu_and_mul", - "gelu_and_mul", - "gelu_tanh_and_mul", - "fatrelu_and_mul", - "gelu_fast", - "gelu_new", - "gelu_quick", - "layers", -] diff --git a/torch-ext/activation/layers.py b/torch-ext/activation/layers.py deleted file mode 100644 index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..0000000000000000000000000000000000000000 --- a/torch-ext/activation/layers.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -import torch.nn as nn - -from ._ops import ops - - -class SiluAndMul(nn.Module): - """An activation function for SwiGLU. - - The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.silu_and_mul(out, x) - return out - - -class MulAndSilu(nn.Module): - """An activation function for SwiGLU.
- - The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.mul_and_silu(out, x) - return out - - -class GeluAndMul(nn.Module): - """An activation function for GeGLU. - - The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2. - - Shapes: - x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d) - return: (batch_size, seq_len, d) or (num_tokens, d) - """ - - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_and_mul(out, x) - return out - - -class GeluTanhAndMul(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.gelu_tanh_and_mul(out, x) - return out - - -class FatreluAndMul(nn.Module): - """An activation function for FATReLU. - - The function computes x -> FATReLU(x[:d]) * x[d:] where - d = x.shape[-1] // 2. - This is used in openbmb/MiniCPM-S-1B-sft. - - Shapes: - x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) - return: (num_tokens, d) or (batch_size, seq_len, d) - """ - - can_torch_compile: bool = True - - def __init__(self, threshold: float = 0.0): - super().__init__() - self.threshold = threshold - - def forward(self, x: torch.Tensor): - d = x.shape[-1] // 2 - output_shape = x.shape[:-1] + (d,) - out = torch.empty(output_shape, dtype=x.dtype, device=x.device) - ops.fatrelu_and_mul(out, x, self.threshold) - return out - - -class FastGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_fast(out, x) - return out - - -class NewGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_new(out, x) - return out - - -class QuickGELU(nn.Module): - can_torch_compile: bool = True - - def forward(self, x: torch.Tensor) -> torch.Tensor: - out = torch.empty_like(x) - ops.gelu_quick(out, x) - return out diff --git a/torch-ext/torch_binding.cpp b/torch-ext/torch_binding.cpp index 321568290bf3b5d9d0eaa2dc9a98ae8111c34859..b6148ecc33137085656b962c0cdc10fd480dc787 100644 --- a/torch-ext/torch_binding.cpp +++ b/torch-ext/torch_binding.cpp @@ -9,9 +9,6 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()"); ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul); - ops.def("mul_and_silu(Tensor! out, Tensor input) -> ()"); - ops.impl("mul_and_silu", torch::kCUDA, &mul_and_silu); - // Activation function used in GeGLU with `none` approximation. ops.def("gelu_and_mul(Tensor! 
out, Tensor input) -> ()"); ops.impl("gelu_and_mul", torch::kCUDA, &gelu_and_mul); diff --git a/torch-ext/torch_binding.h b/torch-ext/torch_binding.h index 3d7e28ae62da83fb2c18131f28a2e6d37878b8f5..cb163cfc1da061377d077bab6d12a8b048d60fa5 100644 --- a/torch-ext/torch_binding.h +++ b/torch-ext/torch_binding.h @@ -4,8 +4,6 @@ void silu_and_mul(torch::Tensor &out, torch::Tensor &input); -void mul_and_silu(torch::Tensor& out, torch::Tensor& input); - void gelu_and_mul(torch::Tensor &out, torch::Tensor &input); void gelu_tanh_and_mul(torch::Tensor &out, torch::Tensor &input);