danieldk (HF Staff) committed
Commit 6087c26 · verified · Parent: 3a0dfaa

Build uploaded using `kernels`.
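Builds published this way can be consumed with the companion `kernels` Python library, which selects the build directory matching the local torch version, CUDA version, and platform. A minimal sketch of loading this kernel, assuming the repository is available on the Hub under a placeholder id `user/tinygrad-rms` (hypothetical):

import torch
from kernels import get_kernel

# Fetch the pre-built variant matching this machine's torch/CUDA ABI.
# The repository id below is a hypothetical placeholder.
tinygrad_rms = get_kernel("user/tinygrad-rms")

x = torch.randn(8, 4096, device="cuda", dtype=torch.float16)
out, rms_inv = tinygrad_rms.tinygrad_rms_norm(x, epsilon=1e-6)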

Files changed (31)
  1. .gitattributes +6 -0
  2. build/torch28-cxx11-cu126-x86_64-linux/__init__.py +63 -0
  3. build/torch28-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  4. build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so +3 -0
  5. build/torch28-cxx11-cu126-x86_64-linux/metadata.json +1 -0
  6. build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py +26 -0
  7. build/torch28-cxx11-cu128-x86_64-linux/__init__.py +63 -0
  8. build/torch28-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  9. build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so +3 -0
  10. build/torch28-cxx11-cu128-x86_64-linux/metadata.json +1 -0
  11. build/torch28-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py +26 -0
  12. build/torch28-cxx11-cu129-x86_64-linux/__init__.py +63 -0
  13. build/torch28-cxx11-cu129-x86_64-linux/_ops.py +9 -0
  14. build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so +3 -0
  15. build/torch28-cxx11-cu129-x86_64-linux/metadata.json +1 -0
  16. build/torch28-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py +26 -0
  17. build/torch29-cxx11-cu126-x86_64-linux/__init__.py +63 -0
  18. build/torch29-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  19. build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so +3 -0
  20. build/torch29-cxx11-cu126-x86_64-linux/metadata.json +1 -0
  21. build/torch29-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py +26 -0
  22. build/torch29-cxx11-cu128-x86_64-linux/__init__.py +63 -0
  23. build/torch29-cxx11-cu128-x86_64-linux/_ops.py +9 -0
  24. build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so +3 -0
  25. build/torch29-cxx11-cu128-x86_64-linux/metadata.json +1 -0
  26. build/torch29-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py +26 -0
  27. build/torch29-cxx11-cu130-x86_64-linux/__init__.py +63 -0
  28. build/torch29-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  29. build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so +3 -0
  30. build/torch29-cxx11-cu130-x86_64-linux/metadata.json +1 -0
  31. build/torch29-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py +26 -0
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
+ build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,63 @@
+ from typing import Optional, Tuple
+
+ import torch
+
+ from ._ops import ops
+
+
+ def tinygrad_rms_norm(
+     x: torch.Tensor,
+     epsilon: float = 1e-6,
+     out: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     """
+     Compute RMSNorm using tinygrad-style CUDA kernels.
+
+     RMSNorm(x) = x * (1 / sqrt(mean(x^2) + epsilon))
+
+     This implementation uses a two-kernel approach:
+     1. Compute 1/sqrt(mean(x^2) + epsilon) for each row
+     2. Multiply input by the computed factor
+
+     Args:
+         x: Input tensor of shape (..., hidden_size)
+         epsilon: Small constant for numerical stability
+         out: Optional pre-allocated output tensor
+
+     Returns:
+         Tuple of (output tensor, rms_inv tensor)
+     """
+     if out is None:
+         out = torch.empty_like(x)
+
+     hidden_size = x.size(-1)
+     num_rows = x.numel() // hidden_size
+     rms_inv = torch.empty(num_rows, dtype=x.dtype, device=x.device)
+
+     ops.tinygrad_rms_norm(out, rms_inv, x, epsilon)
+     return out, rms_inv
+
+
+ def tinygrad_rms_norm_simple(
+     x: torch.Tensor,
+     epsilon: float = 1e-6,
+     out: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+     """
+     Compute RMSNorm using tinygrad-style CUDA kernels.
+
+     This is a simpler interface that only returns the normalized output.
+
+     Args:
+         x: Input tensor of shape (..., hidden_size)
+         epsilon: Small constant for numerical stability
+         out: Optional pre-allocated output tensor
+
+     Returns:
+         Normalized output tensor
+     """
+     if out is None:
+         out = torch.empty_like(x)
+
+     ops.tinygrad_rms_norm_inplace(out, x, epsilon)
+     return out
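As a sanity check, the definition in the docstring above maps directly onto plain PyTorch ops; the sketch below (not part of the commit) mirrors the two-step scheme and can be used to validate the kernel's output:

import torch

def rms_norm_reference(x: torch.Tensor, epsilon: float = 1e-6):
    # Step 1: per-row 1/sqrt(mean(x^2) + epsilon), as in the first kernel.
    rms_inv = torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + epsilon)
    # Step 2: scale the input by the computed factor, as in the second kernel.
    return x * rms_inv, rms_inv.reshape(-1)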
build/torch28-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _tinygrad_rms_3102ae4
+ ops = torch.ops._tinygrad_rms_3102ae4
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_tinygrad_rms_3102ae4::{op_name}"
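`ops` resolves to the extension's namespace in the `torch.ops` registry, so the out-variant operator used in `__init__.py` can also be called fully qualified. A sketch, assuming a CUDA device and that the `_tinygrad_rms_3102ae4` extension has already been imported:

import torch

x = torch.randn(4, 128, device="cuda")
out = torch.empty_like(x)
rms_inv = torch.empty(4, dtype=x.dtype, device=x.device)
# Equivalent to `ops.tinygrad_rms_norm(out, rms_inv, x, 1e-6)` above.
torch.ops._tinygrad_rms_3102ae4.tinygrad_rms_norm(out, rms_inv, x, 1e-6)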
build/torch28-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be89de0420f14c5ed6705727ec25129a13946b039a7083a3a4d3c617bc3e9974
+ size 2055480
build/torch28-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is: after adding it to `sys.modules`,
+     # it would also be used for other imports. So we make a module name that
+     # depends on the path for it to be unique, using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
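This stub re-exports the top-level `__init__.py` of its build directory; `_import_from_path` itself is generic and loads any Python file under a path-hash module name. A small usage sketch (hypothetical path):

mod = _import_from_path(Path("/tmp/example_module.py"))  # hypothetical file
print(mod.__name__)  # hex-encoded hash of the path, not "example_module"
print(mod.__name__ in sys.modules)  # True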
build/torch28-cxx11-cu128-x86_64-linux/__init__.py ADDED
(63 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/__init__.py above)
build/torch28-cxx11-cu128-x86_64-linux/_ops.py ADDED
(9 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/_ops.py above)
build/torch28-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cb8f8f145b462cb3c631f8a11431fc7fc28f1491e3728ea264cd1603ce7b7d0
+ size 2147152
build/torch28-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py ADDED
(26 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py above)
build/torch28-cxx11-cu129-x86_64-linux/__init__.py ADDED
(63 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/__init__.py above)
build/torch28-cxx11-cu129-x86_64-linux/_ops.py ADDED
(9 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/_ops.py above)
build/torch28-cxx11-cu129-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7f0993df65ef46a52d07c0d40e11fcb5ae1430ad1cdab7c118693542163cc11
+ size 2168648
build/torch28-cxx11-cu129-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cu129-x86_64-linux/tinygrad_rms/__init__.py ADDED
(26 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py above)
build/torch29-cxx11-cu126-x86_64-linux/__init__.py ADDED
(63 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/__init__.py above)
build/torch29-cxx11-cu126-x86_64-linux/_ops.py ADDED
(9 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/_ops.py above)
build/torch29-cxx11-cu126-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ef5c694a44df69ad379a9be347cfe07b6e546aa1495b7b3192d4e5439811771
+ size 2055456
build/torch29-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch29-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py ADDED
(26 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py above)
build/torch29-cxx11-cu128-x86_64-linux/__init__.py ADDED
(63 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/__init__.py above)
build/torch29-cxx11-cu128-x86_64-linux/_ops.py ADDED
(9 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/_ops.py above)
build/torch29-cxx11-cu128-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0229db5ced57dc868ce0558deafe5fe01035becd619437b2963f90c2344be3a0
+ size 2151224
build/torch29-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch29-cxx11-cu128-x86_64-linux/tinygrad_rms/__init__.py ADDED
(26 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py above)
build/torch29-cxx11-cu130-x86_64-linux/__init__.py ADDED
(63 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/__init__.py above)
build/torch29-cxx11-cu130-x86_64-linux/_ops.py ADDED
(9 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/_ops.py above)
build/torch29-cxx11-cu130-x86_64-linux/_tinygrad_rms_3102ae4.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4346c1856b3913788f2171dd8b561bd3dcfddd75e36f1250c1277163f2054999
+ size 2173416
build/torch29-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch29-cxx11-cu130-x86_64-linux/tinygrad_rms/__init__.py ADDED
(26 added lines, identical to build/torch28-cxx11-cu126-x86_64-linux/tinygrad_rms/__init__.py above)