python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149)
---|---|---|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import pUtils
from PIL import Image
from PixelView.imageContainers.abstractImage import AbstractImage
class Rgba8888Image(AbstractImage):
def __init__(self, data=bytearray(), width=0, height=0):
super().__init__(data, width, height)
self.bytesPerPixel = 4
self.mode = 'RGBA'
def save(self, filePath):
header = str.encode('rgba8888 ' + str(self.width) + ' ' + str(self.height) + chr(0x0A))
pUtils.quickFileWrite(filePath, header + self.data, 'wb')
def savePNG(self, filePath):
img = Image.frombytes('RGBA', (self.width, self.height), self.data)
img.save(filePath, 'PNG')
def load(self, filePath, data=None):
if data is None:
data = pUtils.quickFileRead(filePath, 'rb')
index = data.find(b'\x0A')
header = data[:index]
body = data[index + 1:]
# Sample string to match: rgba8888 320 240
t = re.match(b'rgba8888 ([\x30-\x39]+) ([\x30-\x39]+)', header)
if t is None:
raise Exception('Invalid header for a rgba8888 file type')
self.width = int(t.group(1))
self.height = int(t.group(2))
self.data = bytearray(body)
self.srcFilePath = filePath
self.srcFileFormat = 'RGBA8888'
| PixelView-master | PixelView/imageContainers/rgba8888Image.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import pUtils
from PIL import Image
from PixelView.imageContainers.abstractImage import AbstractImage
class Rgb888Image(AbstractImage):
def __init__(self, data=bytearray(), width=0, height=0):
super().__init__(data, width, height)
self.bytesPerPixel = 3
self.mode = 'RGB'
def save(self, filePath):
header = str.encode('rgb888 ' + str(self.width) + ' ' + str(self.height) + chr(0x0A))
pUtils.quickFileWrite(filePath, header + self.data, 'wb')
def savePNG(self, filePath):
img = Image.frombytes('RGB', (self.width, self.height), self.data)
img.save(filePath, "PNG")
def load(self, filePath, data=None):
if data is None:
data = pUtils.quickFileRead(filePath, 'rb')
index = data.find(b'\x0A')
header = data[:index]
body = data[index + 1:]
# Sample string to match: rgb888 320 240
t = re.match(b'rgb888 ([\x30-\x39]+) ([\x30-\x39]+)', header)
if t is None:
raise Exception('Invalid header for a rgb888 file type')
self.width = int(t.group(1))
self.height = int(t.group(2))
self.data = bytearray(body)
self.srcFilePath = filePath
self.srcFileFormat = 'RGB888'
| PixelView-master | PixelView/imageContainers/rgb888Image.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PixelView-master | PixelView/config/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pUtils
from importlib import import_module
from PixelView.utils.cli import pprint, COLOR
from PixelView.gui.loadingIndicators.common import SHAPE, DIRECTION
class ConfigManager:
"""
The ConfigManager takes care of all settings/configurations throughout the
life of the application.
When the app requires to know the current value of a given setting/config,
it just asks the ConfigManager through the 'get' functions on the bottom
part of this file.
Any and all logic to figure out or pre-process any settings/config values
is centralized and encapsulated here.
"""
def __init__(self, configFilePath, configName, isUseInternalDefaults, verbose, **kwargs):
self.configStartPath = os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), 'nvidiaPixelViewConfigStart.cfg')
self.isUseInternaldefaults = isUseInternalDefaults
self.verbose = verbose
self.configData = {}
self.configFilePath = None
if self.isUseInternaldefaults is True: return
self.determineConfigFilePath(configFilePath, configName)
self.load()
def determineConfigFilePath(self, configFilePath, configName):
"""
ConfigManager determines what config file to use in the
following way:
1) If configFilePath is not None uses that and returns.
2) If configStart.txt exists:
2.1) If configName is not None then:
- ConfigManager loads 'configStart.txt' from the same directory as this file.
'configStart.txt' points to a config menu file
- ConfigManager loads the config menu file
The config menu file is a dictionary of 'configName: path' pairs
- ConfigManager uses configName with the dictionary and gets the path
2.2) if configName is None, is then the same as in #2.1 except that
defaultConfigName (from the config menu file) is used as configName.
3) Otherwise no configFile would be load
"""
if configFilePath is not None:
self.configFilePath = configFilePath
if not os.path.isabs(self.configFilePath):
self.configFilePath = os.path.join(os.path.dirname(self.configStartPath), self.configFilePath)
return
try:
self.configMenuPath = pUtils.quickFileRead(self.configStartPath, 'txt')[0]
except Exception:
if self.verbose:
pprint('---------------------------------')
pprint('Info: ', color=COLOR.TEAL, endLine=False); pprint('Unable to load:')
pprint(' %s' % self.configStartPath, color=COLOR.TEAL)
pprint('Internal defaults will be used')
pprint('---------------------------------')
return
if not os.path.isabs(self.configMenuPath):
self.configMenuPath = os.path.join(os.path.dirname(self.configStartPath), self.configMenuPath)
try:
configMenuData = pUtils.quickFileRead(self.configMenuPath, 'json')
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('Unable to load json file:')
pprint(' ' + self.configMenuPath, color=COLOR.TEAL)
exit(1)
if configName is None:
try:
defaultConfigName = configMenuData['defaultConfigName']
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('On file:')
pprint(' ' + self.configMenuPath, color=COLOR.TEAL)
pprint('Key: "', endLine=False); pprint('defaultConfigName', endLine=False, color=COLOR.TEAL); pprint('" not found')
exit(1)
configName = defaultConfigName
try:
self.configFilePathFromMenu = configMenuData['configFilePathDict'][configName]
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('On file:')
pprint(' ' + self.configMenuPath, color=COLOR.TEAL)
pprint('Key sequence: "', endLine=False); pprint('configFilePathDict ' + configName, endLine=False, color=COLOR.TEAL); pprint('" not found')
exit(1)
self.configFilePath = self.configFilePathFromMenu
if not os.path.isabs(self.configFilePath):
self.configFilePath = os.path.join(os.path.dirname(self.configMenuPath), self.configFilePath)
def load(self):
if self.configFilePath is None: return 1
try:
self.configData = pUtils.quickFileRead(self.configFilePath, 'json')
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('Unable to load json file:')
pprint(' ' + self.configFilePath, color=COLOR.TEAL)
exit(1)
return 0
def dump(self, filePath):
data = {'configData': self.configData,
'metadata': {'configStartPath': self.configStartPath,
'configMenuPath': self.configMenuPath,
'configFilePathFromMenu': self.configFilePathFromMenu,
'configFilePath': self.configFilePath}}
pUtils.quickFileWrite(filePath, data, 'json')
def saveFullConfig(self, filePath):
if os.path.exists(filePath): return 1
defaultSettingFuncList = [
'getPropDx',
'getPropHollowColor',
'getLoadingIndicatorDirection',
'getDumpFileName',
'getPropFillColor',
'getLoadingIndicatorClass',
'getPropShape',
'getNullColor',
'getDeltaImageColorDict',
'getPropDy',
'getMarkerColor',
'getLoadingIndicatorRefreshRate',
]
for funcName in defaultSettingFuncList:
func = getattr(self, funcName)
func()
pUtils.quickFileWrite(filePath, self.configData, 'json')
return 0
def genMenuConfigFile(self, filePath, configName, configPath):
if os.path.exists(filePath): return 1
configMenuData = {'defaultConfigName': configName,
'configFilePathDict': {configName: configPath}}
pUtils.quickFileWrite(filePath, configMenuData, 'json')
return 0
def setConfigStart(self, filePath):
if not os.path.exists(filePath):
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('File:')
pprint(' ' + filePath, color=COLOR.TEAL)
            pprint('Does not exist.')
return 1
if os.path.exists(self.configStartPath):
pprint('Warning: ', color=COLOR.RED, endLine=False); pprint('File:')
pprint(' ' + self.configStartPath, color=COLOR.TEAL)
pprint('Will be overwritten.')
promptString = 'Proceed (y/n)? '
if input(promptString) != 'y':
pprint('Aborted action')
return 2
pUtils.quickFileWrite(self.configStartPath, os.path.abspath(os.path.realpath(filePath)))
pprint('DONE', color=COLOR.TEAL)
return 0
def clearConfigStart(self):
if not os.path.exists(self.configStartPath):
pprint('Info: ', color=COLOR.TEAL, endLine=False); pprint('Already clear')
return 1
pprint('Warning: ', color=COLOR.RED, endLine=False); pprint('File:')
pprint(' ' + self.configStartPath, color=COLOR.TEAL)
pprint('Will be deleted.')
promptString = 'Proceed (y/n)? '
if input(promptString) != 'y':
pprint('Aborted action')
return 2
try:
os.remove(self.configStartPath)
except Exception:
pprint('Error: ', color=COLOR.RED, endLine=False); pprint('Unable to clear configStart')
return 3
pprint('DONE', color=COLOR.TEAL)
return 0
def getter(self, key, default):
self.configData[key] = self.configData.get(key, default)
return self.configData[key]
# Get functions
def getDeltaImageColorDict(self):
return self.getter('deltaImageColor',
{'0': [0x00, 0x00, 0x00],
'1': [0x00, 0xFF, 0x00],
'2': [0x00, 0x00, 0xFF],
'default': [0xFF, 0xFF, 0xFF]})
def getDeltaImageColor(self, deltaValue):
t = self.getDeltaImageColorDict()
return t.get(str(deltaValue), t['default'])
def getDumpFileName(self):
return self.getter('dumpFileName', 'dump.json')
def getMarkerColor(self):
return self.getter('markerColor', [0xFF, 0x00, 0x00])
def getNullColor(self):
return self.getter('nullColor', [0xFF, 0x00, 0xFF])
def getLoadingIndicatorClass(self):
moduleName, objName = self.getter('loadingIndicator', ['PixelView.gui.loadingIndicators.line', 'Line'])
module = import_module(moduleName, 'PixelView.gui.loadingIndicators')
obj = getattr(module, objName)
return obj
def getLoadingIndicatorRefreshRate(self):
return self.getter('refreshRate', 100)
def getLoadingIndicatorDirection(self):
default = 3
try:
t = DIRECTION(self.getter('loadingIndicatorDirection', default))
except Exception:
t = DIRECTION(default)
return t
def getPropDx(self):
return self.getter('propDx', 15)
def getPropDy(self):
return self.getter('propDy', 15)
def getPropFillColor(self):
return self.getter('propFillColor', [0, 0, 255, 255])
def getPropHollowColor(self):
return self.getter('propHollowColor', [150, 150, 150, 150])
def getPropShape(self):
default = 1
try:
t = SHAPE(self.getter('propShape', default))
except Exception:
t = SHAPE(default)
return t
| PixelView-master | PixelView/config/configManager.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PixelView-master | PixelView/utils/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def truncateString(s, pixelWidth):
retString = s[int(-1 * pixelWidth / 8):]
if len(retString) != len(s):
retString = '###' + retString
return retString
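# Worked example (editor note): truncateString('abcdefghijkl', 40) keeps the last
# 40 / 8 = 5 characters and flags the cut, returning '###hijkl'.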
| PixelView-master | PixelView/utils/other.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
class OneShotThread(threading.Thread):
def __init__(self, oneShotFunc, inputData=None):
super().__init__()
self.inputData = inputData
self.oneShotFunc = oneShotFunc
self.sleepInterval = 100
def run(self):
if self.inputData is None:
self.returnData = self.oneShotFunc()
else:
self.returnData = self.oneShotFunc(**self.inputData)
| PixelView-master | PixelView/utils/threading.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import enum
import pUtils
import platform
def paramList(arg):
return arg.split(',')
def handleCli(args, topLevel, ConfigManager=None):
kwargs = vars(args)
preprocessFileList(kwargs)
command = kwargs.pop('command')
func = getattr(topLevel, command)
if kwargs['verbose']:
pprint(kwargs, color=COLOR.TEAL)
pprint('---------------------------')
if ConfigManager:
kwargs['configManager'] = ConfigManager(**kwargs)
func(**kwargs)
def preprocessFileList(kwargs):
fList = kwargs.get('fList')
if not fList: return
fListVarNameList = kwargs['fListVarNameList']
for fListVarName in fListVarNameList:
fListVar = kwargs.get(fListVarName)
if fListVar is None: continue
t = []
for item in fListVar:
t += pUtils.quickFileRead(item, 'txt')
kwargs[fListVarName] = t
class COLOR(enum.Enum):
RED = 91
GREEN = 32
BLUE = 34
TEAL = 96
def colorString(s, color):
if platform.system() == 'Linux':
return '\033[%im%s\033[00m' % (color.value, s)
return s
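# Example (editor note): on Linux, colorString('Error', COLOR.RED) returns
# '\033[91mError\033[00m'; on other platforms the string is returned unchanged.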
def pprint(s, color=None, endLine=True):
s = str(s)
if endLine:
s += '\n'
if PPRINT_LOG_FILE:
pUtils.quickFileWrite(PPRINT_LOG_FILE, s, 'at')
if color:
s = colorString(s, color)
sys.stdout.write(s)
sys.stdout.flush()
PPRINT_LOG_FILE = None
| PixelView-master | PixelView/utils/cli.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pUtils
from PIL import Image
from io import BytesIO
from PySide2.QtGui import QImage, QPixmap
from PixelView.imageContainers.rgb888Image import Rgb888Image
from PixelView.imageContainers.rgba8888Image import Rgba8888Image
def loadImage(filePath, nullColor=None):
def genPlaceHolder():
        # If we couldn't load an image we generate a placeholder.
        # This is necessary since we may be dealing with a list of images
        # and we can't just end the application when one 'unloadable' image
        # is found.
        # By generating a placeholder instead, we let the user keep
        # reviewing the images in the list while indicating that something
        # went wrong.
width = 100
height = 100
t = Rgba8888Image(bytearray(nullColor + [0]) * width * height, width, height)
t.srcFileFormat = 'nullImage'
return t
try:
data = pUtils.quickFileRead(filePath, 'rb')
except Exception:
if nullColor: return genPlaceHolder()
raise
try:
img = Rgba8888Image()
img.load(filePath, data=data)
return img
except Exception: pass
try:
img = Rgb888Image()
img.load(filePath, data=data)
return img
except Exception: pass
try:
img = Image.open(BytesIO(data))
except Exception:
raise Exception('Unable to identify image format')
try:
if img.format != 'PNG': raise Exception('Unsupported image format ' + img.format)
width, height = img.size
data = bytearray(img.tobytes())
if img.mode == 'RGBA':
t = Rgba8888Image(data, width, height)
elif img.mode == 'RGB':
t = Rgb888Image(data, width, height)
else:
raise Exception('Unknown Image mode')
t.srcFilePath = filePath
t.srcFileFormat = 'PNG'
return t
except Exception:
if nullColor: return genPlaceHolder()
raise
def dropAlpha(img):
if isinstance(img, Rgb888Image):
return img
if isinstance(img, Rgba8888Image):
data = img.data
red = data[0::4]
green = data[1::4]
blue = data[2::4]
newData = bytearray([0, 0, 0] * int(len(data) / 4))
newData[0::3] = red
newData[1::3] = green
newData[2::3] = blue
return Rgb888Image(newData, img.width, img.height)
raise Exception('Invalid input parameter type')
def getAlphaImage(img):
if isinstance(img, Rgb888Image):
return None
if isinstance(img, Rgba8888Image):
data = img.data
alpha = data[3::4]
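        # Replicate each alpha byte into R, G and B so the alpha channel can be
        # shown as a grayscale RGB image.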
newData = bytearray([alpha[i // 3] for i in range(len(alpha) * 3)])
return Rgb888Image(newData, img.width, img.height)
raise Exception('Invalid input parameter type')
def widgetDisplayImage(widget, img):
imgFormat = QImage.Format_RGB888
img = dropAlpha(img)
displayImage = QImage(img.data,
img.width, img.height,
imgFormat)
displayImagePix = QPixmap.fromImage((displayImage))
widget.setPixmap(displayImagePix)
| PixelView-master | PixelView/utils/image.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PixelView-master | PixelView/gui/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import enum
import pUtils
from PySide2.QtWidgets import QApplication, QMainWindow, QMenuBar, QAction, QMessageBox
from PixelView.gui.centralWidgets.view import View
from PixelView.gui.centralWidgets.compare import Compare
@enum.unique
class MAIN_WINDOW_MODE(enum.Enum):
VIEW = 1
COMPARE = 2
class MainWindow(QMainWindow):
def __init__(self, parent=None, **kwargs):
super(MainWindow, self).__init__(parent)
self.initVars(**kwargs)
self.resize(200, 200)
self.move(0, 0)
self.setWindowTitle('PixelView')
self.initMenuBar()
self.selectCentralWidget()
def initVars(self, mode, configManager, **kwargs):
self.cm = configManager
self.imagePathList1 = []
self.imagePathList2 = []
self.droppedList1 = []
self.droppedList2 = []
self.index = 0
self.pixelDiffIndex = None
self.pixelIndex1 = 0
self.pixelIndex2 = 0
self.mode = mode
self.centralWidgetDict = {
MAIN_WINDOW_MODE.VIEW: View(configManager=configManager, **kwargs),
MAIN_WINDOW_MODE.COMPARE: Compare(configManager=configManager, **kwargs),
}
def initMenuBar(self, mode=None):
if mode:
self.mode = mode
self.setMenuBar(self.createMenuBar())
def selectCentralWidget(self, mode=None):
if mode:
self.mode = mode
self.setCentralWidget(self.centralWidgetDict[self.mode])
def dropImage(self):
if len(self.imagePathList1) == 1: return
self.droppedList1.append(self.imagePathList1.pop(self.index))
if self.imagePathList2:
self.droppedList2.append(self.imagePathList2.pop(self.index))
if self.index >= len(self.imagePathList1):
self.index = len(self.imagePathList1) - 1
self.draw()
def writeLists(self):
def f(s):
return os.path.abspath(os.path.realpath(s))
d = dict(imagePathList1=[f(i) for i in self.imagePathList1],
imagePathList2=[f(i) for i in self.imagePathList2],
droppedPathList1=[f(i) for i in self.droppedList1],
droppedPathList2=[f(i) for i in self.droppedList2],)
filePath = os.path.abspath(self.cm.getDumpFileName())
try:
if not os.path.exists(filePath):
pUtils.quickFileWrite(filePath, d, 'json')
else:
msgBox = QMessageBox(self)
msgBox.setText('File:\n %s\nalready exists' % filePath)
msgBox.setInformativeText('Do you want to overwrite it?')
msgBox.setIcon(QMessageBox.Warning)
msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.Cancel)
msgBox.setDefaultButton(QMessageBox.Cancel)
ret = msgBox.exec_()
if ret == QMessageBox.Yes:
pUtils.quickFileWrite(filePath, d, 'json')
except Exception:
msgBox = QMessageBox(self)
msgBox.setText('Unable to write file:\n %s' % filePath)
msgBox.setInformativeText('Please make sure you have right access and space left')
msgBox.setIcon(QMessageBox.Critical)
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.exec_()
def prevImage(self):
if self.index <= 0: return
self.index = self.index - 1
self.draw()
def nextImage(self):
if self.index >= len(self.imagePathList1) - 1: return
self.index = self.index + 1
self.draw()
def createMenuBar(self):
menuBar = QMenuBar(parent=None)
# File Menu
fileMenu = menuBar.addMenu('&File')
dumpListsAction = QAction('Dump Lists', fileMenu)
dumpListsAction.setShortcut('Ctrl+S')
dumpListsAction.triggered.connect(self.writeLists)
fileMenu.addAction(dumpListsAction)
fileMenu.addSeparator()
exitAction = QAction('E&xit', fileMenu)
exitAction.setShortcut('Ctrl+Q')
exitAction.triggered.connect(QApplication.closeAllWindows)
fileMenu.addAction(exitAction)
# View Menu
viewMenu = menuBar.addMenu('V&iew')
t = ' pair' if self.mode == MAIN_WINDOW_MODE.COMPARE else ''
nextImageAction = QAction('next image' + t, viewMenu)
nextImageAction.setShortcut('Ctrl+]')
nextImageAction.triggered.connect(self.nextImage)
viewMenu.addAction(nextImageAction)
prevImageAction = QAction('prev image' + t, viewMenu)
prevImageAction.setShortcut('Ctrl+[')
prevImageAction.triggered.connect(self.prevImage)
viewMenu.addAction(prevImageAction)
viewMenu.addSeparator()
dropImageAction = QAction('Drop image' + t, viewMenu)
dropImageAction.setShortcut('Ctrl+D')
dropImageAction.triggered.connect(self.dropImage)
viewMenu.addAction(dropImageAction)
if self.mode == MAIN_WINDOW_MODE.COMPARE:
viewMenu.addSeparator()
nextDiffPixelAction = QAction('next diff pixel', viewMenu)
nextDiffPixelAction.setShortcut('Ctrl+.')
nextDiffPixelAction.triggered.connect(self.centralWidgetDict[MAIN_WINDOW_MODE.COMPARE].nextDiffPixel)
viewMenu.addAction(nextDiffPixelAction)
prevDiffPixelAction = QAction('prev diff pixel', viewMenu)
prevDiffPixelAction.setShortcut('Ctrl+,')
prevDiffPixelAction.triggered.connect(self.centralWidgetDict[MAIN_WINDOW_MODE.COMPARE].prevDiffPixel)
viewMenu.addAction(prevDiffPixelAction)
return menuBar
def draw(self):
imagePath1 = self.imagePathList1[self.index]
imagePath2 = ''
if self.imagePathList2: imagePath2 = self.imagePathList2[self.index]
self.centralWidget().draw(imagePath1=imagePath1,
imagePath2=imagePath2,
index=self.index,
totalImageSets=len(self.imagePathList1))
def launch(configManager, imagePathList1, imagePathList2=[], mode=MAIN_WINDOW_MODE.VIEW, **kwargs):
app = QApplication([])
pv = MainWindow(mode=mode, configManager=configManager, **kwargs)
pv.imagePathList1 = imagePathList1
pv.imagePathList2 = imagePathList2
pv.draw()
pv.show()
app.exec_()
| PixelView-master | PixelView/gui/mainWindow.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PixelView-master | PixelView/gui/centralWidgets/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QWidget, QVBoxLayout, QLabel, QFrame
from PixelView.utils.other import truncateString
from PixelView.utils.threading import OneShotThread
from PixelView.utils.image import loadImage, widgetDisplayImage
class View(QWidget):
def __init__(self, configManager, parent=None, **kwargs):
super(View, self).__init__(parent)
self.cm = configManager
self.initLayout()
self.initLoadingIndicator()
def initInfoLayout(self):
layout = QVBoxLayout()
self.counterLabel = QLabel()
layout.addWidget(self.counterLabel, alignment=Qt.AlignHCenter | Qt.AlignTop)
return layout
def initLayout(self):
layout = QVBoxLayout()
layout.addLayout(self.initInfoLayout())
self.imageLabel = QLabel()
self.imageLabel.setFrameStyle(QFrame.Panel | QFrame.Sunken)
self.imagePathLabel = QLabel()
imageLayout = QVBoxLayout()
imageLayout.addWidget(self.imageLabel)
imageLayout.addWidget(self.imagePathLabel, alignment=Qt.AlignRight | Qt.AlignTop)
layout.addLayout(imageLayout)
self.setLayout(layout)
def initLoadingIndicator(self):
self.loadingIndicator = self.cm.getLoadingIndicatorClass()(parent=self, configManager=self.cm)
def updateInfo(self):
self.counterLabel.setText('%i of %i' % (self.index + 1, self.totalImageSets))
self.imagePathLabel.setText(truncateString(self.imagePath, self.img.width))
def loading(self):
img = loadImage(self.imagePath, self.cm.getNullColor())
returnData = dict(img=img)
return returnData
def draw(self, imagePath1, index, totalImageSets, **kwargs):
self.imagePath = imagePath1
self.index = index
self.totalImageSets = totalImageSets
self.loadingThread = OneShotThread(oneShotFunc=self.loading)
self.loadingThread.start()
        self.loadingIndicator.start(isDoneFunc=lambda: not self.loadingThread.is_alive(),
postFunc=self.drawPart2)
def drawPart2(self):
data = self.loadingThread.returnData
self.img = data.get('img')
self.updateInfo()
widgetDisplayImage(self.imageLabel, data.get('img'))
| PixelView-master | PixelView/gui/centralWidgets/view.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel, QFrame, QMessageBox
from PixelView.utils.other import truncateString
from PixelView.utils.threading import OneShotThread
from PixelView.utils.image import loadImage, widgetDisplayImage, getAlphaImage
from PixelView.imageContainers.common import COMPARE_TYPE
from PixelView.imageContainers.rgb888Image import Rgb888Image
class Compare(QWidget):
def __init__(self, configManager, geometry1=None, geometry2=None, parent=None, **kwargs):
super(Compare, self).__init__(parent)
self.cm = configManager
self.geometry1 = geometry1
self.geometry2 = geometry2
self.initVars()
self.initLayout()
self.initLoadingIndicator()
def initVars(self):
self.pixelIndex1 = 0
self.pixelIndex2 = 0
self.pixelDiffIndex = -1
def initPixelInfoLayout(self):
layout = QGridLayout()
layout.setColumnStretch(3, 9)
layout.setVerticalSpacing(0)
layout.setHorizontalSpacing(20)
self.pixelXY1Label = QLabel()
self.pixelXY2Label = QLabel()
self.pixelIndex1Label = QLabel()
self.pixelIndex2Label = QLabel()
self.pixelColor1Label = QLabel()
self.pixelColor2Label = QLabel()
layout.addWidget(self.pixelXY1Label, 0, 0, alignment=Qt.AlignLeft | Qt.AlignTop)
layout.addWidget(self.pixelXY2Label, 1, 0, alignment=Qt.AlignLeft | Qt.AlignTop)
layout.addWidget(self.pixelIndex1Label, 0, 1, alignment=Qt.AlignLeft | Qt.AlignTop)
layout.addWidget(self.pixelIndex2Label, 1, 1, alignment=Qt.AlignLeft | Qt.AlignTop)
layout.addWidget(self.pixelColor1Label, 0, 2, alignment=Qt.AlignLeft | Qt.AlignTop)
layout.addWidget(self.pixelColor2Label, 1, 2, alignment=Qt.AlignLeft | Qt.AlignTop)
return layout
def initDifferencesInfoLayout(self):
layout = QGridLayout()
self.differentPixelsTotalLabel = QLabel()
layout.addWidget(self.differentPixelsTotalLabel, 0, 0, alignment=Qt.AlignRight | Qt.AlignBottom)
return layout
def initInfoLayout(self):
layout = QVBoxLayout()
headerLayout = QHBoxLayout()
self.counterLabel = QLabel()
headerLayout.addWidget(self.counterLabel, alignment=Qt.AlignHCenter | Qt.AlignTop)
bodyLayout = QHBoxLayout()
bodyLayout.addLayout(self.initPixelInfoLayout())
bodyLayout.addLayout(self.initDifferencesInfoLayout())
layout.addLayout(headerLayout)
layout.addLayout(bodyLayout)
return layout
def initImagesLayout(self):
def initSubLayout(imageLabel, imagePathLabel):
subLayout = QGridLayout()
subLayout.setColumnStretch(1, 9)
subLayout.addWidget(imageLabel, 0, 0)
subLayout.addWidget(imagePathLabel, 1, 0, 1, 2, alignment=Qt.AlignRight | Qt.AlignTop)
return subLayout
layout = QGridLayout()
self.imageLabelList = []
for i in range(6):
tLabel = QLabel()
tLabel.setFrameStyle(QFrame.Panel | QFrame.Sunken)
self.imageLabelList.append(tLabel)
self.imagePath1Label = QLabel()
self.imagePath2Label = QLabel()
self.differentPixelsRgbLabel = QLabel()
self.differentPixelsAlphaLabel = QLabel()
subLayout = initSubLayout(self.imageLabelList[0], self.imagePath1Label)
layout.addLayout(subLayout, 0, 0, alignment=Qt.AlignHCenter | Qt.AlignTop)
subLayout = initSubLayout(self.imageLabelList[1], self.imagePath2Label)
layout.addLayout(subLayout, 0, 1, alignment=Qt.AlignHCenter | Qt.AlignTop)
subLayout = initSubLayout(self.imageLabelList[2], self.differentPixelsRgbLabel)
layout.addLayout(subLayout, 0, 2, alignment=Qt.AlignHCenter | Qt.AlignTop)
layout.addWidget(self.imageLabelList[3], 1, 0, alignment=Qt.AlignHCenter | Qt.AlignTop)
layout.addWidget(self.imageLabelList[4], 1, 1, alignment=Qt.AlignHCenter | Qt.AlignTop)
subLayout = initSubLayout(self.imageLabelList[5], self.differentPixelsAlphaLabel)
layout.addLayout(subLayout, 1, 2, alignment=Qt.AlignHCenter | Qt.AlignTop)
return layout
def initLayout(self):
layout = QVBoxLayout()
layout.addLayout(self.initInfoLayout())
layout.addLayout(self.initImagesLayout())
self.setLayout(layout)
def initLoadingIndicator(self):
self.loadingIndicator = self.cm.getLoadingIndicatorClass()(parent=self, configManager=self.cm)
def pixelIndexToXY(self, pixelIndex, bytesPerPixel, width):
t = pixelIndex / bytesPerPixel
y = int(t / width)
x = int(t % width)
return (x, y)
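    # Worked example (editor note): for a 320-pixel-wide RGBA image (4 bytes per pixel),
    # pixelIndexToXY(1284, 4, 320) -> pixel 321 -> (x=1, y=1).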
def updateMarker(self):
t = deepcopy(self.diffData['deltaImageRgbData'])
img3 = Rgb888Image(*t)
### Calculate the pixel index for the subImage ###
x, y = self.pixelIndexToXY(self.pixelIndex1, self.img1.bytesPerPixel, self.img1.width)
x0 = x - self.geometry1.x
y0 = y - self.geometry1.y
indexTmp = (self.geometry1.width * y0 + x0) * 3
##################################################
img3.data[indexTmp: indexTmp + 3] = self.cm.getMarkerColor()
widgetDisplayImage(self.imageLabelList[2], img3)
def updateInfo(self):
def genHexFormatString(bytesPerPixel):
return '[%02X' + (':%02X' * (bytesPerPixel - 1)) + ']'
self.counterLabel.setText('%i of %i' % (self.index + 1, self.totalImageSets))
### Pixel Info ###
if self.pixelDiffIndex == -1:
pixelXY1String = 'PixelXY1: ' + ' '
pixelXY2String = 'PixelXY2: ' + ' '
pixelIndex1String = 'PixelIndex1: ' + ' '
pixelIndex2String = 'PixelIndex2: ' + ' '
pixelColor1String = 'PixelColor1: ' + ' '
            pixelColor2String = 'PixelColor2: ' + ' '
else:
pixel1Data = self.img1.data[self.pixelIndex1:self.pixelIndex1 + self.img1.bytesPerPixel]
pixel2Data = self.img2.data[self.pixelIndex2:self.pixelIndex2 + self.img2.bytesPerPixel]
pixelXY1String = 'PixelXY1: ' + '%i,%i' % self.pixelIndexToXY(self.pixelIndex1, self.img1.bytesPerPixel, self.img1.width)
pixelXY2String = 'PixelXY2: ' + '%i,%i' % self.pixelIndexToXY(self.pixelIndex2, self.img2.bytesPerPixel, self.img2.width)
pixelIndex1String = 'PixelIndex1: ' + str(self.pixelIndex1) + ' (in bytes)'
pixelIndex2String = 'PixelIndex2: ' + str(self.pixelIndex2) + ' (in bytes)'
pixelColor1String = 'PixelColor1: ' + genHexFormatString(self.img1.bytesPerPixel) % tuple(pixel1Data)
pixelColor2String = 'PixelColor2: ' + genHexFormatString(self.img2.bytesPerPixel) % tuple(pixel2Data)
self.pixelXY1Label.setText(pixelXY1String)
self.pixelXY2Label.setText(pixelXY2String)
self.pixelIndex1Label.setText(pixelIndex1String)
self.pixelIndex2Label.setText(pixelIndex2String)
self.pixelColor1Label.setText(pixelColor1String)
self.pixelColor2Label.setText(pixelColor2String)
##################
### Different Pixels Counts ###
differentPixelsTotal = self.diffData.get('pixelDiffCount')
differentPixelsRgb = self.diffData.get('diffPixelRgbList')
differentPixelsAlpha = self.diffData.get('diffPixelAlphaList')
differentPixelsTotalString = str(differentPixelsTotal) if differentPixelsTotal is not None else 'UNAVAILABLE'
differentPixelsRgbString = str(len(differentPixelsRgb)) if differentPixelsRgb is not None else 'UNAVAILABLE'
differentPixelsAlphaString = str(len(differentPixelsAlpha)) if differentPixelsAlpha is not None else 'UNAVAILABLE'
self.differentPixelsTotalLabel.setText('Different Pixels Total: ' + differentPixelsTotalString)
self.differentPixelsRgbLabel.setText( 'Different Pixels (RGB): ' + differentPixelsRgbString)
self.differentPixelsAlphaLabel.setText('Different Pixels (Alpha): ' + differentPixelsAlphaString)
###############################
### Image Paths ###
self.imagePath1Label.setText(truncateString(self.imagePath1, self.img1.width))
self.imagePath2Label.setText(truncateString(self.imagePath2, self.img2.width))
###################
def nextDiffPixel(self):
t = self.diffData.get('diffPixelRgbList')
if t is None or len(t) == 0: return
self.pixelDiffIndex += 1
self.pixelDiffIndex = min(self.pixelDiffIndex, len(self.diffData['diffPixelRgbList']) - 1)
self.pixelIndex1 = self.diffData['diffPixelRgbList'][self.pixelDiffIndex][0]
self.pixelIndex2 = self.diffData['diffPixelRgbList'][self.pixelDiffIndex][1]
self.updateInfo()
self.updateMarker()
def prevDiffPixel(self):
t = self.diffData.get('diffPixelRgbList')
if t is None or len(t) == 0: return
self.pixelDiffIndex -= 1
self.pixelDiffIndex = max(self.pixelDiffIndex, 0)
self.pixelIndex1 = self.diffData['diffPixelRgbList'][self.pixelDiffIndex][0]
self.pixelIndex2 = self.diffData['diffPixelRgbList'][self.pixelDiffIndex][1]
self.updateInfo()
self.updateMarker()
def loading(self):
nullImageData = None
def genNullImageData(refImg):
if nullImageData: return nullImageData
return [bytearray(self.cm.getNullColor() * refImg.width * refImg.height), refImg.width, refImg.height]
img1 = loadImage(self.imagePath1, self.cm.getNullColor())
img2 = loadImage(self.imagePath2, self.cm.getNullColor())
nullImageData1 = genNullImageData(img1)
nullImageData2 = genNullImageData(img2)
img4 = getAlphaImage(img1)
img5 = getAlphaImage(img2)
if img4 is None: img4 = nullImageData1
if img5 is None: img5 = nullImageData2
if img1.srcFileFormat == 'nullImage' or img2.srcFileFormat == 'nullImage':
data = {}
img3 = None
img6 = None
else:
data = img1.getDiff(img2, compareType=COMPARE_TYPE.FULL,
returnFailPixelList=True, colorDict=self.cm.getDeltaImageColorDict(),
geometry1=self.geometry1, geometry2=self.geometry2)
self.geometry1 = data.get('geometry1', self.geometry1)
self.geometry2 = data.get('geometry2', self.geometry2)
img3 = Rgb888Image(*data.get('deltaImageRgbData', nullImageData1))
img6 = Rgb888Image(*data.get('deltaImageAlphaData', nullImageData1))
returnData = dict(img1=img1,
img2=img2,
img3=img3,
img4=img4,
img5=img5,
img6=img6,
diffData=data)
return returnData
def draw(self, imagePath1, imagePath2, index, totalImageSets, **kwargs):
self.imagePath1 = imagePath1
self.imagePath2 = imagePath2
self.index = index
self.totalImageSets = totalImageSets
self.loadingThread = OneShotThread(oneShotFunc=self.loading)
self.loadingThread.start()
        self.loadingIndicator.start(isDoneFunc=lambda: not self.loadingThread.is_alive(),
postFunc=self.drawPart2)
def drawPart2(self):
data = self.loadingThread.returnData
self.initVars()
self.img1 = data.get('img1')
self.img2 = data.get('img2')
self.diffData = data.get('diffData')
img3 = data.get('img3')
img4 = data.get('img4')
img5 = data.get('img5')
img6 = data.get('img6')
self.updateInfo()
widgetDisplayImage(self.imageLabelList[0], self.img1)
widgetDisplayImage(self.imageLabelList[1], self.img2)
widgetDisplayImage(self.imageLabelList[3], img4)
widgetDisplayImage(self.imageLabelList[4], img5)
widgetList = [
self.imageLabelList[2],
self.imageLabelList[5],
self.differentPixelsTotalLabel,
self.differentPixelsRgbLabel,
self.differentPixelsAlphaLabel,
]
if img3 is None or img6 is None:
for widget in widgetList:
func = getattr(widget, 'hide')
func()
else:
widgetDisplayImage(self.imageLabelList[2], img3)
widgetDisplayImage(self.imageLabelList[5], img6)
for widget in widgetList:
func = getattr(widget, 'show')
func()
if self.img1.srcFileFormat == 'nullImage' or self.img2.srcFileFormat == 'nullImage':
msgBox = QMessageBox(self)
msgBox.setText('Unable to load the current image pair')
msgBox.setInformativeText('Go to the next image pair to continue reviewing the images')
msgBox.setIcon(QMessageBox.Warning)
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.exec_()
| PixelView-master | PixelView/gui/centralWidgets/compare.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2.QtCore import QPoint
from .abstractLoadingIndicator import AbstractLoadingIndicator
class FourCorners(AbstractLoadingIndicator):
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
def initVars(self, parent, **kwargs):
super().initVars(parent, **kwargs)
self.posList = [QPoint(-1, -1),
QPoint( 1, -1),
QPoint( 1, 1),
QPoint(-1, 1)]
self.spacing = QPoint(2 * self.prop.dx + 5, 2 * self.prop.dy + 5)
| PixelView-master | PixelView/gui/loadingIndicators/fourCorners.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2.QtCore import QPoint
from .abstractLoadingIndicator import AbstractLoadingIndicator
class Line(AbstractLoadingIndicator):
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
def initVars(self, parent, **kwargs):
super().initVars(parent, **kwargs)
self.posList = [
QPoint(-2, 0),
QPoint(-1, 0),
QPoint( 0, 0),
QPoint( 1, 0),
QPoint( 2, 0),
]
self.spacing = QPoint(2 * self.prop.dx + 10, 2 * self.prop.dy + 10)
| PixelView-master | PixelView/gui/loadingIndicators/line.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2.QtCore import QPoint, QRect
from PySide2.QtGui import QPainter, QBrush, QColor
from .abstractLoadingIndicator import AbstractLoadingIndicator
class Blink(AbstractLoadingIndicator):
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
center = QPoint(self.width() / 2, self.height() / 2)
painter.fillRect(self.rect(), QBrush(QColor(255, 255, 255, 160)))
margin = 20
painter.fillRect(QRect(center.x() - self.prop.dx - margin,
center.y() - self.prop.dy - margin,
(self.prop.dx + margin) * 2,
(self.prop.dy + margin) * 2),
QBrush(QColor(0, 0, 0, 255)))
self.prop.draw(painter,
QPoint(center.x(),
center.y()),
True if self.counter == 0 else False)
painter.end()
self.counter += 1
self.counter %= 2
| PixelView-master | PixelView/gui/loadingIndicators/blink.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PixelView-master | PixelView/gui/loadingIndicators/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
@enum.unique
class SHAPE(enum.Enum):
RECT = 1
ELLIPSE = 2
@enum.unique
class DIRECTION(enum.Enum):
FORWARD = 1
BACKWARD = 2
BACK_AND_FORTH = 3
| PixelView-master | PixelView/gui/loadingIndicators/common.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2.QtWidgets import QWidget
from PySide2.QtGui import QPainter, QBrush, QColor
from PySide2.QtCore import QRect, QPoint, QSize, Qt
from PixelView.utils.cli import pprint
from PixelView.gui.loadingIndicators.common import SHAPE, DIRECTION
class Prop:
def __init__(self):
self.dx = 0
self.dy = 0
self.space = 0
self.fillBrush = None
self.hollowBrush = None
self.numSockets = 0
self.shape = SHAPE.RECT
self.drawFuncDict = {
SHAPE.RECT: self.drawRect,
SHAPE.ELLIPSE: self.drawEllipse,
}
def draw(self, painter, center, isFill):
self.drawFuncDict.get(self.shape, self.drawDefault)(painter, center, isFill)
def drawEllipse(self, painter, center, isFill):
painter.setBrush(self.fillBrush if isFill else self.hollowBrush)
painter.drawEllipse(center, self.dx, self.dy)
def drawRect(self, painter, center, isFill):
painter.fillRect(center.x() - self.dx, center.y() - self.dy, self.dx * 2, self.dy * 2, self.fillBrush if isFill else self.hollowBrush)
def drawDefault(self, painter, center, isFill):
        self.drawRect(painter, center, isFill)
class AbstractLoadingIndicator(QWidget):
def __init__(self, parent=None, **kwargs):
super(AbstractLoadingIndicator, self).__init__(parent)
self.palette().setColor(self.palette().Background, Qt.transparent)
self.initVars(parent, **kwargs)
def initVars(self, parent, configManager, **kwargs):
self.cm = configManager
self.parent = parent
self.counter = 0
self.refreshRate = self.cm.getLoadingIndicatorRefreshRate()
self.prop = Prop()
self.prop.dx = self.cm.getPropDx()
self.prop.dy = self.cm.getPropDy()
self.prop.fillBrush = QBrush(QColor(*self.cm.getPropFillColor()))
self.prop.hollowBrush = QBrush(QColor(*self.cm.getPropHollowColor()))
self.prop.shape = self.cm.getPropShape()
self.posList = []
self.spacing = QPoint(0, 0)
self.direction = self.cm.getLoadingIndicatorDirection()
self.tmpIncr = 1
self.postFunc = None
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
center = QPoint(self.width() / 2, self.height() / 2)
# Dim out the background
painter.fillRect(self.rect(), QBrush(QColor(255, 255, 255, 160)))
### Add a black rectangle where the loading icon would be displayed ###
startPos = QPoint(min([i.x() for i in self.posList]),
min([i.y() for i in self.posList]))
endPos = QPoint(max([i.x() for i in self.posList]),
max([i.y() for i in self.posList]))
delta = QPoint(endPos.x() - startPos.x(),
endPos.y() - startPos.y())
marginX = 20
marginY = 20
origin = QPoint(center.x() + startPos.x() * self.spacing.x() - self.prop.dx - marginX,
center.y() + startPos.y() * self.spacing.y() - self.prop.dy - marginY)
size = QSize(delta.x() * self.spacing.x() + 2 * self.prop.dx + 2 * marginX,
delta.y() * self.spacing.y() + 2 * self.prop.dy + 2 * marginY)
painter.fillRect(QRect(origin, size), QColor(0, 0, 0, 255))
#######################################################################
for i in range(len(self.posList)):
pos = self.posList[i]
self.prop.draw(painter,
QPoint(center.x() + pos.x() * self.spacing.x(),
center.y() + pos.y() * self.spacing.y()),
True if i == self.counter else False)
painter.end()
self.counterNext()
def counterForward(self):
self.counter += 1
self.counter %= len(self.posList)
return self.counter
def counterBack(self):
self.counter -= 1
if self.counter < 0:
self.counter = len(self.posList) - 1
return self.counter
def counterBackAndForth(self):
self.counter += self.tmpIncr
if self.counter < 0:
self.counter = 1
self.tmpIncr = 1
elif self.counter >= len(self.posList):
self.counter = len(self.posList) - 2
self.tmpIncr = -1
return self.counter
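    # Example sequence (editor note): with len(posList) == 5 the drawn index visits
    # 0, 1, 2, 3, 4, 3, 2, 1, 0, 1, ... bouncing between both ends.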
def counterNext(self):
if self.direction is DIRECTION.FORWARD: return self.counterForward()
if self.direction is DIRECTION.BACKWARD: return self.counterBack()
if self.direction is DIRECTION.BACK_AND_FORTH: return self.counterBackAndForth()
# This should never be reached, but just for safety
pprint('WARNING: direction "%s" not known' % str(self.direction))
        return self.counterForward()
def isDoneFunc(self):
return False
def timerEvent(self, event):
if self.isDoneFunc():
self.hide()
self.postFunc()
return
self.update()
def start(self, isDoneFunc, postFunc):
self.isDoneFunc = isDoneFunc
self.postFunc = postFunc
self.show()
def showEvent(self, event):
self.resize(self.parent.width(), self.parent.height())
self.timer = self.startTimer(self.refreshRate)
def hideEvent(self, event):
self.killTimer(self.timer)
self.counter = 0
| PixelView-master | PixelView/gui/loadingIndicators/abstractLoadingIndicator.py |
#!/usr/bin/env python2.7
#
# Copyright (c) 2015-2021, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
#
# Collect periodic heap profiles from proxyfsd using the
# HTTP interface. This requires that proxyfsd be compiled with the
# patch included below.
#
import os
import sys
import datetime
import argparse
cmd_name = os.path.basename(sys.argv[0])
heap_profile_url = "http://localhost:6060/debug/pprof/heap"
profile_interval_sec = 30
def timestamp():
'''Return a timestamp suitable for use in filenames.
'''
now = datetime.datetime.now()
ts = ("%04d-%02d-%02d_%02d:%02d:%02d" %
(now.year, now.month, now.day, now.hour, now.minute, now.second))
return ts
def patch_print():
'''Print the patch that needs to be applied to proxyfsd to allow this
script to work.
'''
patch = '''
a/proxyfsd/daemon.go b/proxyfsd/daemon.go
index 656412a..8c441b7 100644
--- a/proxyfsd/daemon.go
+++ b/proxyfsd/daemon.go
@@ -2,6 +2,8 @@ package proxyfsd
import (
"fmt"
+ "net/http"
+ _ "net/http/pprof"
"os"
"os/signal"
"sync"
@@ -246,6 +248,11 @@ func Daemon(confFile string, confStrings []string, signalHandlerIsArmed *bool, e
wg.Done()
}()
+ go func() {
+ logger.Infof("proxyfsd.Daemon() starting debug HTTP server: %s",
+ http.ListenAndServe("localhost:6060", nil))
+ }()
+
// Arm signal handler used to indicate termination and wait on it
//
// Note: signalled chan must be buffered to avoid race with window between
'''
print "Apply this patch to proxyfsd sources and recompile:\n", patch, "\n"
return
def main():
'''
Figure out what to do and do it.
'''
parser = argparse.ArgumentParser()
parser.add_argument('--patch', action = 'store_true',
help = "Print patch for proxyfsd for profiling.")
args = parser.parse_args()
if (args.patch):
patch_print()
return 0
ts = timestamp()
print >> sys.stderr, ts, ": No action specified"
return 2
if __name__ == '__main__':
rc = main()
sys.exit(rc)
| proxyfs-development | bin/profile-proxyfsd.py |
#!/usr/bin/env python
#
# Copyright (c) 2015-2021, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
#
#
# Exercise the multipart-upload functionality of S3API using the aws
# command from the awscli package and perform some validation of the
# results.
#
# The aws command needs a file ~/.aws/config that contains the
# authentication information for the account(s) being used.
# For the proxyfs runway environment, the file can look like this:
#
# % cat ~/.aws/config
'''
[plugins]
endpoint = awscli_plugin_endpoint
[profile default]
aws_access_key_id = test:tester
aws_secret_access_key = testing
s3 =
endpoint_url = http://127.0.0.1:8080
multipart_threshold = 64MB
multipart_chunksize = 16MB
s3api =
endpoint_url = http://127.0.0.1:8080
multipart_threshold = 5MB
multipart_chunksize = 5MB
[profile swift]
aws_access_key_id = admin:admin
aws_secret_access_key = admin
s3 =
endpoint_url = http://127.0.0.1:8080
multipart_threshold = 64MB
multipart_chunksize = 16MB
s3api =
endpoint_url = http://127.0.0.1:8080
multipart_threshold = 5MB
multipart_chunksize = 5MB
'''
# The above config file defines two profiles, "default" and "swift",
# where the admin account is a separate, non-swift, account
# that can be accessed via curl at AUTH_admin.
#
# This command also assumes three files are in /tmp, available for
# uploading, named /tmp/{part01,part02,part03} which contain 10 Mbyte
# of zeros, 10 Mbyte of zeros, and 1 Mbyte of zeros, respectively.
#
# dd if=/dev/zero of=/tmp/part01 bs=1M count=10
# dd if=/dev/zero of=/tmp/part02 bs=1M count=10
# dd if=/dev/zero of=/tmp/part03 bs=1M count=1
#
from __future__ import print_function
import argparse
import os
import Queue
import sys
import subprocess32
import json
def run_s3api_cmd(sub_cmd, profile_name, bucket_name, obj_name, extra_args):
'''Create and execute an "aws s3api" sub-command using the passed
arguments (any of which can be None).
Return the return code, stdout, and stderr as a tuple.
'''
cmd = ['aws', 's3api', sub_cmd]
if profile_name is not None:
cmd += ['--profile', profile_name]
if bucket_name is not None:
cmd += ['--bucket', bucket_name]
if obj_name is not None:
cmd += ['--key', obj_name]
if extra_args is not None:
cmd += extra_args
aws_proc = subprocess32.Popen(cmd, bufsize=-1, stdout=subprocess32.PIPE, stderr=subprocess32.PIPE)
(aws_stdout, aws_stderr) = aws_proc.communicate()
rc = aws_proc.wait()
if rc != 0:
err = ("run_s3api_cmd(): command '%s' returned %d"
% (' '.join(cmd), rc))
print("%s\nstderr: %s\n" % (err, aws_stderr), file=sys.stderr)
return (err, aws_stdout, aws_stderr)
print("%s\n%s\n" % (' '.join(cmd), aws_stdout))
return (None, aws_stdout, aws_stderr)
def multipart_upload(profile, bucket, objname, file_parts):
'''
Create a new multipart-upload context (uploadId) for the object
and then upload the files specified in file_parts.
Return the JSON decoded string from the "complete-multipart-upload"
and "head-object" subcommands.
'''
if bucket is None or objname is None:
err ="multipart_upload(): bucket '%s' objname '%s' must be specified" % (bucket, objname)
print("%s\n" % (err), file=sys.stderr)
return err
if len(file_parts) < 2:
err = "multipart_upload(): must specify at least two files to upload, got %d" % (len(file_parts))
print("%s\n" % (err), file=sys.stderr)
        return (err, None, None)
for file in file_parts:
try:
fp = open(file, 'r')
fp.close()
        except IOError:
            err = "multipart_upload(): could not open file '%s' for reading" % (file)
            print("%s\n" % (err), file=sys.stderr)
            return (err, None, None)
# create the upload bucket (ignore failures if already present)
(err, aws_stdout, aws_stderr) = run_s3api_cmd(
'create-bucket', profile, bucket, None, None)
# if err is not None:
# err = "'create-bucket' failed: %s" % (err)
# print("%s\n" % (err), file=sys.stderr)
# return err
# create the upload context
(err, aws_stdout, aws_stderr) = run_s3api_cmd(
'create-multipart-upload', profile, bucket, objname, None)
if err is not None:
err = "'create-multipart-upload' failed: %s" % (err)
print("%s\n" % (err), file=sys.stderr)
        return (err, None, None)
create_resp = json.loads(aws_stdout)
upload_id = create_resp['UploadId']
# upload the parts
part_num = 0
etag_set = []
for file in file_parts:
part_num += 1
extra_args = ['--upload-id', upload_id, '--part-number', str(part_num), '--body', file]
(err, aws_stdout, aws_stderr) = run_s3api_cmd(
'upload-part', profile, bucket, objname, extra_args)
if err is not None:
err = "'upload-part' failed: %s" % (err)
print("%s\n" % (err), file=sys.stderr)
            return (err, None, None)
upload_resp = json.loads(aws_stdout)
etag = { "ETag": upload_resp['ETag'], "PartNumber": part_num }
etag_set.append(etag)
mpu_info = { "Parts": etag_set}
print("%s\n" % json.dumps(mpu_info))
# complete the multipart-upload (if there are too many parts we won't
# be able to pass them on the command line and this will break)
extra_args = ['--upload-id', upload_id, '--multipart-upload', json.dumps(mpu_info)]
(err, aws_stdout, aws_stderr) = run_s3api_cmd(
'complete-multipart-upload', profile, bucket, objname, extra_args)
if err is not None:
err = "'complete-multipart-upload' failed: %s" % (err)
print("%s\n" % (err), file=sys.stderr)
        return (err, None, None)
complete_resp = json.loads(aws_stdout)
    # We should validate the "Last-Modified" time in the response
    # headers, except it isn't visible via aws s3api; there was
    # once a bug where the time was incorrect.
# get the object attributes
(err, aws_stdout, aws_stderr) = run_s3api_cmd(
'head-object', profile, bucket, objname, None)
if err is not None:
err = "'head-object' failed: %s" % (err)
print("%s\n" % (err), file=sys.stderr)
        return (err, None, None)
head_resp = json.loads(aws_stdout)
    # TODO: should validate "LastModified" time in the HEAD response
# TODO: should validate the ACL (use 'get-object-acl')
# get the object attributes from a list-object commands;
# they should match
(err, aws_stdout, aws_stderr) = run_s3api_cmd(
'list-objects', profile, bucket, None, None)
if err is not None:
err = "'list-objects' failed: %s" % (err)
print("%s\n" % (err), file=sys.stderr)
        return (err, None, None)
list_resp = json.loads(aws_stdout)
s3_etag = complete_resp['ETag']
if s3_etag != head_resp['ETag']:
print("ERROR: ETag from complete-multipart-upload '%s' does not match ETag from head-object '%s'\n" %
(s3_etag, head_resp['ETag']))
for obj_info in list_resp['Contents']:
        if obj_info['Key'] == objname:
if s3_etag != obj_info['ETag']:
print("ERROR: ETag from complete-multipart-upload '%s' does not match ETag from list-objects '%s'\n" %
(s3_etag, obj_info['ETag']))
break
return (None, complete_resp, head_resp)
(err, proxyfs_complete_resp, proxyfs_head_resp) = multipart_upload(
"default", "multipart-upload", "testobj", ['/tmp/part01', '/tmp/part02', '/tmp/part03'])
(err, swift_complete_resp, swift_head_resp) = multipart_upload(
"swift", "multipart-upload", "testobj", ['/tmp/part01', '/tmp/part02', '/tmp/part03'])
if (proxyfs_complete_resp is not None and swift_complete_resp is not None and
        proxyfs_complete_resp['ETag'] != swift_complete_resp['ETag']):
    print("ERROR: ETag from proxyfs complete-multipart-upload '%s' does not match ETag from swift '%s'\n" %
          (proxyfs_complete_resp['ETag'], swift_complete_resp['ETag']))
sys.exit(0)
|
proxyfs-development
|
bin/aws-multipart-test.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2021, NVIDIA CORPORATION.
# SPDX-License-Identifier: Apache-2.0
#
# ProxyFS documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 20 11:16:31 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import datetime
import logging
import re
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ProxyFS'
copyright = u'2017, SwiftStack and the ProxyFS contributors'
author = u'SwiftStack and the ProxyFS contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = re.sub('^v', '', os.popen('git describe').read().strip()).rsplit('-', 1)[0]
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme_path = ['theme']
html_theme = 'swiftopensource'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProxyFSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ProxyFS.tex', u'ProxyFS Documentation',
u'SwiftStack and the ProxyFS contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'proxyfs', u'ProxyFS Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ProxyFS', u'ProxyFS Documentation',
author, 'ProxyFS', 'One line description of project.',
'Miscellaneous'),
]
|
proxyfs-development
|
docs/source/conf.py
|
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
from __future__ import print_function
import re
import sys
print("Loading Go Runtime support.", file=sys.stderr)
#http://python3porting.com/differences.html
if sys.version > '3':
xrange = range
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
goobjfile.pretty_printers = []
#
# Value wrappers
#
class SliceValue:
"Wrapper for slice values."
def __init__(self, val):
self.val = val
@property
def len(self):
return int(self.val['len'])
@property
def cap(self):
return int(self.val['cap'])
def __getitem__(self, i):
if i < 0 or i >= self.len:
raise IndexError(i)
ptr = self.val["array"]
return (ptr + i).dereference()
#
# Pretty Printers
#
class StringTypePrinter:
"Pretty print Go strings."
pattern = re.compile(r'^struct string( \*)?$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
l = int(self.val['len'])
return self.val['str'].string("utf-8", "ignore", l)
class SliceTypePrinter:
"Pretty print slices."
pattern = re.compile(r'^struct \[\]')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)[6:] # skip 'struct '
def children(self):
sval = SliceValue(self.val)
if sval.len > sval.cap:
return
for idx, item in enumerate(sval):
yield ('[{0}]'.format(idx), item)
class MapTypePrinter:
"""Pretty print map[K]V types.
Map-typed go variables are really pointers. dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^map\[.*\].*$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'map'
def to_string(self):
return str(self.val.type)
def children(self):
B = self.val['B']
buckets = self.val['buckets']
oldbuckets = self.val['oldbuckets']
flags = self.val['flags']
inttype = self.val['hash0'].type
cnt = 0
for bucket in xrange(2 ** int(B)):
bp = buckets + bucket
if oldbuckets:
oldbucket = bucket & (2 ** (B - 1) - 1)
oldbp = oldbuckets + oldbucket
oldb = oldbp.dereference()
if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
if bucket >= 2 ** (B - 1):
continue # already did old bucket
bp = oldbp
while bp:
b = bp.dereference()
for i in xrange(8):
if b['tophash'][i] != 0:
k = b['keys'][i]
v = b['values'][i]
if flags & 1:
k = k.dereference()
if flags & 2:
v = v.dereference()
yield str(cnt), k
yield str(cnt + 1), v
cnt += 2
bp = b['overflow']
class ChanTypePrinter:
"""Pretty print chan[T] types.
Chan-typed go variables are really pointers. dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^struct hchan<.*>$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)
def children(self):
# see chan.c chanbuf(). et is the type stolen from hchan<T>::recvq->first->elem
et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
ptr = (self.val.address + 1).cast(et.pointer())
for i in range(self.val["qcount"]):
j = (self.val["recvx"] + i) % self.val["dataqsiz"]
yield ('[{0}]'.format(i), (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
def matcher(val):
try:
if klass.pattern.match(str(val.type)):
return klass(val)
except Exception:
pass
return matcher
goobjfile.pretty_printers.extend([makematcher(var) for var in vars().values() if hasattr(var, 'pattern')])
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.rtype'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.rtype'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
try:
return str(val['tab'].type) == "struct runtime.itab *" and str(val['data'].type) == "void *"
except gdb.error:
pass
def is_eface(val):
try:
return str(val['_type'].type) == "struct runtime._type *" and str(val['data'].type) == "void *"
except gdb.error:
pass
def lookup_type(name):
try:
return gdb.lookup_type(name)
except gdb.error:
pass
try:
return gdb.lookup_type('struct ' + name)
except gdb.error:
pass
try:
return gdb.lookup_type('struct ' + name[1:]).pointer()
except gdb.error:
pass
def iface_commontype(obj):
if is_iface(obj):
go_type_ptr = obj['tab']['_type']
elif is_eface(obj):
go_type_ptr = obj['_type']
else:
return
return go_type_ptr.cast(gdb.lookup_type("struct reflect.rtype").pointer()).dereference()
def iface_dtype(obj):
"Decode type of the data field of an eface or iface struct."
# known issue: dtype_name decoded from runtime.rtype is "nested.Foo"
# but the dwarf table lists it as "full/path/to/nested.Foo"
dynamic_go_type = iface_commontype(obj)
if dynamic_go_type is None:
return
dtype_name = dynamic_go_type['string'].dereference()['str'].string()
dynamic_gdb_type = lookup_type(dtype_name)
if dynamic_gdb_type is None:
return
type_size = int(dynamic_go_type['size'])
uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
if type_size > uintptr_size:
dynamic_gdb_type = dynamic_gdb_type.pointer()
return dynamic_gdb_type
def iface_dtype_name(obj):
"Decode type name of the data field of an eface or iface struct."
dynamic_go_type = iface_commontype(obj)
if dynamic_go_type is None:
return
return dynamic_go_type['string'].dereference()['str'].string()
class IfacePrinter:
"""Pretty print interface values
Casts the data field to the appropriate dynamic type."""
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val['data'] == 0:
return 0x0
try:
dtype = iface_dtype(self.val)
except Exception:
return "<bad dynamic type>"
if dtype is None: # trouble looking up, print something reasonable
return "({0}){0}".format(iface_dtype_name(self.val), self.val['data'])
try:
return self.val['data'].cast(dtype).dereference()
except Exception:
pass
return self.val['data'].cast(dtype)
def ifacematcher(val):
if is_iface(val) or is_eface(val):
return IfacePrinter(val)
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
"Length of strings, slices, maps or channels"
how = ((StringTypePrinter, 'len'), (SliceTypePrinter, 'len'), (MapTypePrinter, 'count'), (ChanTypePrinter, 'qcount'))
def __init__(self):
gdb.Function.__init__(self, "len")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class GoCapFunc(gdb.Function):
"Capacity of slices or channels"
how = ((SliceTypePrinter, 'cap'), (ChanTypePrinter, 'dataqsiz'))
def __init__(self):
gdb.Function.__init__(self, "cap")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class DTypeFunc(gdb.Function):
"""Cast Interface values to their dynamic type.
For non-interface types this behaves as the identity operation.
"""
def __init__(self):
gdb.Function.__init__(self, "dtype")
def invoke(self, obj):
try:
return obj['data'].cast(iface_dtype(obj))
except gdb.error:
pass
return obj
#
# Commands
#
sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
def linked_list(ptr, linkfield):
while ptr:
yield ptr
ptr = ptr[linkfield]
class GoroutinesCmd(gdb.Command):
"List all goroutines."
def __init__(self):
gdb.Command.__init__(self, "info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, _arg, _from_tty):
# args = gdb.string_to_argv(arg)
vp = gdb.lookup_type('void').pointer()
for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
if ptr['atomicstatus'] == 6: # 'gdead'
continue
s = ' '
if ptr['m']:
s = '*'
pc = ptr['sched']['pc'].cast(vp)
# python2 will not cast pc (type void*) to an int cleanly
# instead python2 and python3 work with the hex string representation
# of the void pointer which we can parse back into an int.
# int(pc) will not work.
try:
#python3 / newer versions of gdb
pc = int(pc)
except gdb.error:
# str(pc) can return things like
# "0x429d6c <runtime.gopark+284>", so
# chop at first space.
pc = int(str(pc).split(None, 1)[0], 16)
blk = gdb.block_for_pc(pc)
print(s, ptr['goid'], "{0:8s}".format(sts[int(ptr['atomicstatus'])]), blk.function)
def find_goroutine(goid):
"""
find_goroutine attempts to find the goroutine identified by goid
and returns a pointer to the goroutine info.
@param int goid
@return ptr
"""
vp = gdb.lookup_type('void').pointer()
for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
if ptr['atomicstatus'] == 6: # 'gdead'
continue
if ptr['goid'] == goid:
return ptr
return None
def goroutine_info(ptr):
'''
Given a pointer to goroutine info clean it up a bit
and return the interesting info in a dict.
'''
gorinfo = {}
gorinfo['goid'] = ptr['goid']
gorinfo['atomicstatus'] = sts[int(ptr['atomicstatus'])]
    if gorinfo['atomicstatus'] == 'dead':
return gorinfo
vp = gdb.lookup_type('void').pointer()
gorinfo['pc_as_str'] = str(ptr['sched']['pc'].cast(vp))
gorinfo['sp_as_str'] = str(ptr['sched']['sp'].cast(vp))
# str(pc) can return things like
# "0x429d6c <runtime.gopark+284>", so
# chop at first space.
gorinfo['pc_as_int'] = int(gorinfo['pc_as_str'].split(None, 1)[0], 16)
gorinfo['sp_as_int'] = int(gorinfo['sp_as_str'], 16)
return gorinfo
class GoroutineCmd(gdb.Command):
"""Execute a gdb command in the context of goroutine <goid>.
Switch PC and SP to the ones in the goroutine's G structure,
execute an arbitrary gdb command, and restore PC and SP.
Usage: (gdb) goroutine <goid> <gdbcmd>
Use goid 0 to invoke the command on all go routines.
Note that it is ill-defined to modify state in the context of a goroutine.
Restrict yourself to inspecting values.
"""
def __init__(self):
gdb.Command.__init__(self, "goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, arg, _from_tty):
goid, cmd = arg.split(None, 1)
goid = gdb.parse_and_eval(goid)
if goid == 0:
goptr_list = SliceValue(gdb.parse_and_eval("'runtime.allgs'"))
else:
ptr = find_goroutine(goid)
if ptr is None:
print("No such goroutine: ", goid)
return
goptr_list = [ ptr ]
for ptr in goptr_list:
gor = goroutine_info(ptr)
if gor['atomicstatus'] == 'dead':
continue
print("\ngoroutine %d [%s]:" % (gor['goid'], gor['atomicstatus']))
if gor['sp_as_int'] == 0:
print("#0 %s -- stack trace unavailable (goroutine status: %s)" %
(gor['pc_as_str'], gor['atomicstatus']))
if gor['atomicstatus'] == 'running':
print("Try checking per thread stacks, i.e. 'thread apply all backtrace'")
continue
save_frame = gdb.selected_frame()
gdb.parse_and_eval('$save_sp = $sp')
gdb.parse_and_eval('$save_pc = $pc')
gdb.parse_and_eval('$sp = {0}'.format(str(gor['sp_as_int'])))
gdb.parse_and_eval('$pc = {0}'.format(str(gor['pc_as_int'])))
try:
gdb.execute(cmd)
finally:
gdb.parse_and_eval('$sp = $save_sp')
gdb.parse_and_eval('$pc = $save_pc')
save_frame.select()
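# Example of using the "goroutine" command from the gdb prompt (the goroutine
# id 12 below is hypothetical; use "info goroutines" to list real ids):
#
#   (gdb) info goroutines
#   (gdb) goroutine 12 bt
#   (gdb) goroutine 0 info locals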
class GoIfaceCmd(gdb.Command):
"Print Static and dynamic interface types"
def __init__(self):
gdb.Command.__init__(self, "iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, arg, _from_tty):
for obj in gdb.string_to_argv(arg):
try:
#TODO fix quoting for qualified variable names
obj = gdb.parse_and_eval(str(obj))
except Exception as e:
print("Can't parse ", obj, ": ", e)
continue
if obj['data'] == 0:
dtype = "nil"
else:
dtype = iface_dtype(obj)
if dtype is None:
print("Not an interface: ", obj.type)
continue
print("{0}: {1}".format(obj.type, dtype))
# TODO: print interface's methods and dynamic type's func pointers thereof.
#rsc: "to find the number of entries in the itab's Fn field look at
# itab.inter->numMethods
# i am sure i have the names wrong but look at the interface type
# and its method count"
# so Itype will start with a commontype which has kind = interface
#
# Register all convenience functions and CLI commands
#
GoLenFunc()
GoCapFunc()
DTypeFunc()
GoroutinesCmd()
GoroutineCmd()
GoIfaceCmd()
|
proxyfs-development
|
cookbooks/proxyfs/files/default/usr/local/go/src/runtime/runtime-gdb.py
|
"""
The MIT License (MIT)
Copyright (c) 2020 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import numpy as np
import os
import json
from builtins import range
import tensorrtserver.api.model_config_pb2 as model_config
from tensorrtserver.api import *
import time
def triton_inferer(ctx, input_name, output_name, batch):
batch = [batch[i].astype(np.float32) for i in range(0,batch.shape[0]) ]
input_dict = { input_name : batch }
output_dict = { output_name : (InferContext.ResultFormat.RAW)}
results = ctx.run(
inputs=input_dict,
outputs=output_dict,
        batch_size=len(batch)  # one list entry per batch element prepared above
)
return results[output_name]
def parse_model(url, protocol, model_name, batch_size, verbose=False):
"""
Check the configuration of a model to make sure it meets the
requirements
"""
ctx = ServerStatusContext(url, protocol, model_name, verbose)
server_status = ctx.get_server_status()
if model_name not in server_status.model_status:
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
if len(config.input) != 1:
raise Exception("expecting 1 input, got {}".format(len(config.input)))
if len(config.output) != 1:
raise Exception("expecting 1 output, got {}".format(len(config.output)))
input = config.input[0]
output = config.output[0]
# Model specifying maximum batch size of 0 indicates that batching
# is not supported and so the input tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# image instance is inferred at a time).
max_batch_size = config.max_batch_size
if max_batch_size == 0:
if batch_size != 1:
raise Exception("batching not supported for model '" + model_name + "'")
else: # max_batch_size > 0
if batch_size > max_batch_size:
raise Exception("expecting batch size <= {} for model {}".format(max_batch_size, model_name))
# Model input must have 3 dims, either CHW or HWC
if len(input.dims) != 3:
raise Exception(
"expecting input to have 3 dimensions, model '{}' input has {}".format(
model_name, len(input.dims)))
if input.format == model_config.ModelInput.FORMAT_NHWC:
h = input.dims[0]
w = input.dims[1]
c = input.dims[2]
else:
c = input.dims[0]
h = input.dims[1]
w = input.dims[2]
return (input.name, output.name, c, h, w, input.format, input.data_type)
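# For reference, parse_model() returns a tuple of the form
#   (input_name, output_name, c, h, w, input_format, input_datatype)
# e.g. (hypothetical values) ('INPUT__0', 'OUTPUT__0', 3, 224, 224,
# <format enum>, <dtype enum>).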
def main():
parser = argparse.ArgumentParser(description='Run inference with a model on the TRITON server')
parser.add_argument('--model', help='Model in TRITON server')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False, default='http',
help='Protocol ("http"/"grpc") used to ' +
'communicate with inference service. Default is "http".')
parser.add_argument('-H', dest='http_headers', metavar="HTTP_HEADER",
required=False, action='append',
help='HTTP headers to add to inference server requests. ' +
'Format is -H"Header:Value".')
parser.add_argument('--interval', default=1, type=int,
metavar='N', help='interval to simulate inference requests')
args = parser.parse_args()
url = args.url
protocol = ProtocolType.from_str(args.protocol)
model_name = args.model
model_version = -1
try:
print("Checking Health for model {}".format(model_name))
health_ctx = ServerHealthContext(args.url, protocol,
http_headers=args.http_headers)
print("Live: {}".format(health_ctx.is_live()))
print("Ready: {}".format(health_ctx.is_ready()))
    except Exception:
        raise RuntimeError("Could not query server health. Is the Triton server running and is the model loaded?")
batch_size = 80
input_name, output_name, c, h, w, format, dtype = parse_model(url, protocol, model_name, batch_size, verbose=True)
## Generate random inputs
input_shape = (batch_size,c,h,w)
inputs = np.random.random(input_shape).astype(np.float32)
ctx = InferContext(url, protocol, model_name, model_version, verbose=False)
request_interval = args.interval
counter = 0
while counter < 10000:
out = triton_inferer(ctx, input_name, output_name, inputs)
if counter % 100 == 0: print('Iteration {}'.format(counter))
        time.sleep(request_interval)
counter += 1
if __name__ == "__main__":
main()
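# Example invocation (the model name is hypothetical; the URL and protocol
# defaults match the argument parser above):
#
#   python sim_inference_req_triton.py --model chestxray_classifier \
#       -u localhost:8000 -i http --interval 2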
|
healthcare-on-tap-TRT-TRITON-demo-master
|
sim_inference_req_triton.py
|
"""
The MIT License (MIT)
Copyright (c) 2020 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import monai
from monai.transforms import \
Compose, LoadNiftid, AddChanneld, ScaleIntensityRanged, CropForegroundd, \
RandCropByPosNegLabeld, RandAffined, Spacingd, Orientationd, ToTensord
from monai.inferers import sliding_window_inference
from monai.networks.layers import Norm
from monai.metrics import compute_meandice
from monai.utils import set_determinism
# monai.config.print_config()
from typing import Callable, Sequence, Union
import torch
import torch.nn.functional as F
from monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size
from monai.utils import BlendMode, PytorchPadMode, fall_back_tuple
from tensorrtserver.api import InferContext

# NOTE: `ctx` is expected to be an InferContext created by the caller (for
# example in the driving notebook) before triton_inferer is invoked.
def triton_inferer(batch):
    results = ctx.run(
        { 'INPUT__0' : ([batch.cpu().numpy()]) },
        { 'OUTPUT__0' : (InferContext.ResultFormat.RAW) }
    )
    return results['OUTPUT__0'][0]
def sliding_window_inference_trtis(
inputs: torch.Tensor,
roi_size: Union[Sequence[int], int],
sw_batch_size: int,
predictor: Callable,
overlap: float = 0.25,
mode: Union[BlendMode, str] = BlendMode.CONSTANT,
padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
cval: float = 0.0,
):
"""
Sliding window inference on `inputs` with `predictor`.
When roi_size is larger than the inputs' spatial size, the input image are padded during inference.
To maintain the same spatial sizes, the output image will be cropped to the original input size.
Args:
inputs: input image to be processed (assuming NCHW[D])
roi_size: the spatial window size for inferences.
When its components have None or non-positives, the corresponding inputs dimension will be used.
if the components of the `roi_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
sw_batch_size: the batch size to run window slices.
predictor: given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`
should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D];
where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`.
overlap: Amount of overlap between scans.
mode: {``"constant"``, ``"gaussian"``}
How to blend output of overlapping windows. Defaults to ``"constant"``.
- ``"constant``": gives equal weight to all predictions.
- ``"gaussian``": gives less weight to predictions on edges of windows.
padding_mode: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}
Padding mode when ``roi_size`` is larger than inputs. Defaults to ``"constant"``
See also: https://pytorch.org/docs/stable/nn.functional.html#pad
cval: fill value for 'constant' padding mode. Default: 0
Raises:
NotImplementedError: inputs must have batch_size=1.
Note:
- input must be channel-first and have a batch dim, support both spatial 2D and 3D.
- currently only supports `inputs` with batch_size=1.
"""
num_spatial_dims = len(inputs.shape) - 2
assert 0 <= overlap < 1, "overlap must be >= 0 and < 1."
# determine image spatial size and batch size
# Note: all input images must have the same image size and batch size
image_size_ = list(inputs.shape[2:])
batch_size = inputs.shape[0]
# TODO: Enable batch sizes > 1 in future
if batch_size > 1:
raise NotImplementedError("inputs must have batch_size=1.")
roi_size = fall_back_tuple(roi_size, image_size_)
# in case that image size is smaller than roi size
image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))
pad_size = []
for k in range(len(inputs.shape) - 1, 1, -1):
diff = max(roi_size[k - 2] - inputs.shape[k], 0)
half = diff // 2
pad_size.extend([half, diff - half])
inputs = F.pad(inputs, pad=pad_size, mode=PytorchPadMode(padding_mode).value, value=cval)
scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)
# Store all slices in list
slices = dense_patch_slices(image_size, roi_size, scan_interval)
slice_batches = []
for slice_index in range(0, len(slices), sw_batch_size):
slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
input_slices = []
for curr_index in slice_index_range:
curr_slice = slices[curr_index]
if len(curr_slice) == 3:
input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1], curr_slice[2]])
else:
input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1]])
slice_batches.append(torch.stack(input_slices))
# Perform predictions
output_rois = list()
    for data in slice_batches:
        n_valid = data.shape[0]
        if n_valid != sw_batch_size:
            # zero-pad the batch dimension up to sw_batch_size, run inference,
            # then keep only the predictions for the real (unpadded) slices
            pad_dim = sw_batch_size - n_valid
            data = F.pad(input=data, pad=(0, 0, 0, 0, 0, 0, 0, 0, 0, pad_dim), mode='constant', value=0)
            seg_prob = predictor(data)[:n_valid]
        else:
            seg_prob = predictor(data)  # batched patch segmentation
output_rois.append(seg_prob)
# stitching output image
output_classes = output_rois[0].shape[1]
output_shape = [batch_size, output_classes] + list(image_size)
# Create importance map
importance_map = compute_importance_map(get_valid_patch_size(image_size, roi_size), mode=mode, device=inputs.device)
# allocate memory to store the full output and the count for overlapping parts
output_image = torch.zeros(output_shape, dtype=torch.float32, device=inputs.device)
count_map = torch.zeros(output_shape, dtype=torch.float32, device=inputs.device)
for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)):
slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
# store the result in the proper location of the full output. Apply weights from importance map.
for curr_index in slice_index_range:
curr_slice = slices[curr_index]
if len(curr_slice) == 3:
output_image[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += (
importance_map * output_rois[window_id][curr_index - slice_index, :]
)
count_map[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += importance_map
else:
output_image[0, :, curr_slice[0], curr_slice[1]] += (
importance_map * output_rois[window_id][curr_index - slice_index, :]
)
count_map[0, :, curr_slice[0], curr_slice[1]] += importance_map
# account for any overlapping sections
output_image /= count_map
if num_spatial_dims == 3:
return output_image[
...,
pad_size[4] : image_size_[0] + pad_size[4],
pad_size[2] : image_size_[1] + pad_size[2],
pad_size[0] : image_size_[2] + pad_size[0],
]
return output_image[
..., pad_size[2] : image_size_[0] + pad_size[2], pad_size[0] : image_size_[1] + pad_size[0]
] # 2D
def _get_scan_interval(image_size, roi_size, num_spatial_dims: int, overlap: float):
assert len(image_size) == num_spatial_dims, "image coord different from spatial dims."
assert len(roi_size) == num_spatial_dims, "roi coord different from spatial dims."
scan_interval = []
for i in range(num_spatial_dims):
if roi_size[i] == image_size[i]:
scan_interval.append(int(roi_size[i]))
else:
# scan interval is (1-overlap)*roi_size
scan_interval.append(int(roi_size[i] * (1 - overlap)))
return tuple(scan_interval)
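# A minimal usage sketch (assumptions: an InferContext named `ctx` already
# exists for triton_inferer above, and a single NCDHW volume of this shape):
#
#   volume = torch.rand(1, 1, 96, 96, 96)   # batch size must be 1
#   seg = sliding_window_inference_trtis(volume, roi_size=(64, 64, 64),
#                                        sw_batch_size=4,
#                                        predictor=triton_inferer, overlap=0.25)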
|
healthcare-on-tap-TRT-TRITON-demo-master
|
lib/triton_utils.py
|
healthcare-on-tap-TRT-TRITON-demo-master
|
lib/__init__.py
|
|
"""
The MIT License (MIT)
Copyright (c) 2020 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import numpy as np
import os
from pathlib import Path
from PIL import Image
import torch
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision.transforms import transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
from matplotlib import patches, patheffects
import matplotlib.gridspec as gridspec
class CXRDataset(Dataset):
"""CXR dataset."""
def __init__(self, dataset_info, transform=None, is_training = True):
"""
Args:
dataset_info (string): Dataset JSON file with all the details
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.dataset_info = dataset_info['training']
if not is_training:
self.dataset_info = dataset_info['testing'][:8000]
self.transform = transform
def __len__(self):
return len(self.dataset_info)
def __getitem__(self, idx):
img_name = self.dataset_info[idx]['image']
image = np.array(Image.open(prescaled_path / img_name))
if image.ndim == 3:
image = image[:,:,0]
image = torch.tensor(image).expand(3, 256, 256)
label = np.asarray(self.dataset_info[idx]['label'], dtype='int8')
if self.transform:
image = self.transform(image)
return (image,label)
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
def get_test_loader_TTA(data, batch_size, workers=5, _worker_init_fn=None, shuffle=True, is_training=False):
full_dataset = CXRDataset(data,transform=transforms.Compose([
transforms.ToPILImage(),
transforms.TenCrop((224)),
# transforms.Resize((224,224)),
# transforms.ToTensor() #Too slow
#normalize,
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops]))
]), is_training=is_training)
test_loader = torch.utils.data.DataLoader(full_dataset,
batch_size=batch_size, shuffle=False,
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True)
return test_loader
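# Hypothetical usage (the JSON path is an assumption; the file is expected to
# hold the 'training'/'testing' lists consumed by CXRDataset above):
#
#   with open('/workspace/data/ChestXray14/dataset.json') as f:
#       info = json.load(f)
#   loader = get_test_loader_TTA(info, batch_size=8, workers=4)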
def show_images(data,targs,labels,batch_size):
columns = 4
rows = (batch_size + 1) // (columns)
fig = plt.figure(figsize = (16,(16 // columns) * rows))
gs = gridspec.GridSpec(rows, columns)
for j in range(rows*columns):
ax = plt.subplot(gs[j])
plt.axis("off")
        rand_idx = np.random.randint(j*10, j*10 + 10)  # each input image contributes 10 TenCrop views
img = data[rand_idx].cpu().squeeze()
targ_lab = [labels[str(idx)] for idx,xx in enumerate(targs[j]) if xx!=0]
if targ_lab == []: targ_lab = ['Normal']
ax = show_img_title(img[0], targ_lab, figsize=(10,10), ax=ax)
def show_img_title(im, targs, figsize=None, ax=None):
if not ax: fig,ax = plt.subplots(figsize=figsize)
ax.imshow(im, cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title(targs, fontsize = 14)
return ax
def list_files(startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
prescaled_path = Path('/workspace/data/ChestXray14/images_prescaled/')
|
healthcare-on-tap-TRT-TRITON-demo-master
|
lib/dataset_utils.py
|
"""
The MIT License (MIT)
Copyright (c) 2020 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import tensorrt as trt
def build_engine(args):
print('Loading custom plugins')
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
print('Building TRT engine from ONNX file:', args.model)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 8 << 30
        config = builder.create_builder_config()  # required by the FP16 flags and build_engine() below
# Specifying runtime Dimensions
#
# profile = builder.create_optimization_profile()
# profile.set_shape("input", (1 , 3, 256, 256), (4 , 3, 256, 256), (8 , 3, 256, 256))
# config = builder.create_builder_config()
# config.add_optimization_profile(profile)
if args.fp16:
print('Using FP16')
config.set_flag(trt.BuilderFlag.FP16)
            config.set_flag(trt.BuilderFlag.STRICT_TYPES) # probably don't need/want this flag anyway
with open(args.model, 'rb') as model:
if not parser.parse(model.read()):
print('Throwing an Error')
for error in range(parser.num_errors):
print (parser.get_error(error))
return None
else:
print('Parsing Model')
engine = builder.build_engine(network=network, config=config)
print(engine)
return engine
def save_engine(engine, engine_dest_path):
print('Saving the engine file at:', engine_dest_path)
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def main():
parser = argparse.ArgumentParser(description='Convert onnx model to TensorRT engine.')
parser.add_argument('--model', help='The path to the .onnx model file')
parser.add_argument('--output', default='.', help='The path save the .engine file')
parser.add_argument('--fp16',
action='store_true',
help='Do conversion in fp16 mode.')
args = parser.parse_args()
engine = build_engine(args)
if engine is not None:
save_engine(engine, args.output)
else:
        print('Engine build failed; see the parser/builder messages above.')
if __name__ == '__main__':
main()
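# Example invocation (hypothetical file names):
#   python lib/onnx_to_tensorrt7.py --model model.onnx --output model_fp16.engine --fp16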
|
healthcare-on-tap-TRT-TRITON-demo-master
|
lib/onnx_to_tensorrt7.py
|
# Environment variables used during build:
#
# MAX_JOBS
# maximum number of compile jobs we should use to compile your code
#
# build argument:
#
# --cmake-only
# Only generate ./build directory with cmake setup
#
# --no-python
# Skips python API target `libnvfuser.so`, i.e. `_C.cpython-xxx.so`
#
# --no-test
# Skips cpp tests `nvfuser_tests`
#
# --no-benchmark
# Skips benchmark target `nvfuser_bench`
#
# --no-ninja
# In case you want to use make instead of ninja for build
#
# --build-with-ucc
# Build nvfuser with UCC support. You may need to specify environment variables of UCC_HOME, UCC_DIR, UCX_HOME, UCX_DIR.
#
# --debug
# Building nvfuser in debug mode
#
# --debinfo
# Building nvfuser in release mode with debug info, a.k.a. RelwithDebInfo
#
# -version-tag=TAG
# Specify the tag for build nvfuser version, this is used for pip wheel
# package nightly where we might want to add a date tag
# nvfuser-VERSION+TAG+gitSHA1-....-whl
#
# -install_requires=pkg0[,pkg1...]
# this is used for pip wheel build to specify package required for install
# e.g. -install_requires=nvidia-cuda-nvrtc-cu12
#
# -wheel-name=NAME
# Specify the wheel name this is used for pip wheel package where we want
# to identify the cuda toolkit version
#
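# Example invocations (hypothetical; the flags and the MAX_JOBS variable are
# described above, and the -install_requires value is the one given as an
# example there):
#
#   MAX_JOBS=8 python setup.py install --no-test --no-benchmark
#   python setup.py bdist_wheel -wheel-name=nvfuser-cu121 \
#       -version-tag=20240101 -install_requires=nvidia-cuda-nvrtc-cu12
#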
import multiprocessing
import os
import shutil
import subprocess
import sys
import setuptools
import setuptools.command.build_ext
from setuptools import Extension, setup
# pick args used by this script
CMAKE_ONLY = False
BUILD_SETUP = True
NO_PYTHON = False
NO_TEST = False
NO_BENCHMARK = False
NO_NINJA = False
BUILD_WITH_UCC = False
PATCH_NVFUSER = True
OVERWRITE_VERSION = False
VERSION_TAG = None
BUILD_TYPE = "Release"
WHEEL_NAME = "nvfuser"
INSTALL_REQUIRES = []
forward_args = []
for i, arg in enumerate(sys.argv):
if arg == "--cmake-only":
CMAKE_ONLY = True
continue
if arg == "--no-python":
NO_PYTHON = True
continue
if arg == "--no-test":
NO_TEST = True
continue
if arg == "--no-benchmark":
NO_BENCHMARK = True
continue
if arg == "--no-ninja":
NO_NINJA = True
continue
if arg == "--build-with-ucc":
BUILD_WITH_UCC = True
continue
if arg == "--debug":
BUILD_TYPE = "Debug"
continue
if arg == "--debinfo":
BUILD_TYPE = "RelwithDebInfo"
continue
if arg.startswith("-install_requires="):
INSTALL_REQUIRES = arg.split("=")[1].split(",")
continue
if arg.startswith("-version-tag="):
OVERWRITE_VERSION = True
VERSION_TAG = arg.split("=")[1]
continue
if arg.startswith("-wheel-name="):
WHEEL_NAME = arg.split("=")[1]
continue
if arg in ["clean"]:
# only disables BUILD_SETUP, but keep the argument for setuptools
BUILD_SETUP = False
if arg in ["bdist_wheel"]:
# bdist_wheel doesn't install entry-points, so we can't really patch it yet
PATCH_NVFUSER = False
forward_args.append(arg)
sys.argv = forward_args
def get_cmake_bin():
# TODO: double check cmake version here and retrieve later version if necessary
return "cmake"
class clean(setuptools.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import glob
with open(".gitignore", "r") as f:
ignores = f.read()
for entry in ignores.split("\n"):
# ignore comment in .gitignore
if len(entry) >= 1 and entry[0] != "#":
for filename in glob.glob(entry):
print("removing: ", filename)
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
class build_ext(setuptools.command.build_ext.build_ext):
def build_extension(self, ext):
if ext.name == "nvfuser._C":
# Copy files on necessity.
filename = self.get_ext_filename(self.get_ext_fullname(ext.name))
fileext = os.path.splitext(filename)[1]
libnvfuser_path = os.path.join("./nvfuser/lib", f"libnvfuser{fileext}")
assert os.path.exists(libnvfuser_path)
install_dst = os.path.join(self.build_lib, filename)
if not os.path.exists(os.path.dirname(install_dst)):
os.makedirs(os.path.dirname(install_dst))
self.copy_file(libnvfuser_path, install_dst)
else:
super().build_extension(ext)
class concat_third_party_license:
def __init__(self, directory="third_party"):
self.license_file = "LICENSE"
self.directory = directory
def __enter__(self):
# read original license file
with open(self.license_file, "r") as f:
self.nvfuser_license_txt = f.read()
licenses = {"LICENSE", "LICENSE.txt", "LICENSE.rst", "COPYING.BSD"}
# aggregated license, we key on project name
aggregated_license = {}
for root, dirs, files in os.walk(self.directory):
license = list(licenses & set(files))
if license:
project_name = root.split("/")[-1]
# let's worry about multiple license when we see it.
assert len(license) == 1
license_entry = os.path.join(root, license[0])
if project_name in aggregated_license:
# Only add it if the license is different
aggregated_license[project_name].append(license_entry)
else:
aggregated_license[project_name] = [license_entry]
return aggregated_license
def __exit__(self, exception_type, exception_value, traceback):
# restore original license file
with open(self.license_file, "w") as f:
f.write(self.nvfuser_license_txt)
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
build_whl = None
else:
class build_whl(bdist_wheel):
def run(self):
with concat_third_party_license() as tp_licenses:
if len(tp_licenses) != 0:
with open("LICENSE", "a") as f:
f.write("\n\n")
f.write(
"NVIDIA/fuser depends on libraries with license listed below:"
)
for project_name, license_files in tp_licenses.items():
# check all license files are identical
with open(license_files[0], "r") as f:
license_ref = f.read()
def check_file(file_name):
with open(file_name, "r") as f:
return f.read() == license_ref
identical_flag = all(map(check_file, license_files[1:]))
if not identical_flag:
raise RuntimeError(
"inconsistent license found for project: ",
project_name,
" check its license files under: ",
license_files,
)
with open("LICENSE", "a") as f:
f.write("\n\nProject Name: " + project_name)
f.write("\nLicense Files:\n")
for file_name in license_files:
f.write("\t" + file_name)
f.write("\n" + license_ref)
# generate whl before we restore LICENSE
super().run()
def version_tag():
from tools.gen_nvfuser_version import get_version
version = get_version()
if OVERWRITE_VERSION:
version = version.split("+")[0]
if len(VERSION_TAG) != 0:
# use "." to be pypi friendly
version = ".".join([version, VERSION_TAG])
return version
from tools.memory import get_available_memory_gb
def cmake(build_dir: str = "", install_prefix: str = "./nvfuser"):
# make build directories
cwd = os.path.dirname(os.path.abspath(__file__))
cmake_build_dir = os.path.join(cwd, "build" if not build_dir else build_dir)
if not os.path.exists(cmake_build_dir):
os.makedirs(cmake_build_dir)
from tools.gen_nvfuser_version import get_pytorch_cmake_prefix
# this is used to suppress import error.
# so we can get the right pytorch prefix for cmake
import logging
logger = logging.getLogger("nvfuser")
logger_level = logger.getEffectiveLevel()
logger.setLevel(logging.CRITICAL)
pytorch_cmake_config = "-DCMAKE_PREFIX_PATH=" + get_pytorch_cmake_prefix()
logger.setLevel(logger_level)
# generate cmake directory
cmd_str = [
get_cmake_bin(),
pytorch_cmake_config,
"-DCMAKE_BUILD_TYPE=" + BUILD_TYPE,
f"-DCMAKE_INSTALL_PREFIX={install_prefix}",
"-B",
cmake_build_dir,
]
if BUILD_WITH_UCC:
cmd_str.append("-DNVFUSER_STANDALONE_BUILD_WITH_UCC=ON")
if not NO_NINJA:
cmd_str.append("-G")
cmd_str.append("Ninja")
if not NO_TEST:
cmd_str.append("-DBUILD_TEST=ON")
if not NO_PYTHON:
cmd_str.append("-DBUILD_PYTHON=ON")
cmd_str.append(f"-DPython_EXECUTABLE={sys.executable}")
if not NO_BENCHMARK:
cmd_str.append("-DBUILD_NVFUSER_BENCHMARK=ON")
cmd_str.append(".")
print(f"Configuring CMake with {' '.join(cmd_str)}")
subprocess.check_call(cmd_str)
max_jobs = multiprocessing.cpu_count()
    mem_gb_per_task = 3  # Compilation of nvFuser source code currently takes ~3GB of memory per task; adjust this value if that changes in the future.
available_mem = get_available_memory_gb()
if available_mem > 0:
max_jobs_mem = int(available_mem / mem_gb_per_task)
max_jobs = min(max_jobs, max_jobs_mem)
if not CMAKE_ONLY:
# build binary
max_jobs = os.getenv("MAX_JOBS", str(max_jobs))
print(f"Using {max_jobs} jobs for compilation")
cmd_str = [
get_cmake_bin(),
"--build",
cmake_build_dir,
"--target",
"install",
"--",
"-j",
max_jobs,
]
subprocess.check_call(cmd_str)
def main():
# NOTE(crcrpar): Deliberately build basically two dynamic libraries here so that they can
# be treated as "nvfuser_package_data". This function call will put the two of "nvfuser" and
# "nvfuser_codegen" into "./nvfuser/lib", and the former will be "nvfuser._C".
if BUILD_SETUP:
cmake()
if not CMAKE_ONLY:
# NOTE: package include files for cmake
        # TODO(crcrpar): Better to avoid hardcoding `libnvfuser_codegen.so`;
        # this might be handled by using `exclude_package_data`.
nvfuser_package_data = [
"lib/libnvfuser_codegen.so",
"include/nvfuser/*.h",
"include/nvfuser/struct.inl",
"include/nvfuser/C++20/type_traits",
"include/nvfuser/device_lower/*.h",
"include/nvfuser/device_lower/analysis/*.h",
"include/nvfuser/device_lower/pass/*.h",
"include/nvfuser/dynamic_type/*",
"include/nvfuser/dynamic_type/C++20/*",
"include/nvfuser/kernel_db/*.h",
"include/nvfuser/multidevice/*.h",
"include/nvfuser/ops/*.h",
"include/nvfuser/ir/*.h",
"include/nvfuser/python_frontend/*.h",
"include/nvfuser/scheduler/*.h",
"include/nvfuser/serde/*.h",
"include/nvfuser/flatbuffers/*.h",
"share/cmake/nvfuser/NvfuserConfig*",
"contrib/*",
"contrib/nn/*",
# TODO(crcrpar): it'd be better to ship the following two binaries.
# Would need some change in CMakeLists.txt.
# "bin/nvfuser_tests",
# "bin/nvfuser_bench"
]
setup(
name=WHEEL_NAME,
version=version_tag(),
url="https://github.com/NVIDIA/Fuser",
description="A Fusion Code Generator for NVIDIA GPUs (commonly known as 'nvFuser')",
packages=["nvfuser", "nvfuser_python_utils"],
ext_modules=[Extension(name="nvfuser._C", sources=[])],
license_files=("LICENSE",),
cmdclass={
"bdist_wheel": build_whl,
"build_ext": build_ext,
"clean": clean,
},
package_data={
"nvfuser": nvfuser_package_data,
},
install_requires=INSTALL_REQUIRES,
            extras_require={
"test": ["numpy", "expecttest", "pytest"],
},
entry_points={
"console_scripts": [
"patch-nvfuser = nvfuser_python_utils:patch_installation",
],
},
license="BSD-3-Clause",
)
if BUILD_SETUP and PATCH_NVFUSER:
sys.path.append("./nvfuser_python_utils")
from patch_nvfuser import patch_installation
patch_installation()
if __name__ == "__main__":
main()
|
Fuser-main
|
setup.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
import ast
import typing
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--symbolic_sizes", nargs="+", type=int)
cmd_args = parser.parse_args()
symbolic_sizes_index = 0
input = sys.stdin.read()
lines = input.strip().splitlines()
is_aten = False
def parse(l: str):
ast_ = ast.parse(l)
assert len(ast_.body) == 1
return ast_.body[0]
def replace_name(name: str):
ops_prefix = "fd.ops."
if name.startswith("T"):
if is_aten:
return f"t{name[1:]}"
else:
return f"tv{name[1:]}"
elif name.startswith("S"):
return f"s{name[1:]}"
elif name.startswith(ops_prefix):
op = name[len(ops_prefix) :]
if is_aten:
return "at::" + op
else:
if op == "cast":
return "castOp"
if op == "var_mean":
return "variance_mean"
return op
elif name == "fd.add_output":
if is_aten:
return "outputs.push_back"
else:
return "fusion->addOutput"
elif name == "fd.define_scalar":
if not is_aten:
return "IrBuilder::create<Val>"
return name
def tocppstr(x):
if isinstance(x, bool):
return "true" if x else "false"
return str(x)
def list2vector(l: typing.Union[ast.List, list]):
if isinstance(l, ast.List):
l = eval(ast.unparse(l))
l = [tocppstr(x) for x in l]
l = "{" + ", ".join(l) + "}"
return ast.Name(l)
def handle_call(l: ast.Call):
func = ast.Name(replace_name(ast.unparse(l.func)))
args = handle(l.args)
keywords = l.keywords
if func.id == "IrBuilder::create<Val>":
assert len(args) == 1
arg = args[0]
assert isinstance(arg, ast.Constant)
arg = arg.value
if len(keywords) > 0:
keyword = keywords[0]
assert keyword.arg == "dtype"
value = ast.unparse(keyword.value).replace(".", "::")
value = ast.Name(value)
args.append(value)
keywords = []
elif func.id == "fd.define_scalar":
assert is_aten
arg = args[0]
assert isinstance(arg, ast.Constant)
return arg
elif func.id == "castOp":
assert len(args) == 1
assert len(keywords) == 1
keyword = keywords[0]
assert isinstance(keyword, ast.keyword)
assert keyword.arg == "dtype"
value = ast.unparse(keyword.value).replace(".", "::")
value = ast.Name(value)
args.insert(0, value)
keywords = []
elif func.id == "at::cast":
assert is_aten
assert len(args) == 1
assert len(keywords) == 1
keyword = keywords[0]
assert isinstance(keyword, ast.keyword)
assert keyword.arg == "dtype"
value = ast.unparse(keyword.value).replace("DataType.", "ScalarType::")
value = ast.Name(value)
func = ast.Attribute(args[0], "to")
args[0] = value
keywords = []
elif func.id == "view" or func.id == "at::view":
assert len(args) == 1
assert len(keywords) == 2
original_shape = keywords[0]
assert original_shape.arg == "original_shape"
original_shape = list2vector(original_shape.value)
new_shape = keywords[1]
assert new_shape.arg == "new_shape"
new_shape = list2vector(new_shape.value)
if is_aten:
func = ast.Attribute(args[0], "view")
args = [new_shape]
else:
args.extend([original_shape, new_shape])
keywords = []
elif func.id == "fd.define_tensor":
assert len(keywords) == 3
assert len(args) == 0
symbolic_sizes = keywords[0]
assert symbolic_sizes.arg == "symbolic_sizes"
symbolic_sizes_val = symbolic_sizes.value
ndims = len(symbolic_sizes_val.elts)
symbolic_sizes = list2vector(symbolic_sizes_val)
contiguous = keywords[1]
assert contiguous.arg == "contiguous"
contiguous = contiguous.value
assert ndims == len(contiguous.elts)
contiguous = list2vector(contiguous)
dtype = keywords[2]
assert dtype.arg == "dtype"
dtype = ast.unparse(dtype.value)
if is_aten:
sizes = symbolic_sizes
if cmd_args.symbolic_sizes is not None:
sizes = eval(ast.unparse(symbolic_sizes_val))
for i, s in enumerate(sizes):
if s == -1:
global symbolic_sizes_index
sizes[i] = cmd_args.symbolic_sizes[symbolic_sizes_index]
symbolic_sizes_index += 1
sizes = list2vector(sizes)
result = ast.Call(ast.Name("at::randn"), [sizes, ast.Name("options")], [])
if dtype != "DataType.Float":
to = ast.Attribute(result, "to")
result = ast.Call(
to, [ast.Name(dtype.replace("DataType.", "ScalarType::"))], []
)
if "false" in contiguous.id:
contig = ast.Attribute(result, "set_contiguous")
result = ast.Call(contig, [contiguous], [])
return result
else:
builder = ast.Name("TensorViewBuilder()")
ndims_call = ast.Call(
ast.Attribute(builder, "ndims"), [ast.Constant(ndims)], []
)
shape_call = ast.Call(
ast.Attribute(ndims_call, "shape"), [symbolic_sizes], []
)
contig_call = ast.Call(
ast.Attribute(shape_call, "contiguity"), [contiguous], []
)
dtype_call = ast.Call(
ast.Attribute(contig_call, "dtype"),
[ast.Name(dtype.replace(".", "::"))],
[],
)
build_call = ast.Call(ast.Attribute(dtype_call, "build"), [], [])
return build_call
elif func.id == "broadcast_in_dim" or func.id == "at::broadcast_in_dim":
assert len(keywords) == 2
assert len(args) == 1
output_shape = keywords[0]
assert output_shape.arg == "output_shape"
output_shape = output_shape.value
output_shape = eval(ast.unparse(output_shape))
broadcast_dims = keywords[1]
assert broadcast_dims.arg == "broadcast_dims"
broadcast_dims = broadcast_dims.value
broadcast_dims = eval(ast.unparse(broadcast_dims))
n_out_dims = len(output_shape)
is_broadcast = [True] * n_out_dims
for orig_dim in broadcast_dims:
is_broadcast[orig_dim] = False
if is_aten:
result = args[0]
for i, b in enumerate(is_broadcast):
if b:
result = ast.Call(
ast.Attribute(result, "unsqueeze"), [ast.Constant(i)], []
)
result = ast.Call(
ast.Attribute(result, "expand"), [list2vector(output_shape)], []
)
else:
result = ast.Call(
ast.Name("broadcast"), [args[0], list2vector(is_broadcast)], []
)
result = ast.Call(
ast.Name("expand"),
[
result,
list2vector([f"IrBuilder::create<Val>({x})" for x in output_shape]),
],
[],
)
return result
elif func.id == "at::var_mean" or func.id == "variance_mean":
assert len(args) == 1
assert len(keywords) == 3
axes = keywords[0]
assert isinstance(axes, ast.keyword)
assert axes.arg == "axes"
axes = list2vector(axes.value)
correction = keywords[1]
assert isinstance(correction, ast.keyword)
assert correction.arg == "correction"
correction = correction.value
keepdim = keywords[2]
assert isinstance(keepdim, ast.keyword)
assert keepdim.arg == "keepdim"
keepdim = keepdim.value
assert isinstance(keepdim, ast.Constant)
keepdim = ast.Name(tocppstr(keepdim.value))
args.extend([axes, correction, keepdim])
keywords = []
return ast.Call(func, args, keywords)
def handle(l):
if isinstance(l, list):
result = []
for item in l:
result.append(handle(item))
return result
elif isinstance(l, ast.Assign):
create = ast.Assign(handle(l.targets), handle(l.value), l.type_comment)
if len(create.targets) == 1 and isinstance(create.targets[0], ast.Tuple):
targets = create.targets[0].elts
tuple_name = ast.Name("_".join([x.id for x in targets]))
create.targets[0] = tuple_name
result = [create]
for i, n in enumerate(targets):
value = ast.Call(ast.Name(f"std::get<{i}>"), [tuple_name], [])
result.append(ast.Assign([n], value))
return result
is_define_tensor = (
isinstance(l.value, ast.Call)
and ast.unparse(l.value.func) == "fd.define_tensor"
)
if is_define_tensor:
if is_aten:
func = "inputs.push_back"
else:
func = "fusion->addInput"
assert len(create.targets) == 1
add = ast.Call(ast.Name(func), [create.targets[0]], [])
return [create, add]
else:
return create
elif isinstance(l, ast.Name):
return ast.Name(replace_name(l.id), l.ctx)
elif isinstance(l, ast.Call):
return handle_call(l)
elif isinstance(l, ast.Expr):
return ast.Expr(handle(l.value))
elif isinstance(l, ast.Tuple):
return ast.Tuple([handle(x) for x in l.elts])
return l
def ast2str(l):
if isinstance(l, ast.Assign):
assert len(l.targets) == 1
return f"auto {ast2str(l.targets[0])} = {ast2str(l.value)}"
l.lineno = 0
return ast.unparse(l)
test_str = """TEST_F(NVFuserTest, FusionGeneratedTest_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
{
"""
for l in lines:
l = parse(l)
l = handle(l)
if not isinstance(l, list):
l = [l]
for x in l:
test_str += f" {ast2str(x)};\n"
test_str += """ }
auto options = at::TensorOptions().dtype(kFloat).device(at::kCUDA, 0);
std::vector<IValue> inputs;
std::vector<Tensor> outputs;
{
"""
is_aten = True
for l in lines:
l = parse(l)
l = handle(l)
if not isinstance(l, list):
l = [l]
for x in l:
test_str += f" {ast2str(x)};\n"
test_str += """ }
FusionExecutorCache fec(std::move(fusion_ptr));
auto cg_outputs = fec.runFusionWithInputs(inputs);
testValidate(fusion, cg_outputs, inputs, outputs, __LINE__, __FILE__);
}"""
print(test_str)
|
Fuser-main
|
tools/cpp-repro-gen.py
|
import os
ext_map = {
"cpp": ("c", "cpp", "cu", "cc", "cuh", "h"),
"py": ("py",),
"txt": ("txt",),
}
licence_style_0 = """\
// clang-format off
/*
* SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
// clang-format on
"""
licence_style_1 = """\
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
"""
header_license = {
"cpp": licence_style_0,
"py": licence_style_1,
"txt": licence_style_1,
}
exclude_list = (
"./tools/update_copyright.py",
"./examples/sinh_libtorch/main.cpp",
"./version.txt",
"./test/main.cpp",
"Dependencies.cmake",
"FlatBuffers.cmake",
# lint adapters are taken from pytorch
"tools/linter/adapters/black_linter.py",
"tools/linter/adapters/clangformat_linter.py",
"tools/linter/adapters/clangtidy_linter.py",
"tools/linter/adapters/exec_linter.py",
"tools/linter/adapters/flake8_linter.py",
"tools/linter/adapters/grep_linter.py",
"tools/linter/adapters/mypy_linter.py",
"tools/linter/adapters/newlines_linter.py",
"tools/linter/adapters/pip_init.py",
"tools/linter/adapters/README.md",
"tools/linter/adapters/s3_init_config.json",
"tools/linter/adapters/s3_init.py",
)
def get_exclusions():
parent_path = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/..")
print(parent_path)
return [os.path.abspath(os.path.join(parent_path, f)) for f in exclude_list]
def has_licence(file_handle, licence_str):
header_content = file_handle.read(len(licence_str))
file_handle.seek(0, 0)
return header_content.startswith(licence_str)
def update_licence(file_handle, licence_str):
if not has_licence(file_handle, licence_str):
content = file_handle.read()
file_handle.seek(0, 0)
file_handle.write(licence_str + content)
return True
return False
def update_files(root_path):
exclusions = get_exclusions()
print(exclusions)
for root, dirs, files in os.walk(root_path):
for file_name in files:
abs_file = os.path.abspath(os.path.join(root, file_name))
print(abs_file)
if file_name[0] == "." or abs_file in exclusions:
continue
file_ext = file_name.split(".")[-1]
for k, v in ext_map.items():
if file_ext in v:
licence_str = header_license[k]
with open(abs_file, "r+") as file_handle:
if update_licence(file_handle, licence_str):
print("attached licence header to ", abs_file)
if __name__ == "__main__":
update_files(".")
|
Fuser-main
|
tools/update_copyright.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
import subprocess
import sys
from pathlib import Path
UNKNOWN = "Unknown"
nvfuser_root = Path(__file__).parent.parent
# note that this root currently is still part of pytorch.
def get_sha() -> str:
try:
return (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=nvfuser_root)
.decode("ascii")
.strip()
)
except Exception:
return UNKNOWN
def get_version() -> str:
sha = get_sha()
version = (
open((nvfuser_root / "version.txt"), "r").read().strip() + "+git" + sha[:7]
)
return version
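# For example, with version.txt containing "0.0.1" and HEAD at commit
# 1a2b3c4d..., get_version() returns "0.0.1+git1a2b3c4" (values illustrative).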
def get_pytorch_cmake_prefix():
from subprocess import Popen, PIPE
# We need to run this in a separate process so that we do not delete the nvfuser library while it is still loaded by torch
process_torch_prefix = Popen(
[
sys.executable,
"-c",
"import torch.utils; print(torch.utils.cmake_prefix_path)",
],
stdout=PIPE,
)
stdout_msg, error_msg = process_torch_prefix.communicate()
return stdout_msg.decode("utf-8").rstrip("\n")
if __name__ == "__main__":
version_file = nvfuser_root / "nvfuser" / "version.py"
with open(version_file, "w") as f:
f.write("_version_str = '{}'\n".format(get_version()))
|
Fuser-main
|
tools/gen_nvfuser_version.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
def get_available_memory_gb():
"""Returns the available memory in GB."""
try:
import psutil
return psutil.virtual_memory().available / 1024 / 1024 / 1024
except: # noqa: E722
pass
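# Fallback: parse /proc/meminfo directly. A typical entry looks like
#   MemAvailable:   16303380 kB
# which the loop below converts as 16303380 / 1024 / 1024, roughly 15.5 GB
# (the numbers are illustrative).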
try:
with open("/proc/meminfo", "r") as f:
while True:
line = f.readline()
if line.startswith("MemAvailable:"):
mem = line.split()[1]
assert line.split()[2] == "kB"
return int(mem) / 1024 / 1024
if not line:
break
except: # noqa: E722
pass
return 0
|
Fuser-main
|
tools/memory.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Generates a C++ header file embedding the original input as a string literal
import argparse
import pathlib
from datetime import datetime
arg_parser = argparse.ArgumentParser(
description="Converts source files to C++ string literals", allow_abbrev=False
)
arg_parser.add_argument("-i", "--input", required=True, help="Input source file")
arg_parser.add_argument(
"-o", "--output", required=True, help="Name of the generated header file"
)
args = arg_parser.parse_args()
# msvc string literal maximum length 16380
# https://docs.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2026?view=msvc-170
MAX_STRING_LITERAL = 16000
# https://docs.microsoft.com/en-us/cpp/c-language/maximum-string-length?view=msvc-170
MAX_STRING_CONCATENATED = 65535
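# The generated header splits the input into adjacent raw string literals so
# that no single literal exceeds the MSVC limit; a sketch of the output
# (contents illustrative):
#
#   // Generated from "runtime/example.cu"
#   namespace nvfuser_resources {
#   constexpr const char* example_cu = R"(
#   ...first chunk of the input...
#   )"
#   R"(
#   ...next chunk...
#   )";
#   } // namespace nvfuser_resources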
with open(args.input, "r") as fin:
with open(args.output, "w") as fout:
literal_name = f"{pathlib.Path(args.input).stem}_cu"
fout.write(f'// Generated from "{args.input}"\n')
fout.write(f'// {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n\n')
fout.write("namespace nvfuser_resources {\n\n")
fout.write(f'constexpr const char* {literal_name} = R"(\n')
accumulated_chars = 0
accumulated_chars_per_literal = 0
for line in fin:
accumulated_chars = accumulated_chars + len(line) + 1
accumulated_chars_per_literal = (
accumulated_chars_per_literal + len(line) + 1
)
if accumulated_chars_per_literal >= MAX_STRING_LITERAL:
fout.write(')"\n')
fout.write('R"(\n')
fout.write(line)
accumulated_chars_per_literal = len(line) + 1
else:
fout.write(line)
fout.write(')";\n')
fout.write("\n} // namespace nvfuser_resources\n")
if accumulated_chars >= MAX_STRING_CONCATENATED:
raise Exception("runtime header file exceeds size limit of 65535 for MSVC")
|
Fuser-main
|
tools/stringify_file.py
|
Fuser-main
|
tools/__init__.py
|
|
"""
Generic linter that greps for a pattern and optionally suggests replacements.
"""
import argparse
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from typing import Any, List, NamedTuple, Optional
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def run_command(
args: List[str],
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def lint_file(
matching_line: str,
replace_pattern: str,
linter_name: str,
error_name: str,
error_description: str,
) -> LintMessage:
# matching_line looks like:
# tools/linter/clangtidy_linter.py:13:import foo.bar.baz
split = matching_line.split(":")
filename = split[0]
original = None
replacement = None
if replace_pattern:
with open(filename, "r") as f:
original = f.read()
try:
proc = run_command(["sed", "-r", replace_pattern, filename])
replacement = proc.stdout.decode("utf-8")
except Exception as err:
return LintMessage(
path=None,
line=None,
char=None,
code=linter_name,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
return LintMessage(
path=split[0],
line=int(split[1]),
char=None,
code=linter_name,
severity=LintSeverity.ERROR,
name=error_name,
original=original,
replacement=replacement,
description=error_description,
)
def main() -> None:
parser = argparse.ArgumentParser(
description="grep wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--pattern",
required=True,
help="pattern to grep for",
)
parser.add_argument(
"--linter-name",
required=True,
help="name of the linter",
)
parser.add_argument(
"--error-name",
required=True,
help="human-readable description of what the error is",
)
parser.add_argument(
"--error-description",
required=True,
help="message to display when the pattern is found",
)
parser.add_argument(
"--replace-pattern",
help=(
"the form of a pattern passed to `sed -r`. "
"If specified, this will become proposed replacement text."
),
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
try:
proc = run_command(["grep", "-nEHI", args.pattern, *args.filenames])
except Exception as err:
err_msg = LintMessage(
path=None,
line=None,
char=None,
code=args.linter_name,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
print(json.dumps(err_msg._asdict()), flush=True)
exit(0)
lines = proc.stdout.decode().splitlines()
for line in lines:
lint_message = lint_file(
line,
args.replace_pattern,
args.linter_name,
args.error_name,
args.error_description,
)
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
|
Fuser-main
|
tools/linter/adapters/grep_linter.py
|
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from typing import Any, List, NamedTuple, Optional, BinaryIO
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def _run_command(
args: List[str],
*,
stdin: BinaryIO,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=IS_WINDOWS, # So batch scripts are found.
timeout=timeout,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
stdin: BinaryIO,
retries: int,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
remaining_retries = retries
while True:
try:
return _run_command(args, stdin=stdin, timeout=timeout)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def check_file(
filename: str,
retries: int,
timeout: int,
) -> List[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
with open(filename, "rb") as f:
proc = run_command(
[sys.executable, "-mblack", "--stdin-filename", filename, "-"],
stdin=f,
retries=retries,
timeout=timeout,
)
except subprocess.TimeoutExpired:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="BLACK",
severity=LintSeverity.ERROR,
name="timeout",
original=None,
replacement=None,
description=(
"black timed out while trying to process a file. "
"Please report an issue in pytorch/pytorch with the "
"label 'module: lint'"
),
)
]
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="BLACK",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="BLACK",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="Run `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with black.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out black",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="seconds to wait for black",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(check_file, x, args.retries, args.timeout): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
|
Fuser-main
|
tools/linter/adapters/black_linter.py
|
"""
Initializer script that installs packages with pip.
"""
import os
import argparse
import logging
import subprocess
import sys
import time
from typing import List
def run_command(args: List[str]) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
parser.add_argument(
"--no-binary",
help="do not use pre-compiled binaries from pip.",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
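# Invocation sketch (package pins are illustrative): every package must carry
# an explicit version so the lint environment stays reproducible, e.g.
#
#   python3 pip_init.py --dry-run=1 flake8==6.0.0 black==23.3.0
#
# With --dry-run=1 the composed pip command is printed instead of executed.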
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
raise RuntimeError(
"Package {package_name} did not have a version specified. "
"Please specify a version to produce a consistent linting experience."
)
if args.no_binary:
pip_args.append(f"--no-binary={package_name}")
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args)
|
Fuser-main
|
tools/linter/adapters/pip_init.py
|
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Set, Pattern
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# fmt: off
# https://www.flake8rules.com/
DOCUMENTED_IN_FLAKE8RULES: Set[str] = {
"E101", "E111", "E112", "E113", "E114", "E115", "E116", "E117",
"E121", "E122", "E123", "E124", "E125", "E126", "E127", "E128", "E129",
"E131", "E133",
"E201", "E202", "E203",
"E211",
"E221", "E222", "E223", "E224", "E225", "E226", "E227", "E228",
"E231",
"E241", "E242",
"E251",
"E261", "E262", "E265", "E266",
"E271", "E272", "E273", "E274", "E275",
"E301", "E302", "E303", "E304", "E305", "E306",
"E401", "E402",
"E501", "E502",
"E701", "E702", "E703", "E704",
"E711", "E712", "E713", "E714",
"E721", "E722",
"E731",
"E741", "E742", "E743",
"E901", "E902", "E999",
"W191",
"W291", "W292", "W293",
"W391",
"W503", "W504",
"W601", "W602", "W603", "W604", "W605",
"F401", "F402", "F403", "F404", "F405",
"F811", "F812",
"F821", "F822", "F823",
"F831",
"F841",
"F901",
"C901",
}
# https://pypi.org/project/flake8-comprehensions/#rules
DOCUMENTED_IN_FLAKE8COMPREHENSIONS: Set[str] = {
"C400", "C401", "C402", "C403", "C404", "C405", "C406", "C407", "C408", "C409",
"C410",
"C411", "C412", "C413", "C413", "C414", "C415", "C416",
}
# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
DOCUMENTED_IN_BUGBEAR: Set[str] = {
"B001", "B002", "B003", "B004", "B005", "B006", "B007", "B008", "B009", "B010",
"B011", "B012", "B013", "B014", "B015",
"B301", "B302", "B303", "B304", "B305", "B306",
"B901", "B902", "B903", "B950",
}
# fmt: on
# stdin:2: W802 undefined name 'foo'
# stdin:3:6: T484 Name 'foo' is not defined
# stdin:3:-100: W605 invalid escape sequence '\/'
# stdin:3:1: E302 expected 2 blank lines, found 1
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<code>\S+?):?
\s(?P<message>.*)
$
"""
)
def _test_results_re() -> None:
"""
>>> def t(s): return RESULTS_RE.search(s).groupdict()
>>> t(r"file.py:80:1: E302 expected 2 blank lines, found 1")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '80', 'column': '1', 'code': 'E302',
'message': 'expected 2 blank lines, found 1'}
>>> t(r"file.py:7:1: P201: Resource `stdout` is acquired but not always released.")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '7', 'column': '1', 'code': 'P201',
'message': 'Resource `stdout` is acquired but not always released.'}
>>> t(r"file.py:8:-10: W605 invalid escape sequence '/'")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '8', 'column': '-10', 'code': 'W605',
'message': "invalid escape sequence '/'"}
"""
pass
def _run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
) -> "subprocess.CompletedProcess[str]":
logging.debug(
"$ %s",
" ".join(
([f"{k}={v}" for (k, v) in extra_env.items()] if extra_env else []) + args
),
)
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
encoding="utf-8",
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
retries: int,
) -> "subprocess.CompletedProcess[str]":
remaining_retries = retries
while True:
try:
return _run_command(args, extra_env=extra_env)
except subprocess.CalledProcessError as err:
if remaining_retries == 0 or not re.match(
r"^ERROR:1:1: X000 linting with .+ timed out after \d+ seconds",
err.stdout,
):
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def get_issue_severity(code: str) -> LintSeverity:
# "B901": `return x` inside a generator
# "B902": Invalid first argument to a method
# "B903": __slots__ efficiency
# "B950": Line too long
# "C4": Flake8 Comprehensions
# "C9": Cyclomatic complexity
# "E2": PEP8 horizontal whitespace "errors"
# "E3": PEP8 blank line "errors"
# "E5": PEP8 line length "errors"
# "F401": Name imported but unused
# "F403": Star imports used
# "F405": Name possibly from star imports
# "T400": type checking Notes
# "T49": internal type checker errors or unmatched messages
if any(
code.startswith(x)
for x in [
"B9",
"C4",
"C9",
"E2",
"E3",
"E5",
"F401",
"F403",
"F405",
"T400",
"T49",
]
):
return LintSeverity.ADVICE
# "F821": Undefined name
# "E999": syntax error
if any(code.startswith(x) for x in ["F821", "E999"]):
return LintSeverity.ERROR
# "F": PyFlakes Error
# "B": flake8-bugbear Error
# "E": PEP8 "Error"
# "W": PEP8 Warning
# possibly other plugins...
return LintSeverity.WARNING
def get_issue_documentation_url(code: str) -> str:
if code in DOCUMENTED_IN_FLAKE8RULES:
return f"https://www.flake8rules.com/rules/{code}.html"
if code in DOCUMENTED_IN_FLAKE8COMPREHENSIONS:
return "https://pypi.org/project/flake8-comprehensions/#rules"
if code in DOCUMENTED_IN_BUGBEAR:
return "https://github.com/PyCQA/flake8-bugbear#list-of-warnings"
return ""
def check_files(
filenames: List[str],
flake8_plugins_path: Optional[str],
severities: Dict[str, LintSeverity],
retries: int,
) -> List[LintMessage]:
try:
proc = run_command(
[sys.executable, "-mflake8", "--exit-zero"] + filenames,
extra_env={"FLAKE8_PLUGINS_PATH": flake8_plugins_path}
if flake8_plugins_path
else None,
retries=retries,
)
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code="FLAKE8",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.strip() or "(empty)",
stdout=err.stdout.strip() or "(empty)",
)
),
)
]
return [
LintMessage(
path=match["file"],
name=match["code"],
description="{}\nSee {}".format(
match["message"],
get_issue_documentation_url(match["code"]),
),
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="FLAKE8",
severity=severities.get(match["code"]) or get_issue_severity(match["code"]),
original=None,
replacement=None,
)
for match in RESULTS_RE.finditer(proc.stdout)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Flake8 wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--flake8-plugins-path",
help="FLAKE8_PLUGINS_PATH env value",
)
parser.add_argument(
"--severity",
action="append",
help="map code to severity (e.g. `B950:advice`)",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out flake8",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
flake8_plugins_path = (
None
if args.flake8_plugins_path is None
else os.path.realpath(args.flake8_plugins_path)
)
severities: Dict[str, LintSeverity] = {}
if args.severity:
for severity in args.severity:
parts = severity.split(":", 1)
assert len(parts) == 2, f"invalid severity `{severity}`"
severities[parts[0]] = LintSeverity(parts[1])
lint_messages = check_files(
args.filenames, flake8_plugins_path, severities, args.retries
)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
|
Fuser-main
|
tools/linter/adapters/flake8_linter.py
|
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from typing import Any, List, NamedTuple, Optional
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def _run_command(
args: List[str],
*,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=IS_WINDOWS, # So batch scripts are found.
timeout=timeout,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
retries: int,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
remaining_retries = retries
while True:
try:
return _run_command(args, timeout=timeout)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def check_file(
filename: str,
binary: str,
retries: int,
timeout: int,
) -> List[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
proc = run_command(
[binary, filename],
retries=retries,
timeout=timeout,
)
except subprocess.TimeoutExpired:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="timeout",
original=None,
replacement=None,
description=(
"clang-format timed out while trying to process a file. "
"Please report an issue in pytorch/pytorch with the "
"label 'module: lint'"
),
)
]
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="See https://clang.llvm.org/docs/ClangFormat.html.\nRun `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with clang-format.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="clang-format binary path",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out clang-format",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="seconds to wait for clang-format",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
binary = os.path.normpath(args.binary) if IS_WINDOWS else args.binary
binary = os.path.expanduser(binary)
if not Path(binary).exists():
lint_message = LintMessage(
path=None,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="init-error",
original=None,
replacement=None,
description=(
f"Could not find clang-format binary at {binary}, "
"did you forget to run `lintrunner init`?"
),
)
print(json.dumps(lint_message._asdict()), flush=True)
sys.exit(0)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(check_file, x, binary, args.retries, args.timeout): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
|
Fuser-main
|
tools/linter/adapters/clangformat_linter.py
|
"""
EXEC: Ensure that source files are not executable.
"""
import argparse
import json
import logging
import os
import sys
from enum import Enum
from typing import NamedTuple, Optional
LINTER_CODE = "EXEC"
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def check_file(filename: str) -> Optional[LintMessage]:
is_executable = os.access(filename, os.X_OK)
if is_executable:
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="executable-permissions",
original=None,
replacement=None,
description="This file has executable permission; please remove it by using `chmod -x`.",
)
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="exec linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
lint_messages = []
for filename in args.filenames:
lint_message = check_file(filename)
if lint_message is not None:
lint_messages.append(lint_message)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
|
Fuser-main
|
tools/linter/adapters/exec_linter.py
|
import argparse
import concurrent.futures
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from sysconfig import get_paths as gp
from typing import Any, List, NamedTuple, Optional, Pattern
# Nvfuser directory root
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE,
check=True,
)
NVFUSER_ROOT = result.stdout.decode("utf-8").strip()
IS_WINDOWS: bool = os.name == "nt"
# Returns '/usr/local/include/python<version number>'
def get_python_include_dir() -> str:
return gp()["include"]
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# c10/core/DispatchKey.cpp:281:26: error: 'k' used after it was moved [bugprone-use-after-move]
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<severity>\S+?):?
\s(?P<message>.*)
\s(?P<code>\[.*\])
$
"""
)
def run_command(
args: List[str],
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
capture_output=True,
check=False,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
# Severity is either "error" or "note":
# https://github.com/python/mypy/blob/8b47a032e1317fb8e3f9a818005a6b63e9bf0311/mypy/errors.py#L46-L47
severities = {
"error": LintSeverity.ERROR,
"warning": LintSeverity.WARNING,
}
def clang_search_dirs() -> List[str]:
# Compilers are ordered based on fallback preference
# We pick the first one that is available on the system
compilers = ["clang", "gcc", "cpp", "cc"]
compilers = [c for c in compilers if shutil.which(c) is not None]
if len(compilers) == 0:
raise RuntimeError(f"None of {compilers} were found")
compiler = compilers[0]
result = subprocess.run(
[compiler, "-E", "-x", "c++", "-", "-v"],
stdin=subprocess.DEVNULL,
capture_output=True,
check=True,
)
stderr = result.stderr.decode().strip().split("\n")
search_start = r"#include.*search starts here:"
search_end = r"End of search list."
append_path = False
search_paths = []
for line in stderr:
if re.match(search_start, line):
if append_path:
continue
else:
append_path = True
elif re.match(search_end, line):
break
elif append_path:
search_paths.append(line.strip())
return search_paths
include_args = []
include_dir = [
"/usr/lib/llvm-11/include/openmp",
get_python_include_dir(),
os.path.join(NVFUSER_ROOT, "third_party/pybind11/include"),
] + clang_search_dirs()
for dir in include_dir:
include_args += ["--extra-arg", f"-I{dir}"]
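# The resulting clang-tidy invocation looks roughly like (paths illustrative):
#   clang-tidy -p=<build_dir> --extra-arg -I/usr/include/python3.X \
#       --extra-arg -I<nvfuser>/third_party/pybind11/include ... <file>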
def check_file(
filename: str,
binary: str,
build_dir: Path,
) -> List[LintMessage]:
try:
proc = run_command(
[binary, f"-p={build_dir}", *include_args, filename],
)
except OSError as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGTIDY",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
lint_messages = []
try:
# Change the current working directory to the build directory, since
# clang-tidy will report files relative to the build directory.
saved_cwd = os.getcwd()
os.chdir(build_dir)
for match in RESULTS_RE.finditer(proc.stdout.decode()):
# Convert the reported path to an absolute path.
abs_path = str(Path(match["file"]).resolve())
message = LintMessage(
path=abs_path,
name=match["code"],
description=match["message"],
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="CLANGTIDY",
severity=severities.get(match["severity"], LintSeverity.ERROR),
original=None,
replacement=None,
)
lint_messages.append(message)
finally:
os.chdir(saved_cwd)
return lint_messages
def main() -> None:
parser = argparse.ArgumentParser(
description="clang-tidy wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="clang-tidy binary path",
)
parser.add_argument(
"--build-dir",
"--build_dir",
required=True,
help=(
"Where the compile_commands.json file is located. "
"Gets passed to clang-tidy -p"
),
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
args.binary = os.path.expanduser(args.binary)
if not os.path.exists(args.binary):
err_msg = LintMessage(
path="<none>",
line=None,
char=None,
code="CLANGTIDY",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Could not find clang-tidy binary at {args.binary},"
" you may need to run `lintrunner init`."
),
)
print(json.dumps(err_msg._asdict()), flush=True)
exit(0)
abs_build_dir = Path(args.build_dir).resolve()
# Get the absolute path to clang-tidy and use this instead of the relative
# path such as .lintbin/clang-tidy. The problem here is that os.chdir is
# per process, and the linter uses it to move between the current directory
# and the build folder. And there is no .lintbin directory in the latter.
# When this happens in a race condition, the linter command will fail with a
# "no such file or directory" error for '.lintbin/clang-tidy'.
binary_path = os.path.abspath(args.binary)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(
check_file,
filename,
binary_path,
abs_build_dir,
): filename
for filename in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
|
Fuser-main
|
tools/linter/adapters/clangtidy_linter.py
|
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Pattern
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# tools/linter/flake8_linter.py:15:13: error: Incompatibl...int") [assignment]
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<severity>\S+?):?
\s(?P<message>.*)
\s(?P<code>\[.*\])
$
"""
)
def run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
retries: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
# Severity is either "error" or "note":
# https://github.com/python/mypy/blob/8b47a032e1317fb8e3f9a818005a6b63e9bf0311/mypy/errors.py#L46-L47
severities = {
"error": LintSeverity.ERROR,
"note": LintSeverity.ADVICE,
}
def check_files(
filenames: List[str],
config: str,
retries: int,
) -> List[LintMessage]:
try:
proc = run_command(
[sys.executable, "-mmypy", f"--config={config}"] + filenames,
extra_env={},
retries=retries,
)
except OSError as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code="MYPY",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
stdout = str(proc.stdout, "utf-8").strip()
return [
LintMessage(
path=match["file"],
name=match["code"],
description=match["message"],
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="MYPY",
severity=severities.get(match["severity"], LintSeverity.ERROR),
original=None,
replacement=None,
)
for match in RESULTS_RE.finditer(stdout)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="mypy wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out mypy",
)
parser.add_argument(
"--config",
required=True,
help="path to an mypy .ini config file",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
# Use a dictionary here to preserve order. mypy cares about order,
# tragically, e.g. https://github.com/python/mypy/issues/2015
filenames: Dict[str, bool] = {}
# If a stub file exists, have mypy check it instead of the original file, in
# accordance with PEP-484 (see https://www.python.org/dev/peps/pep-0484/#stub-files)
for filename in args.filenames:
if filename.endswith(".pyi"):
filenames[filename] = True
continue
stub_filename = filename.replace(".py", ".pyi")
if Path(stub_filename).exists():
filenames[stub_filename] = True
else:
filenames[filename] = True
lint_messages = check_files(list(filenames), args.config, args.retries)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
|
Fuser-main
|
tools/linter/adapters/mypy_linter.py
|
"""
NEWLINE: Checks that files end with exactly one trailing newline.
"""
import argparse
import json
import logging
import os
import sys
from enum import Enum
from typing import NamedTuple, Optional
NEWLINE = 10 # ASCII "\n"
LINTER_CODE = "NEWLINE"
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
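# The two-byte check used below classifies file endings as follows, where b is
# the second-to-last byte and c is the last byte (examples illustrative):
#   b"...foo\n"   -> b=b"o",  c=b"\n" -> OK: exactly one trailing newline
#   b"...foo"     -> b=b"o",  c=b"o"  -> flagged: missing trailing newline
#   b"...foo\n\n" -> b=b"\n", c=b"\n" -> flagged: extra trailing newlines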
def correct_trailing_newlines(filename: str) -> bool:
with open(filename, "rb") as f:
a = len(f.read(2))
if a == 0:
return True
elif a == 1:
# file is wrong whether or not the only byte is a newline
return False
else:
f.seek(-2, os.SEEK_END)
b, c = f.read(2)
# no ASCII byte is part of any non-ASCII character in UTF-8
return b != NEWLINE and c == NEWLINE
def check_file(filename: str) -> Optional[LintMessage]:
logging.debug("Checking file %s", filename)
with open(filename, "rb") as f:
a = len(f.read(2))
if a == 0:
# File is empty, just leave it alone.
return None
elif a == 1:
# file is wrong whether or not the only byte is a newline
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="testestTrailing newline",
original=None,
replacement=None,
description="Trailing newline found. Run `lintrunner --take NEWLINE -a` to apply changes.",
)
else:
# Read the last two bytes
f.seek(-2, os.SEEK_END)
b, c = f.read(2)
# no ASCII byte is part of any non-ASCII character in UTF-8
if b != NEWLINE and c == NEWLINE:
return None
else:
f.seek(0)
try:
original = f.read().decode("utf-8")
except Exception as err:
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="Decoding failure",
original=None,
replacement=None,
description=f"utf-8 decoding failed due to {err.__class__.__name__}:\n{err}",
)
return LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="Trailing newline",
original=original,
replacement=original.rstrip("\n") + "\n",
description="Trailing newline found. Run `lintrunner --take NEWLINE -a` to apply changes.",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="native functions linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="location of native_functions.yaml",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
lint_messages = []
for filename in args.filenames:
lint_message = check_file(filename)
if lint_message is not None:
lint_messages.append(lint_message)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
|
Fuser-main
|
tools/linter/adapters/newlines_linter.py
|
T0 = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
T1 = fd.define_tensor(symbolic_sizes=[-1], contiguous=[True], dtype=DataType.Float)
T2 = fd.define_tensor(symbolic_sizes=[-1, -1], contiguous=[True, True], dtype=DataType.Half)
T3 = fd.ops.broadcast_in_dim(T0, output_shape=[1, 1024, 768], broadcast_dims=[2])
T4 = fd.ops.broadcast_in_dim(T1, output_shape=[1, 1024, 768], broadcast_dims=[2])
T5 = fd.ops.view(T2, original_shape=[1024, 768], new_shape=[1, 1024, 768])
T6 = fd.ops.cast(T5, dtype=DataType.Float)
S7 = fd.define_scalar(0.500000)
T8 = fd.ops.mul(T6, S7)
S9 = fd.define_scalar(0.707107)
T10 = fd.ops.mul(T6, S9)
T11 = fd.ops.erf(T10)
S12 = fd.define_scalar(1.00000)
T13 = fd.ops.add(T11, S12)
T14 = fd.ops.mul(T8, T13)
T15 = fd.ops.cast(T14, dtype=DataType.Half)
T16 = fd.ops.cast(T15, dtype=DataType.Float)
T17, T18 = fd.ops.var_mean(T16, axes=[2], correction=0, keepdim=False)
T19 = fd.ops.broadcast_in_dim(T17, output_shape=[1, 1024, 1], broadcast_dims=[0, 1])
T20 = fd.ops.broadcast_in_dim(T18, output_shape=[1, 1024, 1], broadcast_dims=[0, 1])
S21 = fd.define_scalar(1.00000e-05)
T22 = fd.ops.add(T19, S21)
T23 = fd.ops.broadcast_in_dim(T20, output_shape=[1, 1024, 768], broadcast_dims=[0, 1, 2])
T24 = fd.ops.rsqrt(T22)
T25 = fd.ops.sub(T16, T23)
T26 = fd.ops.broadcast_in_dim(T24, output_shape=[1, 1024, 768], broadcast_dims=[0, 1, 2])
T27 = fd.ops.mul(T25, T26)
T28 = fd.ops.mul(T27, T3)
T29 = fd.ops.add(T28, T4)
T30 = fd.ops.cast(T29, dtype=DataType.Float)
T31 = fd.ops.cast(T30, dtype=DataType.Half)
T32 = fd.ops.view(T31, original_shape=[1, 1024, 768], new_shape=[1024, 768])
fd.add_output(T5)
fd.add_output(T16)
fd.add_output(T20)
fd.add_output(T24)
fd.add_output(T32)
|
Fuser-main
|
tools/examples/repro.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
from copy import deepcopy
from functools import partial
import itertools
import math
import random
import re
from typing import List, Callable
import tempfile
import unittest
import torch
import torch.nn.functional as F
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, TestCase
from torch.testing._internal.jit_utils import RUN_CUDA
import torch._refs as refs
import torch._prims as prims
# Will only create the nvfuser module if CUDA is available
try:
from nvfuser import (
FusionCache,
FusionDefinition,
DataType,
Tensor,
version,
compute_contiguity,
)
from nvfuser.pytorch_utils import torch_dtype_to_nvfuser_dtype
except ImportError:
pass
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
def is_pre_volta():
if not RUN_NVFUSER:
return False
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
def serde_check(test_fn: Callable):
"""
A decorator to verify that serialization works with the given exec_nvfuser function.
Currently, it uses serialization to rebuild the FusionCache structure.
"""
def inner_fn(*args, **kwargs):
self, fusion_func, inputs = args
# Deep copy inputs because when a fusion output aliases an input, it will change the input value for the
# subsequent function calls.
inputs_copy = deepcopy(inputs)
# NOTE: For debug purposes, clear FusionCache before running first test
# if ("new_fusion_expected" not in kwargs) or kwargs["new_fusion_expected"]:
# FusionCache.reset()
# skip_serde_check is only used by the decorator so remove it before running test_fn
skip_serde_check = kwargs.pop("skip_serde_check", False)
# Run test to populate FusionCache
result = test_fn(*args, **kwargs)
if skip_serde_check:
return result
with tempfile.NamedTemporaryFile() as tmp:
# Serialize FusionCache
fc = FusionCache.get()
fc.serialize(tmp.name)
FusionCache.reset()
# Get new FusionCache because the previous one was destroyed by the reset call.
fc = FusionCache.get()
fc.deserialize(tmp.name)
# Run test with repopulated FusionCache
kwargs["new_fusion_expected"] = False
return test_fn(self, fusion_func, inputs_copy, **kwargs)
return inner_fn
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(is_pre_volta(), "Only supported on Volta and newer devices.")
class TestNvFuserFrontend(TestCase):
# Helper function to verify the nvfuser output and make sure the string
# definition based on the FusionDefinition is executable and matches the
# original definition
@serde_check
def exec_nvfuser(self, fusion_func, inputs, *, new_fusion_expected=True):
inputs_cap = deepcopy(inputs)
fc = FusionCache.get()
before_fusions = fc.num_fusions()
# Execute a fusion function and capture the string python definition
with FusionDefinition() as fd:
fusion_func(fd)
fd_str = fd.__repr__()
torch.manual_seed(0)
out = fd.execute(inputs)
# Execute the python definition that was captured
try:
func_name = re.findall("(nvfuser_fusion_id\\d+)", fd_str.split("\n")[1])[0]
exec(fd_str)
with FusionDefinition() as fd_cap:
eval(func_name)(fd_cap)
torch.manual_seed(0)
out_cap = fd_cap.execute(inputs_cap)
except Exception as err:
print("\nException For Printed FusionDefinition:")
print(
"(A failure here suggests a mismatch in functionality between the original definition and the printed definition.)"
)
print(fd_str)
raise err
# Make sure the original and captured definitions match
for idx in range(len(out)):
self.assertEqual(out[idx], out_cap[idx])
self.assertEqual(fc.num_fusions() - before_fusions, int(new_fusion_expected))
return out, fd
def test_basic(self):
inputs = [
torch.ones(2, 4, 8, device="cuda"),
torch.ones(2, 4, 8, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
c0 = fd.define_scalar(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.sum(t3, [-1], False, DataType.Float)
fd.add_output(t4)
# Expected Output is a tensor of 48's
nvf_out1, _ = self.exec_nvfuser(fusion_func, inputs)
# Create a new fusion with the same definition, it should hit the cache!
nvf_out2, fd2 = self.exec_nvfuser(
fusion_func, inputs, new_fusion_expected=False
)
# Create a fusion from a fusion id and make sure it executes!
fd3 = FusionDefinition(fd2.id())
nvf_out3 = fd3.execute(inputs)
eager_out = torch.sum((inputs[0] + inputs[1]) * 3.0, dim=-1)
self.assertEqual(eager_out, nvf_out1[0])
self.assertEqual(eager_out, nvf_out2[0])
self.assertEqual(eager_out, nvf_out3[0])
def test_basic_fp16(self):
inputs = [
torch.ones(2, 4, 8, device="cuda", dtype=torch.float16),
torch.ones(2, 4, 8, device="cuda", dtype=torch.float16),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
c0 = fd.define_scalar(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.sum(t3, [-1], False, DataType.Float)
t5 = fd.ops.cast(t4, DataType.Half)
fd.add_output(t5)
# Expected Output is a tensor of 48's
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.sum((inputs[0] + inputs[1]) * 3.0, dim=-1)
self.assertEqual(eager_out, nvf_out[0])
def test_cast_double_to_half(self):
inputs = [
torch.randn(2, 4, device="cuda", dtype=torch.float64),
torch.randn(2, 4, device="cuda", dtype=torch.float64),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t0h = fd.ops.cast(t0, DataType.Half)
t1h = fd.ops.cast(t1, DataType.Half)
t2 = fd.ops.add(t0h, t1h)
t3 = fd.ops.relu(t2)
t4 = fd.ops.cast(t3, DataType.Half)
fd.add_output(t4)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.relu(inputs[0].to(torch.half) + inputs[1].to(torch.half))
self.assertEqual(eager_out, nvf_out[0])
def test_promote_to_double(self):
inputs = [
torch.randn(2, 4, device="cuda", dtype=torch.float16),
torch.randn(2, 4, device="cuda", dtype=torch.float64),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.ops.add(t0, t1)
t5 = fd.ops.relu(t2)
fd.add_output(t5)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.relu(inputs[0] + inputs[1])
self.assertEqual(eager_out, nvf_out[0])
def test_implicit_broadcast_input(self):
inputs = [
torch.randn(3, device="cuda"),
torch.randn(2, 3, 4, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t0_b = fd.ops.broadcast_in_dim(t0, [2, 3, 4], [1])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.add(
prims.broadcast_in_dim(inputs[0], inputs[1].size(), [1]), inputs[1]
)
self.assertEqual(eager_out, nvf_out[0])
def test_explicit_broadcast_input(self):
inputs = [
torch.randn(1, 1, 4, device="cuda"),
torch.randn(2, 3, 4, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t0_b = fd.ops.broadcast_in_dim(t0, inputs[1].size(), [0, 1, 2])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.add(
prims.broadcast_in_dim(inputs[0], inputs[1].size(), [0, 1, 2]), inputs[1]
)
self.assertEqual(eager_out, nvf_out[0])
def test_broadcast_mixing(self):
inputs = [
torch.randn(3, 1, device="cuda"),
torch.randn(3, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t1_b = fd.ops.broadcast_in_dim(t1, [3, 3], [0])
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.add(inputs[0], prims.broadcast_in_dim(inputs[1], [3, 3], [0]))
self.assertEqual(eager_out, nvf_out[0])
def test_ops_broadcast(self):
inputs = [
torch.randn(3, device="cuda"),
torch.randn(2, 3, 4, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t0_b = fd.ops.broadcast(t0, [True, False, True])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.add(
prims.broadcast_in_dim(inputs[0], inputs[1].size(), [1]), inputs[1]
)
self.assertEqual(eager_out, nvf_out[0])
def test_prim_layer_norm_fwd(self):
input_size = [64, 128, 1024]
dtype = torch.float32
device = "cuda"
inputs = [
torch.randn(*input_size, device=device, requires_grad=True),
torch.nn.Parameter(torch.randn(input_size[2], dtype=dtype, device=device)),
torch.nn.Parameter(torch.randn(input_size[2], dtype=dtype, device=device)),
]
def primitive_definition(
inputs: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
normalization_axis: int,
keepdim: bool,
) -> torch.Tensor:
mean = inputs.mean(normalization_axis, keepdim=keepdim)
diff = inputs - mean
diff_sq = diff * diff
var = diff_sq.mean(normalization_axis, keepdim=keepdim)
pre_shift_scale_norm_output = (inputs - mean) / torch.sqrt(var + 1e-12)
norm_output = weight * pre_shift_scale_norm_output + bias
return norm_output
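        # Informally, both nvFuser definitions below compute the same layer norm as the
        # eager reference above: y = (x - mean) * rsqrt(var + eps) * weight + bias,
        # once from primitive sum/div ops and once via ops.var_mean.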
def nvfuser_fusion(
fd: FusionDefinition,
normalization_axis: int,
norm_size: int,
input_shape: List[int],
eps: float,
keepDim: bool,
) -> None:
inputs = fd.define_tensor(
shape=[-1, -1, -1],
contiguity=[True, True, True],
dtype=DataType.Float,
)
weights = fd.define_tensor(
shape=[-1], contiguity=[True], dtype=DataType.Float
)
bias = fd.define_tensor(shape=[-1], contiguity=[True], dtype=DataType.Float)
sum0 = fd.ops.sum(inputs, axes=[normalization_axis], keepdim=keepDim)
norm_const = fd.define_scalar(norm_size)
mean = fd.ops.div(sum0, norm_const)
diff = fd.ops.sub(inputs, mean)
diff_sq = fd.ops.mul(diff, diff)
sum1 = fd.ops.sum(diff_sq, axes=[normalization_axis], keepdim=keepDim)
var = fd.ops.div(sum1, norm_const)
eps_const = fd.define_scalar(eps)
var_eps = fd.ops.add(var, eps_const)
invstd = fd.ops.rsqrt(var_eps)
pre_scale_bias = fd.ops.mul(diff, invstd)
weights_bcast = fd.ops.broadcast_in_dim(
weights, shape=input_shape, broadcast_dims=[2]
)
scale = fd.ops.mul(pre_scale_bias, weights_bcast)
bias_bcast = fd.ops.broadcast_in_dim(
bias, shape=input_shape, broadcast_dims=[2]
)
out = fd.ops.add(scale, bias_bcast)
fd.add_output(out)
fd.add_output(mean)
fd.add_output(invstd)
def nvfuser_fusion_var_mean(
fd: FusionDefinition,
normalization_axis: int,
norm_size: int,
input_shape: List[int],
eps: float,
keepDim: bool,
) -> None:
inputs = fd.define_tensor(
shape=[-1, -1, -1],
contiguity=[True, True, True],
dtype=DataType.Float,
)
weights = fd.define_tensor(
shape=[-1], contiguity=[True], dtype=DataType.Float
)
bias = fd.define_tensor(shape=[-1], contiguity=[True], dtype=DataType.Float)
var, mean = fd.ops.var_mean(
inputs, axes=[normalization_axis], correction=0, keepdim=keepDim
)
eps_const = fd.define_scalar(eps)
var_eps = fd.ops.add(var, eps_const)
invstd = fd.ops.rsqrt(var_eps)
diff = fd.ops.sub(inputs, mean)
pre_scale_bias = fd.ops.mul(diff, invstd)
weights_bcast = fd.ops.broadcast_in_dim(
weights, shape=input_shape, broadcast_dims=[2]
)
scale = fd.ops.mul(pre_scale_bias, weights_bcast)
bias_bcast = fd.ops.broadcast_in_dim(
bias, shape=input_shape, broadcast_dims=[2]
)
out = fd.ops.add(scale, bias_bcast)
fd.add_output(out)
fd.add_output(mean)
fd.add_output(invstd)
fusion_func_1 = partial(
nvfuser_fusion,
normalization_axis=2,
norm_size=inputs[0].size()[2],
input_shape=inputs[0].size(),
eps=1e-12,
keepDim=True,
)
nvf_out, _ = self.exec_nvfuser(fusion_func_1, inputs)
fusion_func_2 = partial(
nvfuser_fusion_var_mean,
normalization_axis=2,
norm_size=inputs[0].size()[2],
input_shape=inputs[0].size(),
eps=1e-12,
keepDim=True,
)
nvf_var_mean_out, _ = self.exec_nvfuser(fusion_func_2, inputs)
eager_out = primitive_definition(inputs[0], inputs[1], inputs[2], 2, True)
self.assertEqual(eager_out, nvf_out[0])
self.assertEqual(eager_out, nvf_var_mean_out[0])
def test_prim_rms_norm_fwd(self):
input_size = [64, 128, 1024]
dtype = torch.float32
device = "cuda"
inputs = [
torch.randn(*input_size, device=device, requires_grad=True),
torch.nn.Parameter(torch.randn(input_size[2], dtype=dtype, device=device)),
]
def primitive_definition(
inputs: torch.Tensor,
weight: torch.Tensor,
normalization_axis: int,
keepdim: bool,
) -> torch.Tensor:
var = inputs.mul(inputs).mean(normalization_axis, keepdim)
pre_shift_scale_norm_output = inputs / torch.sqrt(var + 1e-12)
norm_output = weight * pre_shift_scale_norm_output
return norm_output
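        # Informally, this is an RMS-norm-style computation:
        # y = x * rsqrt(mean(x * x) + eps) * weight.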
def nvfuser_fusion(
fd: FusionDefinition,
normalization_axis: int,
norm_size: int,
input_shape: List[int],
eps: float,
keepDim: bool,
) -> None:
inputs = fd.define_tensor(
shape=[-1, -1, -1],
contiguity=[True, True, True],
dtype=DataType.Float,
)
weights = fd.define_tensor(
shape=[-1], contiguity=[True], dtype=DataType.Float
)
inputs_sq = fd.ops.mul(inputs, inputs)
sum0 = fd.ops.sum(inputs_sq, axes=[normalization_axis], keepdim=keepDim)
norm_const = fd.define_scalar(norm_size)
var = fd.ops.div(sum0, norm_const)
eps_const = fd.define_scalar(eps)
var_eps = fd.ops.add(var, eps_const)
invstd = fd.ops.rsqrt(var_eps)
pre_scale = fd.ops.mul(inputs, invstd)
weights_bcast = fd.ops.broadcast_in_dim(
weights, shape=input_shape, broadcast_dims=[2]
)
out = fd.ops.mul(pre_scale, weights_bcast)
fd.add_output(out)
fd.add_output(invstd)
fusion_func = partial(
nvfuser_fusion,
normalization_axis=2,
norm_size=inputs[0].size()[2],
input_shape=inputs[0].size(),
eps=1e-12,
keepDim=True,
)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = primitive_definition(inputs[0], inputs[1], 2, True)
self.assertEqual(eager_out, nvf_out[0])
# Testing a scenario where a broadcast requires a symbolic output shape
def test_tensor_shape(self):
inputs = [
torch.randn(2, 3, 4, device="cuda"),
torch.randn(4, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t1_b = fd.ops.broadcast_in_dim(t1, t0.shape(), [2])
t2 = fd.ops.sub(t0, t1_b)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.sub(
inputs[0], prims.broadcast_in_dim(inputs[1], inputs[0].size(), [2])
)
self.assertEqual(eager_out, nvf_out[0])
# Testing a scenario where no broadcast is needed
def test_tensor_shape_nobcast(self):
inputs = [
torch.randn(2, 3, device="cuda"),
torch.randn(2, 3, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t1_b = fd.ops.broadcast_in_dim(t1, t0.shape(), [0, 1])
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.add(
inputs[0], prims.broadcast_in_dim(inputs[1], inputs[0].size(), [0, 1])
)
self.assertEqual(eager_out, nvf_out[0])
# Testing a scenario where each arg of a binary op has broadcast.
def test_tensor_size_both_args_bcast(self):
inputs = [
torch.randn(1, 3, device="cuda"),
torch.randn(2, 1, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t0_b = fd.ops.broadcast_in_dim(t0, [t1.size(0), t0.size(1)], [0, 1])
t1_b = fd.ops.broadcast_in_dim(t1, [t1.size(0), t0.size(1)], [0, 1])
t2 = fd.ops.add(t0_b, t1_b)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = refs.add(
prims.broadcast_in_dim(
inputs[0], [inputs[1].size()[0], inputs[0].size()[1]], [0, 1]
),
prims.broadcast_in_dim(
inputs[1], [inputs[1].size()[0], inputs[0].size()[1]], [0, 1]
),
)
self.assertEqual(eager_out, nvf_out[0])
def test_broadcast_in_dim_with_dynamic_shapes(self):
inputs_1 = [
torch.randn(2, 3, 4, device="cuda"),
torch.randn(4, device="cuda"),
]
inputs_2 = [
torch.randn(2, 3, 1024, device="cuda"),
torch.randn(1024, device="cuda"),
]
def fusion_func_1(fd: FusionDefinition):
t0 = fd.define_tensor(shape=[-1, -1, -1], contiguity=[True, True, True])
t1 = fd.define_tensor(shape=[-1], contiguity=[True])
t1_b = fd.ops.broadcast_in_dim(t1, t0.shape(), [2])
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
def fusion_func_2(fd: FusionDefinition):
t0 = fd.define_tensor(shape=[-1, -1, -1], contiguity=[True, True, True])
t1 = fd.define_tensor(shape=[-1], contiguity=[True])
t1_b = fd.ops.broadcast_in_dim(t1, inputs_1[0].size(), [2])
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
def fusion_func_3(fd: FusionDefinition):
t0 = fd.define_tensor(shape=[-1, -1, -1], contiguity=[True, True, True])
t1 = fd.define_tensor(shape=[-1], contiguity=[True])
t1_b = fd.ops.broadcast_in_dim(t1, inputs_2[0].size(), [2])
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
        # Func_1 uses tensor.shape() to propagate dynamic sizes, therefore it is
        # expected that test 2 reuses the fusion cached by test 1.
# Test 1
inputs = inputs_1
nvf_out, _ = self.exec_nvfuser(fusion_func_1, inputs)
eager_out = refs.add(
inputs[0], prims.broadcast_in_dim(inputs[1], inputs[0].size(), [2])
)
self.assertEqual(eager_out, nvf_out[0])
# Test 2
inputs = inputs_2
nvf_out, _ = self.exec_nvfuser(fusion_func_1, inputs, new_fusion_expected=False)
eager_out = refs.add(
inputs[0], prims.broadcast_in_dim(inputs[1], inputs[0].size(), [2])
)
self.assertEqual(eager_out, nvf_out[0])
        # Func_2 and Func_3 are nearly identical except that they have different
        # concrete output shapes for their broadcast_in_dim ops. Therefore, test 4
        # should not hit the cache.
        # Note: the printed definition is expected to change when concrete tensor
        # sizes are baked into it.
# Test 3
inputs = inputs_1
nvf_out, _ = self.exec_nvfuser(fusion_func_2, inputs)
eager_out = refs.add(
inputs[0], prims.broadcast_in_dim(inputs[1], inputs[0].size(), [2])
)
self.assertEqual(eager_out, nvf_out[0])
# Test 4
inputs = inputs_2
nvf_out, _ = self.exec_nvfuser(fusion_func_3, inputs)
eager_out = refs.add(
inputs[0], prims.broadcast_in_dim(inputs[1], inputs[0].size(), [2])
)
self.assertEqual(eager_out, nvf_out[0])
# Testing a scenario where the broadcast is necessary to realize the output
def test_tensor_shape_with_output_bcast(self):
def fusion_func(fd: FusionDefinition):
t0 = fd.define_tensor(shape=[-1, -1, -1], contiguity=[True, True, True])
t1 = fd.ops.sum(t0, axes=[2])
t1_b = fd.ops.broadcast_in_dim(t1, t0.shape(), [0, 1])
fd.add_output(t1_b)
inputs_1 = [
torch.randn(2, 3, 4, device="cuda"),
]
inputs_2 = [
torch.randn(4, 5, 32, device="cuda"),
]
inputs = inputs_1
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = prims.broadcast_in_dim(
torch.sum(inputs[0], dim=-1), inputs[0].size(), [0, 1]
)
self.assertEqual(eager_out, nvf_out[0])
# Testing Dynamic usage of same Fusion
inputs = inputs_2
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs, new_fusion_expected=False)
eager_out = prims.broadcast_in_dim(
torch.sum(inputs[0], dim=-1), inputs[0].size(), [0, 1]
)
self.assertEqual(eager_out, nvf_out[0])
# Testing an expand followed by a broadcast
def test_tensor_shape_expand_bcast(self):
def fusion_func(fd: FusionDefinition):
t0 = fd.define_tensor(shape=[-1, -1, -1], contiguity=[True, True, True])
t1 = fd.define_tensor(shape=[-1, 1, -1], contiguity=[True, None, True])
t2 = fd.define_tensor(shape=[-1, 1, -1], contiguity=[True, None, True])
t1_b = fd.ops.broadcast_in_dim(t1, t0.shape(), [0, 1, 2])
t2_b = fd.ops.broadcast_in_dim(t2, t1_b.shape(), [0, 1, 2])
fd.add_output(t2_b)
inputs = [
torch.randn(2, 3, 4, device="cuda"),
torch.randn(2, 1, 4, device="cuda"),
torch.randn(2, 1, 4, device="cuda"),
]
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out1 = prims.broadcast_in_dim(inputs[1], inputs[0].size(), [0, 1, 2])
eager_out2 = prims.broadcast_in_dim(inputs[2], eager_out1.size(), [0, 1, 2])
self.assertEqual(eager_out2, nvf_out[0])
def test_alias_output_to_input(self):
inputs = [
torch.ones(4, 4, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
s0 = fd.define_scalar(1.0)
s1 = fd.define_scalar(2.0)
s2 = fd.define_scalar(3.0)
t1 = fd.ops.add(t0, s0)
t2 = fd.ops.add(t0, s1)
t3 = fd.ops.add(t2, s2)
fd.add_output(t1)
fd.add_output(t2, alias_input=t0)
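            # With alias_input=t0, t2 is written back into the storage of input t0, so
            # executing the fusion mutates inputs[0] (checked against eager_out2 below).
            # This aliasing is also why exec_nvfuser deep-copies its inputs.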
fd.add_output(t3)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out1 = torch.add(torch.ones(4, 4, device="cuda"), 1.0)
eager_out2 = torch.add(torch.ones(4, 4, device="cuda"), 2.0)
eager_out3 = torch.add(eager_out2, 3.0)
self.assertEqual(eager_out1, nvf_out[0])
self.assertEqual(eager_out2, inputs[0])
self.assertEqual(eager_out3, nvf_out[1])
def test_gather(self):
inputs = [
torch.randn(8, 16, device="cuda"),
torch.randn(8, 16, device="cuda"),
torch.randint(0, 8, (4, 4), device="cuda").to(dtype=torch.long),
]
def test_fn(dim):
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.from_pytorch(inputs[2])
t3 = fd.ops.add(t0, t1)
t4 = fd.ops.gather(t3, t2, dim)
fd.add_output(t4)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.gather(inputs[0] + inputs[1], dim, inputs[2])
self.assertEqual(eager_out, nvf_out[0])
test_fn(0)
test_fn(1)
def test_take_along_axis(self):
inputs = [
torch.randn(8, 16, device="cuda"),
torch.randn(8, 16, device="cuda"),
torch.randint(0, 8, (8, 16), device="cuda").to(dtype=torch.long),
]
def test_fn(dim):
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.from_pytorch(inputs[2])
t3 = fd.ops.add(t0, t1)
t4 = fd.ops.take_along_axis(t3, t2, dim)
fd.add_output(t4)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.gather(inputs[0] + inputs[1], dim, inputs[2])
self.assertEqual(eager_out, nvf_out[0])
test_fn(0)
test_fn(1)
def test_index_select(self):
inputs = [
torch.randn(8, 16, device="cuda"),
torch.randn(8, 16, device="cuda"),
torch.randint(0, 8, (6,), device="cuda").to(dtype=torch.long),
]
def test_fn(dim):
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.from_pytorch(inputs[2])
t3 = fd.ops.add(t0, t1)
t4 = fd.ops.index_select(t3, t2, dim)
fd.add_output(t4)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.index_select(inputs[0] + inputs[1], dim, inputs[2])
self.assertEqual(eager_out, nvf_out[0])
test_fn(0)
test_fn(1)
def test_index_select_scalar_indices(self):
inputs = [
torch.randn(8, 16, device="cuda"),
torch.tensor(2, device="cuda").to(dtype=torch.long),
]
def test_fn(dim):
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.ops.index_select(t0, t1, dim)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.index_select(inputs[0], dim, inputs[1])
self.assertEqual(eager_out, nvf_out[0])
test_fn(0)
test_fn(1)
def test_squeeze(self):
t0_sizes = [4]
t1_sizes = [1, 4, 1]
t2_sizes = [2, 1, 4]
inputs = [
torch.randn(*t0_sizes, device="cuda"),
torch.randn(*t1_sizes, device="cuda"),
torch.randn(*t2_sizes, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.define_tensor(shape=[-1], contiguity=[True])
t1 = fd.define_tensor(sizes=t1_sizes, strides=[4, 1, 1])
t2 = fd.define_tensor(sizes=t2_sizes, strides=[4, 4, 1])
t3 = fd.ops.squeeze(t1, t1_sizes, [0, -1])
t4 = fd.ops.squeeze(
t2,
t2_sizes,
[
-2,
],
)
t5 = fd.ops.sum(t4, [0])
t6 = fd.ops.mul(t0, t3)
t7 = fd.ops.mul(t6, t5)
fd.add_output(t7)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
v1 = torch.sum(inputs[1], [0, -1])
v2 = torch.sum(inputs[2], [0, 1])
eager_out = inputs[0] * v1 * v2
self.assertEqual(eager_out, nvf_out[0])
def test_from_pytorch_fails_on_cpu_tensor(self):
inputs = [
torch.randn(4, 4, device="cpu"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.relu(t0)
fd.add_output(t1)
try:
with FusionDefinition() as fd:
fusion_func(fd)
raise RuntimeError(
"FusionDefinition.from_pytorch should have raised an error for a CPU Tensor!"
)
except ValueError:
pass
def test_no_definition(self):
inputs = [
torch.randn(4, 4, device="cpu"),
]
# A FusionDefinition object is constructed but not defined, should trip an error
try:
fd = FusionDefinition()
out = fd.execute(inputs)
raise RuntimeError(
"Expecting an error for a lack of a child class defining a definition!"
)
except NotImplementedError:
pass
def test_func_definition(self):
inputs = [
torch.randn(4, 4, device="cuda"),
]
class MyFusion(FusionDefinition):
def definition(self):
t0 = self.from_pytorch(inputs[0])
t1 = self.ops.sigmoid(t0)
self.add_output(t1)
fd = MyFusion()
nvf_out = fd.execute(inputs)
eager_out = torch.sigmoid(inputs[0])
self.assertEqual(eager_out, nvf_out[0])
def test_python_version_API(self):
from nvfuser.nvfuser_version import Version
self.assertTrue(version() > "0.0.0")
self.assertTrue(version() > Version("0.0.0"))
def test_zero_size_dim(self):
inputs = [
torch.ones(0, 0, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.define_tensor(
shape=[0, 0], contiguity=[True, True], dtype=DataType.Float
)
t1 = fd.ops.relu(t0)
fd.add_output(t1)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.relu(inputs[0])
self.assertEqual(eager_out.numel(), nvf_out[0].numel())
def test_static_tensor_sizes(self):
inputs = [
torch.randn(4, 5, 1, device="cuda"),
torch.randn(1, 5, 6, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0], static_sizes=True)
t1 = fd.from_pytorch(inputs[1], static_sizes=True)
t2 = fd.ops.mul(t0, t1)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.mul(inputs[0], inputs[1])
self.assertEqual(eager_out, nvf_out[0])
def test_normal(self):
input_size = [64, 128, 1024]
dtype = torch.float32
device = "cuda"
inputs = [
torch.randn(*input_size, device=device, dtype=dtype),
]
mean = 3.7
std = 2.5
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
s_mean = fd.define_scalar(mean)
s_std = fd.define_scalar(std)
size = fd.ops.tensor_sizes(t0)
t1 = fd.ops.normal(s_mean, s_std, size, DataType.Double)
fd.add_output(t1)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
# Is there a better way to test distribution?!
self.assertTrue(
nvf_out[0]
.mean()
.cpu()
.float()
.isclose(torch.tensor(mean), rtol=1e-2, atol=1e-2)
.item()
)
self.assertTrue(
nvf_out[0]
.std()
.cpu()
.float()
.isclose(torch.tensor(std), rtol=1e-2, atol=1e-2)
.item()
)
def test_uniform(self):
input_size = [64, 128, 1024]
dtype = torch.float32
device = "cuda"
inputs = [
torch.randn(*input_size, device=device, dtype=dtype),
]
lo = 1.8
hi = 1223.5
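        # Loose statistical sanity checks below: for a uniform distribution on [lo, hi],
        # the sample mean should be near (lo + hi) / 2, the min near lo, and the max near hi.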
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
s_lo = fd.define_scalar(lo)
s_hi = fd.define_scalar(hi)
size = fd.ops.tensor_sizes(t0)
t1 = fd.ops.uniform(s_lo, s_hi, size, DataType.Double)
fd.add_output(t1)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
# Is there a better way to test distribution?!
self.assertTrue(
nvf_out[0]
.mean()
.cpu()
.float()
            .isclose(torch.tensor((lo + hi) / 2.0), rtol=1e-2, atol=1e-2)
.item()
)
self.assertTrue(
nvf_out[0]
.min()
.cpu()
.float()
.isclose(torch.tensor(lo), rtol=1e-2, atol=1e-2)
.item()
)
self.assertTrue(
nvf_out[0]
.max()
.cpu()
.float()
.isclose(torch.tensor(hi), rtol=1e-2, atol=1e-2)
.item()
)
def test_where_dtypes(self):
inputs = [
torch.arange(2, device="cuda").type(torch.bool),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
c0 = fd.define_scalar(3.0)
c1 = fd.define_scalar(5.0)
t1 = fd.ops.where(t0, c0, c1) # DataType.Double
fd.add_output(t1)
c0f = fd.define_scalar(3.0, DataType.Float)
c1f = fd.define_scalar(5.0, DataType.Float)
t1f = fd.ops.where(t0, c0f, c1f) # DataType.Float
fd.add_output(t1f)
c0d = fd.define_scalar(3.0, DataType.Double)
c1d = fd.define_scalar(5.0, DataType.Double)
t1d = fd.ops.where(t0, c0d, c1d) # DataType.Double
fd.add_output(t1d)
c0i = fd.define_scalar(3, DataType.Int32)
c1i = fd.define_scalar(5, DataType.Int32)
t1i = fd.ops.where(t0, c0i, c1i) # DataType.Int32
fd.add_output(t1i)
c0l = fd.define_scalar(3)
c1l = fd.define_scalar(5)
t1l = fd.ops.where(t0, c0l, c1l) # DataType.Int
fd.add_output(t1l)
c0c = fd.define_scalar(complex(3.0))
c1c = fd.define_scalar(complex(5.0))
t1c = fd.ops.where(t0, c0c, c1c) # DataType.ComplexDouble
fd.add_output(t1c)
c0cf = fd.define_scalar(3.0 + 0j, DataType.ComplexFloat)
c1cf = fd.define_scalar(5.0 + 0j, DataType.ComplexFloat)
t1cf = fd.ops.where(t0, c0cf, c1cf) # DataType.ComplexFloat
fd.add_output(t1cf)
c0cd = fd.define_scalar(3.0 + 0j, DataType.ComplexDouble)
c1cd = fd.define_scalar(5.0 + 0j, DataType.ComplexDouble)
t1cd = fd.ops.where(t0, c0cd, c1cd) # DataType.ComplexDouble
fd.add_output(t1cd)
c0b = fd.define_scalar(True, DataType.Bool)
c1b = fd.define_scalar(False, DataType.Bool)
t1b = fd.ops.where(t0, c0b, c1b) # DataType.Bool
fd.add_output(t1b)
(
n,
nf,
nd,
ni,
nl,
nc,
ncf,
ncd,
nb,
), _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.where(inputs[0], 3.0, 5.0)
# explicit Float dtype matches torch.where behavior
self.assertEqual(eager_out, nf)
assert n.dtype == torch.float64
assert nf.dtype == torch.float32
assert nd.dtype == torch.float64
assert ni.dtype == torch.int32
assert nl.dtype == torch.int64
assert nc.dtype == torch.complex128
assert ncf.dtype == torch.complex64
assert ncd.dtype == torch.complex128
assert nb.dtype == torch.bool
def test_complex_constants(self):
inputs = [
torch.arange(2, device="cuda").type(torch.complex64),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
c0 = fd.define_scalar(complex(3.0, 0.5))
t1 = fd.ops.mul(t0, c0)
fd.add_output(t1)
(n,), _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = inputs[0] * (3.0 + 0.5j)
self.assertEqual(eager_out, n)
assert n.dtype == torch.complex64
def test_where_op(self):
def nvfuser_where(pred, a, b):
with FusionDefinition() as fd:
nv_pred = fd.define_tensor(
sizes=pred.shape, strides=pred.stride(), dtype=DataType.Bool
)
nv_a = fd.define_tensor(
sizes=a.shape,
strides=a.stride(),
dtype=torch_dtype_to_nvfuser_dtype(a.dtype),
)
nv_b = fd.define_tensor(
sizes=b.shape,
strides=b.stride(),
dtype=torch_dtype_to_nvfuser_dtype(b.dtype),
)
result = fd.ops.where(nv_pred, nv_a, nv_b)
fd.add_output(result)
return fd.execute((pred, a, b))[0]
pred = torch.testing.make_tensor((5,), device="cuda", dtype=torch.bool)
list_of_dtype = [torch.float16, torch.bfloat16, torch.float32]
for atype in list_of_dtype:
for btype in list_of_dtype:
a = torch.randn((5,), device="cuda", dtype=atype)
b = torch.randn((5,), device="cuda", dtype=btype)
nv_result = nvfuser_where(pred, a, b)
torch_result = torch.where(pred, a, b)
self.assertEqual(nv_result, torch_result)
def test_iota(self):
inputs = [
(2, 0, 2, DataType.Int),
(3, 100, 1, DataType.Int32),
            # TODO: How do I test this case? I am getting the following error:
# NameError: name 'None0' is not defined
# (4, None, None, DataType.Int),
]
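        # Each tuple is (length, start, step, dtype); iota is expected to produce
        # [start, start + step, ..., start + (length - 1) * step], matching the eager
        # reference tensors below.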
def fusion_func(fd: FusionDefinition):
for input in inputs:
c0 = fd.define_scalar(input[0])
c1 = None if input[1] is None else fd.define_scalar(input[1])
c2 = None if input[2] is None else fd.define_scalar(input[2])
dt = input[3]
t3 = fd.ops.iota(c0, c1, c2, dt)
fd.add_output(t3)
nvf_out, _ = self.exec_nvfuser(fusion_func, [])
eager_out1 = torch.tensor([0, 2], dtype=torch.long, device="cuda")
eager_out2 = torch.tensor([100, 101, 102], dtype=torch.int, device="cuda")
eager_out3 = torch.tensor([0, 1, 2, 3], dtype=torch.long, device="cuda")
self.assertEqual(eager_out1, nvf_out[0])
self.assertEqual(eager_out2, nvf_out[1])
# self.assertEqual(eager_out3, nvf_out[2])
def test_complex_rsqrt(self):
inputs = [
torch.randn(4, device="cuda", dtype=torch.complex64),
torch.randn(4, device="cuda", dtype=torch.complex128),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.ops.rsqrt(t0)
fd.add_output(t2)
t3 = fd.ops.rsqrt(t1)
fd.add_output(t3)
(rfloat, rdouble), _ = self.exec_nvfuser(fusion_func, inputs)
at_rfloat = inputs[0].rsqrt()
at_rdouble = inputs[1].rsqrt()
self.assertEqual(at_rfloat, rfloat)
self.assertEqual(at_rdouble, rdouble)
def test_reduction_complex_number(self):
def test_dtype(torch_dtype):
inputs = [torch.randn(2, 32, device="cuda", dtype=torch_dtype)]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.sum(
t0, [-1], False, torch_dtype_to_nvfuser_dtype(torch_dtype)
)
fd.add_output(t1)
nvf_out1, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.sum(inputs[0], dim=-1)
self.assertEqual(eager_out, nvf_out1[0])
list_of_dtype = [torch.complex64, torch.complex128]
for torch_dtype in list_of_dtype:
test_dtype(torch_dtype)
def test_arithmetic_ops(self):
inputs = [
torch.randn(3, 4, 5, device="cuda", dtype=torch.float32),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
c0 = fd.define_scalar(1.0)
t1 = -t0
t2 = abs(t0)
c1 = -c0
c2 = abs(c0)
# Using literals like this will work once
# https://github.com/csarofeen/pytorch/pull/2449 is merged
# t3 = -t1 * (1 + t0 ** 2) / t2 + c2 ** c1 - 1.0
t3 = -t1 * (c0 - t0 * t0) / t2 + c2**c1 - c0
fd.add_output(t1)
fd.add_output(t2)
fd.add_output(t3)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
at_out0 = -inputs[0]
at_out1 = abs(inputs[0])
at_out2 = inputs[0] * (1.0 - inputs[0] * inputs[0]) / abs(inputs[0])
self.assertEqual(at_out0, nvf_out[0])
self.assertEqual(at_out1, nvf_out[1])
self.assertEqual(at_out2, nvf_out[2])
def test_signbit(self):
inputs = [
torch.randn(3, 4, 5, device="cuda", dtype=torch.float32),
torch.randn(3, 4, 5, device="cuda", dtype=torch.float32),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.ops.where(fd.ops.signbit(t0), -abs(t1), abs(t1))
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
at_out = torch.where(
torch.signbit(inputs[0]), -torch.abs(inputs[1]), torch.abs(inputs[1])
)
self.assertEqual(at_out, nvf_out[0])
def test_all_dim_var_mean(self):
inputs = [torch.randn(2, 2, 2, device="cuda")]
def fuser_function(correction):
with FusionDefinition() as fd:
t0 = fd.from_pytorch(inputs[0])
t1, t2 = fd.ops.var_mean(t0, [0, 1, 2], correction)
fd.add_output(t1)
fd.add_output(t2)
return fd.execute(inputs)
list_of_test_cases = [0, 1]
for correction in list_of_test_cases:
fuser_result = fuser_function(correction)
torch_result = torch.var_mean(inputs[0], [0, 1, 2], bool(correction))
self.assertEqual(fuser_result, torch_result)
def test_var_mean_correction(self):
num_elem = 2
inputs = [torch.randn(2, num_elem, device="cuda")]
def fuser_function(correction):
with FusionDefinition() as fd:
t0 = fd.from_pytorch(inputs[0])
t1, t2 = fd.ops.var_mean(t0, [-1], correction)
fd.add_output(t1)
fd.add_output(t2)
return fd.execute(inputs)
for correction in range(num_elem + 5):
fuser_result = fuser_function(correction)
torch_result = torch.var_mean(inputs[0], [-1], correction=correction)
self.assertEqual(fuser_result, torch_result)
def test_var_correction(self):
num_elem = 2
inputs = [torch.randn(2, num_elem, device="cuda")]
def fuser_function(correction):
with FusionDefinition() as fd:
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.var(t0, [-1], correction)
fd.add_output(t1)
return fd.execute(inputs)
for correction in range(num_elem + 5):
fuser_result = fuser_function(correction)
torch_result = torch.var(inputs[0], [-1], correction=correction)
self.assertEqual(fuser_result, [torch_result])
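    # Note on the two var/var_mean tests above: `correction` follows the torch semantics,
    # i.e. the variance denominator is (N - correction); correction=1 gives the usual
    # Bessel-corrected, unbiased estimate.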
def test_scalar_only_inputs(self):
# We don't allow scalar outputs, currently,
# so a tensor has to be returned
def fusion_func(fd: FusionDefinition):
s0 = fd.define_scalar()
s1 = fd.define_scalar()
s2 = fd.ops.add(s0, s1)
c0 = fd.define_scalar(1.0, DataType.Float)
t3 = fd.ops.full(shape=[2, 2], fill_value=c0, dtype=DataType.Float)
t4 = fd.ops.mul(t3, s2)
fd.add_output(t4)
with FusionDefinition() as fd:
fusion_func(fd)
# TODO: full is broken and does not print its proper definition
# Issue: https://github.com/csarofeen/pytorch/issues/2502
nvf_out = fd.execute([2.0, 3.0])
eager_out = torch.full([2, 2], 1.0) * 5.0
self.assertEqual(eager_out, nvf_out[0])
def test_addcmul(self):
inputs = [
torch.randn(4, device="cuda", dtype=torch.float32),
torch.randn(4, device="cuda", dtype=torch.float32),
torch.randn(4, device="cuda", dtype=torch.float32),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.from_pytorch(inputs[2])
c0 = fd.define_scalar(0.1)
t3 = fd.ops.addcmul(t0, t1, t2, c0)
fd.add_output(t3)
nvfout, _ = self.exec_nvfuser(fusion_func, inputs)
torch_out = torch.addcmul(*inputs, value=0.1)
self.assertEqual(nvfout[0], torch_out)
def test_compute_contiguity(self):
sizes = [2, 1, 3, 1, 4, 5, 6]
strides = [80, 30, 30, 456456465465, 0, 6, 1]
contiguity = [False, None, True, None, None, True, True]
self.assertEqual(compute_contiguity(sizes, strides), contiguity)
strides = [800, 300, 300, 456456465465, 0, 60, 10]
contiguity = [False, None, True, None, None, True, False]
self.assertEqual(compute_contiguity(sizes, strides), contiguity)
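    # A rough reading of compute_contiguity (inferred from the cases above): size-1 and
    # stride-0 (broadcast) dimensions map to None; a remaining dimension is True when its
    # stride equals size * stride of the next non-broadcast dimension to its right (the
    # innermost such dimension is True when its stride is 1), otherwise False. For example,
    # in the first case dim 0 is False because 80 != 3 * 30.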
def test_prod(self):
inputs = [
torch.ones(2, 4, 8, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.prod(t0, DataType.Float)
t2 = fd.ops.prod(t0, 1, False, DataType.Float)
t3 = fd.ops.prod(t0, 1, True, DataType.Float)
t4 = fd.ops.prod(t0, [-1], False, DataType.Float)
fd.add_output(t1)
fd.add_output(t2)
fd.add_output(t3)
fd.add_output(t4)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
eager_outs = [
torch.prod(inputs[0], dtype=torch.float32),
torch.prod(inputs[0], 1, False, dtype=torch.float32),
torch.prod(inputs[0], 1, True, dtype=torch.float32),
torch.prod(inputs[0], -1, False, dtype=torch.float32),
]
assert len(nvf_out) == len(eager_outs)
for n, e in zip(nvf_out, eager_outs):
self.assertEqual(n, e)
def test_output_stride_order(self):
inputs = [
torch.arange(0, 120).reshape(2, 3, 4, 5).cuda().float(),
]
eager_out = inputs[0] + 3.0
for perm in itertools.permutations(range(4), 4):
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
c0 = fd.define_scalar(3.0)
t1 = fd.ops.add(t0, c0)
fd.add_output(t1, perm)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(eager_out, nvf_out[0])
nvf_stride = nvf_out[0].stride()
sorted_stride = list(nvf_stride)
for idx, axis in enumerate(perm):
sorted_stride[axis] = nvf_stride[idx]
self.assertTrue(sorted(sorted_stride, reverse=True) == sorted_stride)
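            # The check above permutes the output strides according to `perm` and asserts
            # that the permuted strides are monotonically decreasing, i.e. the requested
            # stride order was honored in the output's memory layout.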
def test_expanded_bcast_tensor(self):
inputs = [
torch.tensor(1.5, device="cuda"),
torch.randn(5, 5, 5, device="cuda"),
torch.randint(0, 1, (5, 5), device="cuda")
.bool()
.unsqueeze(-1)
.expand(5, 5, 5),
]
def fusion_func(fd: FusionDefinition) -> None:
T0 = fd.from_pytorch(inputs[0])
T1 = fd.from_pytorch(inputs[1])
T2 = fd.from_pytorch(inputs[2])
T3 = fd.ops.add(T0, T1)
T4 = fd.ops.add(T2, T3)
fd.add_output(T4)
eager_out = inputs[0] + inputs[1] + inputs[2]
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(eager_out, nvf_out[0])
def test_segment_set(self):
inputs = [
torch.randn(5, 5, 5, device="cuda"),
]
def fusion_func(fd: FusionDefinition) -> None:
T0 = fd.from_pytorch(inputs[0])
T1 = fd.ops.neg(T0)
T2 = fd.ops.segment_set(T1)
T3 = fd.ops.relu(T2)
fd.add_output(T3)
eager_out = inputs[0].neg().relu()
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(eager_out, nvf_out[0])
def test_fix_2549(self):
a = torch.ones(4, 1, dtype=torch.double, device="cuda")
b = torch.ones(4, 4, dtype=torch.double, device="cuda")
def nvfuser_fusion_id(fd: FusionDefinition) -> None:
T0 = fd.define_tensor(
sizes=a.shape, strides=a.stride(), dtype=DataType.Double, is_cpu=False
)
T1 = fd.define_tensor(
sizes=b.shape, strides=b.stride(), dtype=DataType.Double, is_cpu=False
)
T2 = fd.ops.broadcast_in_dim(T0, shape=[4, 4], broadcast_dims=[0, 1])
T3 = fd.ops.div(T1, T2)
fd.add_output(T3)
with FusionDefinition() as fd:
nvfuser_fusion_id(fd)
out = fd.execute([a, b])
self.assertEqual(out[0], b / a)
def test_real_imag(self):
for dtype in [torch.complex128, torch.complex64]:
inputs = [
torch.randn(5, dtype=dtype, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
fd.add_output(fd.ops.real(t0))
fd.add_output(fd.ops.imag(t0))
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(torch.real(inputs[0]), nvf_out[0])
self.assertEqual(torch.imag(inputs[0]), nvf_out[1])
def test_cuda_code_and_scheduled_fusion_ir_strings(self):
inputs = [
torch.randn(2, 2, 2, 2, device="cuda"),
]
big_inputs = [
torch.randn(64, 64, 64, 64, device="cuda"),
]
# Function only based definition
class DefFuncFusion(FusionDefinition):
def definition(self):
t0 = self.from_pytorch(inputs[0])
t1 = self.ops.relu(t0)
self.add_output(t1)
# Function based definition plus a user schedule
class UserSchedFusion(FusionDefinition):
def definition(self):
t0 = self.from_pytorch(inputs[0])
t1 = self.ops.sinh(t0)
self.add_output(t1)
def schedule(self):
pass
# Context Based Definition
ctx_fusion = FusionDefinition()
with ctx_fusion:
t0 = ctx_fusion.from_pytorch(inputs[0])
t1 = ctx_fusion.ops.tanh(t0)
ctx_fusion.add_output(t1)
# Context Based Definition with a segmented fusion
ctx_seg_fusion = FusionDefinition()
with ctx_seg_fusion:
t0 = ctx_seg_fusion.from_pytorch(inputs[0])
t1 = ctx_seg_fusion.ops.sum(t0, axis=0)
t2 = ctx_seg_fusion.ops.sum(t0, axis=-1)
ctx_seg_fusion.add_output(t1)
ctx_seg_fusion.add_output(t2)
test_defs = [DefFuncFusion(), UserSchedFusion(), ctx_fusion, ctx_seg_fusion]
for fd in test_defs:
# Attempting to get the cuda code for an un-executed FusionDefinition
# should trigger a RuntimeError and not a segfault
with self.assertRaisesRegex(RuntimeError, "Invalid fusion definition!"):
_ = fd.last_cuda_code()
with self.assertRaisesRegex(RuntimeError, "Invalid fusion definition!"):
_ = fd.last_scheduled_fusion_ir()
            # Only make this check for function based definitions
            if type(fd) is not FusionDefinition:
with self.assertRaisesRegex(RuntimeError, "Invalid fusion definition!"):
_ = fd.fusion_ir()
_ = fd.execute(inputs)
code_len = len(fd.last_cuda_code())
self.assertTrue(code_len > 0, "Cuda Code was not produced!")
code_len = len(fd.last_cuda_code(intrinsic_code=True))
self.assertTrue(code_len > 0, "Cuda Code was not produced!")
sched_ir_len = len(fd.last_scheduled_fusion_ir())
self.assertTrue(code_len > 0, "Scheduled Fusion IR was not produced!")
sched_ir_len = len(fd.last_scheduled_fusion_ir(tensor_transforms=True))
self.assertTrue(code_len > 0, "Scheduled Fusion IR was not produced!")
sched_ir_len = len(fd.fusion_ir())
self.assertTrue(code_len > 0, "Unscheduled Fusion IR was not produced!")
code_len = len(fd.cuda_code_for(inputs))
self.assertTrue(code_len > 0, "Cuda Code was not produced!")
code_len = len(fd.cuda_code_for(inputs, intrinsic_code=True))
self.assertTrue(code_len > 0, "Cuda Code was not produced!")
sched_ir_len = len(fd.scheduled_fusion_ir_for(inputs))
self.assertTrue(code_len > 0, "Scheduled Fusion IR was not produced!")
sched_ir_len = len(
fd.scheduled_fusion_ir_for(inputs, tensor_transforms=True)
)
self.assertTrue(code_len > 0, "Scheduled Fusion IR was not produced!")
            # Attempt to get strings for inputs that do not heuristically match
            # and for which a new fusion has not been compiled
with self.assertRaisesRegex(RuntimeError, "Fusion is not compiled!"):
_ = fd.cuda_code_for(big_inputs)
with self.assertRaisesRegex(RuntimeError, "Fusion is not compiled!"):
_ = fd.scheduled_fusion_ir_for(big_inputs)
# It is necessary to reset the Fusion Cache
# so serialization/deserialization does not exhibit the same error across tests.
fc = FusionCache.get()
fc.reset()
def test_pad(self):
inputs = [
torch.testing.make_tensor((2, 3), dtype=torch.float32, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.pad(t0, [1, 1, 1, 1])
fd.add_output(t1)
# zero padding in some dims
t2 = fd.ops.pad(t0, [0, 0, 2, 3])
fd.add_output(t2)
# zero padding in all dims
t3 = fd.ops.pad(t0, [0, 0, 0, 0])
fd.add_output(t3)
# no padding provided in first dim
t4 = fd.ops.pad(t0, [2, 3])
fd.add_output(t4)
# test padding with a value other than 0
fill_val = fd.define_scalar(2.0)
t5 = fd.ops.pad(t0, [2, 3], fill_val)
fd.add_output(t5)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(F.pad(inputs[0], [1, 1, 1, 1]), nvf_out[0])
self.assertEqual(F.pad(inputs[0], [0, 0, 2, 3]), nvf_out[1])
self.assertEqual(F.pad(inputs[0], [0, 0, 0, 0]), nvf_out[2])
self.assertEqual(F.pad(inputs[0], [2, 3]), nvf_out[3])
self.assertEqual(F.pad(inputs[0], [2, 3], "constant", 2.0), nvf_out[4])
def test_pad_cache(self):
"""Test that using different pad widths causes a cache miss.
cf. https://github.com/NVIDIA/Fuser/pull/10#pullrequestreview-1352667557
"""
inputs = [
torch.testing.make_tensor((2, 3), dtype=torch.float32, device="cuda"),
]
def fusion_func_pad1(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.pad(t0, [1, 1])
fd.add_output(t1)
nvf_out1, _ = self.exec_nvfuser(
fusion_func_pad1, inputs, new_fusion_expected=True
)
_ = self.exec_nvfuser(fusion_func_pad1, inputs, new_fusion_expected=False)
def fusion_func_pad2(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.ops.pad(t0, [2, 2])
fd.add_output(t1)
nvf_out2, _ = self.exec_nvfuser(
fusion_func_pad2, inputs, new_fusion_expected=True
)
def fusion_func_pad3(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
fill_val = fd.define_scalar(2.0)
t1 = fd.ops.pad(t0, [1, 1], fill_val)
fd.add_output(t1)
nvf_out3, _ = self.exec_nvfuser(
fusion_func_pad3, inputs, new_fusion_expected=True
)
_ = self.exec_nvfuser(fusion_func_pad3, inputs, new_fusion_expected=False)
self.assertEqual(F.pad(inputs[0], [1, 1]), nvf_out1[0])
# Erroneous cache miss would use kernel 1 instead of 2
self.assertEqual(F.pad(inputs[0], [2, 2]), nvf_out2[0])
# Erroneous cache hit based on fill value would use kernel1
self.assertEqual(F.pad(inputs[0], [1, 1], "constant", 2.0), nvf_out3[0])
def test_cat(self):
inputs = [
torch.randn(2, 4, device="cuda"),
torch.randn(2, 3, device="cuda"),
torch.randn(4, 4, device="cuda"),
torch.randn(0, 4, device="cuda"),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.from_pytorch(inputs[2])
t3 = fd.from_pytorch(inputs[3])
t3 = fd.ops.cat([t0, t1], 1)
fd.add_output(t3)
t4 = fd.ops.cat([t0, t2], 0)
fd.add_output(t4)
# torch.cat accepts empty tensors (size 0 in the concat dimension),
# which do not affect the output.
# The below fails with RuntimeError: mapped_id_resize != nullptr
# INTERNAL ASSERT FAILED at
# "/opt/pytorch/nvfuser/csrc/lower_index_compute.cpp":1306
# t5 = fd.ops.cat([t0, t3], 0)
# fd.add_output(t5)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(torch.cat([inputs[0], inputs[1]], dim=1), nvf_out[0])
self.assertEqual(torch.cat([inputs[0], inputs[2]], dim=0), nvf_out[1])
# self.assertEqual(torch.cat([inputs[0], inputs[3]], dim=0), nvf_out[2])
def test_nextafter(self):
inputs = [
# torch.nextafter is only defined for float{32,64} tensor inputs
torch.testing.make_tensor(4, device="cuda", dtype=torch.float32),
torch.testing.make_tensor(4, device="cuda", dtype=torch.float64),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
s0 = fd.define_scalar(1.0, dtype=DataType.Float)
s1 = fd.define_scalar(-1.0, dtype=DataType.Double)
for a, b in itertools.product(
[t0, t1, s0, s1],
[t0, t1, s0, s1],
):
# always enter the fusion...
t = fd.ops.nextafter(a, b)
if isinstance(t, Tensor):
# ...but skip outputting scalars, which we don't support
fd.add_output(t)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
ab = [inputs[0], inputs[1], 1.0, -1.0]
i = 0
for a, b in itertools.product(ab, ab):
if not (isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor)):
continue
n = nvf_out[i]
i += 1
torch_out = torch.nextafter(
torch.as_tensor(a, device="cuda"), torch.as_tensor(b, device="cuda")
)
self.assertEqual(n, torch_out)
def test_nanogpt_mha_dpa(self):
inputs = [
torch.randn(16, 16, 128, 128, device="cuda"),
torch.randn(1, 1, 1024, 1024, device="cuda"),
]
def nvfuser_fusion(fd: FusionDefinition, prob) -> None:
T0 = fd.define_tensor(
shape=[-1, -1, -1, -1],
contiguity=[True, True, True, True],
dtype=DataType.Float,
is_cpu=False,
)
T1 = fd.define_tensor(
shape=[1, 1, -1, -1],
contiguity=[None, None, True, True],
dtype=DataType.Float,
is_cpu=False,
)
S2 = fd.define_scalar(0.125000, dtype=DataType.Double)
T3 = fd.ops.mul(T0, S2)
T4 = fd.ops.slice(
T1,
start_indices=[0, 0, 0, 0],
end_indices=[1, 1, 128, 128],
strides=[1, 1, 1, 1],
)
S5 = fd.define_scalar(0.00000, dtype=DataType.Double)
T6 = fd.ops.eq(S5, T4)
T7 = fd.ops.broadcast_in_dim(
T6, shape=[16, 16, 128, 128], broadcast_dims=[0, 1, 2, 3]
)
S8 = fd.define_scalar(float("-inf"), dtype=DataType.Double)
T9 = fd.ops.where(T7, S8, T3)
S10 = fd.define_scalar(-1, dtype=DataType.Int)
S11 = fd.define_scalar(4, dtype=DataType.Int)
S12 = fd.ops.add(S10, S11)
T13 = fd.ops.max(T9, axes=[3], keepdim=False, dtype=DataType.Null)
T14 = fd.ops.broadcast_in_dim(
T13, shape=[16, 16, 128, 1], broadcast_dims=[0, 1, 2]
)
T15 = fd.ops.broadcast_in_dim(
T14, shape=[16, 16, 128, 128], broadcast_dims=[0, 1, 2, 3]
)
T16 = fd.ops.sub(T9, T15)
T17 = fd.ops.exp(T16)
S18 = fd.define_scalar(-1, dtype=DataType.Int)
S19 = fd.define_scalar(4, dtype=DataType.Int)
S20 = fd.ops.add(S18, S19)
T21 = fd.ops.sum(T17, axes=[3], keepdim=False, dtype=DataType.Null)
T22 = fd.ops.broadcast_in_dim(
T21, shape=[16, 16, 128, 1], broadcast_dims=[0, 1, 2]
)
T23 = fd.ops.broadcast_in_dim(
T22, shape=[16, 16, 128, 128], broadcast_dims=[0, 1, 2, 3]
)
T24 = fd.ops.div(T17, T23)
S25 = fd.define_scalar(16, dtype=DataType.Int)
S26 = fd.define_scalar(16, dtype=DataType.Int)
S27 = fd.define_scalar(128, dtype=DataType.Int)
S28 = fd.define_scalar(128, dtype=DataType.Int)
S29 = fd.define_scalar(0.00000, dtype=DataType.Double)
S30 = fd.define_scalar(1.00000, dtype=DataType.Double)
T31 = fd.ops.uniform(
S29, S30, shape=[S25, S26, S27, S28], dtype=DataType.Float
)
S32 = fd.define_scalar(1.0 - prob, dtype=DataType.Double)
T33 = fd.ops.lt(T31, S32)
T34 = fd.ops.cast(T33, dtype=DataType.Float)
T35 = fd.ops.mul(T24, T34)
S36 = fd.define_scalar(1.0 / (1.0 - prob), dtype=DataType.Double)
T37 = fd.ops.mul(T35, S36)
fd.add_output(T37)
def torch_def(acts, bias, n_seq_len, n_head_dim, prob):
att = acts * (1.0 / math.sqrt(n_head_dim))
att = att.masked_fill(
bias[:, :, :n_seq_len, :n_seq_len] == 0, float("-inf")
)
att = torch.nn.functional.softmax(att, dim=-1)
att = torch.nn.functional.dropout(att, p=prob)
return att
        # NOTE: The dropout probability needs to be set to 0 so that no elements are
        # zeroed out, because the eager and nvFuser implementations do not have
        # matching blocking.
nvf_out, _ = self.exec_nvfuser(partial(nvfuser_fusion, prob=0.0), inputs)
eager_out = torch_def(inputs[0], inputs[1], 128, 64, 0.0)
for idx in range(len(nvf_out)):
self.assertEqual(eager_out, nvf_out[idx])
def test_nanogpt_split_mha_linears(self):
inputs = [
torch.randn(16, 128, 3072, device="cuda"),
]
def nvfuser_fusion_0(fd: FusionDefinition) -> None:
T0 = fd.from_pytorch(inputs[0])
T0_slice1 = fd.ops.slice(T0, [0, 0, 0], [16, 128, 1024], [1, 1, 1])
T0_slice2 = fd.ops.slice(T0, [0, 0, 1024], [16, 128, 2048], [1, 1, 1])
T0_slice3 = fd.ops.slice(T0, [0, 0, 2048], [16, 128, 3072], [1, 1, 1])
T1_slice1 = fd.ops.reshape(T0_slice1, [16, 128, 1024], [16, 128, 16, 64])
T1_slice2 = fd.ops.reshape(T0_slice2, [16, 128, 1024], [16, 128, 16, 64])
T1_slice3 = fd.ops.reshape(T0_slice3, [16, 128, 1024], [16, 128, 16, 64])
T2_slice1 = fd.ops.permute(T1_slice1, [0, 2, 1, 3])
T2_slice2 = fd.ops.permute(T1_slice2, [0, 2, 1, 3])
T2_slice3 = fd.ops.permute(T1_slice3, [0, 2, 1, 3])
fd.add_output(T2_slice1)
fd.add_output(T2_slice2)
fd.add_output(T2_slice3)
def torch_def_0(acts, n_embd, n_head):
B, T, C = acts.size()
q, k, v = acts.split(n_embd, dim=2)
k = k.view(B, T, n_head, (C // 3) // n_head).transpose(
1, 2
) # (B, nh, T, hs)
q = q.view(B, T, n_head, (C // 3) // n_head).transpose(
1, 2
) # (B, nh, T, hs)
v = v.view(B, T, n_head, (C // 3) // n_head).transpose(
1, 2
) # (B, nh, T, hs)
return (
q,
k,
v,
)
def nvfuser_fusion_1(fd: FusionDefinition) -> None:
T0 = fd.define_tensor(
shape=[-1, -1, -1],
contiguity=[True, True, True],
dtype=DataType.Float,
is_cpu=False,
)
T1 = fd.ops.slice(
T0,
start_indices=[0, 0, 0],
end_indices=[16, 128, 1024],
strides=[1, 1, 1],
)
T2 = fd.ops.slice(
T0,
start_indices=[0, 0, 1024],
end_indices=[16, 128, 2048],
strides=[1, 1, 1],
)
T3 = fd.ops.slice(
T0,
start_indices=[0, 0, 2048],
end_indices=[16, 128, 3072],
strides=[1, 1, 1],
)
fd.add_output(T1)
fd.add_output(T2)
fd.add_output(T3)
def torch_def_1(acts, n_embd, n_head):
B, T, C = acts.size()
q, k, v = acts.split(n_embd, dim=2)
return (
q,
k,
v,
)
tests = [
(nvfuser_fusion_0, torch_def_0),
(nvfuser_fusion_1, torch_def_1),
]
for nvf_func, torch_func in tests:
nvf_out, _ = self.exec_nvfuser(nvf_func, inputs)
eager_out = torch_func(*inputs, 1024, 16)
for idx in range(len(eager_out)):
self.assertEqual(eager_out[idx], nvf_out[idx])
def test_slice_error_checks(self):
inputs = [
[torch.randn(10, 10, device="cuda")],
[torch.randn(5, 5, device="cuda")],
]
def check_start_indices(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[-1, -2], end_indices=[5, 5], strides=[7, 7]
)
fd.add_output(T1)
def check_end_indices(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[3, 4], end_indices=[1, 2], strides=[1, 1]
)
fd.add_output(T1)
def check_strides(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[0, 0], end_indices=[5, 5], strides=[5, 5]
)
fd.add_output(T1)
def check_tensor_dims(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[0, 0, 0], end_indices=[4, 4, 4], strides=[1, 1, 1]
)
fd.add_output(T1)
def check_slice_dims_start(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[0, 0, 0], end_indices=[4, 4], strides=[1, 1]
)
fd.add_output(T1)
def check_slice_dims_end(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[0, 0], end_indices=[4, 4, 4], strides=[1, 1]
)
fd.add_output(T1)
def check_slice_dims_stride(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[0, 0], end_indices=[4, 4], strides=[1, 1, 1]
)
fd.add_output(T1)
def check_nostrides(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(T0, start_indices=[2, 2], end_indices=[4, 4])
fd.add_output(T1)
        # TODO: Currently, this check fails to produce a zero-element tensor when the tensor
        # is smaller than the index range of the slice. Therefore, it is disabled.
# Issue: https://github.com/NVIDIA/Fuser/issues/52
def legal(fd: FusionDefinition, acts) -> None:
T0 = fd.from_pytorch(acts[0])
T1 = fd.ops.slice(
T0, start_indices=[6, 6], end_indices=[8, 8], strides=[1, 1]
)
fd.add_output(T1)
checks = [
(
check_start_indices,
"Slice operation start_indices must be greater-than-or-equal-to 0. .*",
),
(
check_end_indices,
"Slice operation end_indices must be greater-than-or-equal-to start_indices. .*",
),
(
check_strides,
"nvFuser Limitation: All slice operation strides must be of size 1. .*",
),
(
check_tensor_dims,
"Number of tensor dimensions does not match slice dimensions! .*",
),
(
check_slice_dims_start,
"Slice start_indices and strides don't match! .*",
),
(
check_slice_dims_end,
"Slice indexing attribute dimensions don't match! .*",
),
(
check_slice_dims_stride,
"Slice start_indices and strides don't match! .*",
),
(check_nostrides, None),
# (legal, None),
]
first_check = True
for inp in inputs:
for check, error in checks:
if error is None:
                    # The first pass here expects a new fusion for legal definitions;
                    # the second time through they should already be cached.
out = self.exec_nvfuser(
partial(check, acts=inp), inp, new_fusion_expected=first_check
)
else:
# When a fusion definition with errors is deserialized, it is recreated, triggering an error.
# skip_serde_check=True is necessary to skip these failing fusion definitions
# so serialization/deserialization does not exhibit the same errors in subsequent tests.
self.assertRaisesRegex(
RuntimeError,
error,
self.exec_nvfuser,
partial(check, acts=inp),
inp,
skip_serde_check=True,
)
first_check = False
def test_constant_nans(self):
inputs = [
torch.randn(4, 4, device="cuda"),
]
def fusion_func(fd: FusionDefinition) -> None:
t0 = fd.from_pytorch(inputs[0])
c0 = fd.define_scalar(float("nan"))
t1 = fd.ops.add(t0, c0)
fd.add_output(t1)
eager_out = inputs[0] + float("nan")
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(eager_out, nvf_out[0])
def test_def_op_in_schedule(self):
"""
Tests for an error when a definition op is used in a schedule
"""
inputs = [
torch.randn(4, 4, 4, device="cuda"),
]
class SchedError(FusionDefinition):
def definition(self):
self.t0 = self.from_pytorch(inputs[0])
self.t1 = self.ops.tanh(self.t0)
self.add_output(self.t1)
def schedule(self):
self.t2 = self.ops.relu(self.t1)
with self.assertRaisesRegex(
RuntimeError, "Attempting to add to a completed definition!"
):
fd = SchedError()
_ = fd.execute(inputs)
@unittest.skipIf(
torch.cuda.device_count() < 2, "test_selected_device requires multiple GPUs"
)
def test_selected_device(self):
"""
Run the same Fusion as in test_scalar_only_inputs, but on device 1
"""
def fusion_func(fd: FusionDefinition):
s0 = fd.define_scalar()
s1 = fd.define_scalar()
s2 = fd.ops.add(s0, s1)
c0 = fd.define_scalar(1.0, DataType.Float)
t3 = fd.ops.full(shape=[2, 2], fill_value=c0, dtype=DataType.Float)
t4 = fd.ops.mul(t3, s2)
fd.add_output(t4)
with FusionDefinition() as fd:
fusion_func(fd)
nvf_out = fd.execute([2.0, 3.0], device="cuda:1")
eager_out = torch.full([2, 2], 1.0, device="cuda:1") * 5.0
self.assertEqual(eager_out, nvf_out[0])
self.assertTrue(nvf_out[0].device.index == 1)
def test_matmuls(self):
# Matmul Constraints:
        # 1. Input shapes need to be a multiple of 8
        # 2. Inputs need to be contiguous, as the nvFuser matmul does
        #    not handle non-contiguous inputs.
nvf_inputs_nn = [
torch.randn(8, 24, device="cuda", dtype=torch.float16),
torch.randn(16, 8, device="cuda", dtype=torch.float16),
]
eager_inputs_nn = [
nvf_inputs_nn[0].clone().transpose(0, 1),
nvf_inputs_nn[1].clone().transpose(0, 1),
]
nvf_inputs_nt = [
torch.randn(8, 24, device="cuda", dtype=torch.float16),
torch.randn(8, 16, device="cuda", dtype=torch.float16),
]
eager_inputs_nt = [
nvf_inputs_nt[0].clone().transpose(0, 1),
nvf_inputs_nt[1].clone(),
]
nvf_inputs_tn = [
torch.randn(24, 8, device="cuda", dtype=torch.float16),
torch.randn(16, 8, device="cuda", dtype=torch.float16),
]
eager_inputs_tn = [
nvf_inputs_tn[0].clone(),
nvf_inputs_tn[1].clone().transpose(0, 1),
]
nvf_inputs_tt = [
torch.randn(24, 8, device="cuda", dtype=torch.float16),
torch.randn(8, 16, device="cuda", dtype=torch.float16),
]
def fusion_func(fd: FusionDefinition, inps, matmul_fn) -> None:
t0 = fd.from_pytorch(inps[0])
t1 = fd.from_pytorch(inps[1])
t2 = eval(matmul_fn)(t0, t1)
fd.add_output(t2)
tests = [
("fd.ops._matmul_nn", nvf_inputs_nn, eager_inputs_nn),
("fd.ops._matmul_nt", nvf_inputs_nt, eager_inputs_nt),
("fd.ops._matmul_tn", nvf_inputs_tn, eager_inputs_tn),
("fd.ops._matmul_tt", nvf_inputs_tt, nvf_inputs_tt),
]
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
for mm_str, nvf_test_inputs, eager_test_inputs in tests:
if prop.major == 8:
nvf_out, _ = self.exec_nvfuser(
partial(fusion_func, inps=nvf_test_inputs, matmul_fn=mm_str),
nvf_test_inputs,
)
eager_out = torch.matmul(eager_test_inputs[0], eager_test_inputs[1])
fp16_nvf_out = nvf_out[0].to(dtype=torch.float16)
self.assertEqual(eager_out, fp16_nvf_out)
else:
with self.assertRaisesRegex(
RuntimeError, "Only the Ampere MMA Op is currently supported!"
):
with FusionDefinition() as fd:
partial(fusion_func, inps=nvf_test_inputs, matmul_fn=mm_str)(fd)
nvf_out = fd.execute(nvf_test_inputs)
# It is necessary to reset the Fusion Cache so
# serialization/deserialization does not exhibit the same error
# across tests
fc = FusionCache.get()
fc.reset()
def test_integer_division(self):
inputs = [
torch.testing.make_tensor(1024, device="cuda", dtype=torch.long),
torch.testing.make_tensor(1024, device="cuda", dtype=torch.long),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.ops.div(t0, t1)
t3 = fd.ops.truediv(t0, t1)
fd.add_output(t2)
fd.add_output(t3)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(
nvf_out[0], torch.div(inputs[0], inputs[1], rounding_mode="trunc")
)
self.assertEqual(nvf_out[1], torch.true_divide(inputs[0], inputs[1]))
def test_right_shift_arithmetic(self):
inputs = [
torch.tensor([-2147483648, 1073741824], dtype=torch.int32, device="cuda")
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
c0 = fd.define_scalar(3)
t1 = fd.ops.bitwise_right_shift(t0, c0)
fd.add_output(t1)
nvf_out1, _ = self.exec_nvfuser(fusion_func, inputs)
eager_out = torch.bitwise_right_shift(inputs[0], 3)
self.assertEqual(eager_out, nvf_out1[0])
def test_right_shift_logical(self):
dtypes = [torch.int32, torch.int64]
input = torch.tensor(
[
-1,
-2147483648,
1073741824,
-64463884,
-65968277,
4042311,
-98914167,
5526216,
],
device="cuda",
)
# expected_outputs given by jax.lax.shift_right_logical(inputs, 3)
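# For example (assuming a two's-complement representation): -1 stored as
# int32 is 0xFFFFFFFF, and a *logical* right shift by 3 gives
# 0x1FFFFFFF == 536870911 (the first expected value below), whereas an
# *arithmetic* shift would sign-extend and keep the value at -1.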
expected_outputs = [
torch.tensor(
[
536870911,
268435456,
134217728,
528812926,
528624877,
505288,
524506641,
690777,
],
dtype=torch.int32,
device="cuda",
),
torch.tensor(
[
2305843009213693951,
2305843008945258496,
134217728,
2305843009205635966,
2305843009205447917,
505288,
2305843009201329681,
690777,
],
dtype=torch.int64,
device="cuda",
),
]
for idx, dtype in enumerate(dtypes):
current_input = input.to(dtype)
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(current_input)
c0 = fd.define_constant(3)
t1 = fd.ops.logical_right_shift(t0, c0)
fd.add_output(t1)
nvf_out, _ = self.exec_nvfuser(fusion_func, [current_input])
self.assertEqual(nvf_out[0], expected_outputs[idx])
def test_right_shift_logical_sizeof_dtype(self):
dtypes = [torch.int32, torch.int64]
input = torch.tensor(
[
-1,
-2147483648,
1073741824,
-64463884,
-65968277,
4042311,
-98914167,
5526216,
],
device="cuda",
)
for idx, dtype in enumerate(dtypes):
current_input = input.to(dtype)
num_bits = 32 if (dtype == torch.int32) else 64
# expected_outputs given by jax.lax.shift_right_logical(inputs, sizeof(dtype))
expected_output = torch.zeros_like(current_input)
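# Shifting by the full bit width (32 or 64) is expected to clear every bit
# here, which is why the reference output is simply zeros_like.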
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(current_input)
c0 = fd.define_scalar(None, dtype=DataType.Int)
t1 = fd.ops.logical_right_shift(t0, c0)
fd.add_output(t1)
nvf_out, _ = self.exec_nvfuser(fusion_func, [current_input, num_bits])
self.assertEqual(nvf_out[0], expected_output)
def test_gcd(self):
inputs = [
torch.testing.make_tensor(1024, device="cuda", dtype=torch.long),
torch.testing.make_tensor(1024, device="cuda", dtype=torch.long),
]
def fusion_func(fd: FusionDefinition):
t0 = fd.from_pytorch(inputs[0])
t1 = fd.from_pytorch(inputs[1])
t2 = fd.ops.gcd(t0, t1)
fd.add_output(t2)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(nvf_out[0], torch.gcd(inputs[0], inputs[1]))
def test_input_scalar(self):
inputs = [
torch.randn((3,), dtype=torch.float32, device="cuda:0"),
0.1,
]
def fusion_func(fd: FusionDefinition) -> None:
T0 = fd.from_pytorch(inputs[0])
S1 = fd.define_scalar()
T1 = fd.ops.mul(T0, S1)
fd.add_output(T1)
# Just test that this executes, not that it's correct
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
def test_debug_output(self):
inputs = [
torch.randn((3,), dtype=torch.float32, device="cuda:0"),
0.1,
]
with FusionDefinition() as fd:
T0 = fd.from_pytorch(inputs[0])
S1 = fd.define_scalar()
T1 = fd.ops.div(T0, S1)
fd.add_output(T1)
out1 = fd.execute(inputs)
self.assertIsNone(fd.debug_output())
# If debug output is captured, getDebugOutput() will not return None.
# The output will depend on the NVFUSER_DUMP environment variable in
# that case.
out2 = fd.execute(inputs, capture_debug_output=True)
self.assertIsNotNone(fd.debug_output())
# Test that deterministic random ops (uniform, normal) give the same results as
# their stochastic versions
def test_deterministic_random(self):
input_size = [5, 9]
dtype = torch.float32
device = "cuda"
inputs = [
torch.randn(*input_size, device=device, dtype=dtype),
]
for randopname in ["uniform", "normal"]:
def fusion_func(fd: FusionDefinition, *, deterministic) -> None:
t1 = fd.from_pytorch(inputs[0])
a = fd.define_scalar(0.3, DataType.Float)
b = fd.define_scalar(1.7, DataType.Float)
shape = [fd.define_scalar(5), fd.define_scalar(9)]
randop = getattr(fd.ops, randopname)
if deterministic:
rng_seed = fd.define_scalar(DataType.Int)
rng_offset = fd.define_scalar(DataType.Int)
u = randop(a, b, shape, rng_seed=rng_seed, rng_offset=rng_offset)
else:
u = randop(a, b, shape)
t2 = t1 * u
fd.add_output(t2)
# exec_nvfuser tests printing and serde, so run that for each definition first
self.exec_nvfuser(partial(fusion_func, deterministic=False), inputs)
self.exec_nvfuser(
partial(fusion_func, deterministic=True), [inputs[0], 0, 0]
)
# Now instantiate FusionDefinitions in each mode
with FusionDefinition() as fd_stoch:
fusion_func(fd_stoch, deterministic=False)
with FusionDefinition() as fd_det:
fusion_func(fd_det, deterministic=True)
# Test with three different random seeds
for _ in range(3):
max_seed = 2**63 - 1
seed = random.randint(0, max_seed)
torch.manual_seed(seed)
stateful_sequence = [fd_stoch.execute(inputs) for _ in range(10)]
# Each call to uniform with DataType::Float will advance the offset by 4
stateless_sequence = [
fd_det.execute([inputs[0], seed, rng_offset])
for rng_offset in range(0, 10 * 4, 4)
]
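# The offsets 0, 4, 8, ..., 36 mirror the ten stateful calls above, on the
# assumption that each uniform/normal call advances the offset by 4.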
for i, (sful, sless) in enumerate(
zip(stateful_sequence, stateless_sequence)
):
try:
torch.testing.assert_close(sful[0], sless[0])
except AssertionError as e:
print(f"Assertion failed for iteration {i} with seed {seed}")
print(e)
break
# Test that an expand to zero is replaced with the expanded extent and not 1
# see https://github.com/NVIDIA/Fuser/issues/603
def test_expand_to_zero(self):
inputs = [
# This is an actually empty tensor
torch.zeros((1, 0), dtype=torch.float32, device="cuda:0"),
# This one is not actually empty, but should appear to be empty due to expand
torch.zeros((1, 1), dtype=torch.float32, device="cuda:0"),
]
def fusion_func(fd: FusionDefinition) -> None:
T0 = fd.from_pytorch(inputs[0])
T1 = fd.from_pytorch(inputs[1])
T2 = fd.ops.broadcast_in_dim(T0, shape=[0, 0], broadcast_dims=[0, 1])
T3 = fd.ops.broadcast_in_dim(T1, shape=[0, 0], broadcast_dims=[0, 1])
fd.add_output(T2)
fd.add_output(T3)
nvf_out, _ = self.exec_nvfuser(fusion_func, inputs)
self.assertEqual(nvf_out[0].shape, (0, 0))
self.assertEqual(nvf_out[1].shape, (0, 0))
if __name__ == "__main__":
run_tests()
|
Fuser-main
|
python_tests/test_python_frontend.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
import math
import torch
import jax
from pytest_core import OpInfo, ReferenceType, Domain
from pytest_fusion_definitions import (
api_test_fd_fn,
tensor_input_fd_fn,
tensor_api_test_fd_fn,
vector_api_test_fd_fn,
)
from pytest_input_generators import (
broadcast_error_generator,
broadcast_in_dim_generator,
broadcast_in_dim_error_generator,
cat_generator,
cat_error_generator,
define_tensor_generator,
define_tensor_error_generator,
define_vector_constant_error_generator,
define_vector_input_error_generator,
elementwise_binary_generator,
_elementwise_binary_torch,
elementwise_unary_generator,
_elementwise_unary_torch,
full_error_generator,
gather_generator,
index_select_generator,
index_select_error_generator,
iota_error_generator,
pad_error_generator,
permute_generator,
permute_error_generator,
reduction_error_generator,
reshape_generator,
reshape_error_generator,
slice_generator,
slice_error_generator,
take_along_axis_generator,
take_along_axis_error_generator,
tensor_size_error_generator,
var_mean_generator,
vector_at_error_generator,
where_error_generator,
)
from pytest_utils import (
bool_int_dtypes,
int_dtypes,
full_precision_float_dtypes,
int_float_dtypes,
float_complex_dtypes,
ArgumentType,
)
from functools import partial
from typing import List
eps = 1e-2
opinfos = []
""" Start Fusion Input Operations """
fusion_input_ops = []
define_tensor_opinfo = OpInfo(
lambda fd: fd.define_tensor,
"define_tensor",
sample_input_generator=define_tensor_generator,
error_input_generator=define_tensor_error_generator,
fd_correctness_fn=tensor_input_fd_fn,
fd_error_input_fn=tensor_input_fd_fn,
)
fusion_input_ops.append(define_tensor_opinfo)
# NOTE: "define_vector" only supports vectors of integers that represent
# tensor shapes and is not a general interface for defining vectors of
# data. Vectors of data should be handled with a 1D `define_tensor`.
define_vector_constant_opinfo = OpInfo(
lambda fd: fd.define_vector,
"define_vector_constant",
sample_input_generator=None,
error_input_generator=define_vector_constant_error_generator,
fd_error_input_fn=api_test_fd_fn,
)
fusion_input_ops.append(define_vector_constant_opinfo)
define_vector_input_opinfo = OpInfo(
lambda fd: fd.define_vector,
"define_vector_input",
sample_input_generator=None,
error_input_generator=define_vector_input_error_generator,
fd_error_input_fn=api_test_fd_fn,
)
fusion_input_ops.append(define_vector_input_opinfo)
""" End Fusion Input Operations """
""" Start Unary-Float Operations """
unary_ops = []
abs_opinfo = OpInfo(
lambda fd: fd.ops.abs,
"abs",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.abs),
)
unary_ops.append(abs_opinfo)
acos_opinfo = OpInfo(
lambda fd: fd.ops.acos,
"acos",
domain=Domain(-1, 1),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.acos),
)
unary_ops.append(acos_opinfo)
acosh_opinfo = OpInfo(
lambda fd: fd.ops.acosh,
"acosh",
domain=Domain(-1, math.inf),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.acosh),
)
unary_ops.append(acosh_opinfo)
asin_opinfo = OpInfo(
lambda fd: fd.ops.asin,
"asin",
domain=Domain(-1, 1),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.asin),
)
unary_ops.append(asin_opinfo)
asinh_opinfo = OpInfo(
lambda fd: fd.ops.asinh,
"asinh",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.asinh),
)
unary_ops.append(asinh_opinfo)
atan_opinfo = OpInfo(
lambda fd: fd.ops.atan,
"atan",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.atan),
)
unary_ops.append(atan_opinfo)
atanh_opinfo = OpInfo(
lambda fd: fd.ops.atanh,
"atanh",
domain=Domain(-1 + eps, 1 + eps),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.atanh),
)
unary_ops.append(atanh_opinfo)
bitwise_not_opinfo = OpInfo(
lambda fd: fd.ops.bitwise_not,
"bitwise_not",
dtypes=bool_int_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.bitwise_not),
)
unary_ops.append(bitwise_not_opinfo)
# TODO add nvfuser exception for int dtypes
ceil_opinfo = OpInfo(
lambda fd: fd.ops.ceil,
"ceil",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.ceil),
)
unary_ops.append(ceil_opinfo)
cos_opinfo = OpInfo(
lambda fd: fd.ops.cos,
"cos",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.cos),
)
unary_ops.append(cos_opinfo)
cosh_opinfo = OpInfo(
lambda fd: fd.ops.cosh,
"cosh",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.cosh),
)
unary_ops.append(cosh_opinfo)
erf_opinfo = OpInfo(
lambda fd: fd.ops.erf,
"erf",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.erf),
)
unary_ops.append(erf_opinfo)
erfc_opinfo = OpInfo(
lambda fd: fd.ops.erfc,
"erfc",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.erfc),
)
unary_ops.append(erfc_opinfo)
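# NOTE on the erfcinv reference below: since erfc(x) == 1 - erf(x), its
# inverse satisfies erfcinv(y) == erfinv(1 - y), hence the
# torch.erfinv(1 - x) lambda used as the torch reference.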
erfcinv_opinfo = OpInfo(
lambda fd: fd.ops.erfcinv,
"erfcinv",
dtypes=(
torch.float32,
torch.float64,
),
domain=Domain(0.3, 0.7),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(lambda x: torch.erfinv(1 - x)),
)
unary_ops.append(erfcinv_opinfo)
erfinv_opinfo = OpInfo(
lambda fd: fd.ops.erfinv,
"erfinv",
dtypes=int_float_dtypes,
domain=Domain(-1, 1),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.erfinv),
)
unary_ops.append(erfinv_opinfo)
exp_opinfo = OpInfo(
lambda fd: fd.ops.exp,
"exp",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.exp),
)
unary_ops.append(exp_opinfo)
exp2_opinfo = OpInfo(
lambda fd: fd.ops.exp2,
"exp2",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.exp2),
)
unary_ops.append(exp2_opinfo)
expm1_opinfo = OpInfo(
lambda fd: fd.ops.expm1,
"expm1",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.expm1),
)
unary_ops.append(expm1_opinfo)
# TODO add nvfuser exception for int dtypes
floor_opinfo = OpInfo(
lambda fd: fd.ops.floor,
"floor",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.floor),
)
unary_ops.append(floor_opinfo)
frac_opinfo = OpInfo(
lambda fd: fd.ops.frac,
"frac",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.frac),
)
unary_ops.append(frac_opinfo)
isfinite_opinfo = OpInfo(
lambda fd: fd.ops.isfinite,
"isfinite",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.isfinite),
)
unary_ops.append(isfinite_opinfo)
isinf_opinfo = OpInfo(
lambda fd: fd.ops.isinf,
"isinf",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.isinf),
)
unary_ops.append(isinf_opinfo)
isnan_opinfo = OpInfo(
lambda fd: fd.ops.isnan,
"isnan",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.isnan),
)
unary_ops.append(isnan_opinfo)
# NOTE half-precision floating types are not automatically promoted to fp32
isneginf_opinfo = OpInfo(
lambda fd: fd.ops.isneginf,
"isneginf",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.isneginf),
)
unary_ops.append(isneginf_opinfo)
# NOTE half-precision floating types are not automatically promoted to fp32
isposinf_opinfo = OpInfo(
lambda fd: fd.ops.isposinf,
"isposinf",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.isposinf),
)
unary_ops.append(isposinf_opinfo)
isreal_opinfo = OpInfo(
lambda fd: fd.ops.isreal,
"isreal",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.isreal),
)
unary_ops.append(isreal_opinfo)
lgamma_opinfo = OpInfo(
lambda fd: fd.ops.lgamma,
"lgamma",
dtypes=int_float_dtypes,
domain=Domain(-1.0 + eps, math.inf),
sample_input_generator=partial(elementwise_unary_generator, exclude_zero=True),
reference=_elementwise_unary_torch(torch.lgamma),
)
unary_ops.append(lgamma_opinfo)
log_opinfo = OpInfo(
lambda fd: fd.ops.log,
"log",
domain=Domain(0, math.inf),
sample_input_generator=partial(elementwise_unary_generator, exclude_zero=True),
reference=_elementwise_unary_torch(torch.log),
)
unary_ops.append(log_opinfo)
log10_opinfo = OpInfo(
lambda fd: fd.ops.log10,
"log10",
dtypes=int_float_dtypes,
domain=Domain(0, math.inf),
sample_input_generator=partial(elementwise_unary_generator, exclude_zero=True),
reference=_elementwise_unary_torch(torch.log10),
)
unary_ops.append(log10_opinfo)
log1p_opinfo = OpInfo(
lambda fd: fd.ops.log1p,
"log1p",
dtypes=int_float_dtypes,
domain=Domain(-1 + eps, math.inf),
sample_input_generator=partial(elementwise_unary_generator, exclude_zero=True),
reference=_elementwise_unary_torch(torch.log1p),
)
unary_ops.append(log1p_opinfo)
log2_opinfo = OpInfo(
lambda fd: fd.ops.log2,
"log2",
domain=Domain(0, math.inf),
sample_input_generator=partial(elementwise_unary_generator, exclude_zero=True),
reference=_elementwise_unary_torch(torch.log2),
)
unary_ops.append(log2_opinfo)
neg_opinfo = OpInfo(
lambda fd: fd.ops.neg,
"neg",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.neg),
)
unary_ops.append(neg_opinfo)
reciprocal_opinfo = OpInfo(
lambda fd: fd.ops.reciprocal,
"reciprocal",
domain=Domain(0 + eps, math.inf),
sample_input_generator=partial(
elementwise_unary_generator,
enable_small_value_testing=False,
enable_extremal_value_testing=False,
exclude_zero=True,
),
reference=_elementwise_unary_torch(torch.reciprocal),
)
unary_ops.append(reciprocal_opinfo)
# TODO add nvfuser exception for int dtypes
round_opinfo = OpInfo(
lambda fd: fd.ops.round,
"round",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.round),
)
unary_ops.append(round_opinfo)
rsqrt_opinfo = OpInfo(
lambda fd: fd.ops.rsqrt,
"rsqrt",
domain=Domain(0 + eps, math.inf),
sample_input_generator=partial(
elementwise_unary_generator,
enable_small_value_testing=False,
enable_extremal_value_testing=False,
exclude_zero=True,
),
reference=_elementwise_unary_torch(torch.rsqrt),
)
unary_ops.append(rsqrt_opinfo)
sigmoid_opinfo = OpInfo(
lambda fd: fd.ops.sigmoid,
"sigmoid",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.sigmoid),
)
unary_ops.append(sigmoid_opinfo)
signbit_opinfo = OpInfo(
lambda fd: fd.ops.signbit,
"signbit",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.signbit),
)
unary_ops.append(signbit_opinfo)
sin_opinfo = OpInfo(
lambda fd: fd.ops.sin,
"sin",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.sin),
)
unary_ops.append(sin_opinfo)
sinh_opinfo = OpInfo(
lambda fd: fd.ops.sinh,
"sinh",
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.sinh),
)
unary_ops.append(sinh_opinfo)
sqrt_opinfo = OpInfo(
lambda fd: fd.ops.sqrt,
"sqrt",
domain=Domain(0, math.inf),
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.sqrt),
)
unary_ops.append(sqrt_opinfo)
tan_opinfo = OpInfo(
lambda fd: fd.ops.tan,
"tan",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.tan),
)
unary_ops.append(tan_opinfo)
tanh_opinfo = OpInfo(
lambda fd: fd.ops.tanh,
"tanh",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.tanh),
)
unary_ops.append(tanh_opinfo)
# TODO add nvfuser exception for int dtypes
trunc_opinfo = OpInfo(
lambda fd: fd.ops.trunc,
"trunc",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_unary_generator,
reference=_elementwise_unary_torch(torch.trunc),
)
unary_ops.append(trunc_opinfo)
""" End Unary-Float Operations """
""" Start Binary Operations """
# atan2 --- promote int to float; allows fp16 and bf16
# nextafter, truediv --- promote int to float; requires full-precision fp32, fp64
# ceildiv, div, fmod, mod, remainder, truediv --- except_zero
# add, mul, pow, sub
# bitwise_and, bitwise_or, bitwise_xor --- bool_int_only
# bitwise_left_shift, bitwise_right_shift, logical_right_shift --- int_only
# eq, ne, ge, gt, le, lt --- compare
# TODO Add "ceildiv" to python_frontend
# TODO Add support for python reference for "mod".
# TODO atan2 - complex dtypes are unsupported, but we fail when compiling the kernel
# TODO logical_right_shift - the domain of the shift parameter is non-zero; otherwise the result is undefined.
binary_ops = []
add_opinfo = OpInfo(
lambda fd: fd.ops.add,
"add",
sample_input_generator=partial(
elementwise_binary_generator, enable_extremal_value_testing=False
),
reference=_elementwise_binary_torch(torch.add),
)
binary_ops.append(add_opinfo)
# TODO complex dtypes are unsupported, but we fail when compiling the kernel
atan2_opinfo = OpInfo(
lambda fd: fd.ops.atan2,
"atan2",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.atan2),
)
binary_ops.append(atan2_opinfo)
bitwise_and_opinfo = OpInfo(
lambda fd: fd.ops.bitwise_and,
"bitwise_and",
dtypes=bool_int_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.bitwise_and),
)
binary_ops.append(bitwise_and_opinfo)
bitwise_left_shift_opinfo = OpInfo(
lambda fd: fd.ops.bitwise_left_shift,
"bitwise_left_shift",
dtypes=int_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.bitwise_left_shift),
)
binary_ops.append(bitwise_left_shift_opinfo)
bitwise_or_opinfo = OpInfo(
lambda fd: fd.ops.bitwise_or,
"bitwise_or",
dtypes=bool_int_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.bitwise_or),
)
binary_ops.append(bitwise_or_opinfo)
bitwise_right_shift_opinfo = OpInfo(
lambda fd: fd.ops.bitwise_right_shift,
"bitwise_right_shift",
dtypes=int_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.bitwise_right_shift),
)
binary_ops.append(bitwise_right_shift_opinfo)
bitwise_xor_opinfo = OpInfo(
lambda fd: fd.ops.bitwise_xor,
"bitwise_xor",
dtypes=bool_int_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.bitwise_xor),
)
binary_ops.append(bitwise_xor_opinfo)
div_opinfo = OpInfo(
lambda fd: fd.ops.div,
"div",
dtypes=float_complex_dtypes,
sample_input_generator=partial(
elementwise_binary_generator,
enable_small_value_testing=False,
enable_extremal_value_testing=False,
exclude_zero=True,
),
reference=_elementwise_binary_torch(torch.div),
)
binary_ops.append(div_opinfo)
eq_opinfo = OpInfo(
lambda fd: fd.ops.eq,
"eq",
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.eq),
)
binary_ops.append(eq_opinfo)
fmod_opinfo = OpInfo(
lambda fd: fd.ops.fmod,
"fmod",
dtypes=int_float_dtypes,
sample_input_generator=partial(elementwise_binary_generator, exclude_zero=True),
reference=_elementwise_binary_torch(torch.fmod),
)
binary_ops.append(fmod_opinfo)
ge_opinfo = OpInfo(
lambda fd: fd.ops.ge,
"ge",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.ge),
)
binary_ops.append(ge_opinfo)
gt_opinfo = OpInfo(
lambda fd: fd.ops.gt,
"gt",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.gt),
)
binary_ops.append(gt_opinfo)
le_opinfo = OpInfo(
lambda fd: fd.ops.le,
"le",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.le),
)
binary_ops.append(le_opinfo)
# TODO The domain of the shift parameter must be greater than zero; otherwise the result is undefined.
logical_right_shift_opinfo = OpInfo(
lambda fd: fd.ops.logical_right_shift,
"logical_right_shift",
domain=Domain(0, None),
dtypes=int_dtypes,
sample_input_generator=partial(
elementwise_binary_generator,
enable_broadcast_testing=False,
enable_extremal_value_testing=False,
enable_large_value_testing=False,
enable_small_value_testing=False,
),
reference=jax.lax.shift_right_logical,
reference_type=ReferenceType.Jax,
)
binary_ops.append(logical_right_shift_opinfo)
lt_opinfo = OpInfo(
lambda fd: fd.ops.lt,
"lt",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.lt),
)
binary_ops.append(lt_opinfo)
mod_opinfo = OpInfo(
lambda fd: fd.ops.mod,
"mod",
dtypes=int_dtypes,
sample_input_generator=partial(
elementwise_binary_generator,
exclude_zero=True,
),
# Matlab rem (Remainder after Division) function
# For more details, see https://www.mathworks.com/help/matlab/ref/rem.html
reference=lambda a, b: a - b * torch.trunc(a / b).to(a.dtype),
)
binary_ops.append(mod_opinfo)
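# A small worked example of the rem-style reference above:
# rem(-7, 3) = -7 - 3 * trunc(-7 / 3) = -7 - 3 * (-2) = -1, whereas
# Python's floored modulo -7 % 3 would give 2.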
mul_opinfo = OpInfo(
lambda fd: fd.ops.mul,
"mul",
sample_input_generator=partial(
elementwise_binary_generator, enable_extremal_value_testing=False
),
reference=_elementwise_binary_torch(torch.mul),
)
binary_ops.append(mul_opinfo)
ne_opinfo = OpInfo(
lambda fd: fd.ops.ne,
"ne",
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.ne),
)
binary_ops.append(ne_opinfo)
nextafter_opinfo = OpInfo(
lambda fd: fd.ops.nextafter,
"nextafter",
dtypes=full_precision_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.nextafter),
)
binary_ops.append(nextafter_opinfo)
# complex dtypes --- AssertionError: Tensor-likes are not close!
pow_opinfo = OpInfo(
lambda fd: fd.ops.pow,
"pow",
dtypes=int_float_dtypes,
sample_input_generator=elementwise_binary_generator,
reference=_elementwise_binary_torch(torch.pow),
)
binary_ops.append(pow_opinfo)
remainder_opinfo = OpInfo(
lambda fd: fd.ops.remainder,
"remainder",
dtypes=int_float_dtypes,
sample_input_generator=partial(
elementwise_binary_generator,
exclude_zero=True,
enable_extremal_value_testing=False,
),
reference=_elementwise_binary_torch(torch.remainder),
)
binary_ops.append(remainder_opinfo)
sub_opinfo = OpInfo(
lambda fd: fd.ops.sub,
"sub",
sample_input_generator=partial(
elementwise_binary_generator, enable_extremal_value_testing=False
),
reference=_elementwise_binary_torch(torch.sub),
)
binary_ops.append(sub_opinfo)
truediv_opinfo = OpInfo(
lambda fd: fd.ops.truediv,
"truediv",
sample_input_generator=partial(
elementwise_binary_generator,
enable_small_value_testing=False,
enable_extremal_value_testing=False,
exclude_zero=True,
),
reference=_elementwise_binary_torch(torch.true_divide),
)
binary_ops.append(truediv_opinfo)
# For int dtypes, nvfuser div op has the semantics of c++ / operator, so its reference is trunc_divide.
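# For example, -7 / 2 truncates toward zero to -3 under C++ semantics,
# while Python's floor division -7 // 2 gives -4.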
trunc_div_opinfo = OpInfo(
lambda fd: fd.ops.div,
"trunc_div",
dtypes=int_dtypes,
sample_input_generator=partial(
elementwise_binary_generator,
enable_small_value_testing=False,
enable_extremal_value_testing=False,
exclude_zero=True,
),
reference=_elementwise_binary_torch(partial(torch.div, rounding_mode="trunc")),
)
binary_ops.append(trunc_div_opinfo)
""" End Binary Operations """
""" Start Ternary Operations """
ternary_ops = []
where_opinfo = OpInfo(
lambda fd: fd.ops.where,
"where",
error_input_generator=where_error_generator,
)
ternary_ops.append(where_opinfo)
""" End Ternary Operations """
""" Start Dynamic Shape Enabling Operations """
dynamic_shapes_ops = []
# TODO: Add correctness testing as noted below
tensor_shape_opinfo = OpInfo(
lambda fd: fd.ops.shape,
"tensor_shape",
# TODO: Check correctness once there are operators that can consume a Vector
sample_input_generator=None,
# NOTE: ops.shape accepts any legal Tensor object; possible errors are
# checked when the Tensor inputs are created.
error_input_generator=None,
)
dynamic_shapes_ops.append(tensor_shape_opinfo)
# TODO: Add correctness testing as noted below
tensor_size_opinfo = OpInfo(
lambda fd: fd.ops.size,
"tensor_size",
# TODO: Check correctness once there are operators that can consume a Vector
sample_input_generator=None,
error_input_generator=tensor_size_error_generator,
fd_correctness_fn=None,
fd_error_input_fn=tensor_api_test_fd_fn,
)
dynamic_shapes_ops.append(tensor_size_opinfo)
# TODO: Add correctness testing as noted below
vector_at_opinfo = OpInfo(
lambda fd: fd.ops.at,
"vector_at",
# TODO: Check correctness once there are operators that can consume a Vector
sample_input_generator=None,
error_input_generator=vector_at_error_generator,
fd_correctness_fn=None,
fd_error_input_fn=vector_api_test_fd_fn,
)
dynamic_shapes_ops.append(vector_at_opinfo)
""" End Dynamic Shape Enabling Operations """
""" Start Normalization Operations """
normalization_ops = []
var_mean_opinfo = OpInfo(
lambda fd: fd.ops.var_mean,
"var_mean",
dtypes=float_complex_dtypes,
sample_input_generator=var_mean_generator,
error_input_generator=reduction_error_generator,
reference=torch.var_mean,
symbolic_parameter_list=(ArgumentType.Symbolic, ArgumentType.Constant),
)
normalization_ops.append(var_mean_opinfo)
""" End Normalization Operations """
""" Start Shape Operations """
shape_ops = []
cat_opinfo = OpInfo(
lambda fd: fd.ops.cat,
"cat",
sample_input_generator=cat_generator,
error_input_generator=cat_error_generator,
reference=torch.cat,
symbolic_parameter_list=(ArgumentType.Symbolic, ArgumentType.Constant),
)
shape_ops.append(cat_opinfo)
broadcast_opinfo = OpInfo(
lambda fd: fd.ops.broadcast,
"broadcast",
error_input_generator=broadcast_error_generator,
symbolic_parameter_list=(ArgumentType.Symbolic, ArgumentType.Constant),
)
shape_ops.append(broadcast_opinfo)
# NOTE: The constant version of broadcast_in_dim opinfo tests the "shape"
# argument when a List of Constant Ints is used as an input.
# The symbolic parameter list lists the argument as "Constant" because
# otherwise an input is generated to attempt to supply the "shape" arg.
broadcast_in_dim_constant_opinfo = OpInfo(
lambda fd: fd.ops.broadcast_in_dim,
"broadcast_in_dim_constant",
sample_input_generator=broadcast_in_dim_generator,
error_input_generator=broadcast_in_dim_error_generator,
reference=jax.lax.broadcast_in_dim,
reference_type=ReferenceType.Jax,
symbolic_parameter_list=(
ArgumentType.Symbolic,
# This argument is purposely Constant even though the positional
# argument can also be symbolic.
ArgumentType.Constant,
ArgumentType.Constant,
),
)
shape_ops.append(broadcast_in_dim_constant_opinfo)
# NOTE: The symbolic version of broadcast_in_dim opinfo tests the "shape"
# argument with a Vector generated from another operation like ops.shape.
def broadcast_in_dim_sym_fn(fd, arg1, arg2, broadcast_dims):
return fd.ops.broadcast_in_dim(arg1, arg2.shape(), broadcast_dims)
def jax_broadcast_in_dim_fn(arg1, arg2, broadcast_dims):
return jax.lax.broadcast_in_dim(arg1, jax.numpy.shape(arg2), broadcast_dims)
broadcast_in_dim_symbolic_opinfo = OpInfo(
lambda fd: partial(broadcast_in_dim_sym_fn, fd),
"broadcast_in_dim_symbolic",
sample_input_generator=broadcast_in_dim_generator,
error_input_generator=broadcast_in_dim_error_generator,
reference=jax_broadcast_in_dim_fn,
reference_type=ReferenceType.Jax,
symbolic_parameter_list=(
ArgumentType.Symbolic,
ArgumentType.Symbolic,
ArgumentType.Constant,
),
)
shape_ops.append(broadcast_in_dim_symbolic_opinfo)
# translate between nvfuser and pytorch argument order for gather, take_along_dim, and index_select
def gather_wrapper(fn: callable, input: torch.Tensor, index: torch.Tensor, dim: int):
return fn(input, dim, index)
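# torch.gather and torch.index_select take (input, dim, index), while the
# nvfuser ops below take (input, index, dim); the wrapper simply swaps the
# last two arguments.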
gather_opinfo = OpInfo(
lambda fd: fd.ops.gather,
"gather",
sample_input_generator=gather_generator,
error_input_generator=take_along_axis_error_generator,
reference=partial(gather_wrapper, torch.gather),
symbolic_parameter_list=(
ArgumentType.Symbolic,
ArgumentType.Symbolic,
ArgumentType.Constant,
),
)
shape_ops.append(gather_opinfo)
index_select_opinfo = OpInfo(
lambda fd: fd.ops.index_select,
"index_select",
sample_input_generator=index_select_generator,
error_input_generator=index_select_error_generator,
reference=partial(gather_wrapper, torch.index_select),
symbolic_parameter_list=(
ArgumentType.Symbolic,
ArgumentType.Symbolic,
ArgumentType.Constant,
),
)
shape_ops.append(index_select_opinfo)
# NvFuser's API is significantly different from JAX's.
# TODO: Change python frontend api to match JAX using a cpp wrapper function.
pad_opinfo = OpInfo(
lambda fd: fd.ops.pad,
"pad",
error_input_generator=pad_error_generator,
symbolic_parameter_list=(
ArgumentType.Symbolic,
ArgumentType.Constant,
ArgumentType.Symbolic,
),
)
shape_ops.append(pad_opinfo)
permute_opinfo = OpInfo(
lambda fd: fd.ops.permute,
"permute",
sample_input_generator=permute_generator,
error_input_generator=permute_error_generator,
reference=torch.permute,
symbolic_parameter_list=(ArgumentType.Symbolic, ArgumentType.Constant),
)
shape_ops.append(permute_opinfo)
# nvfuser expects input and output shapes while pytorch only requires the output shape.
def reshape_wrapper(
fn: callable, input: torch.Tensor, input_shape: List[int], output_shape: List[int]
):
return fn(input, output_shape)
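# The input_shape argument exists only to match the nvfuser signature;
# torch.reshape infers it from the tensor, so the wrapper drops it.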
reshape_opinfo = OpInfo(
lambda fd: fd.ops.reshape,
"reshape",
sample_input_generator=reshape_generator,
error_input_generator=reshape_error_generator,
reference=partial(reshape_wrapper, torch.reshape),
symbolic_parameter_list=(
ArgumentType.Symbolic,
ArgumentType.Constant,
ArgumentType.Constant,
),
)
shape_ops.append(reshape_opinfo)
slice_opinfo = OpInfo(
lambda fd: fd.ops.slice,
"slice",
sample_input_generator=slice_generator,
error_input_generator=slice_error_generator,
reference=jax.lax.slice,
reference_type=ReferenceType.Jax,
)
shape_ops.append(slice_opinfo)
take_along_axis_opinfo = OpInfo(
lambda fd: fd.ops.take_along_axis,
"take_along_dim",
sample_input_generator=take_along_axis_generator,
error_input_generator=take_along_axis_error_generator,
reference=torch.take_along_dim,
symbolic_parameter_list=(
ArgumentType.Symbolic,
ArgumentType.Symbolic,
ArgumentType.Constant,
),
)
shape_ops.append(take_along_axis_opinfo)
""" End Shape Operations """
""" Start Tensor Creation """
tensor_creation_ops = []
full_opinfo = OpInfo(
lambda fd: fd.ops.full,
"full",
error_input_generator=full_error_generator,
symbolic_parameter_list=(
ArgumentType.Constant,
ArgumentType.Symbolic,
ArgumentType.Constant,
),
)
tensor_creation_ops.append(full_opinfo)
# Dynamic scalars are not checked at runtime, so we treat length, start, step as constants.
iota_opinfo = OpInfo(
lambda fd: fd.ops.iota,
"iota",
dtypes=(torch.int64, torch.float64),
error_input_generator=iota_error_generator,
symbolic_parameter_list=(
ArgumentType.ConstantScalar,
ArgumentType.ConstantScalar,
ArgumentType.ConstantScalar,
ArgumentType.Constant,
),
)
tensor_creation_ops.append(iota_opinfo)
""" End Tensor Creation """
# Puts all opinfos into the "opinfos" list
opinfos.extend(unary_ops)
opinfos.extend(binary_ops)
opinfos.extend(ternary_ops)
opinfos.extend(fusion_input_ops)
opinfos.extend(dynamic_shapes_ops)
opinfos.extend(normalization_ops)
opinfos.extend(shape_ops)
opinfos.extend(tensor_creation_ops)
|
Fuser-main
|
python_tests/pytest_opinfos.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
from typing import Callable
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, TestCase
from torch.testing._internal.jit_utils import RUN_CUDA
# Will only create the nvfuser module if CUDA is available
try:
from nvfuser import (
FusionDefinition,
)
except ImportError:
pass
RUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM
def is_pre_volta():
if not RUN_NVFUSER:
return False
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(is_pre_volta(), "Only supported on Volta and newer devices.")
class TestScheduleOps(TestCase):
def sched_op_in_definition_error(self, sched_op_fn: Callable):
"""
Common function to test for an error when a schedule op is used in a definition
"""
inputs = [
torch.randn(8, 8, 8, device="cuda"),
]
def fusion_fn(fd: FusionDefinition):
fd.t0 = fd.from_pytorch(inputs[0], static_sizes=True)
fd.t1 = fd.ops.tanh(fd.t0)
fd.add_output(fd.t1)
class DefError(FusionDefinition):
def definition(self):
fusion_fn(self)
sched_op_fn(self)
with self.assertRaisesRegex(
RuntimeError, "Attempting to use a SchedOperators Op prior to definition!"
):
fd = DefError()
_ = fd.execute(inputs)
def check_input_error(
self, sched_fn: Callable, error_msg: str, error_type=RuntimeError
):
"""
Common function to test for an input error to a schedule op
"""
inputs = [
torch.randn(8, 8, 8, device="cuda"),
]
def fusion_fn(fd: FusionDefinition):
fd.t0 = fd.from_pytorch(inputs[0], static_sizes=True)
fd.t1 = fd.ops.sum(fd.t0, axis=-1)
fd.add_output(fd.t1)
class InputError(FusionDefinition):
def definition(self):
fusion_fn(self)
def schedule(self):
sched_fn(self)
with self.assertRaisesRegex(error_type, error_msg):
fd = InputError()
_ = fd.execute(inputs)
def valid_use(self, sched_op_fn: Callable):
"""
Common function to test op works in a common case
"""
inputs = [
torch.randn(8, 8, 8, device="cuda"),
]
def fusion_fn(fd: FusionDefinition):
fd.t0 = fd.from_pytorch(inputs[0], static_sizes=True)
fd.t1 = fd.ops.sum(fd.t0, axis=-1)
fd.add_output(fd.t1)
class BasicValid(FusionDefinition):
def definition(self):
fusion_fn(self)
def schedule(self):
sched_op_fn(self)
fd = BasicValid()
# TODO: This can cause warnings from the FusionCache. It would be good
# to capture them, instead.
# Warning: You are overwriting the current user schedule for a definition!
nvf_user_out = fd.execute(inputs)
nvf_out = fd.execute(inputs, override_user_schedule=True)
self.assertEqual(nvf_user_out, nvf_out)
def test_merge_op(self):
self.sched_op_in_definition_error(lambda fd: fd.sched.merge(fd.t1, 1))
# Error check merge dimension. The dimension to merge is +1 from
# the relative dimension indicated
self.check_input_error(
lambda fd: fd.sched.merge(fd.t1, 1),
"Merging IterDomains requires that their iteration types match.",
)
self.check_input_error(
lambda fd: fd.sched.merge(fd.t1, 2),
"Invalid merge detected, either one or both axes are outside of TensorView's range.",
)
# TODO: I am not sure why this error doesn't match the previous error
# The previous error seems like it should match as they represent the
# same merge position -1 and 2 in a 3 dimensional tensor
# https://github.com/NVIDIA/Fuser/issues/171
self.check_input_error(
lambda fd: fd.sched.merge(fd.t1, -1),
"Merging IterDomains requires that their iteration types match",
)
self.check_input_error(
lambda fd: fd.sched.merge(fd.t1, -4),
"Cannot merge axes within compute at position. Either axis -1 or 0 are within computePosition = 0",
)
self.valid_use(lambda fd: fd.sched.merge(fd.t1, 0))
self.valid_use(lambda fd: fd.sched.merge(fd.t1, -3))
def test_reduction_factor_op(self):
self.sched_op_in_definition_error(
lambda fd: fd.sched.reduction_factor(fd.t1, [-1])
)
def error1_fn(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 2)
fd.sched.reduction_factor(fd.t1, [1])
self.check_input_error(
error1_fn, "Cannot rfactor axes that are not reduction axes."
)
def error2_fn(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 2)
fd.sched.reduction_factor(fd.t1, [-3])
self.check_input_error(
error2_fn, "Cannot rfactor axes that are not reduction axes."
)
def error3_fn(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 2)
fd.sched.reduction_factor(fd.t1, [2, 3])
self.check_input_error(
error3_fn, "Must have at least one reduction axis not marked as rfactor."
)
def sched_fn(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 2)
fd.sched.reduction_factor(fd.t1, [2])
self.valid_use(sched_fn)
# Donut-hole factoring of reduction dims
def sched1_fn(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 4)
fd.sched.split(fd.t1, 2, 2)
fd.sched.reduction_factor(fd.t1, [2, 4])
self.valid_use(sched1_fn)
def sched2_fn(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 4)
fd.sched.split(fd.t1, 2, 2)
fd.sched.reduction_factor(fd.t1, [3])
self.valid_use(sched2_fn)
# NOTE: The binding function for the "rfactor" alias is identical so
# only proof of existence is needed
def sched_fn_alias(fd: FusionDefinition):
fd.sched.split(fd.t1, 2, 2)
fd.sched.rfactor(fd.t1, [2])
self.valid_use(sched_fn_alias)
def test_reorder_op(self):
self.sched_op_in_definition_error(
lambda fd: fd.sched.reorder(fd.t1, {0: 1, 1: 0})
)
# Error checks of reorder dict
self.check_input_error(
lambda fd: fd.sched.reorder(fd.t1, {0: 3}),
"Reorder axes are not within the number of dimensions of the provided domain",
)
self.check_input_error(
lambda fd: fd.sched.reorder(fd.t1, {3: 0}),
"Reorder axes are not within the number of dimensions of the provided domain",
)
self.check_input_error(
lambda fd: fd.sched.reorder(fd.t1, {-4: 0}),
'Found "old" position that\'s less than 0 even though already adjusted by nDims: -1',
)
self.check_input_error(
lambda fd: fd.sched.reorder(fd.t1, {0: -4}),
'Found "new" position that\'s less than 0 even though already adjusted by nDims: -1',
)
self.valid_use(lambda fd: fd.sched.reorder(fd.t1, {0: 1, 1: 0}))
self.valid_use(lambda fd: fd.sched.reorder(fd.t1, {0: 1, 0: 1}))
self.valid_use(lambda fd: fd.sched.reorder(fd.t1, {0: 0}))
self.valid_use(lambda fd: fd.sched.reorder(fd.t1, {}))
def test_split_op(self):
self.sched_op_in_definition_error(lambda fd: fd.sched.split(fd.t1, 1, 2))
# Error checking split dimension
self.check_input_error(
lambda fd: fd.sched.split(fd.t1, 3, 2),
"Tried to access position . in domain",
)
self.check_input_error(
lambda fd: fd.sched.split(fd.t1, -4, 2),
"Split axis is less than 0 even after adjusting for nDims",
)
# Error checking split factor.
# NOTE: With ceildiv, a split factor larger than the dimension size simply
# yields an outer extent of 1.
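# For example, splitting an extent-8 axis by a factor of 16 gives
# ceil(8 / 16) == 1, so an oversized factor is not treated as an error.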
self.check_input_error(
lambda fd: fd.sched.split(fd.t1, 1, 0),
"Expected rhs != 0 to be true, but got false",
)
# NOTE: While a negative split is not allowed, it is unclear why the
# error is a TypeError given that -1 is a valid int.
self.check_input_error(
lambda fd: fd.sched.split(fd.t1, 1, -1),
"incompatible function arguments",
TypeError,
)
self.valid_use(lambda fd: fd.sched.split(fd.t1, 1, 2))
self.valid_use(lambda fd: fd.sched.split(fd.t1, -1, 2))
if __name__ == "__main__":
run_tests()
|
Fuser-main
|
python_tests/test_schedule_ops.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
import torch
from pytest_core import OpInfo
from pytest_utils import ArgumentType, is_tensor
from nvfuser import FusionDefinition
from nvfuser.pytorch_utils import (
python_scalar_to_nvfuser_dtype,
torch_dtype_to_nvfuser_dtype,
)
def parse_inputs_fusion_definition(fd: FusionDefinition, opinfo: OpInfo, *args):
if len(args) == 0:
return []
nvf_args = []
if opinfo.symbolic_parameter_list is None:
opinfo.symbolic_parameter_list = [ArgumentType.Symbolic] * len(args)
assert len(opinfo.symbolic_parameter_list) == len(args)
for arg_type, a in zip(opinfo.symbolic_parameter_list, args):
if arg_type == ArgumentType.Symbolic:
if isinstance(a, torch.Tensor):
nvf_args.append(fd.from_pytorch(a))
elif isinstance(a, list) and all(map(is_tensor, a)):
nvf_args.append([fd.from_pytorch(inner_a) for inner_a in a])
elif isinstance(a, list) or isinstance(a, tuple):
nvf_args.append(fd.define_vector(a))
else:
# For symbolic scalars, we do not define with constant value.
# Otherwise, it becomes a constant and is not a fusion input.
nvf_args.append(fd.define_scalar(python_scalar_to_nvfuser_dtype(a)))
elif arg_type == ArgumentType.ConstantScalar:
assert not isinstance(a, torch.Tensor)
nvf_args.append(fd.define_scalar(a))
elif isinstance(a, torch.dtype):
nvf_args.append(torch_dtype_to_nvfuser_dtype(a))
else:
assert not isinstance(a, torch.Tensor)
assert arg_type == ArgumentType.Constant
nvf_args.append(a)
return nvf_args
# This function purposely does not generate a functional FusionDefinition
# because it does not define an output. It is only meant to test the error
# checking of an operation.
def api_test_fd_fn(fd: FusionDefinition, opinfo: OpInfo, *args, **kwargs):
nvf_inputs = parse_inputs_fusion_definition(fd, opinfo, *args)
this_inputs = opinfo.op(fd)(**kwargs)
def default_fd_fn(fd: FusionDefinition, opinfo: OpInfo, *args, **kwargs):
nvf_inputs = parse_inputs_fusion_definition(fd, opinfo, *args)
result = opinfo.op(fd)(*nvf_inputs, **kwargs)
if isinstance(result, tuple):
for a in result:
fd.add_output(a)
else:
fd.add_output(result)
def tensor_input_fd_fn(fd: FusionDefinition, opinfo: OpInfo, *args, **kwargs):
nvf_inputs = parse_inputs_fusion_definition(fd, opinfo, *args)
this_inputs = opinfo.op(fd)(**kwargs)
t1 = fd.ops.add(nvf_inputs[0], this_inputs)
fd.add_output(t1)
def tensor_api_test_fd_fn(fd: FusionDefinition, opinfo: OpInfo, *args, **kwargs):
nvf_inputs = parse_inputs_fusion_definition(fd, opinfo, *args)
out = opinfo.op(fd)(nvf_inputs[0], **kwargs)
def vector_api_test_fd_fn(fd: FusionDefinition, opinfo: OpInfo, *args, **kwargs):
nvf_inputs = parse_inputs_fusion_definition(fd, opinfo, *args)
v0 = nvf_inputs[0].shape()
out = opinfo.op(fd)(v0, **kwargs)
|
Fuser-main
|
python_tests/pytest_fusion_definitions.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["oncall: jit"]
import contextlib
import unittest
import os
import random
import enum
import copy
from functools import reduce
import operator
import warnings
# Set NVFUSER_ environment variables. Make sure this is done before
# loading nvfuser.
if "NVFUSER_ENABLE" not in os.environ:
os.environ["NVFUSER_ENABLE"] = ""
os.environ["NVFUSER_ENABLE"] = (
"linear_decomposition,conv_decomposition,graph_op_fusion,"
+ os.environ["NVFUSER_ENABLE"]
)
if "NVFUSER_DISABLE" not in os.environ:
os.environ["NVFUSER_DISABLE"] = ""
os.environ["NVFUSER_DISABLE"] = "fallback,fma," + os.environ["NVFUSER_DISABLE"]
os.environ["NVFUSER_JIT_OPT_LEVEL"] = "0"
import torch
from torch.nn import functional
from torch.profiler import profile, ProfilerActivity
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
OpDTypes,
)
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_methods_invocations import op_db, SampleInput
from torch.testing._internal.common_utils import (
run_tests,
ProfilingMode,
GRAPH_EXECUTOR,
TEST_WITH_ROCM,
slowTest,
is_iterable_of_tensors,
freeze_rng_state,
skipIfRocm,
)
from torch.testing._internal.jit_utils import (
clone_inputs,
get_traced_sample_variant_pairs,
JitTestCase,
RUN_CUDA,
)
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing import FileCheck
import itertools
import numpy as np
import math
from torch.autograd.gradcheck import gradcheck
from typing import List
RUN_NVFUSER = RUN_CUDA
CUDA_MAJOR, CUDA_MINOR = 0, 0
if RUN_NVFUSER and torch.version.cuda is not None:
CUDA_MAJOR, CUDA_MINOR = (int(x) for x in torch.version.cuda.split(".")[:2])
# flag used to skip C++ integration test for torchscript
if os.environ.get("NVFUSER_TEST_ONLY_RUN_WHEEL_BUILD_SUBSET", "0") == "1":
RUN_NVFUSER = False
# TODO: enable complex when we fix the extremal cases in OpInfo
# see issue https://github.com/csarofeen/pytorch/issues/1730"
# os.environ['NVFUSER_ENABLE'] = 'complex'
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
FUSION_GROUP = "prim::CudaFusionGroup"
FUSION_GUARD = "prim::CudaFusionGuard"
@contextlib.contextmanager
def nvfuser_singleton_fusion(flag):
old_value = torch._C._jit_set_nvfuser_single_node_mode(flag)
try:
yield
finally:
torch._C._jit_set_nvfuser_single_node_mode(old_value)
@contextlib.contextmanager
def nvfuser_horizontal_fusion(flag):
old_value = torch._C._jit_set_nvfuser_horizontal_mode(flag)
try:
yield
finally:
torch._C._jit_set_nvfuser_horizontal_mode(old_value)
def is_pre_volta():
if not RUN_NVFUSER:
return False
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
TEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()
TEST_LARGE_TENSOR = RUN_NVFUSER
if RUN_NVFUSER:
torch.ones(1).cuda() # initialize cuda context
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
class CudaFuserTestOptions:
def __init__(self):
self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
torch._C._debug_set_autodiff_subgraph_inlining(False)
self.old_value = torch._C._jit_set_autocast_mode(True)
if RUN_CUDA:
self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
def restore(self):
if RUN_CUDA:
torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)
torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)
torch._C._jit_set_nvfuser_guard_mode(self.old_guard)
torch._C._debug_set_autodiff_subgraph_inlining(True)
torch._C._jit_set_autocast_mode(self.old_value)
class TestCudaFuser(JitTestCase):
def assertEqual(self, *args, **kwargs):
kwargs["exact_layout"] = True
super(JitTestCase, self).assertEqual(*args, **kwargs)
def _getSubgraphInFusion(self, graph):
num_node = 0
subgraph = None
def count(block, ret):
for n in block.nodes():
if n.kind() == FUSION_GROUP:
ret[0] = ret[0] + 1
self.assertTrue(n.hasAttribute("Subgraph"))
ret[1] = n.g("Subgraph")
for block in n.blocks():
count(block, ret)
ret = [num_node, subgraph]
count(graph, ret)
self.assertEqual(ret[0], 1)
return ret[1]
def setUp(self):
super(TestCudaFuser, self).setUp()
self.skip_node_list = []
disabled_ops = (
"aten::batch_norm",
"aten::_batch_norm_impl_index",
"aten::_batch_norm_impl_index_backward",
"aten::native_batch_norm_backward",
)
for op in disabled_ops:
disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)
if disabled_flag:
torch._C._jit_set_nvfuser_skip_node_kind(op, True)
self.skip_node_list.append(op)
# Fall back to CPU to avoid errors in case this is run on a CPU-only machine
dev = "cuda" if RUN_NVFUSER else "cpu"
self.special_values = torch.tensor(
[
float("-inf"),
-10,
-math.pi,
-1,
-0.5,
0,
1,
0.5,
math.pi,
10,
float("inf"),
float("nan"),
],
dtype=torch.float,
device=dev,
)
self.int_types = [
torch.int8,
torch.uint8,
torch.int16,
torch.int32,
torch.int64,
]
self.support_tensor_dtypes = [
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
torch.bool,
torch.complex64,
torch.complex128,
]
if TEST_BF16:
self.support_tensor_dtypes.append(torch.bfloat16)
if RUN_NVFUSER:
self.cuda_fuser_options = CudaFuserTestOptions()
def tearDown(self):
# Restore the skip-node configuration to what it was before the tests
for op in self.skip_node_list:
disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)
if not disabled_flag:
torch._C._jit_set_nvfuser_skip_node_kind(op, True)
if RUN_NVFUSER:
self.cuda_fuser_options.restore()
super(TestCudaFuser, self).tearDown()
def _run_helper(
self, jit_op, op, *args, check_stride=False, num_fusion=1, check_runs=1
):
seed = 123
torch.cuda.manual_seed_all(seed)
jit_o = jit_op(*args)
for i in range(check_runs):
torch.cuda.manual_seed_all(seed + i)
jit_o = jit_op(*args)
torch.cuda.manual_seed_all(seed + i)
o = op(*args)
if type(jit_o) is torch.Tensor:
jit_o = [
jit_o,
]
o = [
o,
]
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
if check_stride:
self.assertEqual(oo.stride(), jit_oo.stride())
self.assertGraphContainsExactly(
jit_op.graph_for(*args), FUSION_GUARD, num_fusion, consider_subgraphs=True
)
def _run_training_helper(self, jit_op, op, grads, *args, num_bw_fusion=1):
def has_grad(x):
if torch.is_tensor(x) and x.requires_grad:
if x.grad is not None:
return True
return False
# save *args for the reference run and warm up
ref_args = [
t.detach().clone().requires_grad_() if has_grad(t) else t for t in args
]
for i in range(3):
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
# zero grads for checking gradients
[t.grad.zero_() for t in args if has_grad(t)]
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
o = op(*ref_args)
g = o.backward(grads)
self.assertEqual(o, jit_o)
self.assertEqual(g, jit_g)
# check gradients
for t, ref_t in zip(args, ref_args):
if has_grad(t):
self.assertEqual(ref_t.grad, t.grad)
self.assertGraphContainsExactly(
jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True
)
bwd_graph = list(
list(jit_op.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
self.assertGraphContainsExactly(
bwd_graph, FUSION_GUARD, num_bw_fusion, consider_subgraphs=True
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_half(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# Stick to integers; this avoids numerical differences due to our
# promotion.
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_bfloat(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# Stick to integers; this avoids numerical differences due to our
# promotion.
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_const(self):
def t(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_chunk(self):
def t(x, y, z, q):
o = x + q
x0, x1 = torch.chunk(o, 2)
o = x0 + x1
o = o + y
o = o * z
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(2, 8, dtype=torch.float, device="cuda")
z = torch.randn(2, 8, dtype=torch.float, device="cuda")
q = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z, q)
jit_o = t_jit(x, y, z, q)
o = t(x, y, z, q)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, q), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_reduction_dtypes_axis(self):
for op in [torch.sum, torch.mean, torch.amax, torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-1, 2, 0]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 2.0)
o = op(o, dim=[axis])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(
self._compare("comparing output failed", o, jit_o, 1e-4)
)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_variance(self):
for op in [torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-2, -1, 2, 1]:
for unbiased in [False, True]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 2.0)
o = op(o, dim=[axis])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(
self._compare("comparing output failed", o, jit_o, 1e-4)
)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_variance_profiling(self):
with nvfuser_singleton_fusion(True):
for op in [torch.var, torch.std]:
for dtype in [torch.float16, torch.float32, torch.double]:
for axis in [-2, -1, 2, 1]:
for unbiased in [False, True]:
for keepdim in [False, True]:
def t(
x: torch.Tensor,
dim: List[int],
unbiased: bool,
keepdim: bool,
):
o = torch.mul(x, 2.0)
o = op(
o, dim=dim, unbiased=unbiased, keepdim=keepdim
)
return o
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t_jit = torch.jit.script(t)
self._run_helper(
t_jit,
t,
x,
[axis],
unbiased,
keepdim,
check_stride=False,
check_runs=5,
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_scalar_input(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 1, 32, dtype=torch.float, device="cuda")
y = y.expand(4, 8, 32, 32)
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(
subgraph, "aten::add", 2, consider_subgraphs=False
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(1, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(
subgraph, "aten::add", 2, consider_subgraphs=False
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_2(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 1, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(
subgraph, "aten::add", 2, consider_subgraphs=False
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_3(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(
subgraph, "aten::add", 2, consider_subgraphs=False
)
# test_broadcasting_partition_logic_X
# Tests that the partition logic avoids creating unsupported broadcasting
# semantics inside a CudaFusionGroup
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_partition_logic_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(
subgraph, "aten::add", 4, consider_subgraphs=False
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_partition_logic_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(4, 1, 6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(
subgraph, "aten::add", 4, consider_subgraphs=False
)
@unittest.skipIf(True, "Broadcast with different output not supported yet")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_multiple_output_shape(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(2, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(True, "broadcast on branches can't be resolved yet")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_broadcasting_multiple_output(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
def _unary_test_helper(self, operation, dtype, random_data):
gradient_check = (dtype == torch.float64) and random_data
shape = self.special_values.shape
torch.cuda.manual_seed_all(211)
# need additional def of t for boolean ops
def t(x: torch.Tensor, y: torch.Tensor):
o = x * y
o = o + 5e-3
o = operation(o)
return o
y = torch.rand(
shape, dtype=torch.float32, device="cuda", requires_grad=gradient_check
)
y = y.to(dtype=dtype)
if random_data:
x = torch.rand(
shape, dtype=torch.float32, device="cuda", requires_grad=gradient_check
)
if dtype in self.int_types:
# prefer a larger variance for integer types
x = x * 5
x = x.to(dtype=dtype)
else:
x = self.special_values.to(dtype=dtype)
try:
ref = t(x, y)
except Exception:
# same as the TE checker: if eager mode throws, skip this test
return
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
if gradient_check:
if jit_o.dtype != torch.bool:
# bool dtype has no `-`
gradcheck(t_jit, [x, y], nondet_tol=1e-5)
elif dtype in self.support_tensor_dtypes:
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
if dtype == torch.bfloat16:
# compare bfloat16 kernels against a float64 ground truth instead of the
# eager-mode implementation, since mismatches in casting add excessive noise.
o = t(x.to(torch.float64), y.to(torch.float64))
if o.dtype.is_floating_point:
o = o.to(torch.bfloat16)
else:
o = t(x, y)
self.assertTrue(
self._compare(
"failing case {}\n{}\n{}\n{}".format(dtype, operation, x, y),
o,
jit_o,
1e-2,
)
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_unary_ops(self):
data_types = [
*self.int_types,
torch.float16,
torch.float32,
torch.float64,
# TODO: revert this
# see issue https://github.com/csarofeen/pytorch/issues/1730"
# torch.cfloat,
# torch.cdouble,
]
if TEST_BF16:
data_types.append(torch.bfloat16)
operations = [
torch.neg,
torch.abs,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.lgamma,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.acos,
torch.cosh,
torch.sin,
torch.asin,
torch.sinh,
torch.tan,
torch.atan,
torch.sqrt,
torch.rsqrt,
torch.ceil,
torch.floor,
torch.round,
torch.trunc,
torch.frac,
torch.reciprocal,
torch.isfinite,
torch.isinf,
torch.isnan,
torch.isneginf,
torch.isposinf,
torch.isreal,
torch.nn.functional.softplus,
torch.nn.functional.gelu,
torch.nn.functional.leaky_relu,
torch.nn.functional.silu,
torch.relu,
torch.sigmoid,
torch.bitwise_not,
torch.tan,
torch.tanh,
]
skip_complex = {torch.rsqrt, torch.reciprocal}
for op, dtype in itertools.product(operations, data_types):
if dtype.is_complex and op in skip_complex:
continue
self._unary_test_helper(op, dtype, False) # test special numbers
self._unary_test_helper(op, dtype, True) # test random data
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_category_rule(self):
def run_tensor(x, z):
def t(x: torch.Tensor, z: torch.Tensor):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
def run_scalar(x, z):
def t(x: torch.Tensor, z: float):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
# n-dim with 0-dim (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with 0-dim (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with n-dim (type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float16, device="cuda")
z = torch.tensor(3.0, dtype=torch.double)
run_scalar(x, z)
if TEST_BF16:
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.bfloat16, device="cuda")
z = torch.tensor(3.0, dtype=torch.double)
run_scalar(x, z)
# n-dim with scalar (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(3.0, dtype=torch.double)
run_scalar(x, z)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_unary_bitwise(self):
def bit_not(x: torch.Tensor):
return ~(x + 1)
jitted = torch.jit.script(bit_not)
x = (
torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
.mul(5)
.to(torch.long)
)
jit_o = jitted(x)
jit_o = jitted(x)
o = bit_not(x)
self.assertEqual(o, jit_o)
jitted.graph_for(x)  # the fusion group shows up on the second invocation, not the first
self.assertGraphContains(jitted.graph_for(x), FUSION_GUARD)
def _get_scalar_binary_test_fn(
self, category_and_type1, category_and_type2, operation
):
category1, dtype_arg1 = category_and_type1
category2, dtype_arg2 = category_and_type2
def t_intx_tensory(x: int, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
def t_doublex_tensory(x: float, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
def t_cdoublex_tensory(x: complex, y: torch.Tensor):
o = operation(x, y)
o = 2 + o
return o
# Omit both scalar cases and swap cases
assert category1 == "scalar" and category2 != "scalar"
if dtype_arg1.is_floating_point:
return t_doublex_tensory
if dtype_arg1 == torch.int64 or dtype_arg1 == torch.int32:
return t_intx_tensory
if dtype_arg1.is_complex:
return t_cdoublex_tensory
raise NotImplementedError
def _binary_test_helper(self, operation, dtypes, random_data, categories="ndim"):
if isinstance(dtypes, tuple):
dtype_arg1, dtype_arg2 = dtypes
else:
dtype_arg1 = dtype_arg2 = dtypes
if isinstance(categories, tuple) and random_data:
category1, category2 = categories
elif not random_data:
category1 = category2 = "ndim"
else:
category1 = category2 = categories
def is_cpu_category(x):
return x == "0dimcpu" or x == "scalar"
# skip unsupported cases
if is_cpu_category(category1) and is_cpu_category(category2):
return
# only test cases with first operand as scalar
if category2 == "scalar":
return
# skip ops that don't support scalar inputs in eager mode
if operation in [
torch.atan2,
torch.max,
torch.min,
torch.remainder, # unsupported in nvfuser
]:
if category1 == "scalar" or category2 == "scalar":
return
if operation in [
torch.fmod,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.le,
torch.lt,
]:
if category1 == "scalar":
return
# operators that do not support bfloat16
if operation in [torch.fmod]:
if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:
return
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = operation(x, y)
o = o + z
return o
shape = (4, 32, 32)
shapex = shape if category1 == "ndim" else ()
shapey = shape if category2 == "ndim" else ()
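# an empty shape () yields a 0-dim tensor; the non-'ndim' categories later
# turn it into a python scalar or a cpu tensor in the conversion block below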
if random_data:
x = (torch.randn(shapex, dtype=torch.float, device="cuda") * 5).to(
dtype_arg1
)
y = (torch.randn(shapey, dtype=torch.float, device="cuda") * 5).to(
dtype_arg2
)
else:
x = self.special_values.to(dtype=dtype_arg1)
y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)
r"""
Category conversion
"""
has_scalar = False
if category1 == "scalar":
has_scalar = True
x = x.item()
if category1 == "0dimcpu":
x = x.to(device="cpu")
if category2 == "scalar":
has_scalar = True
y = y.item()
if category2 == "0dimcpu":
y = y.to(device="cpu")
z = torch.tensor([2], device="cuda").to(dtype_arg1)
is_dtype_arg1_int = dtype_arg1 == torch.int32 or dtype_arg1 == torch.int64
is_dtype_arg2_int = dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64
if operation in [torch.pow]:
if is_dtype_arg1_int and is_dtype_arg2_int:
if category2 == "scalar":
# RuntimeError: Integers to negative integer powers are not allowed
y = abs(y)
if category2 == "0dimcpu" and y == -1:
# https://github.com/pytorch/pytorch/issues/73196
y = y - 1
if category2 == "0dimcpu" and y == -2:
# avoid pow(0, -2), which gives inconsistent results on integer tensors
y = y - 1
# Avoid division by zero for integer tensors
div_like = [torch.div, torch.fmod, torch.remainder]
if operation in div_like and (
dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64
):
y[y == 0] = 1
test_value = True
if dtype_arg1 == torch.half or dtype_arg2 == torch.half:
test_value = False
if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:
test_value = False
try:
if not has_scalar:
o = t(x, y, z)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
if test_value:
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
elif category2 != "scalar": # only test the case where first is scalar
test_fn = self._get_scalar_binary_test_fn(
(category1, dtype_arg1), (category2, dtype_arg2), operation
)
o = test_fn(x, y)
t_jit = torch.jit.script(test_fn)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
if test_value:
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
except Exception as e:
print("failing test for op: ", operation.__name__)
print("with input\n\tx: ", x)
print("\ty: ", y)
print("\tz: ", z)
raise e
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_binary_ops(self):
data_types = [
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
]
if TEST_BF16:
data_types.append(torch.bfloat16)
operations = [
torch.mul,
torch.div,
torch.atan2,
torch.max,
torch.min,
torch.pow,
torch.remainder,
torch.fmod,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.le,
torch.lt,
]
category_types = ["scalar", "0dim", "0dimcpu", "ndim"]
binary_dtype_combinations = list(itertools.combinations(data_types, 2))
category_combinations = list(itertools.combinations(category_types, 2))
for op, dtypes, categories in itertools.product(
operations, binary_dtype_combinations, category_combinations
):
self._binary_test_helper(op, dtypes, True, categories) # random data
for op, dtypes in itertools.product(operations, binary_dtype_combinations):
self._binary_test_helper(op, dtypes, False) # special numbers
# TODO: revert this
@unittest.skipIf(True, "see issue https://github.com/csarofeen/pytorch/issues/1730")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_binary_ops_complex(self):
data_types = [torch.cfloat, torch.cdouble]
operations = [torch.mul, torch.div, torch.pow, torch.eq, torch.ne]
category_types = ["scalar", "0dim", "0dimcpu", "ndim"]
binary_dtype_combinations = list(itertools.combinations(data_types, 2))
category_combinations = list(itertools.combinations(category_types, 2))
for op, dtypes, categories in itertools.product(
operations, binary_dtype_combinations, category_combinations
):
self._binary_test_helper(op, dtypes, True, categories) # random data
for op, dtypes in itertools.product(operations, binary_dtype_combinations):
self._binary_test_helper(op, dtypes, False) # special numbers
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_binary_bitwise(self):
dtypes = [torch.bool, torch.int32, torch.int64]
for dtype1, dtype2, dtype3 in itertools.product(dtypes, repeat=3):
def jit_and(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return torch.bitwise_and(x, y) & z
def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return torch.bitwise_or(x, y) | z
def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return torch.bitwise_xor(x, y) ^ z
def jit_lshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return torch.bitwise_left_shift(x, y) << z
def jit_rshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return torch.bitwise_right_shift(x, y) >> z
for jit_func in [jit_and, jit_or, jit_xor, jit_lshift, jit_rshift]:
if torch.bool in {dtype1, dtype2, dtype3} and jit_func in {
jit_lshift,
jit_rshift,
}:
continue
x = (
torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
.mul(5)
.to(dtype1)
)
y = (
torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
.mul(5)
.to(dtype2)
)
z = (
torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
.mul(2)
.to(dtype3)
)
jitted = torch.jit.script(jit_func)
jit_o = jitted(x, y, z)
jit_o = jitted(x, y, z)
o = jit_func(x, y, z)
self.assertEqual(o, jit_o)
self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_type_as_op(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = torch.lt(x, z)
o = o.type_as(y)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 0.5)
jit_o = t_jit(x, y, 0.5)
o = t(x, y, 0.5)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 0.5), FUSION_GUARD)
def _ternary_integer_test_helper(self, dtype_arg1):
shape = (4, 8, 32, 32)
magnitude = 100
if dtype_arg1 in self.int_types:
x = torch.randint(
-magnitude, magnitude, shape, dtype=dtype_arg1, device="cuda"
)
else:
x = torch.randn(shape, dtype=dtype_arg1, device="cuda") * magnitude
arg2 = int(0)
arg3 = int(magnitude * 0.1)
def clamp0(x: torch.Tensor, f: int):
o = 2.0 * torch.clamp(x, min=f)
return o
clamp0_jit = torch.jit.script(clamp0)
self._run_helper(clamp0_jit, clamp0, x, arg2)
def clamp1(x: torch.Tensor, f: int, ff: int):
o = 2.0 * torch.clamp(x, min=f, max=ff)
return o
clamp1_jit = torch.jit.script(clamp1)
self._run_helper(clamp1_jit, clamp1, x, arg2, arg3)
def clamp2(x: torch.Tensor, f: float, ff: int):
o = 2.0 * torch.clamp(x, min=f, max=ff)
return o
clamp2_jit = torch.jit.script(clamp2)
self._run_helper(clamp2_jit, clamp2, x, float(arg2), arg3)
def clamp3(x: torch.Tensor, f: int, ff: float):
o = 2.0 * torch.clamp(x, min=f, max=ff)
return o
clamp3_jit = torch.jit.script(clamp3)
self._run_helper(clamp3_jit, clamp3, x, arg2, float(arg3))
def threshold(x: torch.Tensor, th: int, val: int):
o = 2.0 * torch.threshold(x, th, val)
return o
threshold_jit = torch.jit.script(threshold)
self._run_helper(threshold_jit, threshold, x, arg2, arg3)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_ternary_ops_integer_compatibility(self):
data_types = [torch.float16, torch.float32, torch.float64]
for dtype in data_types:
self._ternary_integer_test_helper(dtype)
def _ternary_test_helper(self, operation, dtypes, random_data):
if isinstance(dtypes, tuple):
dtype_arg1, dtype_arg2, dtype_arg3 = dtypes
else:
dtype_arg1 = dtype_arg2 = dtype_arg3 = dtypes
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: torch.Tensor):
o = operation(x, y, z)
o = o + alpha
return o
shape = (4, 32, 32)
if operation is torch.where:
dtype_arg1 = torch.bool
if random_data:
x = torch.randint(0, 2, shape).to(dtype=torch.bool, device="cuda")
y = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(
dtype_arg2
)
z = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(
dtype_arg3
)
else:
x = torch.randint(0, 2, self.special_values.size()).to(
dtype=torch.bool, device="cuda"
)
y = self.special_values.to(dtype=dtype_arg2)
z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)
elif random_data:
x = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(
dtype_arg1
)
y = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(
dtype_arg2
)
z = (torch.randn(shape, dtype=torch.float, device="cuda") * 5).to(
dtype_arg3
)
else:
x = self.special_values.to(dtype=dtype_arg1)
y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)
z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)
alpha = torch.tensor([2], device="cuda").to(dtype_arg1)
o = t(x, y, z, alpha)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_ternary_ops_type_promotion(self):
# TODO: update accuracy tolerance for bf16 / fp16 data types
data_types = [
# torch.float16,
torch.float32,
torch.float64,
]
"""
if TEST_BF16:
data_types.append(torch.bfloat16)
"""
# TODO: Add Tensor support for clamp
operations = [torch.clamp]
ternary_dtype_combinations = itertools.combinations(data_types, 3)
for op, dtypes in itertools.product(operations, ternary_dtype_combinations):
self._ternary_test_helper(op, dtypes, True) # random data
self._ternary_test_helper(op, dtypes, False) # special numbers
# We can't test the scalar version of rsub from python
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_rsub(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
def rsub(x: torch.Tensor, y: torch.Tensor):
o = torch.rsub(x, y)
o = o * 2.0
return o
rsub_jit = torch.jit.script(rsub)
self._run_helper(rsub_jit, rsub, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
# legacy fuser does not work for rand_like, see issue #34361
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_ternary_ops(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
cond = torch.randint(0, 2, (4, 8, 32, 32)).to(dtype=torch.bool, device="cuda")
def add(x: torch.Tensor, other: torch.Tensor, alpha: float):
o = torch.relu(x)
o = torch.add(o, other=other, alpha=alpha)
return o
add_jit = torch.jit.script(add)
self._run_helper(add_jit, add, x, y, 2.0)
def clamp0(x: torch.Tensor, f: float):
o = 2.0 * torch.clamp(x, min=f)
return o
clamp0_jit = torch.jit.script(clamp0)
self._run_helper(clamp0_jit, clamp0, x, 0.5)
def clamp1(x: torch.Tensor, f: float, ff: float):
o = 2.0 * torch.clamp(x, min=f, max=ff)
return o
clamp1_jit = torch.jit.script(clamp1)
self._run_helper(clamp1_jit, clamp1, x, -0.2, 0.7)
def threshold(x: torch.Tensor, th: float, val: float):
o = 2.0 * torch.threshold(x, th, val)
return o
threshold_jit = torch.jit.script(threshold)
self._run_helper(threshold_jit, threshold, x, 0.2, 0.9)
def where(x: torch.Tensor, y: torch.Tensor, cond: torch.Tensor):
o = 2.0 * torch.where(cond, x, y)
return o
where_jit = torch.jit.script(where)
self._run_helper(where_jit, where, x, y, cond)
def lerp(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = 2.0 * torch.lerp(x, y, z)
return o
lerp_jit = torch.jit.script(lerp)
self._run_helper(lerp_jit, lerp, x, y, z)
def lerp_scale(x: torch.Tensor, y: torch.Tensor, z: float):
o = 2.0 * torch.lerp(x, y, z)
return o
lerp_scale_jit = torch.jit.script(lerp_scale)
self._run_helper(lerp_scale_jit, lerp_scale, x, y, 0.5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires profiling node to run cuda fuser",
)
def test_addcmul_ops(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
def addcmul(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, value: float):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z, value=value)
return o
addcmul_jit = torch.jit.script(addcmul)
self._run_helper(addcmul_jit, addcmul, x, y, z, 2.0)
def addcmul_no_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z)
return o
addcmul_no_alpha_jit = torch.jit.script(addcmul_no_alpha)
self._run_helper(addcmul_no_alpha_jit, addcmul_no_alpha, x, y, z)
def addcmul_const_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z, value=0.75)
return o
addcmul_const_alpha_jit = torch.jit.script(addcmul_const_alpha)
self._run_helper(addcmul_const_alpha_jit, addcmul_const_alpha, x, y, z)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_dynamic_size(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
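# raise the bailout depth so the changing input shapes below can keep
# triggering re-profiling and re-specialization (20 is just a generous bound)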
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(
subgraph, "aten::add", 2, consider_subgraphs=False
)
# this test is not ideal: it relies on bailouts, and we don't have a way to
# inspect the bailout graph to validate that the fusion happened properly.
x = torch.randn(8, 32, 16, 8, dtype=torch.float, device="cuda")
y = torch.randn(16, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_random_topo(self):
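# disabling the fallback makes nvfuser failures surface as hard errors
# instead of silently falling back to the eager implementation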
os.environ["PYTORCH_NVFUSER_DISABLE_FALLBACK"] = "1"
self.assertTrue(runDefaultTestWithSeed(28449))
def _compare(self, desc, inp1, inp2, error):
a = inp1.clone()
b = inp2.clone()
close = torch.allclose(a, b, rtol=error, atol=error, equal_nan=True)
if not close:
print(desc, close)
z = a - b
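# flag elements violating the allclose criterion |a - b| <= atol + rtol * |b|
# (here atol == rtol == error) so they can be printed for debugging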
index = (torch.abs(z) >= error + error * torch.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
print("maximum difference", z[index].max())
return close
# Permutation helper that applies a binary operation between two tensors:
# 1. applies separate permutations `perm0` & `perm1` to the two inputs
# 2. reduces dimension `broadcast_axis` of operand two to size 1
# The purpose of this test is to ensure permutation works well in
# complicated cases with arbitrary stride orders and broadcast dimensions
def _permutation_helper(self, sizes, broadcast_axis, dtype, device, perm0, perm1):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.relu(o)
return o
x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
[perm0.index(i) for i in range(len(sizes))]
)
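# x has logical shape `sizes` but a memory layout following perm0; e.g. with
# sizes=[7, 8, 12] and perm0=(2, 0, 1) a (12, 7, 8) buffer is allocated and
# then permuted back so it is viewed as (7, 8, 12)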
if broadcast_axis >= 0:
sizes[broadcast_axis] = 1
y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
[perm1.index(i) for i in range(len(sizes))]
)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(o.stride(), jit_o.stride())
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
# end-to-end test of permutation & contiguity handling in integration.
# we test inputs with all combinations of permutation orders, just to
# ensure that integration can generate functionally correct kernels
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_binary_ops_permutation(self):
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
x = [7, 8, 12]
b_axes = range(-1, len(x))
for b_axis in b_axes:
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
x = [7, 8, 12]
self._permutation_helper(
x, b_axis, torch.float32, "cuda", perm0, perm1
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_binary_ops_channels_last_with_bcast(self):
device = "cuda"
x = torch.randn([4, 3, 2, 5], device=device).to(
memory_format=torch.channels_last
)
w = torch.randn([2, 5], device=device)
def t(x: torch.Tensor, b: torch.Tensor):
o = x + b
return torch.relu(o)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, w)
jit_o = t_jit(x, w)
jit_o = t_jit(x, w)
o = t(x, w)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x, w), FUSION_GUARD)
def _reduction_helper(
self, sizes, reduction_axis, dtype, device, perm0, perm1, keepdim=False
):
class MyReduction(torch.nn.Module):
__constants__ = ["reduction_axis", "keepdim"]
def __init__(self):
super(MyReduction, self).__init__()
self.reduction_axis = reduction_axis
self.keepdim = keepdim
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
return o
t = MyReduction()
x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
[perm0.index(i) for i in range(len(sizes))]
)
y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
[perm1.index(i) for i in range(len(sizes))]
)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_reduction(self):
for x in ([7, 8, 12], [12, 8, 7, 9, 15], [128, 16, 8, 32]):
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for keepdim in (True, False):
perm0 = range(len(x))
perm1 = range(len(x))
self._reduction_helper(
x, axes, torch.float32, "cuda", perm0, perm1, keepdim
)
def _layer_norm_autodiff_helper(self, model, grad, shapes, args):
jit_model = torch.jit.script(model)
eps = np.random.random() * 1e-4
use_cudnn = bool(np.random.randint(0, 2))
# profile/optimization runs
for i in range(3):
jit_o = jit_model(shapes, *args, eps, use_cudnn)
jit_o.backward(grad)
ref_args = [t.detach().clone().requires_grad_() for t in args]
[t.grad.zero_() for t in args]
jit_o = jit_model(shapes, *args, eps, use_cudnn)
jit_o.backward(grad)
o = model(shapes, *ref_args, eps, use_cudnn)
o.backward(grad)
self.assertEqual(jit_o, o)
for arg, ref_arg in zip(args, ref_args):
self.assertEqual(arg.grad, ref_arg.grad)
# check fusion in fw & bw
g = jit_model.graph_for(shapes, *args, eps, use_cudnn)
for node in g.nodes():
n = node
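# walk the executor's debug state down to the last execution plan; its
# gradient executor holds the optimized backward graph (v2.graph) checked below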
dbg_state = jit_model.get_debug_state()
for val in dbg_state.execution_plans.values():
v = val
state2 = v.code.grad_executor_states()
for val in state2[0].execution_plans.values():
v2 = val
FileCheck().check(FUSION_GUARD).run(g)
FileCheck().check(FUSION_GUARD).run(v2.graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_layer_norm_autodiff(self):
def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
o = torch.relu(o)
return o
def t_w(shapes: List[int], x, w, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, None, eps, cudnn)
o = torch.relu(o)
return o
def t_b(shapes: List[int], x, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, None, b, eps, cudnn)
o = torch.relu(o)
return o
def t(shapes: List[int], x, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, None, None, eps, cudnn)
o = torch.relu(o)
return o
model = {3: t_wb, 2: t_w, 1: t_b, 0: t}
for w, b in itertools.product([True, False], repeat=2):
batch = [2]
# note: awkward shape here to avoid vectorized fast kernel, which is
# buggy in aten
shapes = [2, 7, 3]
m = model[w * 2 + b]
grad = torch.randn(batch + shapes, dtype=torch.float32, device="cuda")
args = [
torch.randn(
batch + shapes, dtype=torch.float32, device="cuda"
).requires_grad_()
]
if w:
args.append(
torch.randn(
shapes, dtype=torch.float32, device="cuda"
).requires_grad_()
)
if b:
args.append(
torch.randn(
shapes, dtype=torch.float32, device="cuda"
).requires_grad_()
)
self._layer_norm_autodiff_helper(m, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_layer_norm_parser(self):
dtype = torch.float32
device = "cuda"
x = torch.randn([4, 4, 2], dtype=dtype, device=device)
w = torch.randn([4, 2], dtype=dtype, device=device)
b = torch.randn([4, 2], dtype=dtype, device=device)
def t(x: torch.Tensor, w: torch.Tensor, b: torch.Tensor):
o = torch.relu(x)
o = torch.layer_norm(o, [4, 2], w, b, 1e-5)
return o
o = t(x, w, b)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, w, b)
jit_o = t_jit(x, w, b)
o = t(x, w, b)
self.assertGraphContains(t_jit.graph_for(x, w, b), FUSION_GUARD)
def _native_layer_norm_helper(
self, shape, norm_shape, dtype, device, error, affine=True
):
class MyLayerNorm(torch.nn.Module):
__constants__ = ["norm_shape"]
def __init__(self, elementwise_affine=True):
super(MyLayerNorm, self).__init__()
self.norm_shape = norm_shape
if elementwise_affine:
self.weight = torch.randn(norm_shape, dtype=dtype, device=device)
self.bias = torch.randn(norm_shape, dtype=dtype, device=device)
with torch.no_grad():
self.weight.fill_(1)
self.bias.fill_(0)
else:
self.weight = None
self.bias = None
def forward(self, x: torch.Tensor):
o = torch.relu(x)
o = torch.native_layer_norm(
o, self.norm_shape, self.weight, self.bias, 1e-5
)
return o
t = MyLayerNorm(affine)
x = torch.randn(shape, dtype=dtype, device=device)
t_jit = torch.jit.script(t)
jit_o, jit_mean, jit_rstd = t_jit(x)
jit_o, jit_mean, jit_rstd = t_jit(x)
o, mean, rstd = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertTrue(self._compare("comparing mean failed", mean, jit_mean, error))
self.assertTrue(self._compare("comparing rstd failed", rstd, jit_rstd, error))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_native_layer_norm(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
for affine in (True, False):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [
input_shape[idx] for idx in range(dims - offset, dims)
]
self._native_layer_norm_helper(
input_shape, norm_shape, torch.float32, "cuda", 1e-4, affine
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_native_layer_norm_half(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(
input_shape, norm_shape, torch.float16, "cuda", 5e-3
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_native_layer_norm_bfloat(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(
input_shape, norm_shape, torch.bfloat16, "cuda", 1e-1
)
def _norm_helper(
self,
shape,
dtype,
device,
error,
is_batch_norm_else_instance_norm,
memory_format=torch.contiguous_format,
*,
layer_dtype=torch.float32
):
class MyBatchNorm(torch.nn.Module):
def __init__(self):
super(MyBatchNorm, self).__init__()
def forward(
self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor
):
o = torch.nn.functional.batch_norm(x, r_mean, r_var, training=True)
o = torch.relu(o)
return o
class MyInstanceNorm(torch.nn.Module):
def __init__(self):
super(MyInstanceNorm, self).__init__()
def forward(
self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor
):
o = torch.nn.functional.instance_norm(
x, r_mean, r_var, use_input_stats=True
)
o = torch.relu(o)
return o
t = MyBatchNorm() if is_batch_norm_else_instance_norm else MyInstanceNorm()
x = torch.randn(shape, dtype=dtype, device=device).to(
memory_format=memory_format
)
running_mean = torch.zeros(shape[1], dtype=layer_dtype, device=device)
running_var = torch.ones(shape[1], dtype=layer_dtype, device=device)
t_jit = torch.jit.script(t)
eager_running_mean = running_mean.clone()
eager_running_var = running_var.clone()
jit_running_mean = running_mean.clone()
jit_running_var = running_var.clone()
jit_o = t_jit(x, running_mean.clone(), running_var.clone())
self.assertTrue(
self._compare(
"prerun comparing running_mean failed",
eager_running_mean,
jit_running_mean,
error,
)
)
self.assertTrue(
self._compare(
"prerun comparing running_var failed",
eager_running_var,
jit_running_var,
error,
)
)
jit_o = t_jit(x, jit_running_mean, jit_running_var)
o = t(x, eager_running_mean, eager_running_var)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.stride(), jit_o.stride())
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertTrue(
self._compare(
"comparing running_mean failed",
eager_running_mean,
jit_running_mean,
error,
)
)
self.assertTrue(
self._compare(
"comparing running_var failed",
eager_running_var,
jit_running_var,
error,
)
)
self.assertGraphContains(
t_jit.graph_for(x, running_mean, running_var), FUSION_GUARD
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_layer_norm_trivial_reduce_dim(self):
def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
o = torch.relu(o)
return o
batch = [1]
shapes = [2, 7, 3]
grad = torch.randn(batch + shapes, dtype=torch.float32, device="cuda")
args = [
torch.randn(
batch + shapes, dtype=torch.float32, device="cuda"
).requires_grad_()
]
args.append(
torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_()
)
args.append(
torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_()
)
self._layer_norm_autodiff_helper(t_wb, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_norm_half_layer(self):
size = [2, 4, 2, 2]
for is_batch_norm_else_instance_norm in [False, True]:
for mf in [torch.channels_last, torch.contiguous_format]:
self._norm_helper(
size,
torch.float16,
"cuda",
1e-3,
is_batch_norm_else_instance_norm,
memory_format=mf,
layer_dtype=torch.float16,
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_norm_channels_last(self):
size = [3, 4, 5, 6]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for mf in [torch.channels_last, torch.contiguous_format]:
self._norm_helper(
size,
torch.float32,
"cuda",
1e-4,
is_batch_norm_else_instance_norm,
memory_format=mf,
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_norm(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1.0 / (dims - 1)))
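# size the non-channel dimensions so their product stays roughly at
# output_elements regardless of the tensor rank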
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(
x,
torch.float32,
"cuda",
1e-4,
is_batch_norm_else_instance_norm,
)
@skipIfRocm
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_norm_large(self):
output_elements = 262144
channel_sizes = 67, 457, 1024
for is_batch_norm_else_instance_norm in [True, False]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1.0 / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(
x, torch.float32, "cuda", 1e-4, is_batch_norm_else_instance_norm
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_norm_half(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
# TODO instance norm on ROCm was giving ~50% incorrect results
for is_batch_norm_else_instance_norm in (
[True] if TEST_WITH_ROCM else [False, True]
):
for dims in range(3, 6):
output_size = int(pow(output_elements, 1.0 / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(
x,
torch.float16,
"cuda",
5e-3,
is_batch_norm_else_instance_norm,
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_norm_bfloat(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
# TODO instance norm on ROCm was giving ~50% incorrect results
for is_batch_norm_else_instance_norm in (
[True] if TEST_WITH_ROCM else [False, True]
):
for dims in range(3, 6):
output_size = int(pow(output_elements, 1.0 / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(
x,
torch.bfloat16,
"cuda",
1e-1,
is_batch_norm_else_instance_norm,
)
def _softmax_helper(
self, shape, reduction_axis, is_log_softmax, dtype, device, error
):
class MySoftmax(torch.nn.Module):
__constants__ = ["reduction_axis"]
def __init__(self):
super(MySoftmax, self).__init__()
self.reduction_axis = reduction_axis
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=self.reduction_axis)
return o
class MyLogSoftmax(torch.nn.Module):
__constants__ = ["reduction_axis"]
def __init__(self):
super(MyLogSoftmax, self).__init__()
self.reduction_axis = reduction_axis
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.log_softmax(o, dim=self.reduction_axis)
return o
gradient_check = dtype == torch.float64
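# gradcheck is only numerically reliable in double precision, so the
# gradcheck path below is taken only for float64 inputs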
t = MyLogSoftmax() if is_log_softmax else MySoftmax()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)
y = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
if gradient_check:
gradcheck(t_jit.forward, [x, y], nondet_tol=1e-5)
else:
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_softmax_dtype(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch.nn.functional.softmax(o, dim=0, dtype=torch.float32)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda").requires_grad_()
y = torch.randn_like(x).requires_grad_()
grad = torch.randn_like(x).float()
ref_x = x.detach().requires_grad_()
ref_y = y.detach().requires_grad_()
o = t(ref_x, ref_y)
o.backward(grad)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
x.grad.zero_()
y.grad.zero_()
jit_o = t_jit(x, y)
jit_o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(ref_x.grad, x.grad)
self.assertEqual(ref_y.grad, y.grad)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(
t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True
)
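# pull the optimized backward graph out of the grad executor's execution
# plan to verify that the backward pass was fused as well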
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GUARD).run(bwd_graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test__softmax_function(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=False)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(
t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test__softmax_function_half_to_float(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=True)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(
t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_softmax(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1.0 / dims))
reduction_sizes = [67, 256, 1024, 4096]
# gradient check
for reduction_dim in range(dims):
for is_log_softmax in [False, True]:
shape = [output_size for idx in range(dims)]
self._softmax_helper(
shape, reduction_dim, is_log_softmax, torch.float64, "cuda", 1e-4
)
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(
x, reduction_dim, is_log_softmax, torch.float32, "cuda", 1e-4
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_softmax_half(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1.0 / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(
x, reduction_dim, is_log_softmax, torch.float16, "cuda", 5e-3
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_softmax_bfloat(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1.0 / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(
x, reduction_dim, is_log_softmax, torch.bfloat16, "cuda", 1e-1
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_reduction_permutation(self):
x = [7, 8, 12]
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
self._reduction_helper(
x, axes, torch.float32, "cuda", perm0, perm1
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_reduction_multiple_output(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
def t(x: torch.Tensor, y: torch.Tensor, scale: float, z: torch.Tensor):
o = torch.mul(x, y)
o = torch.mul(o, scale)
out1 = torch.mul(o, z)
out2 = torch.sum(out1, dim=[2])
return out1, out2
t_jit = torch.jit.script(t)
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
y = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
z = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
scale = 0.5
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
x = x.to(memory_format=torch.channels_last)
y = y.to(memory_format=torch.channels_last)
z = z.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_channels_last_with_broadcast(self):
# setting this to True forces a new graph to be generated when a new
# input has a different broadcast shape
torch._C._jit_set_nvfuser_guard_mode(True)
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = o + 2.0
return o
t_jit = torch.jit.script(t)
# Single Channel broadcasts
# Test 1
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
x = x.to(memory_format=torch.channels_last)
y = torch.randn(8, 4, 10, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(
o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last),
)
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(
o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last),
)
self.assertEqual(o, jit_o)
# Test 3
y = torch.randn(8, 1, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(
o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last),
)
self.assertEqual(o, jit_o)
# Test 4
y = torch.randn(1, 4, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(
o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last),
)
self.assertEqual(o, jit_o)
"""
Currently, the JIT doesn't have tensor merge logic to handle adding
a broadcast tensor with more than one broadcast into a non-broadcast
tensor. Therefore, either of these tests can fail depending on the
sort implementation. The second test is known to fail.
# Two Channel broadcasts
# Test 1
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last).transpose(2,3)
x = x.transpose(2,3)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
"""
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_pw_single_reduction_partition(self):
sizes = [2, 2, 2]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=[0])
o = torch.add(o, z)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_permutation_preservation(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
with nvfuser_singleton_fusion(True):
def t(x: torch.Tensor):
return torch.relu(x)
t_jit = torch.jit.script(t)
x = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
self._run_helper(t_jit, t, x, check_stride=True)
def t(x: torch.Tensor, y: torch.Tensor):
return torch.add(x, y)
t_jit = torch.jit.script(t)
x = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
y = torch.randn(sizes[1:], dtype=dtype, device=device)
self._run_helper(t_jit, t, x, y, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_permutation_preservation_edge_case_0(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
# mismatch rank with *note* different permutation recognized by PE
bias = torch.randn(3, dtype=dtype, device=device).unsqueeze(-1).unsqueeze(-1)
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
self._run_helper(t_jit, t, x, bias, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_permutation_preservation_edge_case_1_broken(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
# incompatible permutation; this will cause format propagation to break
bias = torch.randn(4, 5, dtype=dtype, device=device)
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
with nvfuser_singleton_fusion(True):
for _ in range(5):
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
try:
# nvfuser does not support incompatible permutation; this will throw
self.assertEqual(o.stride(), jit_o.stride())
except Exception as e:
warnings.warn(
"permutation propagation is broken, proper support should come after nvfuser permutation scheduler update"
)
self.assertGraphContains(t_jit.graph_for(x, bias), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_permutation_preservation_edge_case_2(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
y = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
z = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
def t(x, y, w):
tmp = torch.lerp(x, y, w)
tmp = torch.clamp(tmp, -1.0, 0.5)
tmp = torch.nn.functional.softplus(tmp)
return torch.threshold(tmp, -2.0, 0.5)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, z, check_stride=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_normalization_partition(self):
sizes = [3, 8, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
r_m = torch.randn(8, dtype=dtype, device=device)
r_v = torch.randn(8, dtype=dtype, device=device)
def t(
x: torch.Tensor,
y: torch.Tensor,
z: torch.Tensor,
r_mean: torch.Tensor,
r_var: torch.Tensor,
):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=0)
o = torch.add(o, z)
o = torch.nn.functional.batch_norm(o, r_mean, r_var, training=True)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z, r_m, r_v)
jit_o = t_jit(x, y, z, r_m, r_v)
o = t(x, y, z, r_m, r_v)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, r_m, r_v), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_sum_to_one(self):
dtype = torch.float
device = "cuda"
x = torch.randn([4, 5, 6], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 1)
o = torch.sum(o, dim=[0, 1, 2])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_single_reduction_broadcast(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 8], dtype=dtype, device=device)
y = torch.randn([4, 8], dtype=dtype, device=device)
z = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, z)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_trivial_reduction(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 1)
o = torch.sum(o, dim=[0])
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skip("Skipped due to rand_like behavior change")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_profiling_node(self):
# TODO: should we change this test to not use rand_like, or just
# remove this test?
dtype = torch.float
device = "cuda"
x = torch.randn(4, 8, 8, 8, dtype=dtype, device=device)
def repro(x: torch.Tensor, alpha: float):
o = torch.rand_like(x)
o = torch.add(o, alpha)
return o
repro_jit = torch.jit.script(repro)
self._run_helper(repro_jit, repro, x, 0.6)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_reduction_sizes_op(self):
dtype = torch.float
device = "cuda"
x = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
y = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor):
o = x + y
o = torch.relu(o)
o = o.sum((1, 3))
return o.size()
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
# since the output value is not used at all, the fusion operator should
# have been optimized away
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_profile_ivalue(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 7], dtype=dtype, device=device)
y = torch.randn([7, 4, 7], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, dim: List[int], keepdim: bool):
o = torch.add(x, y)
o = o.sum(dim, keepdim=keepdim)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, (0, 1), False)
jit_o = t_jit(x, y, (0, 1), False)
o = t(x, y, (0, 1), False)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, (0, 1), False), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_profile_ivalue_multiple_profiles(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 7], dtype=dtype, device=device)
def t(x, num: int):
for i in range(num):
# varying reduction axes should break profile_ivalue
tmp = x.sum(i, keepdim=True)
# inplace add on input/output, can't be functionalized/fused
x += tmp
return x
with nvfuser_singleton_fusion(True):
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 3, num_fusion=0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_sum_to_size(self):
dtype = torch.float
device = "cuda"
x = torch.randn([2, 4, 4], dtype=dtype, device=device)
y = torch.randn([2, 4, 4], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, new_size: List[int]):
o = torch.add(x, y)
o = o.sum_to_size(new_size)
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, (4, 1))
# update shape: old kernel should handle dynamic shape well without
# recompilation
x = torch.randn([2, 5, 8], dtype=dtype, device=device)
y = torch.randn([2, 5, 8], dtype=dtype, device=device)
# (TODO) check executed kernel, should extend autograd.profiler to fused
# kernels
self._run_helper(t_jit, t, x, y, (5, 1))
with nvfuser_singleton_fusion(True):
x = torch.randn([2, 5, 8], dtype=dtype, device=device)
def t(x: torch.Tensor):
# no-op reduction
return x.sum_to_size((2, 5, 8))
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_grad_sum_to_size(self):
dtype = torch.float
device = "cuda"
x = torch.randn([2, 4, 4], dtype=dtype, device=device).requires_grad_()
y = torch.randn([4], dtype=dtype, device=device).requires_grad_()
grad = torch.randn([2, 4, 4], dtype=dtype, device=device)
ref_x = x.detach().clone().requires_grad_()
ref_y = y.detach().clone().requires_grad_()
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.relu(o)
return o
# profiling runs for forward & backward
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
x.grad = None
y.grad = None
jit_o = t_jit(x, y)
jit_o.backward(grad)
o = t(ref_x, ref_y)
o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(x.grad, ref_x.grad)
self.assertEqual(y.grad, ref_y.grad)
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GUARD).run(bwd_graph)
# update shape: old kernel should handle dynamic shape well without
# recompilation
x = torch.randn([2, 5, 8], dtype=dtype, device=device).requires_grad_()
y = torch.randn([8], dtype=dtype, device=device).requires_grad_()
ref_x = x.detach().clone().requires_grad_()
ref_y = y.detach().clone().requires_grad_()
grad = torch.randn([2, 5, 8], dtype=dtype, device=device)
jit_o = t_jit(x, y)
# (TODO) check executed kernel, should extend autograd.profiler to fused
# kernels
jit_o.backward(grad)
o = t(ref_x, ref_y)
o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(x.grad, ref_x.grad)
self.assertEqual(y.grad, ref_y.grad)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_dropout_inference_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.15, False)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_dropout_train_nograd_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([64, 128, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.0, True, check_runs=20)
self._run_helper(t_jit, t, x, 1.0, True, check_runs=20)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_dropout_train_nograd_prob_check(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
for prob in [0.0, 0.15, 0.5, 0.85, 1.0]:
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
self.assertTrue(jit_o.detach().isfinite().all().item())
num_elems = x.numel()
num_zeros = num_elems - jit_o.detach().count_nonzero().item()
percent_zeros = num_zeros / num_elems
self.assertTrue(
(percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01))
)
self.assertGraphContainsExactly(
t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_dropout_training_fusion(self):
dtype = torch.float
device = "cuda"
sizes = [2, 3, 4, 5]
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
def t2(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.softmax(x, dim=-1)
o = torch.nn.functional.dropout(o, p, training=train)
return o
# disabling cache so new inputs would generate new graph
t.__disable_jit_function_caching__ = True
t2.__disable_jit_function_caching__ = True
for fn in [t, t2]:
for m_format in [torch.contiguous_format, torch.channels_last]:
fn_jit = torch.jit.script(fn)
x = torch.randn(
sizes, dtype=dtype, device=device, requires_grad=True
).to(memory_format=m_format)
grads = torch.randn(sizes, dtype=dtype, device=device).to(
memory_format=m_format
)
# The drop probability is set to zero because eager mode and the JIT pick
# random numbers in a different order
self._run_training_helper(fn_jit, fn, grads, x, 0.0, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_gelu(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
grads = torch.randn(
[1024, 1024], dtype=dtype, device=device, requires_grad=False
)
def t(x: torch.Tensor, mode: str):
o = torch.nn.functional.gelu(x, approximate=mode)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
self._run_training_helper(t_jit, t, grads, x, "none")
self._run_training_helper(t_jit, t, grads, x, "tanh")
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_dropout_training_prob_check(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
x_nograd = torch.randn([1024, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
for prob in [0.0, 0.15, 0.5, 0.85, 1.0]:
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
self.assertTrue(jit_o.detach().isfinite().all().item())
num_elems = x.numel()
num_zeros = num_elems - jit_o.detach().count_nonzero().item()
percent_zeros = num_zeros / num_elems
self.assertTrue(
(percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01))
)
self.assertGraphContainsExactly(
t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_linear(self):
in_feature = 2
out_feature = 8
# Changing the input dims to be 3-D to avoid eager mode bias fusion
# The bias fusion causes some precision issues with TF-32
weight = torch.randn(
out_feature, in_feature, dtype=torch.float32, device="cuda"
)
bias = torch.randn(out_feature, dtype=torch.float32, device="cuda")
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.linear(x, weight, bias)
o = torch.relu(o)
return o
# disabling cache so new inputs would generate new graph
t.__disable_jit_function_caching__ = True
sizes = [
in_feature,
]
for i in range(4):
# increase input rank in each iteration
sizes.insert(0, i + 2)
x = torch.randn(*sizes, dtype=torch.float32, device="cuda")
t_jit = torch.jit.script(t)
# fusion only happens for input rank >= 4
has_fusion = 0 if len(sizes) < 4 else 1
self._run_helper(
t_jit, t, x, weight, bias, check_stride=True, num_fusion=has_fusion
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_linear_symbolic_shapes(self):
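# smoke test: repeated scripted runs with symbolic shapes should not crash;
# no numeric or graph assertion is made here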
def fn(x: int):
y = torch.zeros((3, 4, x, x + 2)).cuda()
for i in range(2):
inp = torch.rand((3, 4, x, x + i)).cuda()
weight = torch.rand((x + 2, x + i)).cuda()
bias = torch.rand((x, x + 2)).cuda()
y += torch.sin(torch.nn.functional.linear(inp, weight, bias))
return y
fn_s = torch.jit.script(fn)
fn_s(5)
fn_s(5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_conv2d_symbolic_shapes(self):
def fn(x: int):
responses = []
for i in range(2):
inp = torch.rand((3, 3, 32, 32)).cuda()
weight = torch.rand((x + i, 3, 7, 7)).cuda()
bias = torch.rand((x + i)).cuda()
res = torch.nn.functional.conv2d(inp, weight, bias, padding=3)
responses.append(res)
return responses
fn_s = torch.jit.script(fn)
fn_s(5)
fn_s(5)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_backward_type(self):
# not super useful to check gradient of integer/bool, so skipping here
type_pairs = [
(torch.float, torch.half),
(torch.double, torch.half),
(torch.float, torch.double),
]
if TEST_BF16:
type_pairs += [
(torch.float, torch.bfloat16),
(torch.double, torch.bfloat16),
]
for x_type, y_type in type_pairs:
x = torch.randn(4, 2, dtype=x_type, device="cuda", requires_grad=True)
y = torch.randn(4, 2, dtype=y_type, device="cuda", requires_grad=True)
grad = torch.randn(4, 2, dtype=torch.float, device="cuda")
def test1(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, y)
o = torch.add(o, y)
o = torch.add(o, y)
o = o + 1.0
return o
test1_jit = torch.jit.script(test1)
for i in range(3):
jit_o = test1_jit(x, y)
jit_o.backward(grad)
bwd_graph = list(
list(test1_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_autocast_1(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch._C._nn.linear(o, y)
return o
x = torch.randn(8, 4, dtype=torch.half, device="cuda", requires_grad=True)
y = torch.randn(4, 4, dtype=torch.float, device="cuda", requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.half, device="cuda", requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast():
jit_o = t_jit(x, y)
if i == 2:
fwd_graph = t_jit.graph_for(x, y)
jit_o.backward(grad)
self.assertGraphContainsExactly(
fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True
)
with torch.cuda.amp.autocast():
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.half)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_autocast_2(self):
def t(x: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch.softmax(o, dim=-1)
o = o * 4.0
return o
x = torch.randn(8, 4, dtype=torch.half, device="cuda", requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.float, device="cuda", requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast():
jit_o = t_jit(x)
if i == 2:
fwd_graph = t_jit.graph_for(x)
jit_o.backward(grad)
self.assertGraphContainsExactly(
fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True
)
with torch.cuda.amp.autocast():
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.float)
self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_autocast_1_bfloat(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch._C._nn.linear(o, y)
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device="cuda", requires_grad=True)
y = torch.randn(4, 4, dtype=torch.float, device="cuda", requires_grad=True)
grad = torch.randn(
8, 4, dtype=torch.bfloat16, device="cuda", requires_grad=False
)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
jit_o = t_jit(x, y)
if i == 2:
fwd_graph = t_jit.graph_for(x, y)
jit_o.backward(grad)
self.assertGraphContainsExactly(
fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True
)
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.bfloat16)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_autocast_2_bfloat(self):
def t(x: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch.softmax(o, dim=-1)
o = o * 4.0
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device="cuda", requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.float, device="cuda", requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
jit_o = t_jit(x)
if i == 2:
fwd_graph = t_jit.graph_for(x)
jit_o.backward(grad)
self.assertGraphContainsExactly(
fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True
)
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.float)
self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_to_dtype_fp32_to_fp16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.half)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.float, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_to_dtype_fp16_to_fp32(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.float)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.half, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_to_dtype_fp16_to_fp16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.half)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.half, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_fp32_to_bf16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.bfloat16)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.float, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.bfloat16)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_bf16_to_fp32(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.float)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_to_dtype_bf16_to_bf16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.bfloat16)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.bfloat16, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.bfloat16)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(not TEST_MULTIGPU, "requires multiple CUDA device")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_multiple_device_pw(self):
def t(x):
o = x + 1.0
o = torch.relu(o)
return o
x = torch.randn(2, dtype=torch.float32, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
torch.cuda.device(1)
x = x.to("cuda:1")
jit_o = t_jit(x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_graph_for_with_missing_optimized_engine(self):
x = torch.randn(8, 4, 2, dtype=torch.float, device="cuda").requires_grad_()
def t(x: torch.Tensor, flag: bool):
x = x + 1.0
x = torch.relu(x)
if flag:
o = x + 1.0
o = torch.relu(o)
else:
o = x + 2.0
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, False)
jit_o = t_jit(x, False)
jit_o = t_jit(x, True)
o = t(x, True)
self.assertEqual(o, jit_o)
# even with one branch lacking an optimized execution plan, the graph
# (counting subgraphs) should contain exactly one fusion group
self.assertGraphContainsExactly(t_jit.graph_for(x, True), FUSION_GUARD, 1, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_branches(self):
in_feature = 2
out_feature = 4
x = torch.randn(4, in_feature, dtype=torch.float32, device="cuda")
weight = torch.randn(
out_feature, in_feature, dtype=torch.float32, device="cuda"
)
bias = torch.randn(out_feature, dtype=torch.float32, device="cuda")
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, flag: bool):
if flag:
o = torch.nn.functional.linear(x, weight, bias)
o = o + 1.0
o = torch.relu(o)
else:
o = x.sum()
o = o + 2.0
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, weight, bias, True)
jit_o = t_jit(x, weight, bias, True)
o = t(x, weight, bias, True)
self.assertEqual(o, jit_o)
# the executed (linear) branch should be fused into exactly one fusion group
self.assertGraphContainsExactly(
t_jit.graph_for(x, weight, bias, True), FUSION_GUARD, 1
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_scalar_tensor(self):
x = torch.empty([], device="cuda", dtype=torch.float32)
def t(x: torch.Tensor):
o = x + 1.0
o = torch.nn.functional.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o, jit_o)
# pointwise ops on a scalar (0-dim) tensor should still be fused into a
# single fusion group
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
@unittest.skipIf(
os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING") is not None,
"skipping graph_rng when caching allocator is disabled",
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(CUDA_MAJOR < 11, "requires CUDA11 or above")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_graph_rng(self):
self.assertTrue(torch._C._jit_nvfuser_enabled())
size = 10000
a = torch.randn((size,), device="cuda", dtype=torch.float)
def t(x):
o = x + 1.0
o = torch.nn.functional.dropout(o, p=0.1)
o = o + 1.0
o = torch.nn.functional.dropout(o, p=0.1)
return o
t_jit = torch.jit.script(t)
for _ in range(3):
t_jit(a)
self.assertGraphContainsExactly(t_jit.graph_for(a), FUSION_GUARD, 1)
# Control (jitted, ungraphed)
torch.cuda.manual_seed(5)
eager_out = a.clone()
for _ in range(3):
eager_out = t_jit(eager_out)
graph_in = a.clone()
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
torch.cuda.manual_seed(5)
g.capture_begin()
graph_out = t_jit(graph_in)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
# g is now a jitted, graphed version of t.
# Runs a (jitted, graphed) -> (jitted, ungraphed) -> (jitted, graphed) sequence.
# The ops in the overall sequence should be the same as Control.
g.replay()
# graph_out is now filled with g's result. Use it as ungraphed input.
out = t_jit(graph_out)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out should now equal eager_out
self.assertEqual(graph_out, eager_out)
def _test_batch_norm_impl_index_helper(
self,
batch,
c,
hw,
affine=True,
track_running_stats=True,
train=True,
dtype=torch.float32,
):
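# Helper: builds two identical BatchNorm2d modules (scripted vs. eager reference),
# runs profiling iterations, resets grads and running stats, then does a real run and
# compares output, input grad, affine grads, and running stats within dtype-dependent
# tolerances, asserting one fusion group in both the forward and backward graphs.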
# enabling inlining to avoid counter increment in BN forward
torch._C._debug_set_autodiff_subgraph_inlining(True)
class MyModule(torch.nn.Module):
def __init__(self, num_features=10, affine=True, track_running_stats=True):
super(MyModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(
num_features,
1e-5,
affine=affine,
track_running_stats=track_running_stats,
).to(dtype=dtype)
def forward(self, x):
o = self.bn(x)
o = o * 2.0
return o
x = (
torch.randn(batch, c, hw, hw, dtype=torch.float, device="cuda")
.to(dtype=dtype)
.requires_grad_()
)
grad = (
torch.randint(-20, 20, (batch, c, hw, hw), device="cuda")
.to(dtype=dtype)
.div(-10)
)
my_module = MyModule(c, affine, track_running_stats).cuda()
ref_module = MyModule(c, affine, track_running_stats).cuda()
if not train:
my_module.eval()
ref_module.eval()
t_jit = torch.jit.script(my_module)
ref_module.load_state_dict(my_module.state_dict())
ref_x = x.detach().requires_grad_()
for i in range(0, 3):
jit_o = t_jit(x)
jit_o.backward(grad)
# TODO: remove this run?
o = ref_module(ref_x)
o.backward(grad)
has_affine = ref_module.bn.weight is not None
has_running_stats = ref_module.bn.running_mean is not None
if has_running_stats:
my_module.bn.running_mean.zero_()
my_module.bn.running_var.fill_(1.0)
ref_module.bn.running_mean.zero_()
ref_module.bn.running_var.fill_(1.0)
# when train is False there is no grad on weight/bias, so only zero them in training mode
if has_affine and train:
my_module.bn.weight.grad.zero_()
my_module.bn.bias.grad.zero_()
ref_module.bn.weight.grad.zero_()
ref_module.bn.bias.grad.zero_()
x.grad.zero_()
ref_x.grad.zero_()
# real runs
jit_o = t_jit(x)
jit_o.backward(grad)
o = ref_module(ref_x)
o.backward(grad)
# assert forward graph fusion
self.assertGraphContainsExactly(
t_jit.graph_for(x), FUSION_GUARD, 1, consider_subgraphs=True
)
# assert backward graph fusion
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0]
.code.grad_executor_states()[0]
.execution_plans.values()
)[0].graph
self.assertGraphContainsExactly(
bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True
)
if TEST_WITH_ROCM:
e0 = 1e-3
e1 = 1e-2
e2 = 1e-2
else:
e0 = 1e-5 if dtype is not torch.half else 1e-3
e1 = 1e-4 if dtype is not torch.half else 1e-3
e2 = 1e-3 if dtype is not torch.half else 1e-2
self.assertTrue(self._compare("comparing output failed", jit_o, o, e0))
self.assertTrue(
self._compare("comparing input grad failed", x.grad, ref_x.grad, e1)
)
# TODO: switch to welford and reduce this to 1e-5
# The 1e-3 looks bad, but we don't have welford in codegen, so the numerics
# differ significantly between the reference and codegen.
if has_affine and train:
self.assertTrue(
self._compare(
"comparing weight grad failed",
my_module.bn.weight.grad,
ref_module.bn.weight.grad,
e2,
)
)
self.assertTrue(
self._compare(
"comparing bias grad failed",
my_module.bn.bias.grad,
ref_module.bn.bias.grad,
e1,
)
)
if has_running_stats:
self.assertTrue(
self._compare(
"comparing running_mean failed",
my_module.bn.running_mean,
ref_module.bn.running_mean,
e0,
)
)
self.assertTrue(
self._compare(
"comparing running_var failed",
my_module.bn.running_var,
ref_module.bn.running_var,
e0,
)
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_batch_norm_half(self):
with torch.backends.cudnn.flags(enabled=True):
setups = [[True, True], [False, False], [True, False], [False, True]]
for training_and_track, affine in itertools.product(setups, [True, False]):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(
4, 8, 5, affine, track_running_stats, training, torch.half
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_batch_norm_impl_index_inner_bcast(self):
# the repro
self._test_batch_norm_impl_index_helper(2, 1, 1, False, True, True)
# running the full set
setups = [[True, True], [False, False], [True, False], [False, True]]
for training_and_track, affine in itertools.product(setups, [True, False]):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(
2, 1, 1, affine, track_running_stats, training
)
@skipIfRocm
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_batch_norm_impl_index_correctness(self):
with torch.backends.cudnn.flags(enabled=True):
batch = [2, 7, 16]
channels = [4, 89, 19, 32]
hw = [1, 8, 17, 32]
# avoid tolerance failure in CI
torch.cuda.manual_seed_all(211)
# failing sizes (2, 1, 1, 1)
# failing sizes (2, 89, 8, 8) training False, track True, affine: False
for b, c, hw in itertools.product(batch, channels, hw):
setups = [[True, True], [False, False], [True, False], [False, True]]
for training_and_track, affine in itertools.product(
setups, [True, False]
):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(
b, c, hw, affine, track_running_stats, training
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_softplus_fuser(self):
def shifted_softplus(x: torch.Tensor, shift: float):
return functional.softplus(x) - shift
jitted = torch.jit.script(shifted_softplus)
inp = torch.randn(4, 2, dtype=torch.float32, device="cuda").requires_grad_()
inp_ref = inp.detach().clone().requires_grad_()
grad = torch.randn(4, 2, dtype=torch.float32, device="cuda")
aten_o = shifted_softplus(inp_ref, 0.693147)
aten_o.backward(grad)
aten_grad = inp_ref.grad
for i in range(3):
jit_o = jitted(inp, 0.693147)
inp.grad = None # avoid accumulation on grad
jit_o.backward(grad)
jit_grad = inp.grad
assert torch.allclose(jit_o, aten_o)
assert torch.allclose(jit_grad, aten_grad)
self.assertGraphContains(jitted.graph_for(inp, 0.693147), FUSION_GROUP, True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_inplace_removal(self):
def t(x: torch.Tensor):
o = torch.nn.functional.softmax(x, dim=0)
o += x
return o.relu_()
jitted = torch.jit.script(t)
inp = torch.randn(4, 2, dtype=torch.float32, device="cuda")
for i in range(3):
jit_o = jitted(inp)
graph = jitted.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
self.assertGraphContains(graph, "aten::add", True)
self.assertGraphContains(graph, "aten::relu", True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_conv2d_bias(self):
def t(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.conv2d(x, w, bias)
return o.relu()
jitted = torch.jit.script(t)
inp = torch.randn(4, 5, 3, 3, dtype=torch.float32, device="cuda")
weight = torch.randn(2, 5, 2, 2, dtype=torch.float32, device="cuda")
bias = torch.randn(2, dtype=torch.float32, device="cuda")
for i in range(3):
jit_o = jitted(inp, weight, bias)
graph = jitted.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
def t_not_fused(x: torch.Tensor, w: torch.Tensor):
o = torch.nn.functional.conv2d(x, w)
return o.relu()
jitted_not_fused = torch.jit.script(t_not_fused)
for i in range(3):
jit_o = jitted_not_fused(inp, weight)
graph = jitted_not_fused.graph_for(inp)
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
self.assertGraphContains(graph, "aten::relu", True)
def t_bias(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.conv2d(x, w, bias)
return o.relu()
jitted_bias = torch.jit.script(t_bias)
for i in range(3):
jit_o = jitted_bias(inp, weight, bias)
graph = jitted_bias.graph_for(inp)
self.assertGraphContains(graph, FUSION_GROUP, True)
self.assertGraphContains(graph, "prim::add_optional", True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_remove_output_used_only_in_dtype(self):
class MyModule(torch.nn.Module):
def __init__(self, num_features=4):
super(MyModule, self).__init__()
self.bn0 = torch.nn.BatchNorm2d(num_features)
self.bn1 = torch.nn.BatchNorm2d(num_features)
def forward(self, x, y):
o1 = self.bn0(x)
o2 = self.bn1(y)
return torch.relu(o1 + o2)
t = MyModule(4).float().cuda()
jitted = torch.jit.script(t)
x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
with torch.cuda.amp.autocast(True):
for i in range(5):
jit_o = jitted(x, y)
jit_o = jitted(x, y)
o = t(x, y)
self.assertTrue(torch.allclose(jit_o, o))
graph = jitted.graph_for(x, y)
self.assertGraphContains(graph, FUSION_GROUP, True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_fix_shape_expression_bn(self):
class MyModule(torch.nn.Module):
def __init__(self, num_features=4):
super(MyModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(num_features)
def forward(self, x, y):
out1 = self.bn(x)
out2 = out1 + y
out3 = torch.relu(out2)
return out3
t = MyModule(4).float().cuda()
jitted = torch.jit.script(t)
x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device="cuda")
with torch.cuda.amp.autocast(True):
for i in range(5):
jit_o = jitted(x, y)
jit_o = jitted(x, y)
o = t(x, y)
self.assertTrue(torch.allclose(jit_o, o))
graph = jitted.graph_for(x, y)
self.assertGraphContains(graph, FUSION_GROUP, True)
def _run_fwd_helper(self, func, ops, *args):
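# Helper: scripts `func`, warms it up to trigger fusion, checks every output tensor
# against eager, and asserts the graph holds a fusion group while each op listed in
# `ops` has been fully absorbed (appears zero times).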
jitted = torch.jit.script(func)
for i in range(3):
jit_o = jitted(*args)
jit_o = jitted(*args)
o = func(*args)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
graph = jitted.graph_for(*args)
self.assertGraphContains(graph, FUSION_GROUP, True)
for op in ops:
self.assertGraphContainsExactly(graph, op, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_sibling_fusion(self):
device = "cuda"
dtype = torch.float
x = torch.randn(2, 5, dtype=dtype, device=device)
y = torch.randn(2, 5, dtype=dtype, device=device)
def t(x: torch.Tensor):
o1 = x + 1.0
o2 = x * 0.5
return o1, o2
self._run_fwd_helper(t, ["aten::add", "aten::mul"], x)
def t2(x: torch.Tensor, y: torch.Tensor):
o1 = x.sum(0)
o2 = (x * y).sum(0)
return o1, o2
self._run_fwd_helper(t2, ["aten::sum", "aten::mul"], x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_clean_profile_ivalue(self):
device = "cuda"
dtype = torch.float
x = torch.randn(2, 5, dtype=dtype, device=device, requires_grad=True)
# turn on autodiff subgraph inlining
# this is to verify that we clean up the profile_ivalue node outside of the
# fusion code path.
torch._C._debug_set_autodiff_subgraph_inlining(True)
def t(x: torch.Tensor, flag: bool):
return torch.dropout(x, 0.5, flag)
jit_t = torch.jit.script(t)
for idx in range(5):
out = jit_t(x, True)
graph = jit_t.graph_for(x, True)
out = jit_t(x, False)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_sibling_fusion_no_scalar_inputs(self):
device = "cuda"
dtype = torch.float
x = torch.randn(2, 5, dtype=dtype, device=device)
y = torch.randn(3, dtype=dtype, device=device)
# no tensor dependency between o1/o2, so they should not be fused together
def t(x: torch.Tensor, y: torch.Tensor):
o1 = x + 1
o2 = y - 1
return o1, o2
jitted = torch.jit.script(t)
for i in range(3):
jit_o = jitted(x, y)
graph = jitted.graph_for(x, y)
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
def _bias_view_relu_helper(self, shape, output_shape, dtype, device, error):
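# Helper: bias-add -> view -> relu; runs profiling/optimization/final passes of the
# scripted module, compares against eager, and expects view fusion (aten::view_copy)
# unless the target shape contains an inferred -1 dimension.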
class BiasViewRelu(torch.nn.Module):
def __init__(self):
super(BiasViewRelu, self).__init__()
self.bias = torch.nn.Parameter(
torch.randn(shape, dtype=dtype, device=device), requires_grad=False
)
with torch.no_grad():
self.bias.fill_(10)
def forward(self, inputs: torch.Tensor, view_shape: List[int]):
o = inputs + self.bias
o = o.view(view_shape)
return torch.relu(o)
t = BiasViewRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profiling
jit_o = t_jit(x, output_shape)
# optimization
jit_o = t_jit(x, output_shape)
# final
jit_o = t_jit(x, output_shape)
# eager - baseline
o = t(x, output_shape)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, output_shape)
has_inferred_dimension = any([dim == -1 for dim in output_shape])
if has_inferred_dimension:
# prohibit fusing when view_shape contains an inferred dimension
# TODO: Revisit
self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)
self.assertGraphContainsExactly(graph, "aten::view_copy", 0)
else:
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, "aten::view_copy", True)
def _alias_bias_view_relu_helper(self, shape, output_shape, dtype, device, error):
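# Helper: same view pattern, but the input is mutated in place after the view, so the
# alias must block fusion; expects no fusion guard and no aten::view_copy in the graph.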
class BiasViewRelu(torch.nn.Module):
def __init__(self):
super(BiasViewRelu, self).__init__()
self.bias = torch.nn.Parameter(
torch.randn(shape, dtype=dtype, device=device), requires_grad=False
)
with torch.no_grad():
self.bias.fill_(10)
def forward(
self, inputs: torch.Tensor, bias: torch.Tensor, view_shape: List[int]
):
o = inputs.view(view_shape)
inputs.add_(bias)
return torch.relu(o)
t = BiasViewRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profiling
jit_o = t_jit(x.clone(), bias, output_shape)
# optimization
jit_o = t_jit(x.clone(), bias, output_shape)
# final
jit_o = t_jit(x.clone(), bias, output_shape)
# eager - baseline
o = t(x.clone(), bias, output_shape)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias, output_shape)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, "aten::view_copy", 0)
# generate random view given original view
def _random_view(self, original_view: List[int], max_len=8, max_views=10000):
class Moves(enum.Enum):
Merge = 0
Split = 1
Broadcast = 2
ImplicitBroadcast = 3
Keep = 4
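# Merge folds two adjacent dims into one, Split factors a dim by a random divisor,
# Broadcast appends a new size-1 dim, ImplicitBroadcast inserts a size-1 dim into the
# old view before keeping the current dim, and Keep copies the dim through unchanged.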
def valid(old_view, new_view):
old_view_size = reduce(operator.mul, old_view)
new_view_size = reduce(operator.mul, new_view)
return old_view_size == new_view_size
# given a random starting number, find the nearest divisor
def find_nearest_divisor(N):
if 2 >= (N - 1):
return -1
result = random.randint(2, N - 1)
while (N % result) != 0:
result += 1
return result
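# illustrative: for N=12 a random start of 5 walks up to the divisor 6, while a start
# of 7 walks all the way up to 12 itself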
tuple_original_view = [tuple(original_view)]
complete_views = set(tuple_original_view)
to_visit = []
# empty new view, curent originaal view, start pos=0, move count = 0, last_move
to_visit.append(([], original_view, 0, [], Moves.Keep))
# depth-first search of view shapes, starting from the original view
while len(to_visit) > 0 and len(complete_views) < max_views:
new_view, old_view, odx, move_list, last_move = to_visit[-1]
to_visit.pop()
# iterate over each move type
for idx in range(len(Moves)):
state = Moves(idx)
new_view_clone = copy.deepcopy(new_view)
old_view_clone = copy.deepcopy(old_view)
new_move_list = move_list + [state]
new_odx = odx
# Update state using Move state
if state == Moves.Keep:
new_size = old_view_clone[odx]
new_view_clone.append(new_size)
new_odx += 1
elif state == Moves.Merge:
if odx + 1 < len(old_view_clone):
new_size = old_view_clone[odx] * old_view_clone[odx + 1]
new_view_clone.append(new_size)
new_odx += 2
else:
continue
elif state == Moves.Broadcast and last_move != Moves.Broadcast:
new_view_clone.append(1)
elif state == Moves.Split:
new_size = find_nearest_divisor(old_view_clone[odx])
if new_size == -1:
continue
new_view_clone.append(new_size)
old_view_clone[odx] = int(old_view[odx] / new_size)
if old_view_clone[odx] == 1:
new_odx += 1
elif state == Moves.ImplicitBroadcast:
old_view_clone.insert(odx + 1, 1)
new_size = old_view[odx] * 1
new_view_clone.append(new_size)
new_odx += 2
if new_odx < len(old_view_clone) and len(new_move_list) < max_len:
to_visit.append(
(new_view_clone, old_view_clone, new_odx, new_move_list, state)
)
elif valid(original_view, new_view_clone):
final_new_view = tuple(new_view_clone)
complete_views.add(final_new_view)
return list(complete_views)
# ndims - number of dimensions
# test_fn - view test function
def _view_test_generator(self, ndims, test_fn):
# create random tensor
# max value for each dimension
max_size = 10e7
max_value = max(int(pow(max_size, 1.0 / ndims)), 1)
sizes = [random.randint(1, max_value) for idx in range(ndims)]
x = torch.randn(sizes)
original_sizes = list(x.size())
all_views = self._random_view(original_sizes)
random.shuffle(all_views)
max_samples = 20
max_views = min(len(all_views), max_samples)
total = 0
correct = 0
# test random combinations of compatible views
for idx in range(max_views):
for jdx in range(idx + 1, max_views):
total += 1
test_fn(all_views[idx], all_views[jdx], torch.float, "cuda", 1e-6)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_view(self):
torch._C._jit_set_nvfuser_guard_mode(True)
self._bias_view_relu_helper([2, 3, 4, 5], [-1, 4, 5], torch.float, "cuda", 1e-6)
for ndims in range(1, 5):
self._view_test_generator(ndims, self._bias_view_relu_helper)
self._alias_bias_view_relu_helper(
[2, 3, 4, 5], [1, 6, 1, 2, 2, 5, 1], torch.float, "cuda", 1e-6
)
def _bias_flatten_relu_helper(
self, shape, start_dim, end_dim, dtype, device, error
):
class BiasFlattenRelu(torch.nn.Module):
def __init__(self):
super(BiasFlattenRelu, self).__init__()
self.bias = torch.nn.Parameter(
torch.randn(shape, dtype=dtype, device=device), requires_grad=False
)
with torch.no_grad():
self.bias.fill_(10)
def forward(self, inputs: torch.Tensor, start_dim: int, end_dim: int):
o = inputs + self.bias
o = o.flatten(start_dim, end_dim)
return torch.relu(o)
t = BiasFlattenRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, start_dim, end_dim)
self.assertGraphContains(
t_jit.graph_for(x, start_dim, end_dim), "prim::flatten_copy", True
)
def _alias_bias_flatten_relu_helper(
self, shape, start_dim, end_dim, dtype, device, error
):
class BiasFlattenRelu(torch.nn.Module):
def __init__(self):
super(BiasFlattenRelu, self).__init__()
self.bias = torch.nn.Parameter(
torch.randn(shape, dtype=dtype, device=device), requires_grad=False
)
with torch.no_grad():
self.bias.fill_(10)
def forward(
self,
inputs: torch.Tensor,
bias: torch.Tensor,
start_dim: int,
end_dim: int,
):
o = inputs.flatten(start_dim, end_dim)
inputs.add_(bias)
return torch.relu(o)
t = BiasFlattenRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profiling
jit_o = t_jit(x.clone(), bias, start_dim, end_dim)
# optimization
jit_o = t_jit(x.clone(), bias, start_dim, end_dim)
# final
jit_o = t_jit(x.clone(), bias, start_dim, end_dim)
# eager - baseline
o = t(x.clone(), bias, start_dim, end_dim)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias, start_dim, end_dim)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, "prim::flatten_copy", 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_flatten(self):
torch._C._jit_set_nvfuser_guard_mode(True)
self._bias_flatten_relu_helper([2, 3, 4, 5], 0, -1, torch.float, "cuda", 1e-6)
self._bias_flatten_relu_helper([2, 3, 4, 5], 1, -1, torch.float, "cuda", 1e-6)
self._bias_flatten_relu_helper([2, 3, 4, 5], 2, -1, torch.float, "cuda", 1e-6)
self._bias_flatten_relu_helper([2, 3, 4, 5], 0, 3, torch.float, "cuda", 1e-6)
self._bias_flatten_relu_helper([2, 3, 4, 5], 1, 2, torch.float, "cuda", 1e-6)
self._bias_flatten_relu_helper([2, 3, 4, 5], 2, 2, torch.float, "cuda", 1e-6)
self._alias_bias_flatten_relu_helper(
[2, 3, 4, 5], 0, -1, torch.float, "cuda", 1e-6
)
self._alias_bias_flatten_relu_helper(
[2, 3, 4, 5], 1, -1, torch.float, "cuda", 1e-6
)
self._alias_bias_flatten_relu_helper(
[2, 3, 4, 5], 2, -1, torch.float, "cuda", 1e-6
)
self._alias_bias_flatten_relu_helper(
[2, 3, 4, 5], 0, 3, torch.float, "cuda", 1e-6
)
self._alias_bias_flatten_relu_helper(
[2, 3, 4, 5], 1, 2, torch.float, "cuda", 1e-6
)
self._alias_bias_flatten_relu_helper(
[2, 3, 4, 5], 2, 2, torch.float, "cuda", 1e-6
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_strict_fusion(self):
def success(x):
with torch.jit.strict_fusion():
return x + x + x
scripted = self.checkScript(success, (torch.rand([4], device="cuda"),))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("aten::add").check("prim::CudaFusionGroup").run(g)
def failure(x):
with torch.jit.strict_fusion():
return x + torch.mm(x, x) + x
with self.assertRaises(Exception) as error_out:
foo_s = torch.jit.script(failure)
foo_s(torch.rand([4, 4]))
foo_s(torch.rand([4, 4]))
fc = FileCheck().check("Found unfused operators")
fc.check("aten::mm").run(str(error_out.exception))
def _ltc_helper(self, shape, dtype, device, error, approximate=True):
# modeled after LTC linear layer
class LTC(torch.nn.Module):
def __init__(self):
super(LTC, self).__init__()
self.weight = torch.nn.Parameter(
torch.randn([1024, 1024], dtype=dtype, device=device),
requires_grad=False,
)
self.bias = torch.nn.Parameter(
torch.randn([1, 1024], dtype=dtype, device=device),
requires_grad=False,
)
def forward(self, inputs: torch.Tensor):
o = inputs.view([32768, 1024])
o = torch.mm(o, self.weight)
o = o.view([256, 128, 1024])
o = o + self.bias
o = o.view([32768, 1024])
o = o.view([256, 128, 1024])
return torch.nn.functional.gelu(o)
t = LTC()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
# profile/optimization runs
for i in range(3):
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x)
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, "aten::view_copy", True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_nested_view(self):
self._ltc_helper([256, 128, 1024], torch.float, "cuda", 1e-6)
def _bias_squeeze_relu_helper(self, shape, dtype, device, error):
class BiasSqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasSqueezeRelu, self).__init__()
def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
o = inputs + bias
o = torch.squeeze(o)
return torch.relu(o)
t = BiasSqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, "aten::squeeze_copy", True)
def _alias_bias_squeeze_relu_helper(self, shape, dtype, device, error):
class BiasSqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasSqueezeRelu, self).__init__()
def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
o = torch.squeeze(inputs)
inputs.add_(bias)
return torch.relu(o)
t = BiasSqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
o = t(x.clone(), bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, "aten::squeeze_copy", 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_squeeze(self):
self._bias_squeeze_relu_helper([1, 6, 1, 2, 2, 5, 1], torch.float, "cuda", 1e-6)
self._alias_bias_squeeze_relu_helper(
[1, 6, 1, 2, 2, 5, 1], torch.float, "cuda", 1e-6
)
# remove this after opinfo tests are enabled
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_squeeze_zero(self):
x = torch.tensor(1.0, dtype=torch.float, device="cuda")
def squeeze_0(x: torch.Tensor):
o = x + 1.0
o = torch.squeeze(o, 0)
o = o * 2.0
return o
def squeeze_1(x: torch.Tensor):
o = x + 1.0
o = torch.squeeze(o, -1)
o = o + 0.5
return o
squeeze_0_jit = torch.jit.script(squeeze_0)
self._run_helper(squeeze_0_jit, squeeze_0, x)
squeeze_1_jit = torch.jit.script(squeeze_1)
self._run_helper(squeeze_1_jit, squeeze_1, x)
def _bias_unsqueeze_relu_helper(self, shape, dtype, device, error):
class BiasUnsqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasUnsqueezeRelu, self).__init__()
def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
o = inputs + bias
o = torch.unsqueeze(o, 0)
return torch.relu(o)
t = BiasUnsqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
jit_o = t_jit(x, bias)
o = t(x, bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContains(graph, FUSION_GUARD)
self.assertGraphContains(graph, "aten::unsqueeze_copy", True)
def _alias_bias_unsqueeze_relu_helper(self, shape, dtype, device, error):
class BiasUnsqueezeRelu(torch.nn.Module):
def __init__(self):
super(BiasUnsqueezeRelu, self).__init__()
def forward(self, inputs: torch.Tensor, bias: torch.Tensor):
o = torch.unsqueeze(inputs, 0)
inputs.add_(bias)
return torch.relu(o)
t = BiasUnsqueezeRelu()
x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)
t_jit = torch.jit.script(t)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
jit_o = t_jit(x.clone(), bias)
o = t(x.clone(), bias)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
graph = t_jit.graph_for(x, bias)
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)
self.assertGraphContainsExactly(graph, "aten::unsqueeze_copy", 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_unsqueeze(self):
self._bias_unsqueeze_relu_helper([2, 3, 4, 5], torch.float, "cuda", 1e-6)
self._alias_bias_unsqueeze_relu_helper([2, 3, 4, 5], torch.float, "cuda", 1e-6)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_alias_pass_fix(self):
x = torch.randn(4, 24, 2, 2, dtype=torch.float, device="cuda")
w = torch.randn(24, 24, 1, 1, dtype=torch.float, device="cuda")
b = torch.randn(24, dtype=torch.float, device="cuda")
def t(x, w, b):
b2 = b + 1.0
o = torch.conv2d(x, w, b2)
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, w, b)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_squeeze_negative_dim(self):
x = torch.randn(4, 24, 1, 2, dtype=torch.float, device="cuda")
def t(x):
o = x + 1.0
o = o.squeeze(-2)
o = o * 2.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_index_select_0_dim_inputs(self):
lookup_tv = torch.tensor(1.5, device="cuda", dtype=torch.float)
indices_tv = torch.tensor([0], device="cuda", dtype=torch.long)
with nvfuser_singleton_fusion(True):
def t(x_kj, idx_kj):
return torch.index_select(x_kj, 0, idx_kj).relu()
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, lookup_tv, indices_tv)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_index_select_fusion(self):
lookup_size = 68
feat_dim = 128
num_elements = 355984
lookup_tv = torch.rand(lookup_size, feat_dim, dtype=torch.float, device="cuda")
        indices_tv = torch.randint(0, lookup_size, (num_elements,), device="cuda").to(
            dtype=torch.int
        )
sbf = torch.rand(num_elements, feat_dim, dtype=torch.float, device="cuda")
def t(x_kj, idx_kj, sbf):
sbf_res = torch.index_select(x_kj, 0, idx_kj) * sbf
sbf_res = sbf_res + 17
return sbf_res
t_jit = torch.jit.script(t)
        self._run_helper(t_jit, t, lookup_tv, indices_tv, sbf)
@unittest.skipIf(True, "skip because autodiff revert #95565")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_index_select_function(self):
def t(x: torch.Tensor, y: torch.Tensor, ind: torch.Tensor):
o = torch.mul(x, y)
o = torch.index_select(o, 0, ind)
return o
x = torch.randn([68, 128], dtype=torch.float, device="cuda").requires_grad_()
y = torch.randn_like(x).requires_grad_()
ind = torch.randint(0, 68, (130,), device="cuda").to(dtype=torch.int)
grad = torch.randn([130, 128], dtype=torch.float, device="cuda")
t_jit = torch.jit.script(t)
self._run_training_helper(t_jit, t, grad, x, y, ind)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_index_select_shape_expression(self):
x = torch.randn([68, 128], dtype=torch.float, device="cuda")
y = torch.randn_like(x)
ind = torch.randint(0, 68, (130,), device="cuda").to(dtype=torch.int)
def t(x: torch.Tensor, y: torch.Tensor, ind: torch.Tensor):
o = torch.mul(x, y)
o = torch.index_select(o, 0, ind)
return o.size()
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, ind)
jit_o = t_jit(x, y, ind)
o = t(x, y, ind)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("prim::infer_index_select_size").run(g)
self.assertEqual(o, jit_o)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_index_select_runtime_dim(self):
lookup_size = 68
feat_dim = 128
num_elements = 355984
dim = torch.tensor(0, device="cuda").to(dtype=torch.int)
lookup_tv = torch.rand(lookup_size, feat_dim, dtype=torch.float, device="cuda")
        indices_tv = torch.randint(
            0, lookup_size, (num_elements,), dtype=torch.float, device="cuda"
        ).to(dtype=torch.long)
sbf = torch.rand(num_elements, feat_dim, dtype=torch.float, device="cuda")
def t(x_kj: torch.Tensor, idx_kj: torch.Tensor, sbf: torch.Tensor, dim: int):
sbf_res = torch.index_select(x_kj, dim, idx_kj) * sbf
sbf_res = sbf_res + 17
return sbf_res
t_jit = torch.jit.script(t)
        self._run_helper(t_jit, t, lookup_tv, indices_tv, sbf, dim)
@unittest.skipIf(True, "skip because autodiff revert #95565")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_gather_backward(self):
lookup_size = 68
feat_dim = 128
select_dim = 1
x = torch.randn(
lookup_size, feat_dim, dtype=torch.float, device="cuda"
).requires_grad_()
y = torch.randint(0, lookup_size, (lookup_size, select_dim), device="cuda").to(
dtype=torch.long
)
z = torch.rand(
lookup_size, select_dim, dtype=torch.float, device="cuda"
).requires_grad_()
grad = torch.randn(lookup_size, select_dim, dtype=torch.float, device="cuda")
def t(x, y, z):
o = torch.mul(x, z)
o = torch.gather(o, 1, y)
return o
t_jit = torch.jit.script(t)
self._run_training_helper(t_jit, t, grad, x, y, z)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_gather_fusion(self):
lookup_size = 68
feat_dim = 128
select_dim = 1
x = torch.randn(lookup_size, feat_dim, dtype=torch.float, device="cuda")
y = torch.randint(0, lookup_size, (lookup_size, select_dim), device="cuda").to(
dtype=torch.long
)
z = torch.rand(lookup_size, select_dim, dtype=torch.float, device="cuda")
def t(x, y, z):
o = torch.gather(x, 1, y) * z + 176
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, z)
@unittest.skipIf(True, "skip because autodiff revert #95565")
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_gather_sparse_grad(self):
x = torch.randn(2, 2, dtype=torch.float, device="cuda").requires_grad_()
y = torch.randint(0, 1, (2, 2), device="cuda").to(dtype=torch.long)
z = torch.rand(2, 2, dtype=torch.float, device="cuda").requires_grad_()
grad = torch.randn(2, 2, dtype=torch.float, device="cuda")
def t(x, y, z):
o = torch.gather(x + z, 1, y + y, sparse_grad=True)
return o
t_jit = torch.jit.script(t)
self._run_training_helper(t_jit, t, grad, x, y, z, num_bw_fusion=0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_singleton_fusion(self):
x = torch.randn(4, 2, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x.relu()
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_issue1445_fusion(self):
def f(t0, t1, t2, t3):
masked_input = torch.where(t1, t2, t3)
total = masked_input.sum([0, 1, 2, 3])
sizes: List[int] = []
t10 = torch.reshape(t0, sizes)
t7 = total / t10
t4 = t7.to(dtype=torch.float)
return t4
x = torch.randn(1, 1, 1, 1, device="cuda").to(dtype=torch.long)
y = (
torch.randn(3, 2, 1, 1, device="cuda")
.to(dtype=torch.bool)
.expand([3, 2, 1, 2])
)
z = torch.randn(3, 2, 1, 2, device="cuda")
w = torch.tensor(1.5, device="cuda")
f_jit = torch.jit.script(f)
for i in range(5):
out_jit = f_jit(x, y, z, w)
out = f(x, y, z, w)
self.assertEqual(out, out_jit)
self.assertGraphContainsExactly(f_jit.graph_for(x, y, z, w), FUSION_GROUP, 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_disable_sibling_fuse(self):
x = torch.randn(4, 2, device="cuda")
y = torch.randn(8, device="cuda")
s = torch.tensor(1.5, device="cuda")
with nvfuser_horizontal_fusion(False):
def t(x, y, s):
o1 = x + s
o2 = y + s
return o1, o2
t_jit = torch.jit.script(t)
for i in range(5):
t_jit(x, y, s)
# sibling fusion should be disabled with the flag
self.assertGraphContainsExactly(t_jit.graph_for(x, y, s), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_build_shape_expression_native_dropout(self):
x = torch.randn(4, 2, device="cuda")
def t(x):
o, mask = torch.native_dropout(x, 0.0, True)
o1 = o.sigmoid()
o2 = mask.float().sigmoid()
return (o1, o2)
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_scalar_tensor_permuted(self):
x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
y = torch.tensor(1.0, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_cpu_scalar(self):
x = torch.randn(4, 2, 3, device="cuda")
y = torch.tensor(1.0, device="cpu")
z = torch.tensor(2.0, device="cpu")
with nvfuser_singleton_fusion(True):
# testing cpu scalar tensor promotion
def t(x, y):
return x + y
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
# scalar cpu tensor add should NOT be fused
@torch.jit.script
def t1(y, z):
return y * z
for _ in range(5):
t1(y, z)
self.assertGraphContainsExactly(t1.graph_for(y, z), FUSION_GUARD, 0)
# everything, including scalar cpu tensor add should be fused
@torch.jit.script
def t2(x, y, z):
tmp = y + z
return tmp + x
for _ in range(5):
t2(x, y, z)
self.assertGraphContainsExactly(t2.graph_for(x, y, z), "aten::add", 0)
self.assertGraphContainsExactly(t2.graph_for(x, y, z), FUSION_GUARD, 1)
# 'cpu_tmp = y + z' shouldn't be fused.
@torch.jit.script
def t3(x, y, z):
cpu_tmp = y + z
out = x + y
return cpu_tmp, out
for _ in range(5):
t3(x, y, z)
self.assertGraphContainsExactly(t3.graph_for(x, y, z), FUSION_GUARD, 1)
self.assertGraphContainsExactly(t3.graph_for(x, y, z), "aten::add", 1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_shape_expression(self):
x = torch.randn(4, 2, 1, 3, device="cuda")
def t_unsqueeze(x):
t0 = x.relu()
t1 = t0.unsqueeze(1)
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
def t_squeeze(x):
t0 = x.relu()
t1 = t0.squeeze()
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
def t_squeeze_dim(x):
t0 = x.relu()
t1 = t0.squeeze(-2)
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
        # squeezing a dimension that is not size 1 should be a no-op
def t_squeeze_dim_no_op(x):
t0 = x.relu()
t1 = t0.squeeze(1)
t2 = t1 + 1.0
t3 = t1.size()
return t2, t3
def run(fn):
jit_fn = torch.jit.script(fn)
jit_o = jit_fn(x)
jit_o = jit_fn(x)
jit_o = jit_fn(x)
o = fn(x)
# output 0 is a tensor, so we check dtype and value
self.assertEqual(o[0].dtype, jit_o[0].dtype)
self.assertEqual(o[0], jit_o[0])
# output 1 is shape
self.assertEqual(o[1], jit_o[1])
self.assertGraphContainsExactly(jit_fn.graph_for(x), FUSION_GUARD, 1)
for t in [t_unsqueeze, t_squeeze, t_squeeze_dim, t_squeeze_dim_no_op]:
run(t)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_scalar_cuda_tensor(self):
x = torch.tensor(2.0, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x + 1.0
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@torch.jit.script
def t_jitted(x):
return x.sum(0)
for i in range(5):
t_jitted(x)
self.assertGraphContainsExactly(t_jitted.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_overlapped_input(self):
x = torch.randn(8, device="cuda").as_strided((2, 4), (1, 1))
with nvfuser_singleton_fusion(True):
def t(x):
return x + 1.0
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_reduction_empty_axes(self):
x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
with nvfuser_singleton_fusion(True):
def t(x):
sizes: List[int] = []
return x.sum(sizes)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_int_tensor_input(self):
x = torch.randn(4, 2, device="cuda").to(dtype=torch.int)
with nvfuser_singleton_fusion(True):
def t(x):
return x.amax(dim=0)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_to_boolean(self):
x = torch.randn(4, 2, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x.to(dtype=torch.bool)
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_to_copy(self):
x = torch.randn(4, 2, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, dtype: torch.dtype):
o = torch.ops.aten._to_copy(x, dtype=dtype)
return o
t.__disable_jit_function_caching__ = True
t_jit = torch.jit.script(t)
for dtype in [torch.float16, torch.bool, torch.float64]:
self._run_helper(t_jit, t, x, dtype)
def t_none(x):
with torch.jit.strict_fusion():
o = torch.ops.aten._to_copy(x, dtype=None)
return o
t_jit_none = torch.jit.script(t_none)
self._run_helper(t_jit_none, t_none, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_view_copy_graph_guard(self):
x = torch.randn(4, 2, 3, device="cuda").permute([1, 2, 0])
y = [4, 6]
with nvfuser_singleton_fusion(True):
def t(x, y: List[int]):
t1 = x + 1.0
t2 = t1 * 1.0
out = t2.reshape(y)
return out.relu()
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(True, "TS issue: https://github.com/NVIDIA/Fuser/issues/624")
def test_view_copy_graph_guard_double_fusion(self):
x = torch.randn(2, 2, 5, device="cuda")
w = torch.randn(5, 5, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, w):
o = x.view([4, x.size()[-1]])
o = torch.matmul(o, w)
o = o.view([2, 2, o.size()[1]])
return o
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x, w)
o = t(x, w)
self.assertEqual(jit_o, o)
self.assertGraphContainsExactly(
t_jit.graph_for(x, w), FUSION_GUARD, 2, consider_subgraphs=True
)
@skipIfRocm
# see issue here on why we disabled this test https://github.com/csarofeen/pytorch/issues/2127
@unittest.skipIf(
is_pre_volta(), "permutation scheduling can be dangerous on pre-volta device"
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_view_before_permute(self):
view_examples = [
[[1, 19, 1, 12, 7, 1, 99], [1, 19, 1, 3, 2772]],
[[3, 17, 80, 1], [51, 1, 2, 4, 10]],
[[3, 17, 80, 1, 9], [51, 1, 2, 4, 10, 9]],
[[2, 3, 4, 5], [1, 6, 1, 2, 2, 5]],
[[22, 22, 2], [22, 11, 1, 1, 4]],
[[37, 9, 7, 6, 10], [333, 2, 2, 3, 35]],
[[8, 1, 1, 8, 1, 8], [8, 2, 4, 1, 8]],
[[1, 333, 1], [1, 37, 9]],
[[1, 333], [1, 1, 1, 111, 1, 3]],
[[1, 27454, 1, 2], [1, 7844, 1, 7]],
[[1, 7844, 1, 7], [1, 27454, 2]],
]
def _getTransposeAxes(sizes):
            # broadcast dims are left in place
            # the inner-most dim is always moved:
            # it is swapped with one randomly chosen non-broadcast dim
result = []
valid_sizes = []
for idx, val in enumerate(sizes):
if val > 1 and idx < len(sizes) - 1:
valid_sizes.append((idx, val))
result.append(idx)
idx, new_size = valid_sizes[random.randint(0, len(valid_sizes) - 1)]
result[idx] = len(sizes) - 1
result[len(sizes) - 1] = idx
return result
def _transposeSize(sizes, dims):
return [sizes[old_pos] for old_pos in dims]
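        # For illustration (assumed behavior): with sizes = [1, 19, 1, 3, 2772],
        # only dims 1 and 3 qualify (size > 1 and not the last dim); picking
        # dim 3 yields axes [0, 1, 2, 4, 3], and
        # _transposeSize([1, 19, 1, 3, 2772], [0, 1, 2, 4, 3]) == [1, 19, 1, 2772, 3].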
for example in view_examples:
before_view_size, after_view_size = example
axes = _getTransposeAxes(after_view_size)
output_size = _transposeSize(after_view_size, axes)
self._view_before_permute_helper(
before_view_size, after_view_size, output_size, axes
)
def _view_before_permute_helper(self, input_shape, view_shape, output_shape, dims):
def t(x, y, view_shape: List[int], dims: List[int]):
x_v = x.view(view_shape)
x_t = torch.permute(x_v, dims)
o = torch.add(x_t, y)
o = torch.relu(o)
return o
x = torch.randn(*input_shape, device="cuda")
y = torch.randn(*output_shape, device="cuda")
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, view_shape, dims)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_permute(self):
max_dims = 4
for ndims in range(2, max_dims + 1):
shape = [idx + 2 for idx in range(ndims)]
for dims in itertools.permutations(range(ndims)):
self._permute_helper(shape, dims)
def _permute_helper(self, shape, dims):
def t(x, y, dims: List[int]):
x_t = torch.permute(x, dims)
y_t = torch.permute(y, dims)
o = torch.add(x_t, y_t)
o = torch.relu(o)
return o
x = torch.randn(*shape, device="cuda")
y = torch.randn(*shape, device="cuda")
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, dims)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_transpose(self):
max_dims = 4
for ndims in range(2, max_dims + 1):
shape = [idx + 2 for idx in range(ndims)]
for idx in range(1, ndims):
for jdx in range(idx):
self._transpose_helper(shape, idx, jdx)
def _transpose_helper(self, shape, dim0, dim1):
def t(x, y, dim0: int, dim1: int):
x_t = torch.transpose(x, dim0, dim1)
y_t = torch.transpose(y, dim0, dim1)
o = torch.add(x_t, y_t)
o = torch.nn.functional.gelu(o)
return o
x = torch.randn(*shape, device="cuda")
y = torch.randn(*shape, device="cuda")
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, dim0, dim1)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_transpose_default(self):
def t(x, y):
x_t = torch.t(x)
y_t = torch.t(y)
o = torch.add(x_t, y_t)
o = torch.nn.functional.gelu(o)
return o
x = torch.randn(3, 5, device="cuda")
y = torch.randn(3, 5, device="cuda")
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_input_output_passthrough(self):
def t(t0, t1, t2):
mask = t1.to(dtype=torch.bool)
masked_input = torch.where(t0, mask, t2)
return masked_input, mask
t_jit = torch.jit.script(t)
        # stick to integers; this avoids numerical differences due to our
        # promotion
x = torch.randn(4, 4, device="cuda").to(dtype=torch.bool)
y = torch.randn(4, 4, device="cuda").to(dtype=torch.bool)
z = torch.tensor(1.0, device="cuda").to(dtype=torch.bool)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_pointwise_reference_tensor(self):
def t(input1, input2, scalar):
_unsafe_view = torch.ops.aten._unsafe_view(input1, [2, 4, 16])
add_ = torch.ops.aten.add_(_unsafe_view, input2)
gelu_ = torch.ops.aten.gelu(add_)
view_ = torch.ops.aten.view(gelu_, [8, 16])
mul_ = torch.ops.aten.mul(add_, scalar)
return [view_, mul_]
x = torch.randn(8, 16, device="cuda")
bias = torch.randn(16, device="cuda")
scalar = torch.ones(torch.Size([]), device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x, bias, scalar)
o = t(x, bias, scalar)
self.assertEqual(jit_o, o)
self.assertGraphContains(t_jit.graph_for(x, bias, scalar), FUSION_GUARD)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_native_batch_norm_backward(self):
grad_output = torch.randn(4, 2, 3, device="cuda")
input = torch.randn(4, 2, 3, device="cuda")
weight = torch.randn(2, device="cuda")
r_m = torch.randn(2, device="cuda")
r_v = torch.randn(2, device="cuda").abs()
save_mean = torch.randn(2, device="cuda")
save_invstd = torch.randn(2, device="cuda").abs()
with nvfuser_singleton_fusion(True):
def t(
grad_out,
input,
weight,
r_m,
r_v,
save_mean,
save_invstd,
train: bool,
eps: float,
mask: List[bool],
):
return torch.ops.aten.native_batch_norm_backward(
grad_out,
input,
weight,
r_m,
r_v,
save_mean,
save_invstd,
train,
eps,
mask,
)
t_jit = torch.jit.script(t)
for i in range(4):
jit_o = t_jit(
grad_output,
input,
weight,
r_m.clone(),
r_v.clone(),
save_mean,
save_invstd,
True,
1e-5,
[True, True, True],
)
ref_m = r_m.clone()
ref_v = r_v.clone()
jit_o = t_jit(
grad_output,
input,
weight,
r_m,
r_v,
save_mean,
save_invstd,
True,
1e-5,
[True, True, True],
)
o = t(
grad_output,
input,
weight,
ref_m,
ref_v,
save_mean,
save_invstd,
True,
1e-5,
[True, True, True],
)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertEqual(ref_m.dtype, r_m.dtype)
self.assertEqual(ref_m, r_m)
self.assertEqual(ref_v.dtype, r_v.dtype)
self.assertEqual(ref_v, r_v)
self.assertGraphContains(
t_jit.graph_for(
grad_output,
input,
weight,
r_m.clone(),
                r_v.clone(),
save_mean,
save_invstd,
True,
1e-5,
[True, True, True],
),
FUSION_GUARD,
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_contiguous_on_broadcasted(self):
x = torch.randn(4, 1, device="cuda")
y = torch.randn(4, 128, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x, y):
t1 = x.expand([4, 128])
t2 = t1 * y
return t2
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_skip_parser(self):
x = torch.randn(4, 12, device="cuda")
with nvfuser_singleton_fusion(True):
def fn(x):
t1 = x + 1.0
return t1.relu()
fn_jit = torch.jit.script(fn)
self._run_helper(fn_jit, fn, x)
# add node should have been merged into fusion
self.assertGraphContains(fn_jit.graph_for(x), FUSION_GUARD)
self.assertGraphContainsExactly(fn_jit.graph_for(x), "aten::add", 0)
# flips skip parse for `aten::add`, following fusion should skip the
# add node
self.assertFalse(
torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True)
)
def fn_1(x):
t1 = x + 2.0 # change const value so we'll not reuse plan
return t1.relu()
fn_1_jit = torch.jit.script(fn_1)
self._run_helper(fn_1_jit, fn_1, x)
# add node should have been merged into fusion
self.assertGraphContains(fn_1_jit.graph_for(x), FUSION_GUARD)
self.assertGraphContainsExactly(fn_1_jit.graph_for(x), "aten::add", 1)
# flips skip parse for `aten::add`, next fusion should fuse add node
self.assertTrue(torch._C._jit_set_nvfuser_skip_node_kind("aten::add", True))
def fn_2(x):
t1 = x + 2.0 # change const value so we'll not reuse plan
return t1.relu()
fn_2_jit = torch.jit.script(fn_2)
self._run_helper(fn_2_jit, fn_2, x)
# add node should have been merged into fusion
self.assertGraphContains(fn_2_jit.graph_for(x), FUSION_GUARD)
self.assertGraphContainsExactly(fn_2_jit.graph_for(x), "aten::add", 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_cuda_fusion_guard(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
class ConvModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.sin().sigmoid()
mod = ConvModule().to(device="cuda")
inputs = [torch.randn(20, 16, 50, 100, device="cuda", requires_grad=True)]
def reduce_scalar(temp):
return temp.sum()
scripted = torch.jit.script(mod)
with torch.no_grad():
scripted(*inputs)
res = scripted(*inputs)
reduce_scalar(res).backward()
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_nvfuser_comparison_callbacks_with_fallback(self):
try:
fused_result = None
unfused_result = None
graph_ir = None
def callback(fused_outputs, unfused_outputs, graph_str):
nonlocal unfused_result
nonlocal fused_result
nonlocal graph_ir
unfused_result = unfused_outputs[-1]
fused_result = fused_outputs[-1]
graph_ir = graph_str
torch._C._jit_nvfuser_set_comparison_callback(True, callback)
def fn(x, y):
z = torch.add(x, y)
return torch.relu(z)
x = torch.rand((4, 4)).cuda() - 0.5
y = torch.rand((4, 4)).cuda() - 0.5
fn_s = torch.jit.script(fn)
fn_s(x, y)
fn_s(x, y)
fn_s(x, y)
expected = fn(x, y)
self.assertEqual(expected, fused_result)
self.assertEqual(expected, unfused_result)
FileCheck().check("aten::add").run(graph_ir)
finally:
torch._C._jit_nvfuser_clear_comparison_callback()
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_nvfuser_comparison_callbacks_without_fallback(self):
try:
fused_result = None
unfused_result = None
graph_ir = None
def callback(fused_outputs, unfused_outputs, graph_str):
nonlocal unfused_result
nonlocal fused_result
nonlocal graph_ir
if len(unfused_outputs) > 0:
unfused_result = unfused_outputs[-1]
fused_result = fused_outputs[-1]
graph_ir = graph_str
torch._C._jit_nvfuser_set_comparison_callback(False, callback)
def fn(x, y):
z = torch.add(x, y)
return torch.relu(z)
x = torch.rand((4, 4)).cuda() - 0.5
y = torch.rand((4, 4)).cuda() - 0.5
fn_s = torch.jit.script(fn)
fn_s(x, y)
fn_s(x, y)
fn_s(x, y)
expected = fn(x, y)
self.assertEqual(expected, fused_result)
self.assertEqual(None, unfused_result)
FileCheck().check("aten::add").run(graph_ir)
finally:
torch._C._jit_nvfuser_clear_comparison_callback()
@unittest.skipIf(not RUN_NVFUSER, "requires NVFuser")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_cuda_fusion_guard_backward(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
inp = torch.randn(10, device="cuda", requires_grad=True)
grad = torch.randn(10, device="cuda")
def f(x):
a = x.cos().cos()
return a
scripted = torch.jit.script(f)
with profile(activities=[ProfilerActivity.CPU]) as prof:
for _ in range(5):
inp.grad = None
out = scripted(inp)
out.backward(grad)
# check that we do not have fallback triggered
self.assertEqual(prof.events().table().find("fallback"), -1)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
# TODO: generalize this
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
def test_inf_quick_patch(self):
inputs = [
torch.tensor([-float("inf"), float("inf"), 4.0], device="cuda"),
torch.tensor([1.0, float("inf"), 4.0], device="cuda"),
torch.tensor([-float("inf"), -1.5, 4.0], device="cuda"),
torch.tensor([1.0, -3.0, float("nan")], device="cuda"),
torch.tensor([-float("inf"), -float("inf"), -float("inf")], device="cuda"),
torch.tensor([float("inf"), float("inf"), float("inf")], device="cuda"),
torch.tensor([float("nan"), float("nan"), float("nan")], device="cuda"),
]
def fn_amax(x):
return x.amax(dim=0)
def fn_amin(x):
return x.amin(dim=0)
def fn_add_nan(x):
return x.relu() + float("nan")
def fn_add(x):
return x + 1.0
with nvfuser_singleton_fusion(True):
for t in [fn_amax, fn_amin, fn_add, fn_add_nan]:
for x in inputs:
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_clamp_reversed_bound(self):
x = torch.tensor(
[1.0, -float("inf"), 2.0, float("inf"), float("nan")], device="cuda"
)
def t(x):
return x.clamp(min=1.0, max=0.5)
with nvfuser_singleton_fusion(True):
jit_t = torch.jit.script(t)
self._run_helper(jit_t, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_issue_1785(self):
class Fusion(torch.nn.Module):
def __init__(self):
super(Fusion, self).__init__()
def forward(self, x, a, b):
out = torch.mul(x.unsqueeze(-1), a)
out = out + b
return out
x = torch.randn(1024, 192, 3, device="cuda")
a = torch.randn(3, 128, device="cuda")
b = torch.randn(3, 128, device="cuda")
model = Fusion()
jit_model = torch.jit.script(model)
with torch.jit.fuser("fuser2"):
for _ in range(4):
out_ref = model(x, a, b)
out_jit = jit_model(x, a, b)
out_ref = model(x, a, b)
out_jit = jit_model(x, a, b)
self.assertTrue(
self._compare("comparing output failed", out_ref, out_jit, 1e-5)
)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_high_rank_fusion(self):
        # currently we want to limit fusion to nodes whose inputs have rank <= 8
rank_limit = 8
shapes = [4 for i in range(rank_limit + 1)]
x = torch.randn(shapes, device="cuda")
with nvfuser_singleton_fusion(True):
def t(x):
return x.relu()
jit_t = torch.jit.script(t)
for i in range(5):
jit_t(x)
self.assertGraphContainsExactly(jit_t.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_clamp(self):
x = torch.tensor(
[1.0, float("inf"), 2.0, float("nan"), float("-inf")], device="cuda"
)
def clamp_max(x):
return x.clamp(max=1.5)
        def clamp_min(x):
            return x.clamp(min=1.5)
        def clamp_min_max(x):
            return x.clamp(min=1.0, max=3.0)
with nvfuser_singleton_fusion(True):
for t in [clamp_max, clamp_min, clamp_min_max]:
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_device_constant(self):
x = torch.randn(4, 2, device="cuda")
# cpu tensor shouldn't be fused
def t_cpu(x):
return torch.rand_like(x, device=torch.device(type="cpu"))
with nvfuser_singleton_fusion(True):
t_cpu_jit = torch.jit.script(t_cpu)
for _ in range(5):
t_cpu_jit(x)
self.assertGraphContainsExactly(t_cpu_jit.graph_for(x), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_expand(self):
device = "cuda"
x = torch.randn(3, 5, device=device)
y = torch.randn(4, 2, 3, 5, device=device)
def t(x, y):
with torch.jit.strict_fusion():
x = x.relu()
o0 = x.expand(2, 3, 5)
o1 = x.expand_as(y)
return o0, o1
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, y, check_stride=True)
def t2(x, y):
o0 = x.expand(2, 3, 5)
o1 = x.expand_as(y)
x.add_(1)
return o0, o1
t2_jit = torch.jit.script(t2)
self._run_helper(t2_jit, t2, x, y, check_stride=True, num_fusion=0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_scheduler_with_polymorphic_broadcast(self):
device = "cuda"
x0 = torch.randn(10, 128, device=device)
x1 = torch.rand_like(x0)
x2 = torch.randn(10, device=device)
def t(x0, x1, x2):
x3 = x2.unsqueeze(-1)
x4 = x3 + x0
x5 = x3 + x1
x6 = x5.sum(0)
return x4, x6
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x0, x1, x2, check_stride=True)
x2 = torch.randn(128, device=device)
def t2(x0, x1, x2):
x3 = x2.unsqueeze(0)
x4 = x3 + x0
x5 = x3 + x1
x6 = x5.sum(1)
return x4, x6
t2_jit = torch.jit.script(t2)
self._run_helper(t2_jit, t2, x0, x1, x2, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_type_inference(self):
device = "cuda"
x0 = torch.randn(10, 128, device=device)
x1 = torch.rand_like(x0)
x2 = torch.rand_like(x0)
def t(x0, x1, x2, flag: bool = True):
x3 = 2.0 * x0
x4 = 2.0 * x1
x5 = 2.0 * x2
if flag:
return torch.stack([x3, x4, x5], dim=-1)
            # the second code path never runs during profiling, hence its types
            # must be filled in by type inference using the profiling information
            # gathered on the first path
return x0 + x1 + x2
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x0, x1, x2, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_disable_const_chunk_propagation_for_normalization(self):
device = "cuda"
x0 = torch.randn(10, 12, device=device)
x1 = torch.randn(10, 4, device=device)
w0 = torch.randn(12, device=device)
w1 = torch.randn(4, device=device)
def t(x, y, w0, w1):
ih = torch.layer_norm(x, (12,), w0)
i_r, i_z, i_n = ih.chunk(3, dim=1)
i_n = torch.layer_norm(i_n, (4,), w1)
r = torch.sigmoid(i_r)
n = torch.tanh(i_n + r * i_z)
h = n + r * y
return h
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x0, x1, w0, w1, check_stride=True)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_no_tensor_input(self):
device = "cuda"
x = torch.randn(512, device=device)
def t(x):
tensor0 = torch.tensor(3, dtype=torch.float32, device="cuda")
tensor1 = torch.tensor(3, dtype=torch.float32, device="cuda")
o = torch.div(x.numel(), tensor0)
o = torch.mul(o, tensor1)
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, check_stride=True)
        # Note that currently TS embeds constant tensors in the graph;
        # this triggers the memory leak check in CI
torch.jit._state._python_cu.drop_all_functions()
class TestEnableDisableCudaFuser(JitTestCase):
def setUp(self):
super().setUp()
if RUN_NVFUSER:
self.is_enabled = torch._C._jit_set_nvfuser_enabled(False)
def tearDown(self):
if RUN_NVFUSER:
torch._C._jit_set_nvfuser_enabled(self.is_enabled)
super().tearDown()
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
def test_context_manager_test(self):
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
with torch.jit.fuser("fuser2"):
with torch.jit.fuser("fuser2"):
def t1(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t1)
t_jit(x, y)
t_jit(x, y)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
def t2(x, y):
o = x + y
o = o + 3.0
return o
t_jit_2 = torch.jit.script(t2)
t_jit_2(x, y)
t_jit_2(x, y)
self.assertGraphContains(t_jit_2.graph_for(x, y), FUSION_GUARD)
def t3(x, y):
o = x + y
o = o + 4.0
return o
t_jit_3 = torch.jit.script(t3)
t_jit_3(x, y)
t_jit_3(x, y)
self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
def test_register_fuser(self):
self.assertFalse(torch._C._jit_set_nvfuser_enabled(True))
self.assertTrue(torch._C._jit_nvfuser_enabled())
self.assertTrue(torch._C._jit_set_nvfuser_enabled(True))
self.assertTrue(torch._C._jit_nvfuser_enabled())
self.assertTrue(torch._C._jit_set_nvfuser_enabled(False))
self.assertFalse(torch._C._jit_nvfuser_enabled())
@unittest.skipIf(RUN_CUDA, "Testing on CPU only")
def test_register_fuser_cpu(self):
with self.assertRaises(RuntimeError):
torch._C._jit_set_nvfuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
def test_can_be_enabled_nvfuser(self):
expected = RUN_CUDA
self.assertEqual(expected, torch._C._jit_nvfuser_can_be_enabled())
# See TestNNCOpInfoParent
class TestCudaFuserOpInfoParent(JitCommonTestCase):
pass
class TestCudaFuserOpInfo(TestCudaFuserOpInfoParent):
def setUp(self):
super(TestCudaFuserOpInfoParent, self).setUp()
if RUN_NVFUSER:
self.cuda_fuser_options = CudaFuserTestOptions()
# enables guard mode since tracing could change graph to violate guard.
torch._C._jit_set_nvfuser_guard_mode(True)
self.nvfuser_single_node_mode = torch._C._jit_set_nvfuser_single_node_mode(True)
def tearDown(self):
if RUN_NVFUSER:
self.cuda_fuser_options.restore()
torch._C._jit_set_nvfuser_single_node_mode(self.nvfuser_single_node_mode)
super(TestCudaFuserOpInfoParent, self).tearDown()
@slowTest
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@ops(op_db, dtypes=OpDTypes.supported)
def test_nvfuser_correctness(self, device, dtype, op):
if not op.supports_tracing:
self.skipTest("nvfuser requires tracing support")
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
ref = variant(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
val = trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
self.assertEqual(ref, val, exact_layout=True)
# Note: Clearing CU after NVFuser tests
# https://github.com/pytorch/pytorch/issues/35600
# each torch.jit.trace adds state to the _python_cu compilation unit
# since this test traces a lot of functions, out-of-memory can occur
# if the CU is not cleared.
torch.jit._state._python_cu.drop_all_functions()
@skipIfRocm
@slowTest
@unittest.skipIf(not RUN_NVFUSER, "requires CUDA")
@unittest.skipIf(
GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective",
)
@ops(
op_db,
allowed_dtypes=(
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
),
)
def test_nvfuser_extremal_values(self, device, dtype, op):
if not op.supports_tracing:
self.skipTest("nvfuser requires tracing support")
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
def _get_extremal_tensor(x, val, dtype):
if x.dtype != dtype:
return x
return torch.full_like(x, val)
def _get_extremal_input(x, val, dtype):
if isinstance(x, torch.Tensor):
return _get_extremal_tensor(x, val, dtype)
elif is_iterable_of_tensors(x):
return [_get_extremal_tensor(y, val, dtype) for y in x]
return x
def _get_extremal_sample(sample: SampleInput, val, dtype):
extremal_sample = SampleInput(
input=_get_extremal_input(sample.input, val, dtype),
args=tuple(_get_extremal_input(x, val, dtype) for x in sample.args),
kwargs={
k: _get_extremal_input(v, val, dtype)
for k, v in sample.kwargs.items()
},
)
return extremal_sample
def _get_extremal_samples(sample: SampleInput, dtype):
vals = [float("inf"), float("-inf"), float("nan")]
if dtype.is_complex:
complex_vals = itertools.product(vals, vals)
vals = tuple(map(lambda x: complex(*x), complex_vals))
for val in vals:
yield _get_extremal_sample(sample, val, dtype)
variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)
for variant, sample in variant_sample_pairs:
trace = create_traced_fn(self, variant, cache_traced_fn=True)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)
for extremal_sample in _get_extremal_samples(sample, dtype):
try:
with freeze_rng_state():
ref = variant(
*clone_inputs(
(extremal_sample.input, *extremal_sample.args)
),
**extremal_sample.kwargs
)
except (torch._C._LinAlgError, RuntimeError, ValueError):
# if eager errors out, then don't expect NVFuser to pass
continue
with freeze_rng_state():
val = trace(
*clone_inputs((extremal_sample.input, *extremal_sample.args)),
**extremal_sample.kwargs
)
self.assertEqual(val, ref, equal_nan=True, exact_device=True)
# See [Note: Clearing CU after NVFuser tests]
torch.jit._state._python_cu.drop_all_functions()
instantiate_device_type_tests(TestCudaFuserOpInfo, globals(), only_for=("cuda"))
if __name__ == "__main__":
run_tests()
|
Fuser-main
|
python_tests/test_torchscript.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
import torch
import jax.numpy as jnp
from torch.testing import make_tensor
from typing import Optional
from enum import Enum, auto
class ArgumentType(Enum):
# a symbolic value requires an input argument during kernel execution
Symbolic = auto()
# scalar with constant value
ConstantScalar = auto()
# python number - int, float, complex, bool
Constant = auto()
bool_dtypes = (torch.bool,)
int_dtypes = (
torch.int32,
torch.int64,
)
half_precision_float_dtypes = (
torch.bfloat16,
torch.float16,
)
full_precision_float_dtypes = (
torch.float32,
torch.float64,
)
complex_dtypes = (
torch.complex64,
torch.complex128,
)
# Half-precision float dtypes bf16, fp16 are skipped because nvfuser upcasts those dtypes to fp32
# but does not return the original type.
bool_int_dtypes = bool_dtypes + int_dtypes
float_dtypes = half_precision_float_dtypes + full_precision_float_dtypes
int_float_dtypes = int_dtypes + full_precision_float_dtypes
float_complex_dtypes = full_precision_float_dtypes + complex_dtypes
all_dtypes_except_reduced = int_dtypes + full_precision_float_dtypes + complex_dtypes
all_dtypes_except_bool = all_dtypes_except_reduced + half_precision_float_dtypes
all_dtypes = all_dtypes_except_bool + bool_dtypes
map_dtype_to_str = {
torch.bool: "bool",
torch.uint8: "uint8",
torch.int8: "int8",
torch.int16: "int16",
torch.int32: "int32",
torch.int64: "int64",
torch.bfloat16: "bfloat16",
torch.float16: "float16",
torch.float32: "float32",
torch.float64: "float64",
torch.complex64: "complex64",
torch.complex128: "complex128",
}
torch_to_jax_dtype_map = {
torch.bool: jnp.bool_,
torch.uint8: jnp.uint8,
torch.int8: jnp.int8,
torch.int16: jnp.int16,
torch.int32: jnp.int32,
torch.int64: jnp.int64,
torch.bfloat16: jnp.bfloat16,
torch.float16: jnp.float16,
torch.float32: jnp.float32,
torch.float64: jnp.float64,
torch.complex64: jnp.complex64,
torch.complex128: jnp.complex128,
}
torch_to_python_dtype_map = {
torch.bool: bool,
torch.uint8: int,
torch.int8: int,
torch.int16: int,
torch.int32: int,
torch.int64: int,
torch.bfloat16: float,
torch.float16: float,
torch.float32: float,
torch.float64: float,
torch.complex64: complex,
torch.complex128: complex,
}
def make_tensor_like(a):
# type: (torch.Tensor) -> torch.Tensor
"""Returns a tensor with the same properties as the given tensor.
Args:
a (torch.Tensor): The tensor to copy properties from.
Returns:
torch.Tensor: A tensor with the same properties as :attr:`a`.
"""
return torch.testing.make_tensor(
a.shape, device=a.device, dtype=a.dtype, requires_grad=a.requires_grad
)
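# For illustration: given a CUDA float32 tensor t of shape (2, 3) with
# requires_grad=True, make_tensor_like(t) returns a fresh random tensor with the
# same shape, device, dtype, and requires_grad flag, but independent values.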
def make_number(
dtype: torch.dtype, low: Optional[float] = None, high: Optional[float] = None
):
"""Returns a random number with desired dtype
Args:
dtype (torch.dtype): Desired dtype for number.
low (Optional[Number]): Sets the lower limit (inclusive) of the given range.
high (Optional[Number]): Sets the upper limit (exclusive) of the given range.
Returns:
(Scalar): The scalar number with specified dtype.
"""
return make_tensor([1], device="cpu", dtype=dtype, low=low, high=high).item()
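# Illustrative sketch (editor-added; `_example_make_number_usage` is a hypothetical
# helper, not part of the original utilities): make_number draws a single Python
# scalar by sampling a 1-element tensor and unboxing it with .item(), so the result
# follows the dtype's Python equivalent (int for integer dtypes, float for floats).
def _example_make_number_usage():
    n_int = make_number(torch.int64, low=0, high=10)  # e.g. 7, a Python int
    n_float = make_number(torch.float32)  # e.g. -3.2, a Python float
    assert isinstance(n_int, int) and isinstance(n_float, float)
    return n_int, n_float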
def find_nonmatching_dtype(dtype: torch.dtype):
if dtype in int_float_dtypes:
return torch.complex128
elif dtype in complex_dtypes:
return torch.double
elif dtype is torch.bool:
return torch.float32
return None
def is_complex_dtype(dtype: torch.dtype):
return dtype in complex_dtypes
def is_floating_dtype(dtype: torch.dtype):
return dtype in float_dtypes
def is_integer_dtype(dtype: torch.dtype):
return dtype in int_dtypes
def is_tensor(a):
return isinstance(a, torch.Tensor)
|
Fuser-main
|
python_tests/pytest_utils.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
import itertools
from functools import partial, wraps
import math
import torch
from torch.testing import make_tensor
import random
from numbers import Number
from pytest_core import OpInfo, SampleInput, ErrorSample, Domain
from pytest_utils import (
make_number,
find_nonmatching_dtype,
is_floating_dtype,
float_complex_dtypes,
complex_dtypes,
)
from nvfuser import DataType
MINIMUM_SYMBOLIC_SIZE = -1
INT64_MAX = 2**63 - 1
MAX_TENSOR_DIMS = 8
MAX_VECTOR_SIZE = 8
# Determine if a number is within the desired Domain [low, high)
# The domain is half-open. The lower limit is inclusive while the upper limit is exclusive.
def is_within_domain(domain: Domain, a: Number, exclude_zero: bool = False):
# comparison operators are not defined for complex numbers
if isinstance(a, complex):
return True
if domain.low is not None and domain.low > a:
return False
if domain.high is not None and a >= domain.high:
return False
if exclude_zero and a == 0:
return False
return True
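# Illustrative sketch (editor-added; `_example_is_within_domain` is a hypothetical
# helper): the Domain check is half-open, so the low bound is accepted, the high
# bound is rejected, zero can be filtered explicitly, and complex values always pass.
def _example_is_within_domain():
    d = Domain(0, 10)
    assert is_within_domain(d, 0)  # lower limit is inclusive
    assert not is_within_domain(d, 10)  # upper limit is exclusive
    assert not is_within_domain(d, 0, exclude_zero=True)  # zero filtered out
    assert is_within_domain(d, 1 + 2j)  # comparisons are skipped for complex numbers
    return d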
def _extremal_values(dtype: torch.dtype):
_float_vals = (float("inf"), float("-inf"), float("nan"))
_complex_vals = tuple(
complex(*x) for x in itertools.product(_float_vals, _float_vals)
)
_int16_vals = (-32768, 32767)
_int32_vals = (-2147483648, 2147483647)
_int64_vals = (-9223372036854775808, 9223372036854775807)
if dtype in (torch.float16, torch.bfloat16, torch.float32, torch.float64):
return _float_vals
elif dtype in (torch.complex64, torch.complex128):
return _complex_vals
elif dtype is torch.int16:
return _int16_vals
elif dtype is torch.int32:
return _int32_vals
elif dtype is torch.int64:
return _int64_vals
else:
raise ValueError(f"Unsupported dtype --- {dtype}")
def _large_values(dtype: torch.dtype):
_int_vals = (-1113, 1113, -10701, 10701)
_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7)
_float_vals = _float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20)
_complex_vals = tuple(
complex(*x) for x in itertools.product(_float_vals, _float_vals)
)
if dtype == torch.float16:
return _float16_vals
elif dtype in (torch.bfloat16, torch.float32, torch.float64):
return _float_vals
elif dtype in (torch.complex64, torch.complex128):
return _complex_vals
elif dtype in (torch.int16, torch.int32, torch.int64):
return _int_vals
else:
raise ValueError(f"Unsupported dtype --- {dtype}")
def _small_values(dtype: torch.dtype):
eps = 1e-5
_int_vals = (0, -1, 1, -55, 55, -127, 127, -128)
_float_vals = (
0.0,
-0.0,
-1e-3,
1e-3,
-0.25,
0.25,
-1.0,
1.0,
-math.e / 2.0,
math.e / 2.0,
-math.e + eps,
math.e - eps,
-math.e,
math.e,
-math.e - eps,
math.e + eps,
)
_complex_vals = tuple(
complex(*x) for x in itertools.product(_float_vals, _float_vals)
)
if dtype in (torch.float16, torch.bfloat16, torch.float32, torch.float64):
return _float_vals
elif dtype in (torch.complex64, torch.complex128):
return _complex_vals
elif dtype in (torch.int16, torch.int32, torch.int64):
return _int_vals
else:
raise ValueError(f"Unsupported dtype --- {dtype}")
def broadcast_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# jax.lax.broadcast(operand, sizes)
# add new dimensions to left-hand-side of tensor
# dims = tuple(range(len(sizes), len(sizes) + np.ndim(operand)))
# return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims)
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
fewer_original_axes = (
([2, 3], [True, False]),
RuntimeError,
"Invalid broadcast, number of false entries in is_broadcast_dim expected to be",
)
greater_original_axes = (
([2, 3], [True, False, False, False]),
RuntimeError,
"Invalid broadcast, number of false entries in is_broadcast_dim expected to be",
)
error_cases = [
fewer_original_axes,
greater_original_axes,
]
for es in error_cases:
ex_case, ex_type, ex_str = es
input_shape, bcast_dims = ex_case
input_tensor = make_arg(input_shape)
yield SampleInput(input_tensor, bcast_dims), ex_type, ex_str
def broadcast_in_dim_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# The first 5 test cases below are taken from JAX's broadcast_in_dim tests
# https://github.com/google/jax/blob/main/tests/lax_test.py#L1171
# input shape, output shape, bcast_dims
cases = (
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
([1], [2, 3], [1]),
((4, 6, 3, 1), (5, 4, 7, 6, 3, 6, 6), (1, 3, 4, 5)),
)
for input_shape, output_shape, bcast_dims in cases:
input_tensor = make_arg(input_shape)
if op.name == "broadcast_in_dim_symbolic":
bcast_shaped_tensor = make_arg(output_shape)
yield SampleInput(input_tensor, bcast_shaped_tensor, bcast_dims)
else:
yield SampleInput(input_tensor, output_shape, bcast_dims)
def broadcast_in_dim_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# jax.lax.broadcast_in_dim(operand, shape, broadcast_dimensions)
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# 1. Every dimension in the input tensor must be used in broadcast_dimensions.
missing_axis_in_bcast_dims = (
([2, 2], [2, 2, 3], [0]),
RuntimeError,
"The broadcast dimensions should match the input dimensions.",
)
# 2. New shape has weakly more dimensions than the original tensor.
fewer_dims_in_output_shape = (
([2, 2], [2], [0]),
RuntimeError,
"The new shape is expected to be greater-then-or-equal to the input",
)
# 3. broadcast_dimensions is an ascending sequence of integers.
descending_broadcast_dimensions = (
([2, 2], [2, 2], [1, 0]),
RuntimeError,
"Broadcast dimension is not greater than the previous value.",
)
# 4. Each broadcast dimension is within the new shape.
out_of_bounds_broadcast_dimensions = (
([2, 2], [2, 2], [0, 2]),
RuntimeError,
"Invalid broadcast_dims value.",
)
# 5. The original tensor is not broadcastable to desired shape.
# tensor.shape[idx] == 1 or tensor.shape[idx] == output_shape[new_idx]
#
# Jax Exception:
# TypeError: broadcast_in_dim operand dimension sizes must either be 1,
# or be equal to their corresponding dimensions in the target broadcast shape;
# got operand of shape (2, 3), target broadcast shape (2, 3, 4), broadcast_dimensions (0, 2)
not_broadcastable = (
([2, 3], [2, 3, 4], [0, 2]),
RuntimeError,
"Invalid broadcast_dims value.",
)
# 6. TypeError: broadcast_in_dim shape must have every element be nonnegative, got (-1, 2, 3).
negative_shape = (
([2, 3], [2, 3, -1], [0, 1]),
RuntimeError,
"Invalid broadcast_dims value.",
)
# TODO add exceptions for not_broadcastable, negative output shape
error_cases = [
missing_axis_in_bcast_dims,
fewer_dims_in_output_shape,
descending_broadcast_dimensions,
out_of_bounds_broadcast_dimensions,
# not_broadcastable,
# negative_shape,
]
for es in error_cases:
ex_case, ex_type, ex_str = es
input_shape, output_shape, bcast_dims = ex_case
input_tensor = make_arg(input_shape)
if op.name == "broadcast_in_dim_symbolic":
bcast_shaped_tensor = make_arg(output_shape)
yield SampleInput(
input_tensor, bcast_shaped_tensor, bcast_dims
), ex_type, ex_str
else:
yield SampleInput(input_tensor, output_shape, bcast_dims), ex_type, ex_str
def cat_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# concatenating tensors along singleton, broadcast dimensions is unsupported by nvFuser.
# https://github.com/NVIDIA/Fuser/issues/224
# shapes, dim
cases = [
([(3,)], 0), # single tensor provided
# 1D
([(2,), (3,)], 0),
([(2,), (4,)], 0),
([(0,), (2,)], 0),
([(0,), (2,)], -1),
([(2, 3), (2, 4)], 1),
([(2, 3), (2, 4), (2, 5)], 1),
]
for shapes, dim in cases:
yield SampleInput([make_arg(s) for s in shapes], dim)
def cat_error_generator(op, dtype=torch.float32, requires_grad: bool = False, **kwargs):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# shapes, dim, exception type, exception string
empty_input_tensors = (
([], 0),
RuntimeError,
"Attempting to concatenate empty list of tensors",
)
positive_dim = (([(1,), (2,)], 1), RuntimeError, "Invalid dimension to cat")
negative_dim = (([(2,), (2,)], -2), RuntimeError, "Invalid dimension to cat")
# All tensors must have the same number of dimensions
ndims_mismatch = (
([(2,), (2, 3)], 0),
RuntimeError,
"Unexpected number of dimensions",
)
# All tensors must have same shape except for the cat dimension
shape_mismatch = (([(2, 3), (4, 5)], 0), RuntimeError, "known_size == this_size")
error_cases = [
empty_input_tensors,
positive_dim,
negative_dim,
ndims_mismatch,
shape_mismatch,
]
for case, ex_type, ex_str in error_cases:
shapes, dim = case
yield SampleInput([make_arg(s) for s in shapes], dim), ex_type, ex_str
def define_tensor_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
yield SampleInput(shape=[-1], contiguity=[True])
def define_tensor_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
"""
"define_tensor",
[](FusionDefinition& self,
std::vector<int64_t>& sizes,
std::vector<int64_t>& strides,
PrimDataType dtype = DataType::Float,
bool static_sizes = false,
bool is_cpu = false) -> Tensor {
---
"define_tensor",
[](FusionDefinition& self,
std::vector<int64_t>& shape,
std::vector<std::optional<bool>>& contiguity,
PrimDataType dtype = DataType::Float,
bool is_cpu = false) -> Tensor {
"""
check_size_contiguity_match = ErrorSample(
{
"shape": [-1, -1],
"contiguity": [True, True, True],
"dtype": DataType.Float,
},
"The size of contiguity must equal to the number of non-broadcasting IterDomains",
)
check_empty_tensor_size = ErrorSample(
{"shape": [], "contiguity": []},
"Empty tensor is unsupported.",
)
check_max_tensor_size = ErrorSample(
{
"shape": [-1 for _ in range(MAX_TENSOR_DIMS + 1)],
"contiguity": [True for _ in range(MAX_TENSOR_DIMS + 1)],
},
"The specified tensor dimensionality exceeds the max tensor size for nvfuser.",
)
check_above_size_range = ErrorSample(
{"shape": [INT64_MAX + 1], "contiguity": [True]},
"define_tensor(): incompatible function arguments",
TypeError,
)
check_below_size_range = ErrorSample(
{"shape": [MINIMUM_SYMBOLIC_SIZE - 1], "contiguity": [True]},
"The value -2 at index 0 was neither symbolic(-1), zero_element(0), broadcast(1), or static(>1)",
)
check_contiguity_unknown_values = ErrorSample(
{"shape": [10], "contiguity": [-1]},
"define_tensor(): incompatible function arguments.",
TypeError,
)
check_shape_unknown_dtypes = ErrorSample(
{"shape": [10.0], "contiguity": [True]},
"define_tensor(): incompatible function arguments.",
TypeError,
)
# TODO: Fix empty and maximum tensor dimensionality error checks.
# TODO: Add invalid argument checks for contiguity.
error_cases = [
check_size_contiguity_match,
# check_empty_tensor_size,
# check_max_tensor_size,
check_above_size_range,
check_below_size_range,
# check_contiguity_unknown_values,
check_shape_unknown_dtypes,
]
input_tensor = make_tensor(
(10, 10), device="cuda", dtype=dtype, requires_grad=requires_grad
)
for es in error_cases:
yield SampleInput(input_tensor, **es.kwargs), es.ex_type, es.ex_str
def define_vector_constant_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
"""
"define_vector",
[](FusionDefinition& self, py::list& values) -> Vector {
"""
check_above_size_range = ErrorSample(
{"values": [INT64_MAX + 1]},
"define_vector(): incompatible function arguments",
TypeError,
)
check_below_size_range = ErrorSample(
{"values": [MINIMUM_SYMBOLIC_SIZE - 1]},
"The value -2 at index 0 was neither symbolic(-1), zero_element(0), broadcast(1), or static(>1)",
)
check_max_vector_size = ErrorSample(
{
"values": [-1 for _ in range(MAX_VECTOR_SIZE + 1)],
},
"The specified vector size exceeds the max tensor size for nvfuser.",
)
error_cases = [
# FIXME: The above_size_range case gives a non-sensical error message.
# "Unable to cast Python instance to C++ type (#define PYBIND11_DETAILED_ER"
# check_above_size_range,
check_below_size_range,
check_max_vector_size,
]
for es in error_cases:
yield SampleInput(**es.kwargs), es.ex_type, es.ex_str
def define_vector_input_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
"""
"define_vector",
[](FusionDefinition& self, size_t size) -> Vector {
"""
check_max_vector_size = ErrorSample(
{
"size": (MAX_VECTOR_SIZE + 1),
},
"The specified vector size exceeds the max tensor size for nvfuser.",
)
error_cases = [
check_max_vector_size,
]
for es in error_cases:
yield SampleInput(**es.kwargs), es.ex_type, es.ex_str
def _special_value_binary_generator(
lhs_generator_fn, rhs_generator_fn, dtype, requires_grad
):
lhs_vals, rhs_vals = zip(*itertools.product(lhs_generator_fn, rhs_generator_fn))
lhs = torch.tensor(
lhs_vals, device="cuda", dtype=dtype, requires_grad=requires_grad
)
rhs = torch.tensor(
rhs_vals, device="cuda", dtype=dtype, requires_grad=requires_grad
)
return SampleInput(lhs, rhs)
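# Illustrative sketch (editor-added; `_example_special_value_pairing` is a hypothetical
# helper): zip(*itertools.product(lhs, rhs)) pairs every lhs value with every rhs value,
# so both generated tensors hold len(lhs) * len(rhs) elements and the binary op is
# exercised on the full cross product of special values.
def _example_special_value_pairing():
    lhs_vals, rhs_vals = zip(*itertools.product((1.0, 2.0), (3.0, 4.0)))
    assert lhs_vals == (1.0, 1.0, 2.0, 2.0)
    assert rhs_vals == (3.0, 4.0, 3.0, 4.0)
    return lhs_vals, rhs_vals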
def elementwise_binary_generator(
op: OpInfo,
dtype: torch.dtype,
requires_grad: bool = False,
*,
supports_numbers: bool = True,
enable_broadcast_testing: bool = True,
enable_extremal_value_testing: bool = True,
enable_large_value_testing: bool = True,
enable_small_value_testing: bool = True,
**kwargs,
):
low = None if op.domain.low is None else max(-9, op.domain.low)
high = None if op.domain.high is None else min(9, op.domain.high)
make_arg = partial(
make_tensor,
device="cuda",
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
**kwargs,
)
shapes = (
(0, 2, 1),
(5, 0, 3),
(),
(11,),
(4, 4),
(1024, 1024),
(64, 64, 64),
)
# Typical inputs
for shape in shapes:
yield SampleInput(make_arg(shape), make_arg(shape))
yield SampleInput(
make_arg(shape, noncontiguous=True), make_arg(shape, noncontiguous=True)
)
if enable_broadcast_testing:
broadcast_shapes = (
((1,), ()),
((2,), ()),
((1,), (2,)),
((2, 1), (2,)),
((1, 2), (2,)),
((3, 2), (2,)),
((1, 3, 2), (2,)),
((1, 3, 2), (3, 2)),
((3, 1, 2), (3, 2)),
((2, 3, 2), ()),
((3, 1, 2), (1, 3, 2)),
)
for lhs_shape, rhs_shape in broadcast_shapes:
yield SampleInput(make_arg(lhs_shape), make_arg(rhs_shape))
yield SampleInput(
make_arg(lhs_shape, noncontiguous=True),
make_arg(rhs_shape, noncontiguous=True),
)
# Create filtered special inputs for this operation's domain
def _filter_lhs_domain(values):
return [v for v in values if is_within_domain(op.domain, v)]
def _filter_rhs_domain(values):
# NOTE: Check exclude_zero flag to avoid undefined behavior such as ZeroDivisionError: division by zero
exclude_zero = kwargs.get("exclude_zero", False)
return [v for v in values if is_within_domain(op.domain, v, exclude_zero)]
if (
enable_large_value_testing
and dtype != torch.bool
and dtype not in complex_dtypes
):
lhs_large_values = _filter_lhs_domain(_large_values(dtype))
rhs_large_values = _filter_rhs_domain(_large_values(dtype))
yield _special_value_binary_generator(
lhs_large_values, rhs_large_values, dtype, requires_grad
)
if enable_small_value_testing and dtype != torch.bool:
lhs_small_values = _filter_lhs_domain(_small_values(dtype))
rhs_small_values = _filter_rhs_domain(_small_values(dtype))
yield _special_value_binary_generator(
lhs_small_values, rhs_small_values, dtype, requires_grad
)
if enable_extremal_value_testing and dtype in float_complex_dtypes:
lhs_extremal_values = _filter_lhs_domain(_extremal_values(dtype))
rhs_extremal_values = _filter_rhs_domain(_extremal_values(dtype))
yield _special_value_binary_generator(
lhs_extremal_values, rhs_extremal_values, dtype, requires_grad
)
# Test interactions between extreme and normal values
make_cuda_tensor = partial(
torch.tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
rhs_normal = [random.uniform(-10, 10) for _ in range(len(lhs_extremal_values))]
lhs_normal = [random.uniform(-10, 10) for _ in range(len(rhs_extremal_values))]
yield SampleInput(
make_cuda_tensor(lhs_extremal_values), make_cuda_tensor(rhs_normal)
)
yield SampleInput(
make_cuda_tensor(lhs_normal), make_cuda_tensor(rhs_extremal_values)
)
def _elementwise_binary_torch(op):
@wraps(op)
def _fn(x, y):
if isinstance(x, torch.Tensor) or isinstance(y, torch.Tensor):
return op(x, y)
return op(torch.tensor(x), torch.tensor(y)).item()
return _fn
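# Illustrative sketch (editor-added; `_example_elementwise_binary_wrapper` is a
# hypothetical helper): the wrapper lets a torch binary op also accept plain Python
# numbers by boxing them into tensors and unboxing the result with .item().
def _example_elementwise_binary_wrapper():
    wrapped_add = _elementwise_binary_torch(torch.add)
    assert wrapped_add(2, 3) == 5  # two numbers -> boxed, computed, unboxed to a number
    t = wrapped_add(torch.tensor([1.0]), 2.0)  # any tensor input keeps a tensor output
    assert isinstance(t, torch.Tensor)
    return t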
def elementwise_unary_generator(
op: OpInfo,
dtype: torch.dtype,
requires_grad: bool = False,
*,
supports_numbers: bool = True,
enable_extremal_value_testing: bool = True,
enable_large_value_testing: bool = True,
enable_small_value_testing: bool = True,
**kwargs,
):
low = None if op.domain.low is None else max(-9, op.domain.low)
high = None if op.domain.high is None else min(9, op.domain.high)
make_arg = partial(
make_tensor,
device="cuda",
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
**kwargs,
)
shapes = (
(0, 2, 1),
(5, 0, 3),
(),
(11,),
(4, 4),
(1024, 1024),
(64, 64, 64),
)
# Typical inputs
for shape in shapes:
yield SampleInput(make_arg(shape))
yield SampleInput(make_arg(shape, noncontiguous=True))
# Create filtered special inputs for this operation's domain
def _filter_domain(values):
return [v for v in values if is_within_domain(op.domain, v)]
if (
enable_large_value_testing
and dtype != torch.bool
and dtype not in complex_dtypes
):
filtered_large_values = _filter_domain(_large_values(dtype))
yield SampleInput(
torch.tensor(
filtered_large_values,
device="cuda",
dtype=dtype,
requires_grad=requires_grad,
)
)
if enable_small_value_testing and dtype != torch.bool:
filtered_small_values = _filter_domain(_small_values(dtype))
yield SampleInput(
torch.tensor(
filtered_small_values,
device="cuda",
dtype=dtype,
requires_grad=requires_grad,
)
)
if enable_extremal_value_testing and dtype in float_complex_dtypes:
filtered_extremal_values = _filter_domain(_extremal_values(dtype))
yield SampleInput(
torch.tensor(
filtered_extremal_values,
device="cuda",
dtype=dtype,
requires_grad=requires_grad,
)
)
def _elementwise_unary_torch(op):
@wraps(op)
def _fn(x):
if isinstance(x, torch.Tensor):
return op(x)
return op(torch.tensor(x)).item()
return _fn
def full_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.full(size, fill_value, dtype=None)
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# Error: Trying to create tensor with negative dimension
negative_input_shape = [2, -2]
yield SampleInput(
negative_input_shape, make_number(dtype), dtype
), RuntimeError, "extent_int >= 0"
def gather_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.gather(input: Tensor, dim: int, index: LongTensor)
# * input and index tensors have same ndims.
# * index tensors must be smaller than input tensor along all dims except specified axis.
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
make_index = partial(
make_tensor, device="cuda", dtype=torch.long, requires_grad=False
)
# a.shape, dim, b.shape
cases = (
((4, 2, 3), 0, (8, 2, 3)),
((4, 2, 3), 1, (4, 1, 3)),
((4, 2, 3), 2, (4, 2, 5)),
((4,), 0, (8)),
((4,), 0, (1)),
((4, 1), 0, (3, 1)),
((4, 1), 1, (4, 5)),
# negative dim
((4, 2, 3), -3, (8, 2, 3)),
((4, 2, 3), -2, (4, 1, 3)),
((4, 2, 3), -1, (4, 2, 5)),
((4,), -1, (8)),
((4,), -1, (1)),
((4, 1), -2, (3, 1)),
((4, 1), -1, (4, 5)),
# nvfuser gather does not support broadcast non-axis dimensions
)
for shape_a, dim, shape_b in cases:
a = make_arg(shape_a)
b = make_index(shape_b, low=0, high=shape_a[dim])
yield SampleInput(a, b, dim)
def index_select_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
make_index = partial(make_tensor, device="cuda", requires_grad=False)
# a.shape, dim, b.shape
cases = (
((4, 2, 3), 0, (8)),
((4, 2, 3), 1, (7)),
((4, 2, 3), 2, (2)),
((4,), 0, (8)),
((4,), 0, (1)),
((4, 1), 0, (3)),
((4, 1), 1, (5)),
((1, 0, 3), 0, (8)),
)
for shape_a, dim, shape_b in cases:
for index_dtype in [torch.int, torch.long]:
a = make_arg(shape_a)
b = make_index(shape_b, low=0, high=shape_a[dim], dtype=index_dtype)
yield SampleInput(a, b, dim)
def index_select_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.index_select(input: Tensor, dim: int, index: LongTensor)
# * dim is within bounds
# * index is a 1D vector
# * index array can't have zero elements
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
make_index = partial(make_tensor, device="cuda", requires_grad=False)
input_shape = (4, 2)
index_shape = (8,)
a = make_arg(input_shape)
# dim, exception type, exception string
positive_axis = (2, RuntimeError, "index_select on invalid axis")
negative_axis = (-3, RuntimeError, "index_select on invalid axis")
error_cases = [
positive_axis,
negative_axis,
]
for dim, ex_type, ex_str in error_cases:
b = make_index(index_shape, low=0, high=10, dtype=torch.long)
yield SampleInput(a, b, dim), ex_type, ex_str
# TODO add index dtype check
# b = make_index(index_shape, low=0, high=input_shape[0], dtype=torch.float)
# yield SampleInput(a, b, 0), RuntimeError, "index tensor can only be int or long dtype."
# TODO add index out-of-bounds check
# b = make_index(index_shape, low=10, high=100, dtype=torch.long)
# yield SampleInput(a, b, 0), RuntimeError, "out of bounds index value."
def iota_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.arange(start=0, end, step=1, dtype=None)
# nvfuser.iota(length, start, step, dtype)
#
# length, start, step are not complex numbers and are finite numbers.
# step cannot be 0
yield SampleInput(
make_number(torch.complex64, low=1),
make_number(dtype, low=0),
make_number(dtype, low=0),
dtype,
), RuntimeError, "length must be integer"
yield SampleInput(
make_number(torch.int64, low=1),
make_number(torch.complex64),
make_number(dtype, low=0),
dtype,
), RuntimeError, "iota: start dtype does not match specified dtype argument"
yield SampleInput(
make_number(torch.int64, low=1),
make_number(dtype, low=0),
make_number(torch.complex64),
dtype,
), RuntimeError, "iota: step dtype does not match specified dtype argument"
if is_floating_dtype(dtype):
yield SampleInput(
make_number(torch.int64, low=1),
float("inf"),
float("inf"),
dtype,
), RuntimeError, "iota: length, start, step must be finite numbers."
zero_step = torch.tensor([0], dtype=dtype).item()
yield SampleInput(
10, make_number(dtype), zero_step, dtype
), RuntimeError, "iota: step value must not equal zero."
def pad_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# Nvfuser - fd.ops.pad(Tensor arg, std::vector<int64_t>& pad_widths, std::optional<Scalar> value)
# Jax ----- jax.lax.pad(operand, padding_value, padding_config)
# PyTorch - torch.nn.functional.pad(input, pad, mode='constant', value=None)
#
# Note: Nvfuser does not support interior (between-element) padding.
#
# Nvfuser errors
# 1) Tensor arg and pad value must have the same dtype
# 2) Number of pad widths must be at most twice the input dimension - NvFuser
# 3) Dimension size after padding is not at least 0
#
# Jax and PyTorch errors
# 1) Interior padding is non-negative
# 2) Length of pad_widths is equal to number of operands
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
input_shape = (2, 2)
valid_pad_width = [1, 1, -1, 2]
yield SampleInput(
make_arg(input_shape),
valid_pad_width,
make_number(find_nonmatching_dtype(dtype)),
), RuntimeError, "Tensor arg and pad value must have the same dtype."
# TODO Add better error message.
# Dimension size after padding is not at least 0
delete_all_pad_width = [-3, 0, 0, 0]
yield SampleInput(
make_arg(input_shape), delete_all_pad_width, make_number(dtype)
), RuntimeError, "extent_int > 0"
too_many_pad_width = [1, 1, 1, 1, 1, 1]
yield SampleInput(
make_arg(input_shape), too_many_pad_width, make_number(dtype)
), RuntimeError, "Number of pad widths must be at most twice the input dimension"
uneven_pad_width = [1, 1, 0]
yield SampleInput(
make_arg(input_shape), uneven_pad_width, make_number(dtype)
), RuntimeError, "Invalid number of padding widths"
def permute_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
cases = (
((4, 3, 7, 8), (0, 1, 2, 3)),
((4, 3, 7, 8), (1, -2, 0, 3)),
((4, 3, 7, 8), (-2, 1, 0, -1)),
((4, 3, 7, 8), (0, 3, 1, 2)),
((4, 3, 7, 8), (0, -1, 1, 2)),
((4, 7), (1, 0)),
)
for shape, dims in cases:
yield SampleInput(make_arg(shape), dims)
def permute_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.permute(input: torch.Tensor, dims: List[int])
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
input_shape = (10, 3, 4, 4)
# dims = dtype, duplicate, in-range
# TODO Add dtype check.
yield SampleInput(
make_arg(input_shape), [0.0, 1.0, 2.0, 3.0]
), TypeError, "permute(): incompatible function arguments"
# TODO Add duplicate axis check.
yield SampleInput(
make_arg(input_shape), [0, 1, 1, 3]
), RuntimeError, "Duplicate entries in transformation map"
# TODO Add in-range axis check.
yield SampleInput(
make_arg(input_shape), [0, 1, 2, 4]
), RuntimeError, "New2Old axes are not within the number of dimensions of the provided domain"
# TODO Add in-range axis check.
yield SampleInput(
make_arg(input_shape), [0, 1, 2, -5]
), RuntimeError, "New2Old axes are not within the number of dimensions of the provided domain"
# TODO Add missing axes check.
# If dims list is empty, NvFuser ignores the permute operation.
yield SampleInput(
make_arg(input_shape), [0]
), RuntimeError, "The number of dimensions in the tensor input does not match the length of the desired ordering of dimensions"
# TODO Add out-of-bounds axes check.
yield SampleInput(
make_arg(input_shape), [0, 1, 2, 3, 4]
), RuntimeError, "The number of dimensions in the tensor input does not match the length of the desired ordering of dimensions"
def reduction_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor,
device="cuda",
dtype=dtype,
requires_grad=requires_grad,
# We set low (inclusive) and high (exclusive) here to avoid values
# whose products can otherwise become extremely large
low=-2,
high=3,
)
# shape, dim, keepdim, dtype
cases = (
((4, 4), None, False, None),
((5,), None, True, None),
((5,), (0,), False, None),
((8, 1, 6), (1,), True, None),
((8, 7, 5, 1), (0, 1), True, None),
((8, 7, 5, 1), (1, 3), False, None),
)
for c in cases:
shape, dim, keepdim, dtype = c
yield (SampleInput(make_arg(shape), dim, keepdim, dtype=dtype))
def reduction_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor,
device="cuda",
dtype=dtype,
requires_grad=requires_grad,
# We set low (inclusive) and high (exclusive) here to avoid values
# whose products can otherwise become extremely large
low=-2,
high=3,
)
# shape
cases = (
(8, 1, 6),
(8, 7, 5, 1),
)
# axes : List[int]
# 1) all axes are int --- use float dtype
# 2) all axes are unique --- duplicates
# 3) after normalization, 0 <= axis[i] <= len(size)
# 4) If empty tensor, then axis == 0
int_dtype_axis = (
lambda dims: float(dims),
TypeError,
"var_mean(): incompatible function arguments.",
)
duplicate_axis = (
lambda dims: (0, 0, 0),
RuntimeError,
"Reduction axes are not unique",
)
lower_bound = (lambda dims: (-dims - 1,), RuntimeError, "Reduction on invalid axis")
upper_bound = (lambda dims: (dims,), RuntimeError, "Reduction on invalid axis")
# TODO Fix duplicate_axis, lower_bound, upper_bound
error_cases = [int_dtype_axis]
for shape, es in itertools.product(cases, error_cases):
input_tensor = make_arg(shape)
axis_fn, ex_type, ex_str = es
yield SampleInput(input_tensor, axis_fn(len(shape))), ex_type, ex_str
def reshape_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# TODO Add examples with negative index
# TODO: Add zero-dim cases
# TODO: Add strided tensor cases
cases = (
((1, 19, 1, 12, 7, 1, 99), (1, 19, 1, 3, 2772)),
((3, 17, 80, 1), (51, 1, 2, 4, 10)),
((3, 17, 80, 1, 9), (51, 1, 2, 4, 10, 9)),
((2, 3, 4, 5), (1, 6, 1, 2, 2, 5)),
((22, 22, 2), (22, 11, 1, 1, 4)),
((37, 9, 7, 6, 10), (333, 2, 2, 3, 35)),
((8, 1, 1, 8, 1, 8), (8, 2, 4, 1, 8)),
((1, 333, 1), (1, 37, 9)),
((1, 333), (1, 1, 1, 111, 1, 3)),
((1, 27454, 1, 2), (1, 7844, 1, 7)),
((1, 7844, 1, 7), (1, 27454, 2)),
)
for tensor_shape, output_shape in cases:
yield SampleInput(make_arg(tensor_shape), tensor_shape, output_shape)
def reshape_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.reshape(input: Tensor, shape: [int])
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
tensor_shape = (3, 14)
# Only a single inferred axis -1.
yield SampleInput(
make_arg(tensor_shape), tensor_shape, [3, -1, -1]
), RuntimeError, "Only one dimension can by inferred"
# Number of elements must be equal for input and output tensors
yield SampleInput(
make_arg(tensor_shape), tensor_shape, [3, 2, 8]
), RuntimeError, "Total element counts across view operation must match"
# TODO: add stride testing
def slice_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# shape, start_indices, end_indices
cases = (
((5, 7, 8), (1, 0, 3), (2, 6, 8)),
((3,), (1,), (2,)),
)
for shape, start_indices, end_indices in cases:
a = make_arg(shape)
yield SampleInput(a, start_indices=start_indices, end_indices=end_indices)
def slice_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
# shape
cases = ((10, 10), (5, 5))
check_start_indices = ErrorSample(
{"start_indices": [-1, -2], "end_indices": [5, 5], "strides": [7, 7]},
"Slice operation start_indices must be greater-than-or-equal-to 0.",
)
check_end_indices = ErrorSample(
{"start_indices": [3, 4], "end_indices": [1, 2], "strides": [1, 1]},
"Slice operation end_indices must be greater-than-or-equal-to start_indices.",
)
check_strides = ErrorSample(
{"start_indices": [0, 0], "end_indices": [5, 5], "strides": [5, 5]},
"nvFuser Limitation: All slice operation strides must be of size 1.",
)
check_tensor_dims = ErrorSample(
{"start_indices": [0, 0, 0], "end_indices": [4, 4, 4], "strides": [1, 1, 1]},
"Number of tensor dimensions does not match slice dimensions!",
)
check_slice_dims_start = ErrorSample(
{"start_indices": [0, 0, 0], "end_indices": [4, 4], "strides": [1, 1]},
"Slice start_indices and strides don't match!",
)
check_slice_dims_end = ErrorSample(
{"start_indices": [0, 0], "end_indices": [4, 4, 4], "strides": [1, 1]},
"Slice indexing attribute dimensions don't match!",
)
check_slice_dims_stride = ErrorSample(
{"start_indices": [0, 0], "end_indices": [4, 4], "strides": [1, 1, 1]},
"Slice start_indices and strides don't match!",
)
error_cases = [
check_start_indices,
check_end_indices,
check_strides,
check_tensor_dims,
check_slice_dims_start,
check_slice_dims_end,
check_slice_dims_stride,
]
for shape, es in itertools.product(cases, error_cases):
input_tensor = make_arg(shape)
yield SampleInput(input_tensor, **es.kwargs), es.ex_type, es.ex_str
def take_along_axis_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
make_index = partial(
make_tensor, device="cuda", dtype=torch.long, requires_grad=False
)
# a.shape, dim, b.shape
cases = (
((4, 2, 3), 0, (8, 2, 3)),
((4, 2, 3), 1, (4, 1, 3)),
((4, 2, 3), 2, (4, 2, 5)),
((4,), 0, (8)),
((4,), 0, (1)),
((4, 1), 0, (3, 1)),
((4, 1), 1, (4, 5)),
# negative dim
((4, 2, 3), -3, (8, 2, 3)),
((4, 2, 3), -2, (4, 1, 3)),
((4, 2, 3), -1, (4, 2, 5)),
((4,), -1, (8)),
((4,), -1, (1)),
((4, 1), -2, (3, 1)),
((4, 1), -1, (4, 5)),
# broadcast non-axis dimensions
((4, 2, 3), 0, (8, 2, 1)),
((4, 2, 3), 0, (8, 1, 3)),
((4, 2, 3), 0, (8, 2, 3)),
)
for shape_a, dim, shape_b in cases:
a = make_arg(shape_a)
b = make_index(shape_b, low=0, high=shape_a[dim])
yield SampleInput(a, b, dim)
def take_along_axis_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# numpy.take_along_axis(arr: Tensor, indices: LongTensor, axis: int)
#
# torch.take_along_dim(input: Tensor, indices: LongTensor, dim: int)
# * If no dim argument, flatten tensors.
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
make_index = partial(
make_tensor, device="cuda", dtype=torch.long, requires_grad=False
)
input_shape = (4, 2)
a = make_arg(input_shape)
valid_index_shape = (3, 1)
b = make_index(valid_index_shape, low=0, high=10, dtype=torch.long)
# out-of-bounds axis error checks
ex_type = RuntimeError
ex_str = "Tensor arguments have dimension"
positive_error_dim = 2
negative_error_dim = -3
yield SampleInput(a, b, positive_error_dim), ex_type, ex_str
yield SampleInput(a, b, negative_error_dim), ex_type, ex_str
# TODO Fix: index tensor integer dtype
# b = make_index(valid_index_shape, low=0, high=input_shape[0], dtype=torch.float)
# yield SampleInput(a, b, 0), RuntimeError, "index tensor can only be int or long dtype."
# TODO Fix: out-of-bound index value
# b = make_index(valid_index_shape, low=10, high=100, dtype=torch.long)
# yield SampleInput(a, b, 0), RuntimeError, "out of bounds index value."
# TODO Fix: index shape exceeds input tensor axis
# larger_index_shape = (5, 3)
# b = make_index(
# larger_index_shape, low=0, high=larger_index_shape[0], dtype=torch.long
# )
# yield (
# SampleInput(a, b, 0),
# RuntimeError,
# "Expected dimension of index tensor to be smaller than input tensor except for specified axis",
# )
# TODO Fix: too many dimensions in index tensor
# dim argument must be specified. Otherwise, the tensors are flattened.
# too_many_dims_index_shape = (3, 1, 2)
# b = make_index(
# too_many_dims_index_shape,
# low=0,
# high=too_many_dims_index_shape[0],
# dtype=torch.long,
# )
# yield (
# SampleInput(a, b, 0),
# RuntimeError,
# "input and indices should have the same number of dimensions",
# )
def var_mean_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
"""torch.var_mean(input, dim=None, *, correction=1, keepdim=False)"""
correction = (0, 1)
samples = reduction_generator(op, dtype, requires_grad)
for c, sample in itertools.product(correction, samples):
a = sample.args[0]
dim = (
sample.args[1]
if (len(sample.args) > 1 and sample.args[1])
else tuple(range(a.ndim))
)
keepdim = sample.args[2] if len(sample.args) > 2 else False
yield SampleInput(a, dim, correction=c, keepdim=keepdim)
def where_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
# torch.where(condition, input, other)
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
input_shape = (2, 3, 4)
yield SampleInput(
make_tensor(input_shape, device="cuda", dtype=torch.float32),
make_arg(input_shape),
make_arg(input_shape),
), RuntimeError, "Condition should be of DataType Bool"
def tensor_size_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
check_index_beyond_num_dims = (
{
"tensor_shape": [2 for _ in range(0, MAX_TENSOR_DIMS)],
"dim": MAX_TENSOR_DIMS,
},
RuntimeError,
"The dimension requested is beyond the bounds of the shape of the indexed tensor!",
)
check_relative_index_beyond_num_dims = (
{
"tensor_shape": [2 for _ in range(0, MAX_TENSOR_DIMS)],
"dim": -MAX_TENSOR_DIMS - 1,
},
RuntimeError,
"The dimension requested is beyond the bounds of the shape of the indexed tensor!",
)
error_checks = [
check_index_beyond_num_dims,
check_relative_index_beyond_num_dims,
]
for error_case, error_type, error_msg in error_checks:
yield SampleInput(
make_arg(error_case["tensor_shape"]), dim=error_case["dim"]
), error_type, error_msg
def vector_at_error_generator(
op: OpInfo, dtype: torch.dtype, requires_grad: bool = False, **kwargs
):
make_arg = partial(
make_tensor, device="cuda", dtype=dtype, requires_grad=requires_grad
)
check_index_beyond_num_dims = (
{
"tensor_shape": [2 for _ in range(0, MAX_TENSOR_DIMS)],
"index": MAX_TENSOR_DIMS,
},
RuntimeError,
"The index requested is beyond the bounds of the indexed vector!",
)
check_relative_index_beyond_num_dims = (
{
"tensor_shape": [2 for _ in range(0, MAX_TENSOR_DIMS)],
"index": -MAX_TENSOR_DIMS - 1,
},
RuntimeError,
"The index requested is beyond the bounds of the indexed vector!",
)
error_checks = [
check_index_beyond_num_dims,
check_relative_index_beyond_num_dims,
]
for error_case, error_type, error_msg in error_checks:
yield SampleInput(
make_arg(error_case["tensor_shape"]), index=error_case["index"]
), error_type, error_msg
|
Fuser-main
|
python_tests/pytest_input_generators.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
from pytest_utils import (
all_dtypes_except_reduced,
ArgumentType,
torch_to_jax_dtype_map,
torch_to_python_dtype_map,
)
from typing import Callable, Optional
import torch
import jax.numpy as jnp
from enum import Enum
from dataclasses import dataclass, field
class ReferenceType(Enum):
Pytorch = 0
Jax = 1
Numpy = 2
Python = 3
@dataclass
class ErrorSample:
kwargs: dict
ex_str: str
ex_type: Exception = RuntimeError
@dataclass
class Domain:
low: int
high: int
class SampleInput:
"""Represents sample inputs to a function."""
__slots__ = [
"args",
"kwargs",
]
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __repr__(self):
return f"[SampleInput args={self.args} kwargs={self.kwargs}]"
def jax(self):
def to_jax(t):
if isinstance(t, torch.Tensor):
return jnp.array(t.cpu().numpy())
if isinstance(t, torch.dtype):
return torch_to_jax_dtype_map[t]
return t
# Note: We assume arguments have flat hierarchy.
# TODO Add support for kwargs
args = map(to_jax, self.args)
return SampleInput(*args, *self.kwargs.values())
def python(self):
# Flatten Pytorch Tensors into Python Lists
def to_python(t):
if isinstance(t, torch.Tensor):
return list(t.flatten().cpu().numpy())
if isinstance(t, torch.dtype):
return torch_to_python_dtype_map[t]
return t
# Note: We assume arguments have flat hierarchy.
# TODO Add support for kwargs
args = map(to_python, self.args)
return SampleInput(*args, *self.kwargs.values())
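# Illustrative sketch (editor-added; `_example_sample_input_conversion` is a hypothetical
# helper): jax() and python() rebuild the sample with tensors converted for the reference
# framework; note that kwargs are flattened into positional arguments in both conversions.
def _example_sample_input_conversion():
    s = SampleInput(torch.ones(2, 2), dim=0)
    py = s.python()
    # The tensor becomes a flat Python list and the kwarg value becomes a positional arg.
    assert py.args[0] == [1.0, 1.0, 1.0, 1.0] and py.args[1] == 0
    return py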
@dataclass
class OpInfo:
"""Operator information and helper functions for acquiring it."""
op: Callable
name: str
# Set of valid inputs for this operation
domain: Domain = field(default_factory=lambda: Domain(None, None))
# Set of valid dtypes for this operation
dtypes: tuple = all_dtypes_except_reduced
# Generates valid inputs
sample_input_generator: Callable = None
# Generates error inputs
error_input_generator: Callable = None
# Function of FusionDefinition operations for valid inputs
fd_correctness_fn: Callable = None
# Function of FusionDefinition operations for error inputs
fd_error_input_fn: Callable = None
# Reference function for operation
reference: Callable = None
# Designate which framework defines the reference
reference_type: ReferenceType = ReferenceType.Pytorch
# Nvfuser requires reduction axes to be constant values.
# symbolic_parameter_list specifies whether an operation's parameters are symbolic.
# All keyword arguments are considered constant.
# If symbolic_parameter_list is None, then we assume all parameters to be symbolic.
symbolic_parameter_list: Optional[list[ArgumentType]] = None
|
Fuser-main
|
python_tests/pytest_core.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
import torch
import pytest
import numpy as np
from pytest_fusion_definitions import default_fd_fn, parse_inputs_fusion_definition
from pytest_framework import create_op_test
from pytest_core import ReferenceType, OpInfo, SampleInput
from pytest_opinfos import opinfos
from pytest_utils import ArgumentType, is_tensor
from typing import Callable
from nvfuser import FusionDefinition
def is_pre_volta():
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
def parse_args_fusion_execution(opinfo: OpInfo, *args):
if len(args) == 0:
return []
if opinfo.symbolic_parameter_list is None:
opinfo.symbolic_parameter_list = [ArgumentType.Symbolic] * len(args)
assert len(opinfo.symbolic_parameter_list) == len(args)
result = []
for arg_type, a in zip(opinfo.symbolic_parameter_list, args):
if arg_type == ArgumentType.Symbolic:
if isinstance(a, list) and all(map(is_tensor, a)):
result.extend(a)
else:
result.append(a)
return result
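# Illustrative sketch (editor-added; the opinfo below is a hypothetical stand-in): only
# arguments marked ArgumentType.Symbolic are forwarded to fd.execute(); constant
# arguments are baked into the fusion definition instead.
def _example_parse_args_fusion_execution():
    fake_op = OpInfo(
        op=None,
        name="fake_op",
        symbolic_parameter_list=[ArgumentType.Symbolic, ArgumentType.Constant],
    )
    t = torch.ones(3)
    result = parse_args_fusion_execution(fake_op, t, 2)
    assert len(result) == 1 and result[0] is t
    return result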
# ****** Check an Operation's Results are Correct ******
def torch_correctness_test_fn(fd_fn: Callable, nvf_op: OpInfo, sample: SampleInput):
with FusionDefinition() as fd:
fd_fn(fd, nvf_op, *sample.args, **sample.kwargs)
nvfuser_result = fd.execute(parse_args_fusion_execution(nvf_op, *sample.args))
torch_result = nvf_op.reference(*sample.args, **sample.kwargs)
if isinstance(nvfuser_result, Exception):
raise nvfuser_result
if len(nvfuser_result) == 1:
nvfuser_result = nvfuser_result[0]
# TODO If dtype is fp16 or bf16, skip dtype check because nvfuser promotes to fp32 but does not return original dtype.
# TODO Add specific dtype tolerances
torch.testing.assert_close(
nvfuser_result, torch_result, equal_nan=True, atol=1e-3, rtol=0
)
def jax_correctness_test_fn(fd_fn: Callable, nvf_op: OpInfo, sample: SampleInput):
with FusionDefinition() as fd:
fd_fn(fd, nvf_op, *sample.args, **sample.kwargs)
nvfuser_result = fd.execute(parse_args_fusion_execution(nvf_op, *sample.args))
jax_sample = sample.jax()
jax_result = nvf_op.reference(*jax_sample.args, **jax_sample.kwargs)
# NOTE: this strange unpacking is to handle NumPy's and JAX's sometimes odd
# number vs. array representation. In particular, NumPy can mimic
# Python numbers, but `asarray` doesn't understand this mimicry
np_array = np.array(jax_result)
if np_array.shape == ():
jax_result = torch.tensor(np_array.item(), device="cuda")
else:
jax_result = torch.asarray(np_array, device="cuda")
if len(nvfuser_result) == 1:
nvfuser_result = nvfuser_result[0]
# NOTE: dtype is not checked because jax will translate int64, float64, and complex128 to int32, float32 and complex64
torch.testing.assert_close(
nvfuser_result, jax_result, equal_nan=True, atol=1e-3, rtol=0, check_dtype=False
)
def python_correctness_test_fn(fd_fn: Callable, nvf_op: OpInfo, sample: SampleInput):
# python reference function does not accept keyword arguments
assert len(sample.kwargs) == 0
with FusionDefinition() as fd:
fd_fn(fd, nvf_op, *sample.args)
nvfuser_result = fd.execute(parse_args_fusion_execution(nvf_op, *sample.args))
# expect only single result from function
assert len(nvfuser_result) == 1
# convert tensor arguments into flat, python lists
python_sample = sample.python()
# apply reference to python lists
python_result = map(nvf_op.reference, *python_sample.args)
# create pytorch tensor
np_array = np.array(list(python_result))
if np_array.shape == ():
python_result = torch.tensor(
np_array.item(), dtype=nvfuser_result[0].dtype, device="cuda"
)
else:
python_result = torch.asarray(
np_array, dtype=nvfuser_result[0].dtype, device="cuda"
)
# reshape flat output tensor into expected shape
torch.testing.assert_close(
nvfuser_result[0],
python_result.reshape(nvfuser_result[0].shape),
equal_nan=True,
atol=1e-3,
rtol=0,
)
def correctness_test_fn(
reference_type: ReferenceType,
nvf_op: OpInfo,
sample: SampleInput,
):
_fd_fn = (
nvf_op.fd_correctness_fn
if nvf_op.fd_correctness_fn is not None
else default_fd_fn
)
if reference_type == ReferenceType.Pytorch:
return torch_correctness_test_fn(_fd_fn, nvf_op, sample)
elif reference_type == ReferenceType.Jax:
return jax_correctness_test_fn(_fd_fn, nvf_op, sample)
elif reference_type == ReferenceType.Python:
return python_correctness_test_fn(_fd_fn, nvf_op, sample)
else:
return None
@create_op_test(tuple(op for op in opinfos if op.reference is not None))
def test_correctness(op: OpInfo, dtype: torch.dtype):
for sample in op.sample_input_generator(op, dtype):
result = correctness_test_fn(op.reference_type, op, sample)
if result is not None:
return result
# ****** Check a Definition Operation is not added to a Schedule ******
def definition_op_in_schedule_error_test_fn(opinfo: OpInfo, sample: SampleInput):
class SchedError(FusionDefinition):
def definition(self):
# Create default fusion definition
nvf_inputs = parse_inputs_fusion_definition(self, opinfo, *sample.args)
result = opinfo.op(fd)(*nvf_inputs, **sample.kwargs)
if isinstance(result, tuple):
for a in result:
self.add_output(a)
else:
self.add_output(result)
def schedule(self):
# Attempt to add fusion operation during scheduling
nvf_inputs = parse_inputs_fusion_definition(self, opinfo, *sample.args)
opinfo.op(self)(*nvf_inputs, **sample.kwargs)
fd = SchedError()
nvfuser_result = fd.execute(parse_args_fusion_execution(opinfo, *sample.args))
# TODO Maybe only test a single dtype
@create_op_test(tuple(op for op in opinfos if op.sample_input_generator is not None))
def test_definition_op_in_schedule_error(op: OpInfo, dtype: torch.dtype):
for sample in op.sample_input_generator(op, dtype):
with pytest.raises(
RuntimeError, match=r"Attempting to add to a completed definition"
):
definition_op_in_schedule_error_test_fn(op, sample)
# ****** Check that an Operation's API Gives Appropriate Input Errors ******
def errors_test_fn(
nvf_op: OpInfo,
sample: SampleInput,
):
_fd_fn = (
nvf_op.fd_error_input_fn
if nvf_op.fd_error_input_fn is not None
else default_fd_fn
)
with FusionDefinition() as fd:
_fd_fn(fd, nvf_op, *sample.args, **sample.kwargs)
fd.execute(parse_args_fusion_execution(nvf_op, *sample.args))
# A pair of parentheses () represents a capture group in regex.
# Escape parentheses in the regex string so they match raw characters.
def _regex_escape_parenthesis(a: str) -> str:
b = a.replace(r"(", r"\(")
return b.replace(r")", r"\)")
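# Illustrative sketch (editor-added; `_example_regex_escape` is a hypothetical helper):
# the escaping keeps pytest.raises(match=...) from interpreting parentheses in the
# expected error text as regex capture groups.
def _example_regex_escape():
    escaped = _regex_escape_parenthesis("define_tensor(): incompatible function arguments")
    assert escaped == r"define_tensor\(\): incompatible function arguments"
    return escaped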
@create_op_test(tuple(op for op in opinfos if op.error_input_generator is not None))
def test_errors(op: OpInfo, dtype: torch.dtype):
for sample, exception_type, exception_regex in op.error_input_generator(op, dtype):
with pytest.raises(
exception_type, match=_regex_escape_parenthesis(exception_regex)
):
errors_test_fn(op, sample)
|
Fuser-main
|
python_tests/pytest_ops.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# Owner(s): ["module: nvfuser"]
import inspect
import torch
from typing import Callable
from pytest_utils import map_dtype_to_str
import pytest
def _instantiate_opinfo_test_template(
template: Callable, *, opinfo, dtype: torch.dtype
) -> Callable:
"""Instantiates a test template for an operator."""
def test():
# Ref: https://github.com/pytorch/pytorch/blob/aa8ea1d787a9d21b064b664c5344376265feea6c/torch/testing/_internal/common_utils.py#L2251-L2263
# > CUDA device side error will cause subsequent test cases to fail.
# > stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
if torch.cuda.is_initialized():
try:
torch.cuda.synchronize()
except RuntimeError as rte:
pytest.exit(
"TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure"
)
return template(opinfo, dtype)
test.__name__ = "_".join((template.__name__, opinfo.name, map_dtype_to_str[dtype]))
test.__module__ = template.__module__
return test
class create_op_test:
def __init__(self, opinfos, *, scope=None):
self.opinfos = opinfos
# Acquires the caller's global scope
if scope is None:
previous_frame = inspect.currentframe().f_back
scope = previous_frame.f_globals
self.scope = scope
def __call__(self, test_template):
# NOTE Unlike a typical decorator, this __call__ does not return a function, because it may
# (and typically does) instantiate multiple functions from the template it consumes.
# Since Python doesn't natively support one-to-many function decorators, the produced
# functions are directly assigned to the requested scope (the caller's global scope by default)
for opinfo in self.opinfos:
for dtype in sorted(opinfo.dtypes, key=lambda t: repr(t)):
test = _instantiate_opinfo_test_template(
test_template,
opinfo=opinfo,
dtype=dtype,
)
# Adds the instantiated test to the requested scope
self.scope[test.__name__] = test
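# Illustrative sketch (editor-added; `_FakeOpInfo` and `my_template` are hypothetical
# names): applying the decorator expands a template into one concrete test per
# (opinfo, dtype) pair and assigns each instantiated test into the requested scope.
def _example_create_op_test_expansion():
    class _FakeOpInfo:
        name = "fake_op"
        dtypes = (torch.float32,)
    scope = {}
    @create_op_test((_FakeOpInfo(),), scope=scope)
    def my_template(opinfo, dtype):
        return (opinfo.name, dtype)
    assert "my_template_fake_op_float32" in scope
    return scope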
|
Fuser-main
|
python_tests/pytest_framework.py
|
import unittest
import pytest
import torch
import torch.nn as nn
from nvfuser.contrib.nn.normalization import InstanceNorm3dNVFuser
def assert_close(a: torch.Tensor, b: torch.Tensor):
"""Given two Tensors, compare with a reasonable precision.
If the dtypes mismatch, use a custom rule to cast one or the other
"""
# increasing order of precision
precedence = [torch.bfloat16, torch.float16, torch.float32, torch.float64]
# demote inputs so we use the more permissive test
if precedence.index(a.dtype) < precedence.index(b.dtype):
b = b.type(a.dtype)
else:
a = a.type(b.dtype)
if a.dtype in [torch.float16, torch.bfloat16]:
# torch.nn.InstanceNorm3d fails rtol=6, atol=4e-2 for half precision
torch.testing.assert_close(a, b, rtol=10, atol=5e-2)
else: # use default tolerance
torch.testing.assert_close(a, b)
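# Illustrative sketch (editor-added; `_example_assert_close_demotion` is a hypothetical
# helper): the precedence list demotes the higher-precision input to the lower-precision
# dtype before comparing, so a float64 reference is cast down to match a float16 result
# and the looser half-precision tolerances apply.
def _example_assert_close_demotion():
    a = torch.ones(4, dtype=torch.float16)
    b = torch.ones(4, dtype=torch.float64) + 1e-3  # small drift tolerated at half precision
    assert_close(a, b)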
dtypes = {
"float32": torch.float,
"float64": torch.double,
"float16": torch.half,
}
if torch.cuda.get_device_capability() >= (8, 0):
dtypes["bfloat16"] = torch.bfloat16
@pytest.mark.parametrize(
"batch_size,channel_size,spatial_size,compare",
[
(5, 7, 3, True),
# check size=1 dimensions
(1, 7, 3, True), # NOTE: FAILS!
(5, 1, 3, True),
# (5, 7, 1, True), # eager instance norm needs more than one spatial element
(1, 1, 3, True),
# Don't check output for larger inputs, but check that they run
# (16, 1, 64, False),
# (16, 2, 64, False),
# (1, 16, 64, False),
# (2, 16, 64, False),
# (16, 16, 64, False),
],
)
@pytest.mark.parametrize("memory_format", ["contiguous", "channels_last", "strided"])
@pytest.mark.parametrize("affine", [False, True])
@pytest.mark.parametrize("track_running_stats", [False, True])
@pytest.mark.parametrize("training", [False, True])
@pytest.mark.parametrize("dtype", dtypes.keys())
def test_match(
dtype,
training,
track_running_stats,
memory_format,
affine,
batch_size,
channel_size,
spatial_size,
compare,
):
dtype = dtypes[dtype]
m = InstanceNorm3dNVFuser(
channel_size,
affine=affine,
track_running_stats=track_running_stats,
device="cuda",
dtype=dtype,
)
reference_m = torch.nn.InstanceNorm3d(
channel_size,
affine=affine,
track_running_stats=track_running_stats,
device="cuda",
dtype=torch.float64,
)
torch.manual_seed(42)
for i in range(2): # exercise JIT + caching
inp = torch.rand(
(
batch_size,
channel_size,
spatial_size,
spatial_size,
2 * spatial_size if memory_format == "strided" else spatial_size,
),
device="cuda",
requires_grad=True,
dtype=dtype,
)
if memory_format == "channels_last":
inp = inp.to(memory_format=torch.channels_last_3d)
elif memory_format == "strided":
inp = inp[..., ::2]
inp = inp.detach()
inp.requires_grad = True
inp2 = inp.clone().type(torch.float64).detach()
inp2.requires_grad = True
if training:
m.train()
reference_m.train()
else:
m.eval()
reference_m.eval()
out = m(inp)
out2 = reference_m(inp2)
if compare:
assert_close(out, out2)
if m.running_mean is None:
assert reference_m.running_mean is None
assert m.running_var is None
assert reference_m.running_var is None
else:
if compare:
assert_close(m.running_mean, reference_m.running_mean)
if not training:
return
grad_out = torch.randn_like(inp)
out.backward(grad_out)
out2.backward(grad_out)
if compare:
assert_close(inp.grad, inp2.grad)
# compare weight gradients
if m.weight is not None:
assert_close(m.weight.grad, reference_m.weight.grad)
if m.bias is not None:
assert_close(m.bias.grad, reference_m.bias.grad)
@unittest.skipIf(torch.cuda.device_count() < 2, "more than 1 GPU required")
def test_multigpu():
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.norm = InstanceNorm3dNVFuser(4)
def forward(self, x):
x = self.norm(x)
x = torch.sum(x, dim=(1, 2, 3, 4))
return x
device = torch.device("cuda:1")
model = Model().to(device)
x = torch.randn(2, 4, 128, 128, 128, device=device, requires_grad=True)
y = torch.randn(2, device=device)
pred = model(x)
loss = nn.functional.mse_loss(pred, y.float())
loss.backward()
if __name__ == "__main__":
pytest.main(["-v", __file__])
|
Fuser-main
|
python_tests/test_normalization.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
import torch
import nvfuser_extension # noqa: F401
t = torch.randn((5, 5), device="cuda")
expected = torch.sinh(t)
output = torch.ops.myop.sinh_nvfuser(t)
print("Expected:", expected)
print("Output:", output)
assert torch.allclose(output, expected)
print("They match!")
|
Fuser-main
|
examples/sinh_extension/test.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import os
nvfuser_csrc_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "..", "csrc"
)
setup(
name="nvfuser_extension",
ext_modules=[
CUDAExtension(
name="nvfuser_extension",
pkg="nvfuser_extension",
include_dirs=[nvfuser_csrc_dir],
libraries=["nvfuser_codegen"],
sources=["main.cpp"],
)
],
cmdclass={"build_ext": BuildExtension},
)
|
Fuser-main
|
examples/sinh_extension/setup.py
|
from .patch_nvfuser import patch_installation
__all__ = ["patch_installation"]
|
Fuser-main
|
nvfuser_python_utils/__init__.py
|
import os
def patch_pytorch_nvfuser_binaries(torch_lib):
nvfuser_lib = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "nvfuser", "lib"
)
import shutil
for f_name in ["libnvfuser_codegen.so"]:
shutil.copyfile(
os.path.join(nvfuser_lib, f_name),
os.path.join(torch_lib, f_name),
)
def remove_nvfuser_python_module(installed_nvfuser_dir):
# only remove if installed nvfuser is in a different path
if installed_nvfuser_dir != os.path.join(
os.path.dirname(os.path.dirname(__file__)), "nvfuser"
):
import shutil
shutil.rmtree(installed_nvfuser_dir)
def patch_installation():
from importlib import util
torch_dir = os.path.dirname(util.find_spec("torch").origin)
torch_lib = os.path.join(torch_dir, "lib")
installed_nvfuser_dir = os.path.join(os.path.dirname(torch_dir), "nvfuser")
patch_pytorch_nvfuser_binaries(torch_lib)
if os.path.exists(installed_nvfuser_dir):
remove_nvfuser_python_module(installed_nvfuser_dir)
if __name__ == "__main__":
patch_installation()
|
Fuser-main
|
nvfuser_python_utils/patch_nvfuser.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
import torch
from typing import Type, Union
from ._C import DataType
NumberTypeType = Union[Type[bool], Type[int], Type[float], Type[complex]]
_torch_dtype_to_nvfuser_dtype_map = {
torch.cdouble: DataType.ComplexDouble,
torch.cfloat: DataType.ComplexFloat,
torch.double: DataType.Double,
torch.float: DataType.Float,
torch.half: DataType.Half,
torch.bfloat16: DataType.BFloat16,
torch.long: DataType.Int,
torch.int: DataType.Int32,
torch.bool: DataType.Bool,
# Python scalars
complex: DataType.ComplexDouble,
float: DataType.Double,
int: DataType.Int,
bool: DataType.Bool,
}
def python_scalar_to_nvfuser_dtype(a: Union[int, float, complex, bool]):
return _torch_dtype_to_nvfuser_dtype_map[type(a)]
def torch_dtype_to_nvfuser_dtype(dtype: Union[torch.dtype, NumberTypeType]):
"""
Translates from torch.dtype to nvFuser's DataType enum
"""
return _torch_dtype_to_nvfuser_dtype_map[dtype]
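# Illustrative sketch (added for clarity, not part of the original file): how
# the two mapping helpers above are typically used. Assumes a working nvFuser
# build so that DataType resolves.
def _dtype_mapping_examples():
    assert torch_dtype_to_nvfuser_dtype(torch.float32) == DataType.Float  # torch.float -> Float
    assert torch_dtype_to_nvfuser_dtype(torch.int64) == DataType.Int      # torch.long -> Int
    assert python_scalar_to_nvfuser_dtype(1.0) == DataType.Double         # Python float -> Double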
def patch_codegen_so():
"""
Replace libnvfuser_codegen.so installed along with torch
"""
import torch
import shutil
import os
dst_dir = os.path.join(os.path.dirname(torch.__file__), "lib")
src_dir = os.path.join(os.path.dirname(__file__), "lib")
shutil.copyfile(
os.path.join(src_dir, "libnvfuser_codegen.so"),
os.path.join(dst_dir, "libnvfuser_codegen.so"),
)
|
Fuser-main
|
nvfuser/pytorch_utils.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import sys
from typing import Optional, Union # noqa: F401
import torch
# This is needed when libnvfuser.so is patched and doesn't have the pytorch library location available.
pytorch_lib_dir = os.path.join(os.path.dirname(torch.__file__), "lib")
if pytorch_lib_dir not in sys.path:
sys.path.append(pytorch_lib_dir)
# We import _C explicitly here so that an import failure produces a clear error
# message, rather than a confusing complaint later that `_C` is not defined for
# `_C._FusionDefinition`.
try:
from . import _C
except ImportError as err:
logging.getLogger("nvfuser").error(
"""==== importing nvfuser failed ====
try run `patch-nvfuser` if https://github.com/NVIDIA/Fuser is installed via pip package"""
)
raise err
from ._C import * # noqa: F401,F403
from . import contrib # noqa: F401
logger = logging.getLogger("nvfuser")
class FusionDefinition(_C._FusionDefinition):
def __enter__(self):
return self._setup_definition()
def __exit__(self, type, value, traceback):
self._finalize_definition()
def definition(self):
raise NotImplementedError("definition() should be implemented by child class!")
def schedule(self):
raise NotImplementedError("schedule() should be implemented by child class!")
def execute(
self,
inputs,
*,
device=None,
override_user_schedule=False,
capture_debug_output=False,
):
"""
Executes an nvFuser set of kernels for a given Fusion
The FusionDefinition will be executed on a single CUDA device.
Typically, which device to run on is determined by the devices where
the input tensors reside. However, if the Fusion is defined such that
none of the inputs are tensors, we are not able to infer a device from
the inputs. For example, the following FusionDefinition will be unable
to unambiguously infer the device of its output:
with FusionDefinition() as fd:
tv1 = fd.ops.full([5])
fd.add_output(tv1)
In that case, we default to selecting the first CUDA
device, i.e. `torch.device("cuda:0")`. This method enables selecting an
alternative preferred device.
Args:
inputs (List[Union[Tensor, Scalar]]): A list of inputs to fusion.
Kwargs:
override_user_schedule (bool): For a user defined schedule,
override with auto-generated schedule (default: False)
device (Optional[Union[int, str, torch.device]]): This is a hint to run
the Fusion on the given CUDA device. This is not typically
necessary, as the device is usually inferred from the locations
of input tensors. However, for some fusion definitions, no
tensors will be input (for example when all tensors are
generated with `full` or `uniform` ops). In these cases, we
must either tell NVFuser where to run the resulting kernel, or
                let it default to 0. Note that passing this option while also
                providing input tensors that lie on another device is an error.
capture_debug_output (bool): Whether to capture any printed
debugging information as a string. If True, the string can be
retrieved after execution using :meth:`get_debug_output`. If False,
then that method will return None when called.
Returns:
List[Tensor]
"""
func_based_def = False
if device is not None:
if not isinstance(device, torch.device):
device = torch.device(device)
assert (
device.type == "cuda"
), "If device argument is passed it must be a CUDA device"
device = device.index
# if definition is not defined by a context manager, try a child class
if self.id() is None:
self._setup_definition()
self.definition()
self._finalize_definition()
func_based_def = True
# If schedule is defined by child class, make a schedule for inputs
if func_based_def and (super(type(self), self).schedule != self.schedule):
self._setup_schedule(inputs)
self.schedule()
self._finalize_schedule(inputs)
result = None
try:
result = self._execute(
inputs,
override_user_schedule,
device=device,
capture_debug_output=capture_debug_output,
)
except Exception as err:
msg = (
f"An error occurred while executing nvFuser FusionDefinition {self.id()}.\n"
"If you believe this is a bug or need assistance, please file an issue at "
"https://github.com/NVIDIA/Fuser/issues/new\n"
)
msg += (
f"Here's a script to reproduce the error:\n"
"```\n"
"import torch\n"
"from nvfuser import FusionDefinition, DataType\n"
f"{self}"
"with FusionDefinition() as fd:\n"
f" nvfuser_fusion_id{self.id()}(fd)\n"
"\n"
"inputs = [\n"
)
for i in inputs:
if isinstance(i, torch.Tensor):
# max linear index determines number of elements to generate
sz = 1
for szi, stri in zip(i.size(), i.stride()):
if szi == 0:
sz = 0
break
sz += (szi - 1) * stri
if i.dtype.is_floating_point:
msg += (
f" torch.randn(({sz},), dtype={i.dtype}, device='{i.device}')"
f".as_strided({tuple(i.size())}, {tuple(i.stride())}),\n"
)
else:
msg += (
f" torch.randint(0, 10, ({sz},), dtype={i.dtype}, device='{i.device}')"
f".as_strided({tuple(i.size())}, {tuple(i.stride())}),\n"
)
else:
msg += f" {i},\n"
msg += "]"
msg += "\nfd.execute(inputs)\n"
msg += "```\n"
logger.exception(msg)
raise
return result
def debug_output(self):
"""
Retrieve string of captured debug information from the previous execution.
Note that `capture_debug_output=True` must be passed to `execute()` in
order to enable capturing this output. Otherwise, this method will
return `None`.
Returns:
Optional[String] : the captured debug output for the previous call
to execute(). If the `capture_debug_output` argument to that call
was False, returns None. Otherwise, returns the output as a string.
"""
return self._debug_output()
def from_pytorch(self, tensor, static_sizes=False):
"""
Defines an nvfuser input tensor from a pytorch tensor and defaults
        to defining a symbolic tensor for dynamic shape usage.
Args:
tensor (torch.Tensor): Input tensor to nvFuser
static_sizes (bool) : Interprets sizes as static rather than
as symbolic for dynamic shape usage
Returns:
nvfuser.Tensor
"""
try:
from .pytorch_utils import torch_dtype_to_nvfuser_dtype
except ImportError:
raise ImportError("Unable to import pytorch_utils!")
if not tensor.is_cuda:
raise ValueError("Tensor should be on a cuda device!")
return self.define_tensor(
sizes=tensor.size(),
strides=tensor.stride(),
dtype=torch_dtype_to_nvfuser_dtype(tensor.dtype),
static_sizes=static_sizes,
)
def fusion_ir(self):
"""
        Returns the unscheduled Fusion IR for the given definition that corresponds to all scheduled inputs.
Returns:
String
"""
return self._fusion_ir()
def last_cuda_code(self, intrinsic_code=False, **kwargs):
"""
Returns the Cuda Code for the last executed set of inputs
Args:
intrinsic_code (Bool): Include all the additional code required to run kernel(s). (default: False)
Kwargs:
override_user_schedule (Bool): For a user defined schedule, override with auto-generated schedule (default: False)
Returns:
String
"""
override_user_schedule = kwargs.pop("override_user_schedule", False)
return self._last_cuda_code(intrinsic_code, override_user_schedule)
def cuda_code_for(self, inputs, intrinsic_code=False, **kwargs):
"""
Returns the Cuda Code for the given inputs
Args:
inputs (List[Union[Tensor, Scalar]]): A list of inputs to fusion.
intrinsic_code (Bool): Include all the additional code required to run kernel(s). (default: False)
Kwargs:
override_user_schedule (Bool): For a user defined schedule, override with auto-generated schedule (default: False)
Returns:
String
"""
override_user_schedule = kwargs.pop("override_user_schedule", False)
return self._cuda_code_for(inputs, intrinsic_code, override_user_schedule)
def last_scheduled_fusion_ir(self, tensor_transforms=False, **kwargs):
"""
Returns the Scheduled Fusion IR for the last executed set of inputs
Args:
tensor_transforms (Bool): Include tensor transforms that were applied through scheduling. (default: False)
Kwargs:
override_user_schedule (Bool): For a user defined schedule, override with auto-generated schedule (default: False)
Returns:
String
"""
override_user_schedule = kwargs.pop("override_user_schedule", False)
return self._last_scheduled_fusion_ir(tensor_transforms, override_user_schedule)
def scheduled_fusion_ir_for(self, inputs, tensor_transforms=False, **kwargs):
"""
        Returns the Scheduled Fusion IR for the given inputs
Args:
inputs (List[Union[Tensor, Scalar]]): A list of inputs to fusion.
tensor_transforms (Bool): Include tensor transforms that were applied through scheduling. (default: False)
Kwargs:
override_user_schedule (Bool): For a user defined schedule, override with auto-generated schedule (default: False)
Returns:
String
"""
override_user_schedule = kwargs.pop("override_user_schedule", False)
return self._scheduled_fusion_ir_for(
inputs, tensor_transforms, override_user_schedule
)
from .nvfuser_version import __version__
def version():
r"""returns nvfuser version in format of a string 'm.n.p+git[7d-sha]'.
We strip the git[7d-sha] and convert the string to
`nvfuser_version.Version` for comparison. e.g. you can use it as:
import nvfuser
print(nvfuser.version()) # 0.0.1+git21df524
    nvfuser.version() == '0.0.1'  # True
    nvfuser.version() > '0.0.0'  # True
from nvfuser_version import Version
nvfuser.version() < Version('1.0.0') # True
"""
return __version__
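# Illustrative sketch (added for clarity, not part of the original module): a
# minimal context-manager FusionDefinition of the kind described in execute()
# above. Assumes a CUDA device and a working nvFuser build; the tensor size and
# scalar value are arbitrary examples.
def _example_fusion():
    with FusionDefinition() as fd:
        t0 = fd.define_tensor(shape=[-1], contiguity=[True], dtype=DataType.Float)
        s0 = fd.define_scalar(2.0)
        t1 = fd.ops.mul(t0, s0)
        fd.add_output(t1)
    inputs = [torch.ones(8, device="cuda")]
    (out,) = fd.execute(inputs)  # runs on the device where the inputs reside
    return out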
|
Fuser-main
|
nvfuser/__init__.py
|
import os
__all__ = [
"cmake_prefix_path",
]
cmake_prefix_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"nvfuser",
"share",
"cmake",
"nvfuser",
)
|
Fuser-main
|
nvfuser/utils.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023-present NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
from typing import Any
from .version import _version_str
__all__ = ["NvfuserVersion", "Version"]
class _LazyImport:
"""Wraps around classes lazy imported from packaging.version
Output of the function v in following snippets are identical:
from packaging.version import Version
def v():
return Version('1.2.3')
and
Version = _LazyImport('Version')
def v():
return Version('1.2.3')
The difference here is that in later example imports
do not happen until v is called
"""
def __init__(self, cls_name: str) -> None:
self._cls_name = cls_name
def get_cls(self):
try:
import packaging.version # type: ignore[import]
except ImportError:
# If packaging isn't installed, try and use the vendored copy
# in pkg_resources
from pkg_resources import packaging # type: ignore[attr-defined, no-redef]
return getattr(packaging.version, self._cls_name)
def __call__(self, *args, **kwargs):
return self.get_cls()(*args, **kwargs)
def __instancecheck__(self, obj):
return isinstance(obj, self.get_cls())
Version = _LazyImport("Version")
class NvfuserVersion(str):
@classmethod
def _convert_to_version(cls, ver: Any) -> Version:
if isinstance(ver, str):
return Version(ver.split("+")[0])
elif isinstance(ver, Version.get_cls()):
return ver
else:
raise ValueError("can't convert {} to Version".format(ver))
def _cmp_version(self, other: Any, method: str) -> Version:
return getattr(NvfuserVersion._convert_to_version(self), method)(
NvfuserVersion._convert_to_version(other)
)
for cmp_method in ["__gt__", "__lt__", "__eq__", "__ge__", "__le__"]:
setattr(
NvfuserVersion,
cmp_method,
lambda x, y, method=cmp_method: x._cmp_version(y, method),
)
__version__ = NvfuserVersion(_version_str)
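# Illustrative sketch (added for clarity, not in the original file): version
# strings compare by their semantic-version prefix; _convert_to_version strips
# the "+git..." suffix before comparing.
def _version_comparison_examples():
    v = NvfuserVersion("0.0.1+git21df524")
    assert v == "0.0.1"
    assert v > "0.0.0"
    assert v < Version("1.0.0")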
|
Fuser-main
|
nvfuser/nvfuser_version.py
|
from . import nn
__all__ = [
"nn",
]
|
Fuser-main
|
nvfuser/contrib/__init__.py
|
from .normalization import InstanceNorm1dNVFuser
from .normalization import InstanceNorm2dNVFuser
from .normalization import InstanceNorm3dNVFuser
__all__ = [
"InstanceNorm1dNVFuser",
"InstanceNorm2dNVFuser",
"InstanceNorm3dNVFuser",
]
|
Fuser-main
|
nvfuser/contrib/nn/__init__.py
|
import enum
from typing import Any, Dict, List, Optional, Tuple
import torch
import nvfuser
from nvfuser.pytorch_utils import torch_dtype_to_nvfuser_dtype
__all__ = [
"InstanceNorm1dNVFuser",
"InstanceNorm2dNVFuser",
"InstanceNorm3dNVFuser",
]
NamedAxis = enum.Enum("NamedAxis", ["BATCH", "CHANNEL"])
def partially_contig_tensor(
fd: "nvfuser.FusionDefinition",
x: torch.Tensor,
) -> "nvfuser.Tensor":
return fd.define_tensor(
shape=[-1] * x.ndim,
contiguity=nvfuser.compute_contiguity(x.size(), x.stride()),
dtype=torch_dtype_to_nvfuser_dtype(x.dtype),
)
def norm_fusion_forward(
fd: "nvfuser.FusionDefinition",
inputs: List[torch.Tensor],
x: "nvfuser.Tensor",
weight: Optional["nvfuser.Tensor"],
bias: Optional["nvfuser.Tensor"],
running_mean: Optional["nvfuser.Tensor"],
running_var: Optional["nvfuser.Tensor"],
eps: "nvfuser.Scalar",
use_input_stats: bool,
momentum: "nvfuser.Scalar",
channels_last: bool,
x_datatype: "nvfuser.DataType",
unbiased: bool = False,
*,
stat_axes: List[NamedAxis],
) -> Tuple["nvfuser.Tensor", "nvfuser.Tensor", "nvfuser.Tensor"]:
"""Modify FusionDefinition to add a generic normalization layer (forward).
This can be used to construct a BatchNorm, GroupNorm, InstanceNorm, or
LayerNorm network by indicating different sets of axes to preserve.
BatchNorm: `stat_axes = [NamedAxis.CHANNEL]`
LayerNorm: `stat_axes = [NamedAxis.BATCH]`
InstanceNorm: `stat_axes = [NamedAxis.BATCH, NamedAxis.CHANNEL]`
Args:
fd: An initialized FusionDefinition.
inputs: A list of :class:'torch.Tensor' inputs to the
`FusionDefinition` `fd`.
x: An input NVFuser tensor.
weight: If given, multiply normed output by this `Tensor`. It should be
one-dimensional if `NamedAxis.CHANNEL` is in `stat_axes`, and
zero-dimensional otherwise. It will be broadcast along all other
dimensions.
bias: If given, add this `Tensor` to normed output. It should be
one-dimensional if `NamedAxis.CHANNEL` is in `stat_axes`, and
zero-dimensional otherwise. It will be broadcast along all other
dimensions.
running_mean: If given, a running mean estimate that will be modified
in place.
running_var: If given, a running variance estimate that will be
modified in place.
eps: Amount to regularize the square root needed to convert variance to
standard deviation.
use_input_stats: Whether to compute the stats of this batch or to
_only_ use the provided running_mean and running_var.
momentum: Momentum for exponentially weighted moving average of running
stats.
channels_last: Whether channels are in position -1 (`True`) or 1
(`False`).
x_datatype: :class:'DataType' of input :class:'Tensor' `x`
unbiased: Whether to use unbiased variance for computing current batch
statistics. Note that unbiased estimates are always used for
running variance updates, regardless of this argument's value.
stat_axes: A list of `NamedAxis` objects indicating a combination of
axes with which to index the computed statistics. This can be used
to implement multiple types of normalization layers, since most of
those differ only in which axes are reduced over.
Returns:
The normalized output, as well as mean and 1/std. Note that
`fd.add_output` is _not_ called by this function.
"""
assert not (
(running_var is None) ^ (running_mean is None)
), "Iff running mean or var is given, the other should be"
# dyn_shape holds Scalars describing the size of the input x
dyn_shape = fd.ops.tensor_sizes(x)
num_dims = len(dyn_shape)
batch_dim = 0
batch_size = dyn_shape[batch_dim]
channel_dim = num_dims - 1 if channels_last else 1
num_channels = dyn_shape[channel_dim]
# Running stats will be kept possibly for channel but never by instance, so
# we will reduce along batch_dim before updating running stats.
# These are used to broadcast in spatial dims
is_spatial_dim = [True] * num_dims
is_spatial_or_batch_dim = [True] * num_dims
num_stats = fd.define_scalar(1)
if NamedAxis.BATCH in stat_axes:
is_spatial_dim[batch_dim] = False
num_stats = fd.ops.mul(num_stats, batch_size)
if NamedAxis.CHANNEL in stat_axes:
is_spatial_dim[channel_dim] = False
is_spatial_or_batch_dim[channel_dim] = False
num_stats = fd.ops.mul(num_stats, num_channels)
x_reduction_axes = [ax for ax, flag in enumerate(is_spatial_dim) if flag]
num_features = fd.define_scalar(1)
for ax in x_reduction_axes:
num_features = fd.ops.mul(num_features, dyn_shape[ax])
if use_input_stats or running_mean is None:
# In NVFuser Python we pass correction=1 to request unbiased variance calculation
x_var, x_mean = fd.ops.var_mean(x, x_reduction_axes, int(unbiased))
if running_mean is not None:
one = fd.define_scalar(1.0)
rev_momentum = fd.ops.sub(one, momentum)
# do running mean with momentum
current_mean_hat = fd.ops.mul(x_mean, momentum)
mean_hat = fd.ops.mul(running_mean, rev_momentum)
new_mean_hat = fd.ops.add(mean_hat, current_mean_hat)
# If computing stats for each instance, we don't want to keep those
# for our running mean calculation, so we sum them here
new_mean_sum = (
fd.ops.sum(new_mean_hat, [0])
if NamedAxis.BATCH in stat_axes
else new_mean_hat
)
rev_batch_size = fd.ops.reciprocal(batch_size)
new_mean_channels_only = fd.ops.mul(new_mean_sum, rev_batch_size)
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
new_mean_channels_only = fd.ops.cast(new_mean_channels_only, x_datatype)
fd.add_output(new_mean_channels_only, alias_input=running_mean)
# running var calculation
x_var_unbiased = x_var
if not unbiased:
# multiply by correction to go from biased to unbiased estimate
b2ub = fd.ops.div(
num_features, fd.ops.sub(num_features, fd.define_scalar(1))
)
x_var_unbiased = fd.ops.mul(x_var, b2ub)
current_var_hat = fd.ops.mul(x_var_unbiased, momentum)
var_hat = fd.ops.mul(running_var, rev_momentum)
new_var_hat = fd.ops.add(var_hat, current_var_hat)
# See above about reducing over batch dim for running stats
new_var_sum = (
fd.ops.sum(new_var_hat, [0])
if NamedAxis.BATCH in stat_axes
else new_var_hat
)
new_var_channels_only = fd.ops.mul(new_var_sum, rev_batch_size)
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
new_var_channels_only = fd.ops.cast(new_var_channels_only, x_datatype)
fd.add_output(new_var_channels_only, alias_input=running_var)
mean = x_mean
mean_bcast = fd.ops.broadcast(mean, is_spatial_dim)
x_sub_mean = fd.ops.sub(x, mean_bcast)
var_eps = fd.ops.add(x_var, eps)
invstd = fd.ops.rsqrt(var_eps)
invstd_bcast = fd.ops.broadcast(invstd, is_spatial_dim)
x_normed = fd.ops.mul(x_sub_mean, invstd_bcast)
else: # This is inference mode with running stats
assert running_mean is not None
r_mean_bcast = fd.ops.broadcast(running_mean, is_spatial_or_batch_dim)
x_sub_mean = fd.ops.sub(x, r_mean_bcast)
var_eps = fd.ops.add(running_var, eps)
invstd = fd.ops.rsqrt(var_eps)
invstd_bcast = fd.ops.broadcast(invstd, is_spatial_or_batch_dim)
mean = running_mean
x_normed = fd.ops.mul(x_sub_mean, invstd_bcast)
if weight is not None:
weight_bcast = fd.ops.broadcast(weight, is_spatial_or_batch_dim)
x_normed = fd.ops.mul(x_normed, weight_bcast)
if bias is not None:
bias_bcast = fd.ops.broadcast(bias, is_spatial_or_batch_dim)
x_normed = fd.ops.add(x_normed, bias_bcast)
return x_normed, mean, invstd
def norm_fusion_backward(
fd: "nvfuser.FusionDefinition",
inputs: List[torch.Tensor],
x: "nvfuser.Tensor",
grad_output: "nvfuser.Tensor",
mean: Optional[torch.Tensor],
invstd: torch.Tensor,
weight: Optional["nvfuser.Tensor"],
bias: Optional["nvfuser.Tensor"],
running_mean: Optional["nvfuser.Tensor"],
running_var: Optional["nvfuser.Tensor"],
use_input_stats: bool,
channels_last: bool,
x_datatype: "nvfuser.DataType",
*,
stat_axes: List[NamedAxis],
) -> Tuple["nvfuser.Tensor", "nvfuser.Tensor", "nvfuser.Tensor"]:
"""
Modify FusionDefinition to add a generic normalization layer (backward).
Args:
fd: An initialized FusionDefinition.
inputs: A list of :class:'torch.Tensor' inputs to the
`FusionDefinition` `fd`.
x: The input NVFuser tensor.
grad_output: NVFuser tensor representing gradient of loss with respect
to downstream activation (typical input to backward()).
mean: The mean used in the forward normalization.
invstd: The reciprocal of standard deviation used in the forward normalization.
weight: If given, multiply normed output by this `Tensor`. It should be
one-dimensional if `NamedAxis.CHANNEL` is in `stat_axes`, and
zero-dimensional otherwise. It will be broadcast along all other
dimensions.
bias: If given, add this `Tensor` to normed output. It should be
one-dimensional if `NamedAxis.CHANNEL` is in `stat_axes`, and
zero-dimensional otherwise. It will be broadcast along all other
dimensions.
running_mean: If given, a running mean estimate that will be modified
in place.
running_var: If given, a running variance estimate that will be
modified in place.
use_input_stats: Whether to compute the stats of this batch or to
_only_ use the provided running_mean and running_var.
channels_last: Whether channels are in position -1 (`True`) or 1
(`False`).
x_datatype: :class:'DataType' of input :class:'Tensor' `x`
stat_axes: A list of `NamedAxis` objects indicating a combination of
axes with which to index the computed statistics. This can be used
to implement multiple types of normalization layers, since most of
those differ only in which axes are reduced over.
Returns:
The normalized output, as well as mean and 1/std. Note that
`fd.add_output` is _not_ called by this function.
"""
assert not (
(running_var is None) ^ (running_mean is None)
), "Iff running mean or var is given, the other should be"
# dyn_shape holds Scalars describing the size of the input x
dyn_shape = fd.ops.tensor_sizes(x)
num_dims = len(dyn_shape)
batch_dim = 0
batch_size = dyn_shape[batch_dim]
channel_dim = num_dims - 1 if channels_last else 1
num_channels = dyn_shape[channel_dim]
# Running stats will be kept possibly for channel but never by instance, so
# we will reduce along batch_dim before updating running stats.
# These are used to broadcast in spatial dims
is_spatial_dim = [True] * num_dims
is_spatial_or_batch_dim = [True] * num_dims
num_stats = fd.define_scalar(1)
if NamedAxis.BATCH in stat_axes:
is_spatial_dim[batch_dim] = False
num_stats = fd.ops.mul(num_stats, batch_size)
if NamedAxis.CHANNEL in stat_axes:
is_spatial_dim[channel_dim] = False
is_spatial_or_batch_dim[channel_dim] = False
num_stats = fd.ops.mul(num_stats, num_channels)
x_reduction_axes = [ax for ax, flag in enumerate(is_spatial_dim) if flag]
num_features = fd.define_scalar(1)
for ax in x_reduction_axes:
num_features = fd.ops.mul(num_features, dyn_shape[ax])
mean = fd.ops.broadcast(mean, is_spatial_dim)
norm = fd.ops.reciprocal(num_features)
grad_output_sum = fd.ops.sum(grad_output, x_reduction_axes)
dot_p = fd.ops.sum(
fd.ops.mul(
grad_output,
fd.ops.sub(x, mean),
),
x_reduction_axes,
)
grad_mean = fd.ops.broadcast(fd.ops.mul(grad_output_sum, norm), is_spatial_dim)
proj_scale = fd.ops.broadcast(
fd.ops.mul(
fd.ops.mul(dot_p, norm),
fd.ops.mul(invstd, invstd),
),
is_spatial_dim,
)
invstd_bcast = fd.ops.broadcast(invstd, is_spatial_dim)
grad_scale = (
invstd_bcast
if weight is None
else fd.ops.mul(
invstd_bcast,
fd.ops.broadcast(weight, is_spatial_or_batch_dim),
)
)
if use_input_stats:
proj = fd.ops.mul(fd.ops.sub(x, mean), proj_scale)
grad_input = fd.ops.mul(
fd.ops.sub(
fd.ops.sub(grad_output, proj),
grad_mean,
),
grad_scale,
)
else:
grad_input = fd.ops.mul(grad_output, grad_scale)
if weight is not None:
grad_weight = fd.ops.mul(dot_p, invstd)
grad_weight_reduced = fd.ops.sum(grad_weight, [0])
else:
grad_weight_reduced = None
if bias is not None:
grad_bias = grad_output_sum
grad_bias_reduced = fd.ops.sum(grad_bias, [0])
else:
grad_bias_reduced = None
return grad_input, grad_weight_reduced, grad_bias_reduced
class NormNVFuserFunction(torch.autograd.Function):
@staticmethod
def forward(
ctx: Any, # contexts are actually objects of the type we are currently defining
x: torch.Tensor,
weight: Optional[torch.Tensor],
bias: Optional[torch.Tensor],
running_mean: Optional[torch.Tensor],
running_var: Optional[torch.Tensor],
use_input_stats: bool,
momentum: float,
eps: float,
unbiased: bool,
stat_axes: List[NamedAxis],
) -> torch.Tensor:
# When x.shape[1] == 1, is_contiguous will tell us the tensor is
# channels_last, even when it is ordinary contiguous. This causes some
# issues so we only detect channels_last when channels > 1
channels_last = x.shape[1] > 1 and (
x.is_contiguous(memory_format=torch.channels_last)
or x.is_contiguous(memory_format=torch.channels_last_3d)
)
xorig = x
if channels_last:
order = [0] + list(range(2, len(x.shape))) + [1]
x = x.permute(order)
x_datatype = torch_dtype_to_nvfuser_dtype(x.dtype)
with nvfuser.FusionDefinition() as fd:
tv_x = partially_contig_tensor(fd, x)
inputs = [x]
if weight is not None:
tv_weight = partially_contig_tensor(fd, weight)
inputs.append(weight)
else:
tv_weight = None
if bias is not None:
tv_bias = partially_contig_tensor(fd, bias)
inputs.append(bias)
else:
tv_bias = None
if running_mean is None:
tv_running_mean = None
tv_running_var = None
else:
assert running_var is not None
tv_running_mean = partially_contig_tensor(fd, running_mean)
tv_running_var = partially_contig_tensor(fd, running_var)
inputs.extend([running_mean, running_var])
if running_mean.dtype in [torch.half, torch.bfloat16]:
tv_running_mean = fd.ops.cast(
tv_running_mean, nvfuser.DataType.Float
)
if running_var.dtype in [torch.half, torch.bfloat16]:
tv_running_var = fd.ops.cast(tv_running_var, nvfuser.DataType.Float)
s_momentum = fd.define_scalar(nvfuser.DataType.Double)
s_eps = fd.define_scalar(nvfuser.DataType.Double)
inputs.extend([momentum, eps])
# cast inputs if necessary
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
tv_x = fd.ops.cast(tv_x, nvfuser.DataType.Float)
if weight is not None and weight.dtype in [torch.half, torch.bfloat16]:
tv_weight = fd.ops.cast(tv_weight, nvfuser.DataType.Float)
if bias is not None and bias.dtype in [torch.half, torch.bfloat16]:
tv_bias = fd.ops.cast(tv_bias, nvfuser.DataType.Float)
out, mean, invstd = norm_fusion_forward(
fd,
inputs,
tv_x,
tv_weight,
tv_bias,
tv_running_mean,
tv_running_var,
s_eps,
use_input_stats,
s_momentum,
channels_last,
x_datatype=x_datatype,
unbiased=unbiased,
stat_axes=stat_axes,
)
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
out = fd.ops.cast(out, x_datatype)
fd.add_output(out)
fd.add_output(mean)
fd.add_output(invstd)
out, mean, invstd = fd.execute(inputs)
ctx.stat_axes = stat_axes
ctx.use_input_stats = use_input_stats
ctx.channels_last = channels_last
# saving for backward in "explicit channels-last format"
ctx.save_for_backward(x, weight, bias, running_mean, running_var, mean, invstd)
if channels_last:
order = [0, len(x.shape) - 1] + list(range(1, len(x.shape) - 1))
out = out.permute(order)
if len(out.shape) == 4:
assert out.is_contiguous(memory_format=torch.channels_last)
assert xorig.is_contiguous(memory_format=torch.channels_last)
elif len(out.shape) == 5:
assert out.is_contiguous(memory_format=torch.channels_last_3d)
assert xorig.is_contiguous(memory_format=torch.channels_last_3d)
else:
raise RuntimeError(
"unhandled channels_last format variation in forward"
)
return out
@staticmethod
def backward(
ctx: Any, grad_output: torch.Tensor
) -> Tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
None,
None,
None,
None,
None,
None,
None,
]:
"""Instance norm backward using NVFuser"""
if ctx.channels_last:
order = [0] + list(range(2, len(grad_output.shape))) + [1]
grad_output = grad_output.permute(order)
# input was saved in "explicit channels-last format"
# assert ctx.saved_tensors[0].is_contiguous()
# grad_output = grad_output.contiguous()
x, weight, bias, running_mean, running_var, mean, invstd = ctx.saved_tensors
with nvfuser.FusionDefinition() as fd:
tv_x = partially_contig_tensor(fd, x)
if x.dtype in [torch.half, torch.bfloat16]:
tv_x = fd.ops.cast(tv_x, nvfuser.DataType.Float)
inputs = [x]
if weight is not None:
tv_weight = partially_contig_tensor(fd, weight)
if weight.dtype in [torch.half, torch.bfloat16]:
tv_weight = fd.ops.cast(tv_weight, nvfuser.DataType.Float)
inputs.append(weight)
else:
tv_weight = None
if bias is not None:
tv_bias = partially_contig_tensor(fd, bias)
if bias.dtype in [torch.half, torch.bfloat16]:
tv_bias = fd.ops.cast(tv_bias, nvfuser.DataType.Float)
inputs.append(bias)
else:
tv_bias = None
if running_mean is not None:
tv_running_mean = partially_contig_tensor(fd, running_mean)
if running_mean.dtype in [torch.half, torch.bfloat16]:
tv_running_mean = fd.ops.cast(
tv_running_mean, nvfuser.DataType.Float
)
inputs.append(running_mean)
else:
tv_running_mean = None
if running_var is not None:
tv_running_var = partially_contig_tensor(fd, running_var)
if running_var.dtype in [torch.half, torch.bfloat16]:
tv_running_var = fd.ops.cast(tv_running_var, nvfuser.DataType.Float)
inputs.append(running_var)
else:
tv_running_var = None
tv_mean = partially_contig_tensor(fd, mean)
if mean.dtype in [torch.half, torch.bfloat16]:
tv_mean = fd.ops.cast(tv_mean, nvfuser.DataType.Float)
inputs.append(mean)
tv_invstd = partially_contig_tensor(fd, invstd)
if invstd.dtype in [torch.half, torch.bfloat16]:
tv_invstd = fd.ops.cast(tv_invstd, nvfuser.DataType.Float)
inputs.append(invstd)
tv_grad_output = partially_contig_tensor(fd, grad_output)
if grad_output.dtype in [torch.half, torch.bfloat16]:
tv_grad_output = fd.ops.cast(tv_grad_output, nvfuser.DataType.Float)
inputs.append(grad_output)
x_datatype = torch_dtype_to_nvfuser_dtype(x.dtype)
grad_input, grad_weight, grad_bias = norm_fusion_backward(
fd,
inputs,
tv_x,
tv_grad_output,
tv_mean,
tv_invstd,
tv_weight,
tv_bias,
tv_running_mean,
tv_running_var,
ctx.use_input_stats,
ctx.channels_last,
x_datatype=x_datatype,
stat_axes=ctx.stat_axes,
)
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
grad_input = fd.ops.cast(grad_input, x_datatype)
fd.add_output(grad_input)
if weight is not None:
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
grad_weight = fd.ops.cast(grad_weight, x_datatype)
fd.add_output(grad_weight)
if bias is not None:
if x_datatype in [nvfuser.DataType.Half, nvfuser.DataType.BFloat16]:
grad_bias = fd.ops.cast(grad_bias, x_datatype)
fd.add_output(grad_bias)
res = fd.execute(inputs)
grad_input = res[0]
c = 1
if weight is not None:
grad_weight = res[c]
c += 1
else:
grad_weight = None
if bias is not None:
grad_bias = res[c]
c += 1
else:
grad_bias = None
if ctx.channels_last:
order = [0, len(grad_input.shape) - 1] + list(
range(1, len(grad_input.shape) - 1)
)
grad_input = grad_input.permute(order)
if len(grad_input.shape) == 4:
assert grad_input.is_contiguous(memory_format=torch.channels_last)
elif len(grad_input.shape) == 5:
assert grad_input.is_contiguous(memory_format=torch.channels_last_3d)
else:
raise RuntimeError(
"unhandled channels_last format variation in backward"
)
return (
grad_input,
grad_weight,
grad_bias,
None,
None,
None,
None,
None,
None,
None,
)
class _NormNVFuserBase(torch.nn.modules.batchnorm._NormBase):
stat_axes: Optional[List[NamedAxis]] = None
def __init__(
self,
num_features: int,
eps: float = 1e-5,
momentum: float = 0.1,
affine: bool = False,
track_running_stats: bool = False,
device: torch.device = None,
dtype: torch.dtype = None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
def _check_input_dim(self, input: torch.Tensor) -> None:
raise NotImplementedError
def _load_from_state_dict(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Any,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
version = local_metadata.get("version", None)
# at version 1: removed running_mean and running_var when
# track_running_stats=False (default)
if version is None and not self.track_running_stats:
running_stats_keys = []
for name in ("running_mean", "running_var"):
key = prefix + name
if key in state_dict:
running_stats_keys.append(key)
if len(running_stats_keys) > 0:
error_msgs.append(
"Unexpected running stats buffer(s) {names} for {klass} "
"with track_running_stats=False. If state_dict is a "
"checkpoint saved before 0.4.0, this may be expected "
"because {klass} does not track running stats by default "
"since 0.4.0. Please remove these keys from state_dict. If "
"the running stats are actually needed, instead set "
"track_running_stats=True in {klass} to enable them. See "
"the documentation of {klass} for details.".format(
names=" and ".join(
'"{}"'.format(k) for k in running_stats_keys
),
klass=self.__class__.__name__,
)
)
for key in running_stats_keys:
state_dict.pop(key)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
assert input.is_cuda, "NVFuser InstanceNorm is CUDA only"
self._check_input_dim(input)
out = NormNVFuserFunction.apply(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.training or not self.track_running_stats,
self.momentum,
self.eps,
False, # unbiased=False to match PyTorch functionality
self.stat_axes,
)
return out
class _InstanceNormNVFuser(_NormNVFuserBase):
stat_axes = [NamedAxis.BATCH, NamedAxis.CHANNEL]
class _BatchNormNVFuser(_NormNVFuserBase):
stat_axes = [NamedAxis.CHANNEL]
class _LayerNormNVFuser(_NormNVFuserBase):
stat_axes = [NamedAxis.BATCH]
class InstanceNorm1dNVFuser(_InstanceNormNVFuser):
def _check_input_dim(self, input: torch.Tensor) -> None:
if input.dim() != 3:
raise ValueError("expected 3D input (got {}D input)".format(input.dim()))
class InstanceNorm2dNVFuser(_InstanceNormNVFuser):
def _check_input_dim(self, input: torch.Tensor) -> None:
if input.dim() != 4:
raise ValueError("expected 4D input (got {}D input)".format(input.dim()))
class InstanceNorm3dNVFuser(_InstanceNormNVFuser):
def _check_input_dim(self, input: torch.Tensor) -> None:
if input.dim() != 5:
raise ValueError("expected 5D input (got {}D input)".format(input.dim()))
|
Fuser-main
|
nvfuser/contrib/nn/normalization.py
|
#!/usr/bin/python3
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/tools/packaging/package.py
# Description: Script to package a subset of the repo into a tarball
##########################################################################
import os
import sys
import glob
import shutil
from distutils.dir_util import copy_tree
from distutils.errors import DistutilsFileError
import errno
import argparse
import tarfile
from string import Template
# The files that will be necessary in every tarball
REQUIRED_FILES = set(["//WORKSPACE", "//.bazelrc", "//toolchains", "//tools", "//libs"])
# Bazel targets that should not be copied
BLACKLISTED_BAZEL_TARGETS = ["//external"]
PARSER = argparse.ArgumentParser(description="Package a subset of the repo")
PARSER.add_argument("components", nargs="+", help="Targets to include")
PARSER.add_argument("--platform", nargs="+", help="Platform")
PARSER.add_argument(
"-p",
"--pdk",
required=True,
help="Specify the pdk version and platform to target <platform>-<pdk version>\n\
Supported platforms: qnx, aarch64-linux, both")
PARSER.add_argument("-b", "--bazel_version", required=True, help="Specify the bazel version")
PARSER.add_argument("-d", "--docs", help="additional docs to include in the readme", nargs="?", default="")
PARSER.add_argument("-o", "--out", help="custom output file name for the tarball")
ARGS = PARSER.parse_args()
def copy(src, dest):
"""
Copy directories and files
"""
try:
copy_tree(src, dest)
except DistutilsFileError:
shutil.copy(src, dest)
    except Exception as e:
        print('Directory not copied. Error: %s' % e)
def find_bazel_root():
"""
Finds the root directory of the bazel space
"""
curdir = os.path.dirname(os.path.realpath(__file__))
while 1:
if os.path.exists(curdir + "/WORKSPACE"):
return curdir
if curdir == "/":
sys.exit("Error: was unable to find a bazel workspace")
curdir = os.path.dirname(curdir)
def determine_component_deps(bazel_bin, name, component_name):
"""
Takes a bazel target and determines all local dependencies
"""
component_deps = set()
with open(bazel_bin + '/' + component_name.split(":")[1] + "_deps_" + name, 'r') as dep_file:
content = dep_file.readlines()
content = [x.strip() for x in content]
for dep_str in content:
            if dep_str[0] != '@':  # Do not want external dependencies
dep = dep_str.strip().split(":")[0]
if dep not in BLACKLISTED_BAZEL_TARGETS:
component_deps.add(dep)
return component_deps
def toolchain_command(platform):
"""
Generate the correct toolchain commands to insert into the README
"""
if platform == "qnx":
return "dazel build //exampleApp --config=\"D5Q-toolchain\" #Compile with QNX Toolchain"
elif platform == "aarch64-linux":
return "dazel build //exampleApp --config=\"D5L-toolchain\" #Compile with aarch64 Toolchain"
return "dazel build //exampleApp --config=\"D5Q-toolchain\" #Compile with QNX Toolchain\ndazel build //exampleApp --config=\"D5L-toolchain\" #Compile with aarch64 Toolchain"
def format_toc(toc):
toc_str = "## Included Components\n"
for s in toc:
toc_str += "- {}\n".format(s)
return toc_str
def fill_out_readme(readme_tpl_file, title, toc, platform, other=""):
"""
Fill out the README template
"""
fields = {
'title': title,
'toc': format_toc(toc),
'platform': platform,
'toolchain_command': toolchain_command(platform),
'component_specific_instructions': other
}
readme_template = Template(readme_tpl_file.read())
return readme_template.substitute(fields)
def fill_out_dockerfile_dazel(dockerfile_tpl_file, version):
"""
Fill out the README template
"""
fields = {'BAZEL_VERSION': version}
dockerfile_template = Template(dockerfile_tpl_file.read())
return dockerfile_template.substitute(fields)
def select_dockerfile(platform, target_os, version):
    '''
    Set up the docker base containers
    '''
    # The OS argument is named target_os so it does not shadow the os module,
    # which is needed below for the filesystem calls.
    dockerfile_name = "Dockerfile.{}.{}".format(target_os, version)
    dockerfile_path = BAZEL_ROOT + "/docker/" + platform + "/" + dockerfile_name
    if not os.path.exists(STAGING_AREA + "/docker"):
        os.mkdir(STAGING_AREA + "/docker")
    copy(dockerfile_path, STAGING_AREA + "/docker")
if __name__ == "__main__":
#Find the bazel root
BAZEL_ROOT = find_bazel_root()
os.chdir(BAZEL_ROOT)
BAZEL_GENFILES = BAZEL_ROOT + "/bazel-genfiles"
BAZEL_BIN = BAZEL_ROOT + "/bazel-bin"
#Name of the package
OUTFILE_NAME = ARGS.out if ARGS.out else "DL4AGX"
#Location to assemble the tarball (create if does not exist)
STAGING_AREA = BAZEL_GENFILES + '/' + OUTFILE_NAME
if not os.path.exists(STAGING_AREA):
os.mkdir(STAGING_AREA)
#Figure out all of the directories needed
required_targets = set()
table_of_contents = []
for component in ARGS.components:
deps = determine_component_deps(BAZEL_BIN, OUTFILE_NAME, component)
required_targets = required_targets.union(deps)
table_of_contents.append(component.split(":")[0][2:])
required_targets = REQUIRED_FILES.union(required_targets)
required_dirs = [d[1:] for d in required_targets]
for d in required_dirs:
copy(BAZEL_ROOT + d, STAGING_AREA + d)
PLATFORM = ARGS.platform
#Figure out the specific PDK version and platform
PDK_PLATFORM = ARGS.pdk.split(':')[0]
PDK_VERSION = ARGS.pdk.split(':')[1]
#Use PDK version and platform to select dockerfile
if PDK_PLATFORM == 'both':
select_dockerfile(PLATFORM, "qnx", PDK_VERSION)
select_dockerfile(PLATFORM, "aarch64-linux", PDK_VERSION)
else:
select_dockerfile(PLATFORM, PDK_PLATFORM, PDK_VERSION)
copy(BAZEL_ROOT + "/docker/README.md", STAGING_AREA + "/docker")
copy(BAZEL_ROOT + "/tools/packaging/.dazelrc", STAGING_AREA)
#Fill out the readme template
readme_tpl = open(BAZEL_ROOT + "/tools/packaging/README.md.tpl")
readme = fill_out_readme(readme_tpl, OUTFILE_NAME, table_of_contents, PDK_PLATFORM, ARGS.docs if ARGS.docs else "")
with open(STAGING_AREA + "/README.md", "w") as f:
f.write(readme)
#Fill out the dockerfile template
dockerfile_dazel_tpl = open(BAZEL_ROOT + "/tools/packaging/Dockerfile.dazel.tpl")
dockerfile_dazel = fill_out_dockerfile_dazel(dockerfile_dazel_tpl, ARGS.bazel_version)
with open(STAGING_AREA + "/Dockerfile.dazel", "w") as f:
f.write(dockerfile_dazel)
# Remove code owner files
for f in glob.glob(STAGING_AREA + "/**/OWNERS"):
os.remove(f)
# Remove dazel_run files
for f in glob.glob(STAGING_AREA + "/**/.dazel_run"):
os.remove(f)
with tarfile.open(BAZEL_BIN + '/' + OUTFILE_NAME + ".tar.gz", "w:gz") as tar:
tar.add(STAGING_AREA, arcname=os.path.basename(STAGING_AREA))
shutil.rmtree(STAGING_AREA)
for f in glob.glob(BAZEL_BIN + "/*deps*"):
os.remove(f)
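# Illustrative invocation (added for clarity; every value below is a
# placeholder, not taken from the original repo):
#   python tools/packaging/package.py //MyComponent:my_target \
#       --platform <docker-platform-dir> -p aarch64-linux:<pdk-version> \
#       -b <bazel-version> -o MyPackage
# The script expects the per-component "*_deps_*" files to already exist under
# bazel-bin (produced by the build) and writes MyPackage.tar.gz next to them.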
|
DL4AGX-master
|
tools/packaging/package.py
|
#!/usr/bin/python3
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/tools/linter/pylint.py
# Description: Lint files using yapf and overwrite with suggested formatting
##########################################################################
import os
import sys
import glob
import utils
import subprocess
import yapf
VALID_PY_FILE_TYPES = [".py"]
def lint(user, target_files, conf, change_file=True):
return yapf.FormatFiles(
filenames=target_files,
lines=None,
style_config=conf,
no_local_style=None,
in_place=change_file,
print_diff=False,
verify=True,
parallel=True,
verbose=True)
if __name__ == "__main__":
BAZEL_ROOT = utils.find_bazel_root()
STYLE_CONF_PATH = BAZEL_ROOT + "/.style.yapf"
USER = BAZEL_ROOT.split('/')[2]
subprocess.run(["useradd", USER])
projects = utils.CHECK_PROJECTS(sys.argv[1:])
if "//..." in projects:
projects = [p.replace(BAZEL_ROOT, "/")[:-1] for p in glob.glob(BAZEL_ROOT + '/*/')]
projects = [p for p in projects if p not in utils.BLACKLISTED_BAZEL_TARGETS]
for p in projects:
if p.endswith("/..."):
p = p[:-4]
path = BAZEL_ROOT + '/' + p[2:]
files = utils.glob_files(path, VALID_PY_FILE_TYPES)
if files != []:
changed = lint(USER, files, STYLE_CONF_PATH)
if changed:
print(
"\033[93mWARNING:\033[0m This command modified your files with the recommended linting, you should review the changes before committing"
)
sys.exit(1)
|
DL4AGX-master
|
tools/linter/pylint.py
|
#!/usr/bin/python3
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/tools/linter/pylint_diff.py
# Description: Lint files with yapf and print the differences
##########################################################################
import os
import sys
import glob
import utils
import subprocess
import yapf
VALID_PY_FILE_TYPES = [".py"]
def lint(user, target_files, conf, change_file=False):
return yapf.FormatFiles(
filenames=target_files,
lines=None,
style_config=conf,
no_local_style=None,
in_place=change_file,
print_diff=True,
verify=True,
parallel=True,
verbose=True)
if __name__ == "__main__":
BAZEL_ROOT = utils.find_bazel_root()
STYLE_CONF_PATH = BAZEL_ROOT + "/.style.yapf"
USER = BAZEL_ROOT.split('/')[2]
subprocess.run(["useradd", USER])
projects = utils.CHECK_PROJECTS(sys.argv[1:])
if "//..." in projects:
projects = [p.replace(BAZEL_ROOT, "/")[:-1] for p in glob.glob(BAZEL_ROOT + '/*/')]
projects = [p for p in projects if p not in utils.BLACKLISTED_BAZEL_TARGETS]
for p in projects:
if p.endswith("/..."):
p = p[:-4]
path = BAZEL_ROOT + '/' + p[2:]
files = utils.glob_files(path, VALID_PY_FILE_TYPES)
if files != []:
changed = lint(USER, files, STYLE_CONF_PATH)
if changed:
print("\033[91mERROR:\033[0m Some files do not conform to style guidelines")
sys.exit(1)
|
DL4AGX-master
|
tools/linter/pylint_diff.py
|
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/tools/linter/utils.py
# Description: Utils for navigating bazel in python
##########################################################################
import os
import sys
import glob
import subprocess
BLACKLISTED_BAZEL_TARGETS = [
"//external", "//tools", "//libs", "//docker", "//toolchains", "//third_party", "//bazel-bin", "//bazel-genfiles",
"//bazel-out", "//bazel-DL4AGX", "//bazel-testlogs"
]
def CHECK_PROJECTS(projs):
for p in projs:
if p[:2] != "//":
sys.exit(p + " is not a valid bazel target")
return projs
def find_bazel_root():
"""
Finds the root directory of the bazel space
"""
curdir = os.path.dirname(os.path.realpath(__file__))
while 1:
if os.path.exists(curdir + "/WORKSPACE"):
return curdir
if curdir == "/":
sys.exit("Error: was unable to find a bazel workspace")
curdir = os.path.dirname(curdir)
def glob_files(project, file_types):
files = []
for t in file_types:
files += glob.glob(project + "/**/*" + t, recursive=True)
return files
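# Illustrative sketch (added for clarity, not in the original file): typical use
# of the helpers above from one of the linter entry points.
def _example_usage():
    root = find_bazel_root()
    targets = CHECK_PROJECTS(["//tools"])            # exits if a target is malformed
    py_files = glob_files(root + "/tools", [".py"])  # recursive glob by extension
    return targets, py_files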
|
DL4AGX-master
|
tools/linter/utils.py
|
#!/usr/bin/python3
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/tools/linter/cpplint_diff.py
# Description: Lint files with clang format and print the differences
##########################################################################
import os
import sys
import glob
import subprocess
import utils
VALID_CPP_FILE_TYPES = [".cpp", ".cc", ".c", ".cu", ".hpp", ".h", ".cuh"]
def lint(target_files):
failure = False
for f in target_files:
with open("/tmp/changes.txt", "w") as changes:
subprocess.run(['clang-format', f], stdout=changes)
output = subprocess.run(["git", "diff", "-u", "--color", f, "/tmp/changes.txt"])
if output.returncode != 0:
failure = True
return failure
if __name__ == "__main__":
BAZEL_ROOT = utils.find_bazel_root()
projects = utils.CHECK_PROJECTS(sys.argv[1:])
if "//..." in projects:
projects = [p.replace(BAZEL_ROOT, "/")[:-1] for p in glob.glob(BAZEL_ROOT + '/*/')]
projects = [p for p in projects if p not in utils.BLACKLISTED_BAZEL_TARGETS]
failure = False
for p in projects:
if p.endswith("/..."):
p = p[:-4]
path = BAZEL_ROOT + '/' + p[2:]
files = utils.glob_files(path, VALID_CPP_FILE_TYPES)
if files != []:
            failure = lint(files) or failure
if failure:
print("\033[91mERROR:\033[0m Some files do not conform to style guidelines")
sys.exit(1)
|
DL4AGX-master
|
tools/linter/cpplint_diff.py
|
#!/usr/bin/python3
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: DL4AGX/tools/linter/cpplint.py
# Description: Lint C++ files with clang-format and overwrite them
#              with the correct formatting
##########################################################################
import os
import sys
import glob
import subprocess
import utils
VALID_CPP_FILE_TYPES = [".cpp", ".cc", ".c", ".cu", ".hpp", ".h", ".cuh"]
def lint(user, target_files, change_file=True):
    base_cmd = ['clang-format']
    if change_file:
        base_cmd.append("-i")
        print(
            "\033[93mWARNING:\033[0m This command is modifying your files with the recommended linting, you should review the changes before committing"
        )
    for f in target_files:
        # Format each file with its own clang-format invocation so the command
        # does not keep growing with previously processed files.
        subprocess.run(base_cmd + [f])
        subprocess.run(["chown", user + ":" + user, f])
        subprocess.run(["chmod", "u+rw,g+rw", f])
if __name__ == "__main__":
BAZEL_ROOT = utils.find_bazel_root()
USER = BAZEL_ROOT.split('/')[2]
subprocess.run(["useradd", USER])
projects = utils.CHECK_PROJECTS(sys.argv[1:])
if "//..." in projects:
projects = [p.replace(BAZEL_ROOT, "/")[:-1] for p in glob.glob(BAZEL_ROOT + '/*/')]
projects = [p for p in projects if p not in utils.BLACKLISTED_BAZEL_TARGETS]
for p in projects:
if p.endswith("/..."):
p = p[:-4]
path = BAZEL_ROOT + '/' + p[2:]
files = utils.glob_files(path, VALID_CPP_FILE_TYPES)
if files != []:
lint(USER, files)
|
DL4AGX-master
|
tools/linter/cpplint.py
|
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: //LeNetWithS3Pooling/training/pooling.py
# Description: Implementation of S3Pooling
##########################################################################
import numpy as np
import random
import torch
from torchsummary import summary
import torch.nn.functional as F
class StochasticPool2d(torch.nn.Module):
def __init__(self, kernel_size=2, stride=2, padding=0):
super(StochasticPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.grid_size = kernel_size
# Reference: https://arxiv.org/pdf/1611.05138.pdf
# First, perform with stride=1 and maintain resolution
# Hence, padding zeroes only on the right and bottom
self.padding = torch.nn.ConstantPad2d((0,1,0,1),0)
def forward(self, x, s3pool_flag=False):
# If S3Pool flag is enabled or training mode: Run S3Pooling
if s3pool_flag or self.training:
# Compute spatial dimensions from input feature map tensor
h, w = x.shape[-2:]
n_h = h // self.grid_size
n_w = w // self.grid_size
n_h = int(n_h)
n_w = int(n_w)
# Reference: https://arxiv.org/pdf/1611.05138.pdf
# First, perform with stride=1 and maintain resolution
# Hence, padding only on the right and bottom
x = self.padding(x)
# First step : perform maxpooling
x = F.max_pool2d(x, self.kernel_size, 1)
w_indices = []
h_indices = []
# Second step : Perform stochastic S3Pooling
for i in range(n_w):
# Calculate offset
position_offset = self.grid_size * i
# Max range for Boundary case
if i + 1 < n_w:
max_range = self.grid_size
else:
max_range = w - position_offset
# Pick random w index from [ position_offset to grid size ]
# Don't use random at inference time for exporting to IR
if not self.training:
w_index = torch.LongTensor([0])
else:
w_index = torch.LongTensor(1).random_(0, max_range)
w_indices.append(torch.add(w_index, position_offset))
for j in range(n_h):
# Calculate offset
position_offset = self.grid_size * j
# Max range for Boundary case
if j + 1 < n_h:
max_range = self.grid_size
else:
max_range = h - position_offset
# Pick random h index from [position offset to grid_size]
# Don't use random at inference time for exporting to IR
if not self.training:
h_index = torch.LongTensor([0])
else:
h_index = torch.LongTensor(1).random_(0, max_range)
h_indices.append(torch.add(h_index, position_offset))
            # Gather all the h, w indices from the S3Pooling step
h_indices = torch.cat(h_indices, dim = 0)
w_indices = torch.cat(w_indices, dim = 0)
#output = x
# Pick values corresponding to h, w indices calculated
output = x[:, :, h_indices.cuda()][:, :, :, w_indices.cuda()]
print(x.shape, output.shape)
else:
# If S3Pooling flag disabled and inference time, perform average pooling
# Use AvgPooling
output = F.avg_pool2d(x, self.kernel_size, self.stride)
return output
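# Illustrative sketch (added for clarity, not part of the original module):
# shape behaviour of StochasticPool2d on both paths. The stochastic path
# indexes on the GPU, so it assumes a CUDA device; the fallback path is plain
# average pooling.
def _s3pool_example():
    pool = StochasticPool2d(kernel_size=2, stride=2)
    x = torch.randn(1, 20, 24, 24)
    pool.eval()
    y_avg = pool(x)                 # average-pool path: (1, 20, 12, 12)
    if torch.cuda.is_available():
        pool.train()
        y_s3 = pool(x.cuda())       # stochastic path: (1, 20, 12, 12)
        return y_avg, y_s3
    return y_avg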
|
DL4AGX-master
|
LeNetWithS3Pooling/training/pooling.py
|
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: //LeNetWithS3Pooling/training/main.py
# Description: Train LeNet with S3Pooling
##########################################################################
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torchsummary import summary
from pooling import StochasticPool2d
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
self.sp1 = StochasticPool2d(2, 2)
self.sp2 = StochasticPool2d(2, 2)
def setPoolFlag(self, s3pool_flag=False):
self.s3pool_flag = s3pool_flag
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.sp1(x, self.s3pool_flag)
x = F.relu(self.conv2(x))
x = self.sp2(x, self.s3pool_flag)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--s3pool', action='store_true', default=False,
help='For using S3Pooling in inference path')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
model.setPoolFlag(args.s3pool)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if (args.save_model):
torch.save(model.state_dict(),"mnist_cnn.pt")
summary(model, (1, 28, 28))
x = torch.randn(1, 1, 28, 28, requires_grad=True).to(device)
torch_out = model(x)
output_name = "mnist.onnx"
    if not args.s3pool:
output_name = "mnist_with_avgpool.onnx"
torch.onnx.export(model, x, output_name, export_params=True)
if __name__ == '__main__':
main()
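# --- Hedged usage note (not part of the original file) ---
# Typical invocations based on the argparse flags above; the export at the end
# writes mnist.onnx when --s3pool is passed and mnist_with_avgpool.onnx otherwise:
#   python main.py --epochs 10 --save-model
#   python main.py --epochs 10 --save-model --s3pool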
|
DL4AGX-master
|
LeNetWithS3Pooling/training/main.py
|
##########################################################################
# Copyright (c) 2018-2019 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: //LeNetWithS3Pooling/utils/modify_onnx.py
# Description: Modify ONNX graph to accept S3Pooling Plugin
##########################################################################
import torch
import onnx
import argparse
def main():
# Configurable parameters from command line
parser = argparse.ArgumentParser(description='ONNX Modifying Example')
parser.add_argument('--onnx',
help='onnx file to modify')
parser.add_argument('--output', default="output.onnx",
                        help='output ONNX file name (default: output.onnx)')
args = parser.parse_args()
# Load ONNX file
model = onnx.load(args.onnx)
# Retrieve graph_def
graph = model.graph
# List of nodes in the graph
nodes = graph.node
prev_node = None
# Iterate through all the nodes
for node in nodes:
# Search for Pad + MaxPool layer for modification in ONNX export
if prev_node and prev_node.op_type == "Pad" and node.op_type == "MaxPool":
# Modify the Padding layer as per Average Pooling
dup_prev_node = onnx.helper.make_node("Pad",
inputs=[x for x in prev_node.input],
outputs=[x for x in prev_node.output],
mode='constant',
value=0.0,
pads=[0, 0, 0, 0, 0, 0, 0, 0],
)
# Replace the Padding node with new padding node
graph.node.remove(prev_node)
graph.node.extend([dup_prev_node])
# Modify the pooling with S3Pooling for Webinar demo
node.op_type ="S3Pool"
prev_node = node
# Generate model_definition from modified graph
model_def = onnx.helper.make_model(graph)
# Save the serialized model
onnx.save(model_def, args.output)
if __name__ == '__main__':
main()
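# --- Hedged usage sketch (not part of the original file) ---
# Example invocation followed by a quick check of the rewritten graph. The output
# model contains the custom "S3Pool" op, so onnx.checker would reject it; listing
# the op types is enough to confirm the substitution:
#   python modify_onnx.py --onnx mnist.onnx --output mnist_s3pool.onnx
#
#   m = onnx.load("mnist_s3pool.onnx")
#   print([n.op_type for n in m.graph.node])  # "S3Pool" appears where MaxPool used to be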
|
DL4AGX-master
|
LeNetWithS3Pooling/utils/modify_onnx.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
import sys
import argparse
import torch
import torch.utils.data
from torch import nn
import json
# from torchvision import models
sys.path.insert(0, "./vision")
try:
from torchvision import models
except ImportError:
print("Error importing pytorch's torchvision repository!")
from train_utils import train_loop, evaluate, data_loading, eval_baseline, prune_trained_model_custom, \
get_optimizer, get_lr_scheduler
sys.path.append("./vision/references/classification/")
try:
from train import get_args_parser
import utils as utils_vision
except ImportError:
print("Error import pytorch's vision repository!")
def train_sparse(model, args, criterion, data_loader, data_loader_test, data_loader_val):
# Clone model to make sure the original model is not modified
model_sparse = copy.deepcopy(model)
# Set optimizer
parameters = utils_vision.set_weight_decay(
model_sparse,
args.sparse_weight_decay,
norm_weight_decay=None,
custom_keys_weight_decay=None,
)
optimizer = get_optimizer(args, parameters)
if args.distributed:
model_sparse = torch.nn.parallel.DistributedDataParallel(model_sparse, device_ids=[args.gpu])
else:
model_sparse = torch.nn.DataParallel(model_sparse.cuda(0))
model_sparse_without_ddp = model_sparse.module
# Initialize sparsity mode before loading checkpoints and/or starting training.
# Apart from the import statement, it is sufficient to add just the following line of code before the training
# phase to augment the model and the optimizer for sparse training/inference:
prune_trained_model_custom(model_sparse, optimizer, allow_recompute_mask=True)
model_sparse.to(args.device)
sparse_ckpt_path = os.path.join(args.output_dir, args.sparse_ckpt)
if os.path.exists(sparse_ckpt_path) and not args.rewrite_sparse_weights:
print("> Loading Sparse ckpt from {}!".format(sparse_ckpt_path))
try:
load_dict = torch.load(sparse_ckpt_path)
except Exception:
print("Loading checkpoint from distributed model. Mapping GPU location to local single-GPU setting.")
load_dict = torch.load(sparse_ckpt_path, map_location="cuda:0") # "cuda:{}".format(args.device))
try:
model_sparse.load_state_dict(load_dict["model_state_dict"]) # , strict=False)
except Exception:
model_sparse_without_ddp.load_state_dict(load_dict["model_state_dict"], strict=False)
else:
print("> Fine-tuning stage started...")
# Set LR scheduler
lr_scheduler = get_lr_scheduler(args, optimizer)
# Training loop
train_loop(model_sparse, model_sparse_without_ddp, criterion, optimizer, data_loader, data_loader_val,
torch.device(args.device),
lr_scheduler=lr_scheduler, epoch=args.sparse_epoch, args=args,
summary_writer_dir=os.path.join(args.output_dir, "logs", "sparse"),
save_ckpt_path=sparse_ckpt_path, opset=13,
steps_per_epoch=args.sparse_steps_per_epoch)
# Load BEST model
if os.path.exists(sparse_ckpt_path):
print("> Loading Sparse ckpt from {}!".format(sparse_ckpt_path))
load_dict = torch.load(sparse_ckpt_path)
model_sparse_without_ddp.load_state_dict(load_dict["model_state_dict"])
# Evaluate model
acc1, acc5 = None, None
if args.eval_sparse:
with torch.no_grad():
acc1, acc5, _ = evaluate(model_sparse_without_ddp, criterion, data_loader_test, device="cuda", print_freq=args.print_freq)
return model_sparse, acc1, acc5
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This script Sparsifies a Dense model and fine-tunes it.")
parser.add_argument("--model_name", type=str, default="resnet34",
help="See more model names at https://pytorch.org/vision/stable/models.html and "
" https://github.com/pytorch/vision/tree/main/torchvision/models")
parser.add_argument("--data_dir", type=str, default="/media/Data/imagenet_data", help="Path to ImageNet dataset.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument("--train_data_size", type=int, default=None,
help="Dataset to be used during training."
" If None, take the entire train data. Otherwise, take subset.")
parser.add_argument("--test_data_size", type=int, default=None,
help="Dataset to be used for the final model evaluation (to obtain accuracy)."
" If None, take the entire val data. Otherwise, take subset.")
parser.add_argument("--val_data_size", type=int, default=None,
help="Dataset to be used during training to check for best checkpoint."
" If None, take the entire val data. Otherwise, take subset."
" Test and Val data are obtained from the same dataset. The only difference is the number"
" of samples. The motivation behind this is that a small val data should be enough to "
" check for the best checkpoint while removing the time bottleneck of the validation"
" step during training. After training is done, the model can then be evaluated on the "
" complete val data.")
parser.add_argument("--device", type=str, default="cuda", help="Hardware to run code on.")
parser.add_argument("--output_dir", type=str, default="./weights_qat",
help="Path to save outputs (log files, checkpoint, ...).")
# Sparse params
parser.add_argument("--sparse_epoch", type=int, default=30,
help="Number of epochs to fine-tune Sparse model.")
parser.add_argument("--sparse_steps_per_epoch", type=int, default=None,
help="Steps per epoch: number of steps = train_data_size/batch_size."
" If None, use the entire train data in each epoch. Otherwise, use a subset.")
parser.add_argument("--sparse_lr", type=float, default=0.1, help="Base learning rate for Sparse workflow.")
parser.add_argument("--sparse_weight_decay", type=float, default=1e-4, help="Weight decay for Sparse workflow.")
parser.add_argument("--sparse_momentum", type=float, default=0.9, help="Momentum for Sparse workflow.")
parser.add_argument("--sparse_ckpt", type=str, default="sparse-finetuned_best.pth",
help="Sparse checkpoint filename (must be inside `output_dir` and of type .pth)."
" If checkpoint exists, simply load it."
" Otherwise, perform Sparse fine-tuning and save checkpoint.")
# torchvision args
parser.add_argument("--opt", default="sgd", type=str, help="optimizer")
parser.add_argument("--lr_scheduler", default="constant", type=str, help="LR Scheduler, options={multistep, constant, step}")
parser.add_argument("--lr_warmup_epochs", default=5, type=int, help="the number of epochs to warmup (default: 0)")
parser.add_argument("--lr_warmup_decay", default=0.01, type=float, help="the decay for lr")
parser.add_argument("--lr_step_size", default=30, type=int, help="decrease lr every step-size epochs")
parser.add_argument("--lr_gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
parser.add_argument("--lr_min", default=0.0, type=float, help="minimum lr of lr schedule (default: 0.0)")
parser.add_argument("--label_smoothing", default=0.0, type=float, help="label smoothing (default: 0.0)", dest="label_smoothing")
parser.add_argument("--print_freq", default=20, type=int, help="print frequency")
# distributed training parameters
parser.add_argument("--world_size", default=1, type=int, help="number of distributed processes")
parser.add_argument("--dist_url", default="env://", type=str, help="url used to set up distributed training")
parser.add_argument("--model_ema", action="store_true", help="enable tracking Exponential Moving Average of model parameters")
parser.add_argument("--model_ema_steps", type=int, default=32,
help="the number of iterations that controls how often to update the EMA model (default: 32)")
parser.add_argument("--model_ema_decay", type=float, default=0.99998,
help="decay factor for Exponential Moving Average of model parameters (default: 0.99998)")
parser.add_argument("--clip_grad_norm", default=None, type=float, help="the maximum gradient norm (default None)")
# Dataloader arguments from 'vision' repo
parser.add_argument("--cache-dataset", dest="cache_dataset", action="store_true",
help="Cache the datasets for quicker initialization. It also serializes the transforms")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Only test the model")
parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
parser.add_argument("--ra-magnitude", default=9, type=int, help="magnitude of auto augment policy")
parser.add_argument("--augmix-severity", default=3, type=int, help="severity of augmix policy")
parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
parser.add_argument("--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)")
parser.add_argument("--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)")
parser.add_argument("--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)")
parser.add_argument("--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)")
parser.add_argument("--ra-sampler", action="store_true", help="whether to use Repeated Augmentation in training")
parser.add_argument("--ra-reps", default=3, type=int, help="number of repetitions for Repeated Augmentation (default: 3)")
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
# Distributed
parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
# Eval params
parser.add_argument("--save_baseline", dest="save_baseline", action="store_true", help="Save baseline model.")
parser.add_argument("--eval_baseline", dest="eval_baseline", action="store_true", help="Evaluate baseline model.")
parser.add_argument("--eval_sparse", dest="eval_sparse", action="store_true", help="Evaluate sparse model.")
parser.add_argument("--rewrite_sparse_weights", dest="rewrite_sparse_weights", action="store_true",
help="Rewrite Sparse checkpoint if it exists.")
args = parser.parse_args()
# The function below creates args.distributed and args.gpu/rank
utils_vision.init_distributed_mode(args)
if args.distributed:
print("Running distributed script with world size of {}".format(args.world_size))
else:
print("Running script in non-distributed manner!")
args.output_dir = os.path.join(args.output_dir, args.model_name)
if not os.path.exists(args.output_dir):
try:
os.makedirs(args.output_dir)
except FileExistsError:
print("Directory {} exists, not creating it again.".format(args.output_dir))
# Data loading
print("---------- Loading data ----------")
data_loader, data_loader_test, data_loader_val = data_loading(
args.data_dir, args.batch_size, args,
args.train_data_size, args.test_data_size, args.val_data_size
)
# Set loss criteria
criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing)
# ############# BASELINE ##################
    assert hasattr(models, args.model_name), "Model {} not supported!".format(args.model_name)
model, acc1, acc5 = eval_baseline(args, criterion, data_loader_test)
# ############### SPARSE ###################
print("---------- Fine-tuning Dense as Sparse model (FP32) for {} epochs ----------".format(args.sparse_epoch))
model_sparse, acc1_sparse, acc5_sparse = train_sparse(
model, args, criterion, data_loader, data_loader_test, data_loader_val
)
# ############ Write logs to 'out.log' and Save args into 'args.json' ###############
results_str = " ------------ Evaluation Results ------------\n"
if args.eval_baseline:
results_str += "Baseline: Top-1 {:.3f}%, Top-5: {:.3f}%\n".format(acc1, acc5)
if args.eval_sparse:
results_str += "Sparse: Top-1 {:.3f}%, Top-5 {:.3f}%\n".format(acc1_sparse, acc5_sparse)
results_str += " ------------ CMD -------------------\n"
results_str += '\n'.join(sys.argv[1:])
with open(os.path.join(args.output_dir, "out_sparse.log"), 'w') as f:
f.write(results_str)
with open(os.path.join(args.output_dir, "args_sparse.json"), 'w') as f:
json.dump(args.__dict__, f, indent=2)
print("End!")
|
DL4AGX-master
|
SparsityINT8/step1_sparse_training.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# > prune_trained_model_custom()
# Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
#
# BSD-3-Clause license
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# > train_one_epoch(), evaluate()
# Copyright (c) Soumith Chintala 2016. All rights reserved.
#
# BSD 3-Clause License
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Modifications:
- prune_trained_model_custom():
1. Abstracted 'allow_recompute_mask' and 'allow_permutation' arguments;
2. Enabled sparse mask computation as optional.
- train_one_epoch(): added steps_per_epoch if condition.
- evaluate():
1. Returned Top-5 accuracy and loss on top of the Top-1 accuracy;
2. Commented out "and torch.distributed.get_rank() == 0" if.
"""
import copy
import os
import sys
import time
import torch
import torch.utils.data
from torch import nn
import warnings
from torch.utils.tensorboard import SummaryWriter
# from torchvision import models
sys.path.insert(0, "./vision")
try:
from torchvision import models
except ImportError:
print("Error importing pytorch's torchvision repository!")
sys.path.append(os.path.abspath("./vision/references/classification"))
try:
import utils
from train import load_data
except ImportError:
print("Error import pytorch's vision repository!")
# Sparsity Toolkit
sys.path.append("./apex")
try:
from apex.contrib.sparsity import ASP
except ImportError:
print("Error importing `apex`!")
# Quantization Toolkit
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import calib
import tqdm
def prune_trained_model_custom(model, optimizer, allow_recompute_mask=False, allow_permutation=True,
compute_sparse_masks=True):
""" Adds mask buffers to model (init_model_for_pruning), augments optimize, and computes masks if .
Source: https://github.com/NVIDIA/apex/blob/52c512803ba0a629b58e1c1d1b190b4172218ecd/apex/contrib/sparsity/asp.py#L299
Modifications:
1) Abstracted 'allow_recompute_mask' and 'allow_permutation' arguments
2) Enabled sparse mask computation as optional
"""
asp = ASP()
asp.init_model_for_pruning(
model,
mask_calculator="m4n2_1d",
verbosity=2,
whitelist=[torch.nn.Linear, torch.nn.Conv2d],
allow_recompute_mask=allow_recompute_mask,
allow_permutation=allow_permutation
)
asp.init_optimizer_for_pruning(optimizer)
if compute_sparse_masks:
asp.compute_sparse_masks()
return asp
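# --- Hedged usage sketch (not part of the original file) ---
# Minimal ASP wiring on a torchvision model, mirroring the call order used in
# step1_sparse_training.py: build the model and optimizer first, then prune so the
# 2:4 ("m4n2") masks and the optimizer augmentation take effect before fine-tuning.
#
#   model = models.resnet34(pretrained=True).cuda()
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   prune_trained_model_custom(model, optimizer, allow_recompute_mask=True)
#   # ... regular fine-tuning loop; pruned weights stay at zero via the masked optimizer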
def data_loading(data_path, batch_size, torchvision_args, train_data_size=None, test_data_size=None, val_data_size=None,
distributed=False, drop_last=False):
traindir = os.path.join(data_path, 'train')
valdir = os.path.join(data_path, 'val')
dataset, dataset_test, train_sampler, test_sampler = load_data(traindir, valdir, torchvision_args)
dataset_val = copy.deepcopy(dataset_test)
val_sampler = copy.deepcopy(test_sampler)
# Take subset if != None
if train_data_size:
dataset = torch.utils.data.Subset(dataset, list(range(0, train_data_size)))
if torchvision_args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
train_sampler = torch.utils.data.RandomSampler(dataset)
if test_data_size:
dataset_test = torch.utils.data.Subset(dataset_test, list(range(0, test_data_size)))
if torchvision_args.distributed:
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
else:
test_sampler = torch.utils.data.RandomSampler(dataset_test)
if val_data_size:
dataset_val = torch.utils.data.Subset(dataset_val, list(range(0, val_data_size)))
if torchvision_args.distributed:
val_sampler = torch.utils.data.distributed.DistributedSampler(dataset_val)
else:
val_sampler = torch.utils.data.RandomSampler(dataset_val)
# Make dataloader
train_data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size,
sampler=train_sampler, drop_last=drop_last)
test_data_loader = torch.utils.data.DataLoader(
dataset_test, batch_size=batch_size,
sampler=test_sampler, drop_last=drop_last)
val_data_loader = torch.utils.data.DataLoader(
dataset_val, batch_size=batch_size,
sampler=val_sampler, drop_last=drop_last)
return train_data_loader, test_data_loader, val_data_loader
def eval_baseline(args, criterion, data_loader_test):
model = models.__dict__[args.model_name](pretrained=True)
model.to(args.device)
acc1, acc5, loss = None, None, None
if args.eval_baseline:
print("---------- Evaluating baseline model ----------")
with torch.no_grad():
acc1, acc5, loss = evaluate(model, criterion, data_loader_test, device=args.device, print_freq=args.print_freq)
if args.save_baseline:
ckpt_path = os.path.join(args.output_dir, "baseline.pth")
torch.save({
'model_state_dict': model.state_dict(),
"acc1_val": acc1,
"acc5_val": acc5,
"loss_val": loss,
'args': args
}, ckpt_path)
export_onnx(model, ckpt_path.replace(".pth", ".onnx"), args.batch_size, val_crop_size=args.val_crop_size)
return model, acc1, acc5
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args, steps_per_epoch=None,
model_ema=None, scaler=None):
"""
Codebase: torch/vision/references/classification/train.py
Modification: added steps_per_epoch.
"""
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}"))
header = f"Epoch: [{epoch}]"
for i, (image, target) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)):
if steps_per_epoch is not None and i >= steps_per_epoch:
break
start_time = time.time()
image, target = image.to(device), target.to(device)
with torch.cuda.amp.autocast(enabled=scaler is not None):
output = model(image)
loss = criterion(output, target)
optimizer.zero_grad()
if scaler is not None:
scaler.scale(loss).backward()
if args.clip_grad_norm is not None:
# we should unscale the gradients of optimizer's assigned params if do gradient clipping
scaler.unscale_(optimizer)
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if args.clip_grad_norm is not None:
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
optimizer.step()
if model_ema and i % args.model_ema_steps == 0:
model_ema.update_parameters(model)
if epoch < args.lr_warmup_epochs:
# Reset ema buffer to keep copying weights during warmup period
model_ema.n_averaged.fill_(0)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
return metric_logger
def train_loop(model, model_without_ddp, criterion, optimizer, data_loader, data_loader_val, device, epoch, args, summary_writer_dir,
save_ckpt_path, steps_per_epoch=None, model_ema=None, scaler=None, lr_scheduler=None, opset=13):
def _save_model(ep, ckpt_path, acc1_val, acc5_val, current_val_loss):
torch.save({
'epoch': ep,
'model_state_dict': model_without_ddp.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'lr_scheduler_state_dict': lr_scheduler.state_dict() if lr_scheduler is not None else None, # ADDED
'loss': criterion,
"acc1_val": acc1_val,
"acc5_val": acc5_val,
"loss_val": current_val_loss,
'args': args # ADDED
}, ckpt_path)
# Export to ONNX
export_onnx(model_without_ddp, ckpt_path.replace(".pth", ".onnx"), args.batch_size,
val_crop_size=args.val_crop_size, opset_version=opset)
summary_writer = SummaryWriter(summary_writer_dir)
best_val_loss = float("inf")
tick = time.time()
for e in range(0, epoch):
print("Epoch {}/{}".format(e, epoch))
if args.distributed:
data_loader.sampler.set_epoch(e)
torch.distributed.barrier()
metric_logger = train_one_epoch(
model, criterion, optimizer, data_loader, device, e, args, steps_per_epoch=steps_per_epoch,
model_ema=model_ema, scaler=scaler
)
if lr_scheduler is not None:
lr_scheduler.step()
if args.distributed:
torch.distributed.barrier()
# ======== Validation step ========
acc1_val, acc5_val, loss_val = evaluate(model, criterion, data_loader_val, device, print_freq=args.print_freq)
# Save the BEST model
current_val_loss = loss_val
if current_val_loss < best_val_loss:
best_val_loss = current_val_loss
_save_model(e, save_ckpt_path, acc1_val, acc5_val, current_val_loss)
# ======== Summary Writer (Tensorboard) log ========
summary_writer.add_scalar("lr", metric_logger.lr.value, e)
summary_writer.add_scalar("Loss_train/epoch", metric_logger.loss.global_avg, e)
summary_writer.add_scalar("Accuracy_top1_train/epoch", metric_logger.acc1.global_avg, e)
summary_writer.add_scalar("Accuracy_top5_train/epoch", metric_logger.acc5.global_avg, e)
summary_writer.add_scalar("Loss_val/epoch", loss_val, e)
summary_writer.add_scalar("Accuracy_top1_val/epoch", acc1_val, e)
summary_writer.add_scalar("Accuracy_top5_val/epoch", acc5_val, e)
# Save the FINAL model
_save_model(e, save_ckpt_path.replace("_best.pth", "_final.pth"), acc1_val, acc5_val, current_val_loss)
tock = time.time()
    time_min = (tock - tick) / 60  # time.time() returns seconds
print("Training took {} minutes or {} hours!".format(time_min, time_min / 60))
def evaluate(model, criterion, data_loader, device, print_freq=100, log_suffix=""):
"""
Codebase: torch/vision/references/classification/train.py
Modifications:
1) returning Top-5 accuracy and loss on top of the Top-1 accuracy, and
2) commented out "and torch.distributed.get_rank() == 0" if.
"""
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = f"Test: {log_suffix}"
num_processed_samples = 0
with torch.inference_mode():
for image, target in metric_logger.log_every(data_loader, print_freq, header):
image = image.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
output = model(image)
loss = criterion(output, target)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
# FIXME need to take into account that the datasets
# could have been padded in distributed setup
batch_size = image.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
num_processed_samples += batch_size
# gather the stats from all processes
num_processed_samples = utils.reduce_across_processes(num_processed_samples)
print("Num processed samples: {}".format(num_processed_samples))
print("data_loader len: {}".format(len(data_loader.dataset)))
if (
hasattr(data_loader.dataset, "__len__")
and len(data_loader.dataset) != num_processed_samples
# and torch.distributed.get_rank() == 0
):
# See FIXME above
warnings.warn(
f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} "
"samples were used for the validation, which might bias the results. "
"Try adjusting the batch size and / or the world size. "
"Setting the world size to 1 is always a safe bet."
)
metric_logger.synchronize_between_processes()
print(f"{header} Acc@1 {metric_logger.acc1.global_avg:.3f},"
f" Acc@5 {metric_logger.acc5.global_avg:.3f},"
f" loss {metric_logger.loss.global_avg:.5f}")
return metric_logger.acc1.global_avg, metric_logger.acc5.global_avg, metric_logger.loss.global_avg
def export_onnx(model, onnx_filename, batch_onnx, val_crop_size=224, opset_version=13, verbose=False,
do_constant_folding=True, trace_model=False):
model.eval()
# We have to shift to pytorch's fake quant ops before exporting the model to ONNX
quant_nn.TensorQuantizer.use_fb_fake_quant = True
# Export ONNX for multiple batch sizes
print("Creating ONNX file: " + onnx_filename)
dummy_input = torch.randn(batch_onnx, 3, val_crop_size, val_crop_size, device="cuda")
try:
print("Exporting ONNX model with input {} to {} with opset {}!".format(dummy_input.shape, onnx_filename, opset_version))
model_tmp = model
if isinstance(model, torch.nn.DataParallel) or isinstance(model, torch.nn.parallel.DistributedDataParallel):
# '.module' is necessary here because model is wrapped in torch.nn.DataParallel
model_tmp = model.module
if trace_model:
model_tmp = torch.jit.trace(model_tmp, dummy_input)
torch.onnx.export(
model_tmp, dummy_input, onnx_filename,
verbose=verbose,
opset_version=opset_version,
do_constant_folding=do_constant_folding
)
except ValueError:
print("Failed to export to ONNX")
return False
return True
def get_optimizer(args, parameters):
opt_name = args.opt.lower()
if opt_name.startswith("sgd"):
optimizer = torch.optim.SGD(
parameters,
lr=args.sparse_lr,
momentum=args.sparse_momentum,
weight_decay=args.sparse_weight_decay
)
elif opt_name == "rmsprop":
optimizer = torch.optim.RMSprop(
parameters,
lr=args.sparse_lr,
momentum=args.sparse_momentum,
weight_decay=args.sparse_weight_decay,
eps=0.0316,
alpha=0.9
)
elif opt_name == "adamw":
optimizer = torch.optim.AdamW(
parameters,
lr=args.sparse_lr,
weight_decay=args.sparse_weight_decay
)
else:
raise RuntimeError(f"Invalid optimizer {args.opt}. Only SGD, RMSprop and AdamW are supported.")
return optimizer
def get_lr_scheduler(args, optimizer):
if args.lr_scheduler == "multistep":
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60, 80], gamma=args.lr_gamma)
elif args.lr_scheduler == "constant":
lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer, factor=args.lr_warmup_decay, total_iters=args.sparse_epoch
)
elif args.lr_scheduler == "step":
main_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=args.lr_step_size,
gamma=args.lr_gamma
)
if args.lr_warmup_epochs > 0:
warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer, factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
)
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer,
schedulers=[warmup_lr_scheduler, main_lr_scheduler],
milestones=[args.lr_warmup_epochs]
)
else:
lr_scheduler = main_lr_scheduler
else:
raise ("LR Scheduler {} not supported!".format(args.lr_scheduler))
return lr_scheduler
# ======== Calibration ========
# Source: https://github.com/NVIDIA/TensorRT/blob/master/tools/pytorch-quantization/examples/calibrate_quant_resnet50.ipynb
def collect_stats(model, data_loader, num_batches):
"""Feed data to the network and collect statistic"""
# Enable calibrators
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
progress_bar = tqdm.tqdm(total=len(data_loader), leave=True, desc='Evaluation Progress')
for i, (image, _) in enumerate(data_loader):
model(image.to(torch.device("cuda:0"))) # .cuda())
progress_bar.update()
if i >= num_batches:
break
progress_bar.update()
# Disable calibrators
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.enable_quant()
module.disable_calib()
else:
module.enable()
def compute_amax(model, **kwargs):
"""Load calib result"""
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax(strict=False)
else:
module.load_calib_amax(strict=False, **kwargs)
model.cuda()
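# --- Hedged usage sketch (not part of the original file) ---
# Calibration flow as wired in the QAT script (step2_2): feed a few batches through
# the quantized model to collect statistics, then load the amax values.
#
#   with torch.no_grad():
#       collect_stats(model, data_loader_calib, num_batches=len(data_loader_calib))
#       compute_amax(model, method="entropy")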
|
DL4AGX-master
|
SparsityINT8/train_utils.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import argparse
import numpy as np
import torch
from train_utils import data_loading
import tensorrt as trt
import pycuda.driver as cuda
cuda.init()
import sys
sys.path.append("./vision/references/classification/")
try:
import utils as utils_vision
except ImportError:
print("Error importing pytorch's vision repository!")
TRT_DYNAMIC_DIM = -1
class HostDeviceMem(object):
"""Simple helper data class to store Host and Device memory."""
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine: trt.ICudaEngine, batch_size: int) -> [list, list, list]:
"""
Function to allocate buffers and bindings for TensorRT inference.
Args:
engine (trt.ICudaEngine):
batch_size (int): batch size to be used during inference.
Returns:
inputs (List): list of input buffers.
outputs (List): list of output buffers.
dbindings (List): list of device bindings.
"""
inputs = []
outputs = []
dbindings = []
for binding in engine:
binding_shape = engine.get_binding_shape(binding)
if binding_shape[0] == TRT_DYNAMIC_DIM: # dynamic shape
size = batch_size * abs(trt.volume(binding_shape))
else:
size = abs(trt.volume(binding_shape))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings
dbindings.append(int(device_mem))
# Append to the appropriate list (input/output)
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, dbindings
def infer(
engine_path: str,
val_batches: torch.utils.data.DataLoader,
batch_size: int = 8,
log_file: str = "engine_accuracy.log"
) -> None:
"""
Performs inference in TensorRT engine.
Args:
engine_path (str): path to the TensorRT engine.
val_batches (torch.utils.data.DataLoader): validation dataset (batches).
batch_size (int): batch size used for inference and dataset batch splitting.
log_file (str): filename to save logs.
Raises:
RuntimeError: raised when loading images in the host fails.
"""
def override_shape(shape: tuple) -> tuple:
"""Overrides batch dimension if dynamic."""
if TRT_DYNAMIC_DIM in shape:
shape = tuple(
[batch_size if dim == TRT_DYNAMIC_DIM else dim for dim in shape]
)
return shape
# Open engine as runtime
with open(engine_path, "rb") as f, trt.Runtime(
trt.Logger(trt.Logger.ERROR)
) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
ctx = cuda.Context.attach()
# Allocate buffers and create a CUDA stream.
inputs, outputs, dbindings = allocate_buffers(engine, batch_size)
ctx.detach()
# Initiate test_accuracy
metric_logger = utils_vision.MetricLogger(delimiter=" ")
# Contexts are used to perform inference.
with engine.create_execution_context() as context:
# Resolves dynamic shapes in the context
for binding in engine:
binding_idx = engine.get_binding_index(binding)
binding_shape = engine.get_binding_shape(binding_idx)
if engine.binding_is_input(binding_idx):
binding_shape = override_shape(binding_shape)
context.set_binding_shape(binding_idx, binding_shape)
input_shape = binding_shape
# Loop over number of steps to evaluate entire validation dataset
for step, example in enumerate(val_batches):
images, labels = example
if step % 100 == 0 and step != 0:
print(
"Evaluating batch {}: {:.4f}, {:.4f}".format(
step, metric_logger.acc1.global_avg, metric_logger.acc5.global_avg
)
)
try:
# Load images in Host (pad, flatten, and copy to page-locked buffer in Host)
images_padded = images.numpy()
labels_padded = labels
if images.shape[0] != batch_size:
# Pad images tensor so it's in the shape that pagelocked_buffer is expecting
pad_size = batch_size - images.shape[0]
padding = [images_padded[0] for _ in range(pad_size)]
images_padded = np.concatenate((images_padded, padding), axis=0)
padding_label = [labels[0] for _ in range(pad_size)]
labels_padded = np.concatenate((labels_padded, padding_label), axis=0)
data = images_padded.astype(np.float32).ravel()
pagelocked_buffer = inputs[0].host
np.copyto(pagelocked_buffer, data)
except RuntimeError:
raise RuntimeError(
"Failed to load images in Host at step {}".format(step)
)
inp = inputs[0]
# Transfer input data from Host to Device (GPU)
cuda.memcpy_htod(inp.device, inp.host)
# Run inference
context.execute_v2(dbindings)
# Transfer predictions back to Host from GPU
out = outputs[0]
cuda.memcpy_dtoh(out.host, out.device)
# Split 1-D output of length N*labels into 2-D array of (N, labels)
batch_outs = np.array(np.split(np.array(out.host), batch_size))
# Update test accuracy
acc1, acc5 = utils_vision.accuracy(torch.tensor(batch_outs), torch.tensor(labels_padded), topk=(1, 5))
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
# Print final accuracy and save to log file
print("\n======================================\n")
result_str = "Top-1,5 accuracy: {:.4f}, {:.4f}\n".format(
metric_logger.acc1.global_avg, metric_logger.acc5.global_avg
)
print(result_str)
# Save logs to file
results_dir = "/".join(engine_path.split("/")[:-1])
with open(os.path.join(results_dir, log_file), "w") as log_file:
log_file.write(result_str)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run inference on TensorRT engines for Imagenet-based Classification models.")
parser.add_argument("-e", "--engine", type=str, default="", help="Path to TensorRT engine")
parser.add_argument("-d", "--data_dir", default="/media/Data/imagenet_data", type=str,
help="Path to directory of input images (val data).")
parser.add_argument("-b", "--batch_size", default=1, type=int,
help="Number of inputs to send in parallel (up to max batch size of engine).")
parser.add_argument("--log_file", type=str, default="engine_accuracy.log", help="Filename to save logs.")
parser.add_argument('--val_data_size', type=int, default=None,
help='Indicates how much validation data should be used for accuracy eval. '
'If None, use all. Otherwise, use a subset.')
# Dataloader arguments from 'vision' repo
parser.add_argument("--cache-dataset", dest="cache_dataset", action="store_true",
help="Cache the datasets for quicker initialization. It also serializes the transforms")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Only test the model")
parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
parser.add_argument("--ra-magnitude", default=9, type=int, help="magnitude of auto augment policy")
parser.add_argument("--augmix-severity", default=3, type=int, help="severity of augmix policy")
parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
parser.add_argument("--interpolation", default="bilinear", type=str,
help="the interpolation method (default: bilinear)")
parser.add_argument("--val-resize-size", default=256, type=int,
help="the resize size used for validation (default: 256)")
parser.add_argument("--val-crop-size", default=224, type=int,
help="the central crop size used for validation (default: 224)")
parser.add_argument("--train-crop-size", default=224, type=int,
help="the random crop size used for training (default: 224)")
parser.add_argument("--ra-sampler", action="store_true", help="whether to use Repeated Augmentation in training")
parser.add_argument("--ra-reps", default=3, type=int,
help="number of repetitions for Repeated Augmentation (default: 3)")
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
args = parser.parse_args()
utils_vision.init_distributed_mode(args)
# distributed = args.world_size > 1
if args.distributed:
print("Running distributed script with world size of {}".format(args.world_size))
else:
print("Running script in non-distributed manner!")
# Load the test data and pre-process input
print("---------- Loading data ----------")
_, _, val_batches = data_loading(
args.data_dir, args.batch_size, args,
train_data_size=1, test_data_size=1, val_data_size=args.val_data_size
)
# Perform inference
if args.engine:
infer(args.engine, val_batches, batch_size=args.batch_size, log_file=args.log_file)
else:
raise Exception("Please indicate a TRT engine via --engine.")
|
DL4AGX-master
|
SparsityINT8/infer_engine.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses the Calibrator API provided by Polygraphy to calibrate an ONNX model via PTQ and build a
quantized TensorRT engine that runs in INT8 precision. This script also allows for sparse weights to be used.
Calibrator:
- Function: https://github.com/NVIDIA/TensorRT/blob/8e756f163f83d54389c7ff82235e57a518f6eb03/tools/Polygraphy/polygraphy/backend/trt/calibrator.py#L29
- Calibrator options: https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/infer/Int8/pyInt8.html
"""
import os
from argparse import ArgumentParser
from train_utils import data_loading
import sys
sys.path.append("./vision/references/classification/")
try:
import utils as utils_vision
except ImportError:
print("Error importing pytorch's vision repository!")
from infer_engine import infer
from polygraphy.backend.trt import Calibrator, CreateConfig, EngineFromNetwork, NetworkFromOnnxPath, \
TrtRunner, SaveEngine
import onnx
import onnx_graphsurgeon as gs
from polygraphy.logger import G_LOGGER
import tensorrt as trt
ARGPARSER = ArgumentParser('This script calibrates an ONNX model and generates a calibration cache and a quantized TRT engine for deployment.')
ARGPARSER.add_argument('--onnx_path', type=str, default="./model.onnx")
ARGPARSER.add_argument('--output_dir', '-o', type=str, default='./converted',
help='Output directory to save the ONNX file with appropriate batch size.')
ARGPARSER.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size for calibration')
ARGPARSER.add_argument('--calibrator_type', '-c', type=str, default='entropy',
help='Options={entropy (trt.IInt8EntropyCalibrator2), minmax (trt.IInt8MinMaxCalibrator)}')
ARGPARSER.add_argument('--onnx_input_name', type=str, default="input.1", help='Input tensor name in ONNX file.')
ARGPARSER.add_argument("--is_dense_calibration", dest="is_dense_calibration", action="store_true",
help="True if we should activate Dense QAT training instead of Sparse.")
# Dataloader
ARGPARSER.add_argument('--data_dir', '-d', type=str, default='/media/Data/imagenet_data',
help='Directory containing the tfrecords data.')
ARGPARSER.add_argument("--test_data_size", type=int, default=None,
help='Indicates how much validation data should be used for accuracy evaluation.'
'If None, use all. Otherwise, use a subset.')
ARGPARSER.add_argument('--calib_data_size', type=int, default=None,
help='Indicates how much validation data should be used for calibration.'
'If None, use all. Otherwise, use a subset.')
# Dataloader arguments from 'vision' repo
ARGPARSER.add_argument("--cache-dataset", dest="cache_dataset", action="store_true",
help="Cache the datasets for quicker initialization. It also serializes the transforms",)
ARGPARSER.add_argument("--test-only", dest="test_only", action="store_true", help="Only test the model")
ARGPARSER.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
ARGPARSER.add_argument("--ra-magnitude", default=9, type=int, help="magnitude of auto augment policy")
ARGPARSER.add_argument("--augmix-severity", default=3, type=int, help="severity of augmix policy")
ARGPARSER.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
ARGPARSER.add_argument("--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)")
ARGPARSER.add_argument("--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)")
ARGPARSER.add_argument("--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)")
ARGPARSER.add_argument("--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)")
ARGPARSER.add_argument("--ra-sampler", action="store_true", help="whether to use Repeated Augmentation in training")
ARGPARSER.add_argument("--ra-reps", default=3, type=int, help="number of repetitions for Repeated Augmentation (default: 3)")
ARGPARSER.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
ARGPARSER.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
# The data loader argument to `Calibrator` can be any iterable or generator that yields `feed_dict`s.
# A `feed_dict` is just a mapping of input names to corresponding inputs.
def calib_data(val_batches, input_name):
for iteration, (images, labels) in enumerate(val_batches):
yield {input_name: images.numpy()}
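# Hedged illustration (not part of the original file): each yielded feed_dict maps the
# ONNX input tensor name to a NumPy batch, e.g. with the default input name and crop size:
#   next(calib_data(data_loader_calib, "input.1"))  # -> {"input.1": array of shape (BS, 3, 224, 224)}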
def main(args):
# ======== Ensure ONNX file and output dir exist =========
assert os.path.exists(args.onnx_path), f"ONNX model {args.onnx_path} does not exist!"
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# ======== Create ONNX with batch size BS =========
new_onnx_filename = args.onnx_path.replace(".onnx", f"_bs{args.batch_size}.onnx").split("/")[-1]
new_onnx_path = os.path.join(args.output_dir, new_onnx_filename)
graph = gs.import_onnx(onnx.load(args.onnx_path))
input_shape = graph.inputs[0].shape
input_shape[0] = args.batch_size
import subprocess as sp
sp.run(["polygraphy", "surgeon", "sanitize", args.onnx_path, "-o", new_onnx_path, "--override-input-shapes",
"{}:{}".format(graph.inputs[0].name, str(input_shape))])
# ======== Load data ========
print("---------- Loading data ----------")
_, data_loader_test, data_loader_calib = data_loading(
args.data_dir, args.batch_size, args,
test_data_size=args.test_data_size, val_data_size=args.calib_data_size
)
# ======== TensorRT Deployment ========
# Set Calibrator
calibration_cache_path = new_onnx_path.replace(".onnx", "_calibration.cache")
print("CALIBRATOR = {}".format(args.calibrator_type))
if args.calibrator_type == "entropy":
# This is the default calibrator (BaseClass=trt.IInt8EntropyCalibrator2)
calibrator = Calibrator(data_loader=calib_data(data_loader_calib, args.onnx_input_name),
cache=calibration_cache_path)
elif args.calibrator_type == "minmax":
calibrator = Calibrator(data_loader=calib_data(data_loader_calib, args.onnx_input_name),
cache=calibration_cache_path, BaseClass=trt.IInt8MinMaxCalibrator)
else:
raise("Calibrator of type {} not supported!".format(args.calibrator_type))
# Build engine from ONNX model by enabling INT8 and sparsity weights, and providing the calibrator.
print("Sparse: {}".format(not args.is_dense_calibration))
build_engine = EngineFromNetwork(
NetworkFromOnnxPath(new_onnx_path),
config=CreateConfig(
int8=True,
calibrator=calibrator,
sparse_weights=not args.is_dense_calibration
)
)
# Trigger engine saving
engine_path = new_onnx_path.replace(".onnx", "_ptq.engine")
build_engine = SaveEngine(build_engine, path=engine_path)
# Calibrate engine
# When we activate our runner, it will calibrate and build the engine. If we want to
# see the logging output from TensorRT, we can temporarily increase logging verbosity:
with G_LOGGER.verbosity(G_LOGGER.VERBOSE), TrtRunner(build_engine) as runner:
print("Calibrated engine!")
# Infer PTQ engine and evaluate its accuracy
log_file = engine_path.split("/")[-1].replace(".engine", "_accuracy.txt")
infer(engine_path, data_loader_test, batch_size=args.batch_size, log_file=log_file)
print("Inference succeeded for model {}!".format(args.onnx_path))
if __name__ == "__main__":
args = ARGPARSER.parse_args()
utils_vision.init_distributed_mode(args)
if args.distributed:
print("Running distributed script with world size of {}".format(args.world_size))
else:
print("Running script in non-distributed manner!")
main(args)
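# --- Hedged usage note (not part of the original file) ---
# Example invocation, using only arguments defined in ARGPARSER above (paths are placeholders):
#   python step2_1_ptq_calibration.py --onnx_path ./weights_qat/resnet34/sparse-finetuned_best.onnx \
#       --data_dir /path/to/imagenet -b 128 -c entropy --calib_data_size 512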
|
DL4AGX-master
|
SparsityINT8/step2_1_ptq_calibration.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import torch
import torch.utils.data
from torch import nn
import json
# from torchvision import models
sys.path.insert(0, "./vision")
try:
# This repository is needed to add QDQ nodes in residual branches
# Modified the model definition as instructed in the pytorch-quantization toolkit:
# https://github.com/NVIDIA/TensorRT/blob/main/tools/pytorch-quantization/examples/torchvision/models/classification/resnet.py#L154-L155
from torchvision import models
except ImportError:
print("Error importing pytorch's torchvision repository!")
from train_utils import train_loop, evaluate, data_loading, eval_baseline, prune_trained_model_custom, collect_stats, compute_amax
sys.path.append("./vision/references/classification/")
try:
from train import load_data
import utils as utils_vision
except ImportError:
print("Error importing pytorch's vision repository!")
# QAT Toolkit
from pytorch_quantization import quant_modules
def train_qat(args, criterion, data_loader, data_loader_test, data_loader_val, data_loader_calib):
# Enable model quantization: relevant layers will be quantized except residual connections
quant_modules.initialize()
try:
# Instantiate model and quantize residual branches (quantize=True)
model_qat = models.__dict__[args.model_name](pretrained=True, quantize=True)
except NotImplementedError:
print("Model definition doesn't accept `quantize` parameter. Instantiating model without quantizing residual connections.")
model_qat = models.__dict__[args.model_name](pretrained=True)
# quant_modules.deactivate()
if args.distributed:
model_qat = torch.nn.parallel.DistributedDataParallel(model_qat, device_ids=[args.gpu])
else:
model_qat = torch.nn.DataParallel(model_qat.cuda(args.device))
model_qat_without_ddp = model_qat.module
# Set optimizer
optimizer = torch.optim.SGD(model_qat.parameters(), lr=args.qat_lr)
if not args.is_dense_training:
print("Training Sparse model!")
prune_trained_model_custom(model_qat, optimizer, compute_sparse_masks=False)
sparse_ckpt_path = os.path.join(args.output_dir, args.sparse_ckpt)
if os.path.exists(sparse_ckpt_path):
print("> Loading Sparse ckpt from {}!".format(sparse_ckpt_path))
try:
load_dict = torch.load(sparse_ckpt_path)
except Exception:
print("Loading checkpoint from distributed model. Mapping GPU location to local single-GPU setting.")
load_dict = torch.load(sparse_ckpt_path, map_location="cuda:{}".format(args.device))
try:
model_qat.load_state_dict(load_dict["model_state_dict"]) # , strict=False)
except Exception:
model_qat_without_ddp.load_state_dict(load_dict["model_state_dict"], strict=False)
qat_ckpt_path = os.path.join(args.output_dir, args.qat_ckpt)
if os.path.exists(qat_ckpt_path) and not args.rewrite_qat_weights:
print("> Loading QAT ckpt from {}!".format(qat_ckpt_path))
load_dict = torch.load(qat_ckpt_path)
model_qat_without_ddp.load_state_dict(load_dict["model_state_dict"])
else:
# ======== Model calibration ========
print("> Calibration started...")
calibrated_ckpt = os.path.join(args.output_dir, "calibrated_ckpt.pth")
if os.path.exists(calibrated_ckpt):
checkpoint = torch.load(calibrated_ckpt, map_location="cuda:{}".format(args.device))
model_qat_without_ddp.load_state_dict(checkpoint, strict=False)
else:
collect_stats(
model_qat_without_ddp,
data_loader_calib,
num_batches=len(data_loader_calib),
)
amax_computation_method = "entropy"
compute_amax(model_qat_without_ddp, method=amax_computation_method)
# Save the calibrated model
torch.save(model_qat_without_ddp.state_dict(), calibrated_ckpt)
# ======== QAT fine-tuning ========
print("> Fine-tuning started...")
# Set LR scheduler
if args.lr_scheduler == "step":
main_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=args.lr_step_size,
gamma=args.lr_gamma
)
elif args.lr_scheduler == "multistep":
main_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=args.lr_milestones, gamma=args.lr_gamma
)
else:
            raise ValueError("LR Scheduler {} not supported!".format(args.lr_scheduler))
if args.lr_warmup_epochs > 0:
if args.lr_warmup_method == "linear":
warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
)
            elif args.lr_warmup_method == "constant":
                warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
                    optimizer, factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
                )
            else:
                raise ValueError("LR warmup method {} not supported!".format(args.lr_warmup_method))
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer,
schedulers=[warmup_lr_scheduler, main_lr_scheduler],
milestones=[args.lr_warmup_epochs]
)
else:
lr_scheduler = main_lr_scheduler
# Training loop
train_loop(model_qat, model_qat_without_ddp, criterion, optimizer, data_loader, data_loader_val,
torch.device("cuda:{}".format(args.device)),
lr_scheduler=lr_scheduler, epoch=args.qat_epoch, args=args,
summary_writer_dir=os.path.join(args.output_dir, "logs", "quant"),
save_ckpt_path=qat_ckpt_path, opset=13,
steps_per_epoch=args.qat_steps_per_epoch)
# Load BEST model
if os.path.exists(qat_ckpt_path):
print("> Loading QAT ckpt from {}!".format(qat_ckpt_path))
load_dict = torch.load(qat_ckpt_path)
model_qat_without_ddp.load_state_dict(load_dict["model_state_dict"])
# Evaluate model
acc1, acc5 = None, None
if args.eval_qat:
with torch.no_grad():
acc1, acc5, _ = evaluate(model_qat_without_ddp, criterion, data_loader_test, device="cuda", print_freq=args.print_freq)
return model_qat, acc1, acc5
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This script fine-tunes a sparse or dense model via QAT.")
parser.add_argument("--model_name", type=str, default="resnet34",
help="See more model names at https://pytorch.org/vision/stable/models.html and "
" https://github.com/pytorch/vision/tree/main/torchvision/models")
parser.add_argument("--data_dir", type=str, default="/media/Data/imagenet_data", help="Path to ImageNet dataset.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument("--train_data_size", type=int, default=None,
help="If None, take the entire train data. Otherwise, take subset.")
parser.add_argument("--test_data_size", type=int, default=None,
help="Dataset to be used for the final model evaluation (to obtain accuracy)."
" If None, take the entire val data. Otherwise, take subset.")
parser.add_argument("--calib_data_size", type=int, default=68,
help="Dataset to be used for model calibration."
" If None, take the entire val data. Otherwise, take subset.")
parser.add_argument("--val_data_size", type=int, default=None,
help="Dataset to be used during training to check for best checkpoint."
" If None, take the entire val data. Otherwise, take subset."
" Test and Val data are obtained from the same dataset. The only difference is the number"
" of samples. The motivation behind this is that a small val data should be enough to "
" check for the best checkpoint while removing the time bottleneck of the validation"
" step during training. After training is done, the model can then be evaluated on the "
" complete val data.")
parser.add_argument("--device", type=int, default=0, help="GPU number.")
parser.add_argument("--output_dir", type=str, default="./weights_qat",
help="Path to save outputs (log files, checkpoint, ...).")
# Sparse params
parser.add_argument("--sparse_ckpt", type=str, default="sparse-finetuned_best.pth",
help="Sparse checkpoint filename (must be inside `output_dir`). If checkpoint exists, simply "
"load it. Otherwise, perform Sparse fine-tuning and save checkpoint.")
# QAT params
parser.add_argument("--qat_epoch", type=int, default=10,
help="Number of epochs to fine-tune QAT model.")
parser.add_argument("--qat_steps_per_epoch", type=int, default=500,
help="Steps per epoch: number of steps = train_data_size/batch_size."
" If None, use the entire train data in each epoch. Otherwise, use a subset."
" Note that setting train_data_size=500*bs is equivalent to setting train_data_size=None"
" and steps_per_epoch=500. The only difference is that by setting train_data_size "
" directly, it will update the train_loop verbose print.")
parser.add_argument("--qat_lr", type=float, default=0.001, help="Base learning rate for QAT workflow.")
parser.add_argument("--qat_ckpt", type=str, default="quant-finetuned_best.pth",
help="QAT checkpoint filename (must be inside `output_dir` and of type .pth)."
" If checkpoint exists, simply load it. "
" Otherwise, perform QAT fine-tuning and save checkpoint.")
# LR scheduler params
parser.add_argument("--lr_scheduler", default="multistep", type=str, help="LR Scheduler, options={multistep, step}")
parser.add_argument("--lr_warmup_method", default="constant", type=str, help="Warmup method, options={constant, linear}")
parser.add_argument("--lr_warmup_epochs", default=1, type=int, help="the number of epochs to warmup (default: 0)")
parser.add_argument("--lr_warmup_decay", default=0.1, type=float, help="the decay for lr") # 0.01
parser.add_argument("--lr_step_size", default=4, type=int,
help="Decrease lr every step-size epochs. Needed for StepLR.")
parser.add_argument("--lr_gamma", default=0.1, type=float,
help="Decrease lr by a factor of lr-gamma. Needed for both Step and MultiStepR.")
parser.add_argument('--lr_milestones', nargs='+', type=int, default=[2, 7],
                    help='Milestones for MultiStepLR scheduler. Use like: --lr_milestones 1 2 7')
# torchvision args
parser.add_argument("--print_freq", default=20, type=int, help="print frequency")
# distributed training parameters
parser.add_argument("--world_size", default=1, type=int, help="number of distributed processes")
parser.add_argument("--dist_url", default="env://", type=str, help="url used to set up distributed training")
parser.add_argument("--model_ema", action="store_true", help="enable tracking Exponential Moving Average of model parameters")
parser.add_argument("--model_ema_steps", type=int, default=32,
help="the number of iterations that controls how often to update the EMA model (default: 32)")
parser.add_argument("--model_ema_decay", type=float, default=0.99998,
help="decay factor for Exponential Moving Average of model parameters (default: 0.99998)")
parser.add_argument("--clip_grad_norm", default=None, type=float, help="the maximum gradient norm (default None)")
# Dataloader arguments from 'vision' repo
parser.add_argument("--cache-dataset", dest="cache_dataset", action="store_true",
help="Cache the datasets for quicker initialization. It also serializes the transforms")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Only test the model")
parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
parser.add_argument("--ra-magnitude", default=9, type=int, help="magnitude of auto augment policy")
parser.add_argument("--augmix-severity", default=3, type=int, help="severity of augmix policy")
parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
parser.add_argument("--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)")
parser.add_argument("--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)")
parser.add_argument("--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)")
parser.add_argument("--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)")
parser.add_argument("--ra-sampler", action="store_true", help="whether to use Repeated Augmentation in training")
parser.add_argument("--ra-reps", default=3, type=int, help="number of repetitions for Repeated Augmentation (default: 3)")
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
# Eval params
parser.add_argument("--save_baseline", dest="save_baseline", action="store_true", help="Save baseline model.")
parser.add_argument("--eval_baseline", dest="eval_baseline", action="store_true", help="Evaluate baseline model.")
parser.add_argument("--eval_qat", dest="eval_qat", action="store_true", help="Evaluate QAT model.")
parser.add_argument("--rewrite_qat_weights", dest="rewrite_qat_weights", action="store_true", help="Rewrite QAT checkpoint if it exists.")
parser.add_argument("--is_dense_training", dest="is_dense_training", action="store_true",
help="True if we should activate Dense QAT training instead of Sparse.")
args = parser.parse_args()
utils_vision.init_distributed_mode(args)
if args.distributed:
print("Running distributed script with world size of {}".format(args.world_size))
else:
print("Running script in non-distributed manner!")
args.output_dir = os.path.join(args.output_dir, args.model_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Data loading
print("---------- Loading data ----------")
_, _, data_loader_calib = data_loading(
args.data_dir, args.batch_size, args,
val_data_size=args.calib_data_size
)
data_loader, data_loader_test, data_loader_val = data_loading(
args.data_dir, args.batch_size, args,
args.train_data_size, args.test_data_size, args.val_data_size
)
# Set loss criteria
criterion = nn.CrossEntropyLoss()
# ############# BASELINE ##################
assert hasattr(models, args.model_name), "Model {} not supported!".format(args.model_name)
model, acc1, acc5 = eval_baseline(args, criterion, data_loader_test)
# ############# QAT #######################
if args.is_dense_training:
print("---------- Fine-tuning Dense as QAT model for {} epochs ----------".format(args.qat_epoch))
else:
print("---------- Fine-tuning Sparse as QAT model for {} epochs ----------".format(args.qat_epoch))
model_qat, acc1_qat, acc5_qat = train_qat(
args, criterion, data_loader, data_loader_test, data_loader_val, data_loader_calib
)
# ############ Write logs to 'out.log' and Save args into 'args.json' ###############
results_str = " ------------ Evaluation Results ------------\n"
if args.eval_baseline:
results_str += "Baseline: Top-1 {:.3f}%, Top-5: {:.3f}%\n".format(acc1, acc5)
if args.eval_qat:
results_str += "QAT: Top-1 {:.3f}%, Top-5 {:.3f}%\n".format(acc1_qat, acc5_qat)
results_str += " ------------ CMD -------------------\n"
results_str += '\n'.join(sys.argv[1:])
with open(os.path.join(args.output_dir, "out_qat.log"), 'w') as f:
f.write(results_str)
with open(os.path.join(args.output_dir, "args_qat.json"), 'w') as f:
json.dump(args.__dict__, f, indent=2)
print("End!")
|
DL4AGX-master
|
SparsityINT8/step2_2_qat_training.py
|
# _base_ = [
# '../_base_/models/dest_simplemit-b0.py',
# '../_base_/datasets/cityscapes_1024x1024.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_160k.py'
# ]
_base_ = [
'../_base_/models/dest_simpatt-b0.py',
'../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
evaluation = dict(interval=4000, metric='mIoU')
# data = dict(samples_per_gpu=1)
checkpoint_config = dict(by_epoch=False, interval=20000)
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
embed_dims = [32, 64, 160, 256]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024,1024), #doesn't matter
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=embed_dims,
in_index=[0, 1, 2, 3],
channels=256,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
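# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original config): mmsegmentation configs
# like this one are normally consumed through the framework's entry points
# rather than imported directly. The config path below is an assumption about
# where this file sits inside a standard mmsegmentation checkout.
#
#   # single-GPU training
#   python tools/train.py configs/dest/dest_simpatt-b0_1024x1024_160k_cityscapes.py
#
#   # multi-GPU training on 8 GPUs
#   bash tools/dist_train.sh configs/dest/dest_simpatt-b0_1024x1024_160k_cityscapes.py 8
#
#   # evaluation of a trained checkpoint
#   python tools/test.py configs/dest/dest_simpatt-b0_1024x1024_160k_cityscapes.py \
#       work_dirs/dest_b0/latest.pth --eval mIoU
# ---------------------------------------------------------------------------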
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b0_1024x1024_160k_cityscapes.py
|
# optimizer
optimizer = dict(type='AdamW', lr=0.0002, weight_decay=0.0001)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=0.0, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=4000)
evaluation = dict(interval=4000, metric='mIoU')
|
DL4AGX-master
|
DEST/semseg/schedule_160k_adamw.py
|
# _base_ = [
# '../_base_/models/dest_simplemit-b0.py',
# '../_base_/datasets/cityscapes_1024x1024.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_160k.py'
# ]
_base_ = [
'../_base_/models/dest_simpatt-b0.py',
'../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
evaluation = dict(interval=1000, metric='mIoU')
data = dict(samples_per_gpu=4)
checkpoint_config = dict(by_epoch=False, interval=20000)
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
embed_dims = [64, 128, 250, 320]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024, 1024),
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[3, 6, 8, 3],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=embed_dims,
in_index=[0, 1, 2, 3],
channels=512, #decoder param
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b3_1024x1024_160k_cityscapes.py
|
# _base_ = [
# '../_base_/models/dest_simplemit-b0.py',
# '../_base_/datasets/cityscapes_1024x1024.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_160k.py'
# ]
_base_ = [
'../_base_/models/dest_simpatt-b0.py',
'../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
evaluation = dict(interval=1000, metric='mIoU')
data = dict(samples_per_gpu=3)
checkpoint_config = dict(by_epoch=False, interval=20000)
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
embed_dims = [64, 128, 250, 320]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024, 1024),
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[3, 10, 16, 5],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=embed_dims,
in_index=[0, 1, 2, 3],
channels=512, #decoder param
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b5_1024x1024_160k_cityscapes.py
|
# model settings
embed_dims = [32, 64, 160, 256]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024, 1024),
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[4, 4, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=[32, 64, 160, 256],
in_index=[0, 1, 2, 3],
channels=256,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b0.py
|
import torch
import torch.nn as nn
from mmseg.models.builder import HEADS
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
@HEADS.register_module()
class DestHead(BaseDecodeHead):
def __init__(self, segm=True, **kwargs):
super().__init__(input_transform='multiple_select', **kwargs)
num_inputs = len(self.in_channels)
        assert num_inputs == len(self.in_index)
self.fuse_conv1 = nn.Sequential(nn.Conv2d(self.in_channels[-1], self.in_channels[-1], 1), nn.ReLU(inplace=True))
self.fuse_conv2 = nn.Sequential(nn.Conv2d(self.in_channels[-2], self.in_channels[-2], 1), nn.ReLU(inplace=True))
self.fuse_conv3 = nn.Sequential(nn.Conv2d(self.in_channels[-3], self.in_channels[-3], 1), nn.ReLU(inplace=True))
self.fuse_conv4 = nn.Sequential(nn.Conv2d(self.in_channels[-4], self.in_channels[-4], 1), nn.ReLU(inplace=True))
self.upsample = nn.ModuleList([nn.Sequential(nn.Upsample(scale_factor=2, mode='nearest'))]*len(self.in_channels))
self.fused_1 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(self.in_channels[-1], self.in_channels[-1], 3), nn.ReLU(inplace=True))
self.fused_2 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(self.in_channels[-2] + self.in_channels[-1], self.in_channels[-2], 3), nn.ReLU(inplace=True))
self.fused_3 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(self.in_channels[-3] + self.in_channels[-2], self.in_channels[-3], 3), nn.ReLU(inplace=True))
self.fused_4 = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(self.in_channels[-4] + self.in_channels[-3], self.in_channels[-4], 3), nn.ReLU(inplace=True))
self.conv_seg = nn.Conv2d(self.in_channels[-4], self.num_classes, kernel_size=1)
def dest_decoder(self, lay_out):
lay_out = lay_out[0]
fused_1 = self.fuse_conv1(lay_out[-1])
fused_1 = self.upsample[-1](fused_1)
fused_1 = self.fused_1(fused_1)
fused_2 = torch.cat([fused_1, self.fuse_conv2(lay_out[-2])], 1)
fused_2 = self.upsample[-2](fused_2)
fused_2 = self.fused_2(fused_2)
fused_3 = torch.cat([fused_2, self.fuse_conv3(lay_out[-3])], 1)
fused_3 = self.upsample[-3](fused_3)
fused_3 = self.fused_3(fused_3)
fused_4 = torch.cat([fused_3, self.fuse_conv4(lay_out[-4])], 1)
fused_4 = self.upsample[-4](fused_4)
fused_4 = self.fused_4(fused_4)
return self.conv_seg(fused_4)
def forward(self, x):
return self.dest_decoder(x)
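# ---------------------------------------------------------------------------
# Hedged shape-check sketch (not part of the original file): DestHead expects a
# tuple whose first element is the list of four backbone feature maps at
# strides 4/8/16/32 (matching SimplifiedTransformer's output), and it fuses
# them coarse-to-fine into logits at half the input resolution. The channel
# counts and input size below are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    B, H, W = 1, 256, 256
    chans = [32, 64, 160, 256]
    feats = [torch.randn(B, c, H // s, W // s) for c, s in zip(chans, (4, 8, 16, 32))]
    head = DestHead(in_channels=chans, in_index=[0, 1, 2, 3],
                    channels=256, num_classes=19, align_corners=True)
    logits = head((feats, None))  # forward() unpacks element 0 of the tuple
    print(logits.shape)           # expected: torch.Size([1, 19, 128, 128])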
|
DL4AGX-master
|
DEST/semseg/dest_head.py
|
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
import math
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from mmcv.cnn import build_norm_layer
from mmcv.runner import BaseModule
from ..builder import BACKBONES
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, drop=0., sync_norm=True):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv1d(in_features, hidden_features, 1)
self.dwconv = DWConv(hidden_features)
self.act = nn.ReLU()
self.fc2 = nn.Conv1d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
if sync_norm :
norm_cfg = dict(type='SyncBN', requires_grad=True)
self.norm1_name, norm1 = build_norm_layer(norm_cfg, hidden_features, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, hidden_features, postfix=2)
self.add_module(self.norm1_name, norm1)
self.add_module(self.norm2_name, norm2)
else :
self.norm1 = nn.BatchNorm1d(hidden_features)
self.norm2 = nn.BatchNorm1d(hidden_features)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv1d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x, H, W):
x = self.fc1(x)
x = self.norm1(x)
x = self.dwconv(x, H, W)
x = self.norm2(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention_MaxPool(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1, sync_norm=True):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Conv1d(dim, dim, 1, bias=qkv_bias)
self.k = nn.Conv1d(dim, dim, 1, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv1d(dim, dim, 1)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
if sync_norm :
norm_cfg = dict(type='SyncBN', requires_grad=True)
self.norm1_name, norm1 = build_norm_layer(norm_cfg, dim, postfix=1)
self.add_module(self.norm1_name, norm1)
else :
self.norm1 = nn.BatchNorm1d(dim)
self.apply(self._init_weights)
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv1d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x, H, W):
B, C, N = x.shape
q = self.q(x)
q = q.reshape(B, self.num_heads, C // self.num_heads, N)
q = q.permute(0, 1, 3, 2)
if self.sr_ratio > 1:
x_ = x.reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1)
x_ = self.norm1(x_)
k = self.k(x_).reshape(B, self.num_heads, C // self.num_heads, -1)
else:
k = self.k(x).reshape(B, self.num_heads, C // self.num_heads, -1)
v = torch.mean(x, 2, True).repeat(1, 1, self.num_heads).transpose(-2, -1)
attn = (q @ k) * self.scale
attn, _ = torch.max(attn, -1)
out = (attn.transpose(-2, -1) @ v)
out = out.transpose(-2, -1)
out = self.proj(out)
return out
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.ReLU, norm_layer=nn.LayerNorm, sr_ratio=1, sync_norm=True):
super().__init__()
if sync_norm :
norm_cfg = dict(type='SyncBN', requires_grad=True)
self.norm1_name, norm1 = build_norm_layer(norm_cfg, dim, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, dim, postfix=2)
self.add_module(self.norm1_name, norm1)
self.add_module(self.norm2_name, norm2)
else :
self.norm1 = nn.BatchNorm1d(dim)
self.norm2 = nn.BatchNorm1d(dim)
self.attn = Attention_MaxPool(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class OverlapPatchEmbed(BaseModule):
""" Image to Patch Embedding """
def __init__(self, img_size=(224,224), patch_size=7, stride=4, in_chans=3, embed_dim=768, sync_norm=True):
super().__init__()
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = img_size[0] // patch_size[0] * img_size[1] // patch_size[1]
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.H = (img_size[0] - patch_size[0] + 2 * (patch_size[0] // 2)) / stride + 1
self.W = (img_size[1] - patch_size[1] + 2 * (patch_size[1] // 2)) / stride + 1
self.feat_shape = (int(self.H), int(self.W))
self.N = int(self.feat_shape[0] * self.feat_shape[1])
if sync_norm:
norm_cfg = dict(type='SyncBN', requires_grad=True)
self.norm1_name, norm1 = build_norm_layer(norm_cfg, embed_dim, postfix=1)
self.add_module(self.norm1_name, norm1)
else :
self.norm1 = nn.BatchNorm2d(embed_dim)
self.apply(self._init_weights)
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = self.norm1(x)
x = x.flatten(2)
return x, H, W
@BACKBONES.register_module()
class SimplifiedTransformer(nn.Module):
def __init__(self, img_size=(224,224), patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], sync_norm=True
):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.embed_dims = embed_dims
self.sr_ratios = sr_ratios
self.num_layers = depths
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0], sync_norm=sync_norm)
self.patch_embed2 = OverlapPatchEmbed(img_size=(img_size[0] // 4, img_size[1] // 4), patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1], sync_norm=sync_norm)
self.patch_embed3 = OverlapPatchEmbed(img_size=(img_size[0] // 8, img_size[1] // 8), patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2], sync_norm=sync_norm)
self.patch_embed4 = OverlapPatchEmbed(img_size=(img_size[0] // 16, img_size[1] // 16), patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3], sync_norm=sync_norm)
# encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0], sync_norm=sync_norm)
for i in range(depths[0])])
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1], sync_norm=sync_norm)
for i in range(depths[1])])
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2], sync_norm=sync_norm)
for i in range(depths[2])])
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3], sync_norm=sync_norm)
for i in range(depths[3])])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
outs = []
ref_feat = {'1': [], '2': [], '3': [], '4': [],}
# stage 1
x, H, W = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x = blk(x, H, W)
ref_feat['1'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
# stage 2
x, H, W = self.patch_embed2(x)
for i, blk in enumerate(self.block2):
x = blk(x, H, W)
ref_feat['2'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
# stage 3
x, H, W = self.patch_embed3(x)
for i, blk in enumerate(self.block3):
x = blk(x, H, W)
ref_feat['3'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
# stage 4
x, H, W = self.patch_embed4(x)
for i, blk in enumerate(self.block4):
x = blk(x, H, W)
ref_feat['4'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
return outs, ref_feat
def forward(self, x):
x, ref_feat = self.forward_features(x)
return x, ref_feat
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, C, N = x.shape
x = x.reshape(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2)
return x
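# ---------------------------------------------------------------------------
# Hedged smoke test (not part of the original file): Attention_MaxPool is the
# simplified attention used throughout this backbone. Queries and keys are
# compared as usual, but the scores are max-reduced over the key axis and
# applied to a single mean-pooled value per head, so the output keeps the
# (B, C, N) token layout of the input. Sizes below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    attn = Attention_MaxPool(dim=64, num_heads=2, sr_ratio=1)
    tokens = torch.randn(2, 64, 14 * 14)   # (batch, channels, tokens)
    out = attn(tokens, H=14, W=14)         # H/W only matter when sr_ratio > 1
    print(out.shape)                       # expected: torch.Size([2, 64, 196])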
|
DL4AGX-master
|
DEST/semseg/simplified_attention_mmseg.py
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=500,
dataset=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/train',
ann_dir='gtFine/train',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='leftImg8bit/val',
ann_dir='gtFine/val',
pipeline=test_pipeline))
|
DL4AGX-master
|
DEST/semseg/cityscapes_1024x1024_repeat.py
|
# _base_ = [
# '../_base_/models/dest_simplemit-b0.py',
# '../_base_/datasets/cityscapes_1024x1024.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_160k.py'
# ]
_base_ = [
'../_base_/models/dest_simpatt-b0.py',
'../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
evaluation = dict(interval=1000, metric='mIoU')
data = dict(samples_per_gpu=4)
checkpoint_config = dict(by_epoch=False, interval=20000)
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
embed_dims = [64, 128, 250, 320]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024, 1024),
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[3, 6, 8, 3],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=embed_dims,
in_index=[0, 1, 2, 3],
channels=512, #decoder param
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b4_1024x1024_160k_cityscapes.py
|
# _base_ = [
# '../_base_/models/dest_simplemit-b0.py',
# '../_base_/datasets/cityscapes_1024x1024.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_160k.py'
# ]
_base_ = [
'../_base_/models/dest_simpatt-b0.py',
'../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
evaluation = dict(interval=4000, metric='mIoU')
# data = dict(samples_per_gpu=1)
checkpoint_config = dict(by_epoch=False, interval=20000)
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
embed_dims = [64, 128, 250, 320]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024,1024), #doesn't matter
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[3, 3, 6, 3],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=embed_dims,
in_index=[0, 1, 2, 3],
channels=256,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b2_1024x1024_160k_cityscapes.py
|
# _base_ = [
# '../_base_/models/dest_simplemit-b0.py',
# '../_base_/datasets/cityscapes_1024x1024.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_160k.py'
# ]
_base_ = [
'../_base_/models/dest_simpatt-b0.py',
'../_base_/datasets/cityscapes_1024x1024_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
evaluation = dict(interval=4000, metric='mIoU')
# data = dict(samples_per_gpu=1)
checkpoint_config = dict(by_epoch=False, interval=20000)
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=1.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
embed_dims = [64, 128, 250, 320]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SimplifiedTransformer',
img_size=(1024,1024), #doesn't matter
in_chans=3,
num_classes=19,
embed_dims=embed_dims,
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1]),
decode_head=dict(
type='DestHead',
in_channels=embed_dims,
in_index=[0, 1, 2, 3],
channels=256,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=True,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
# test_cfg=dict(mode='whole'))
test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
|
DL4AGX-master
|
DEST/semseg/dest_simpatt-b1_1024x1024_160k_cityscapes.py
|
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
import torch
from packnet_sfm.models.SfmModel_dest import SfmModel_dest
from packnet_sfm.losses.multiview_photometric_loss import MultiViewPhotometricLoss
from packnet_sfm.models.model_utils import merge_outputs
class SelfSupModel_dest(SfmModel_dest):
"""
Model that inherits a depth and pose network from SfmModel and
includes the photometric loss for self-supervised training.
Parameters
----------
kwargs : dict
Extra parameters
"""
def __init__(self, **kwargs):
# Initializes SfmModel
super().__init__(**kwargs)
# Initializes the photometric loss
self._photometric_loss = MultiViewPhotometricLoss(**kwargs)
@property
def logs(self):
"""Return logs."""
return {
**super().logs,
**self._photometric_loss.logs
}
def self_supervised_loss(self, image, ref_images, inv_depths, poses,
intrinsics, return_logs=False, progress=0.0):
"""
Calculates the self-supervised photometric loss.
Parameters
----------
image : torch.Tensor [B,3,H,W]
Original image
ref_images : list of torch.Tensor [B,3,H,W]
Reference images from context
inv_depths : torch.Tensor [B,1,H,W]
Predicted inverse depth maps from the original image
poses : list of Pose
List containing predicted poses between original and context images
intrinsics : torch.Tensor [B,3,3]
Camera intrinsics
return_logs : bool
True if logs are stored
progress :
Training progress percentage
Returns
-------
output : dict
            Dictionary containing a "loss" scalar and a "metrics" dictionary
"""
return self._photometric_loss(
image, ref_images, inv_depths, intrinsics, intrinsics, poses,
return_logs=return_logs, progress=progress)
def forward(self, batch, return_logs=False, progress=0.0):
"""
Processes a batch.
Parameters
----------
batch : dict
Input batch
return_logs : bool
True if logs are stored
progress :
Training progress percentage
Returns
-------
output : dict
Dictionary containing a "loss" scalar and different metrics and predictions
for logging and downstream usage.
"""
# Calculate predicted depth and pose output
output = super().forward(batch, return_logs=return_logs)
if not self.training:
# If not training, no need for self-supervised loss
return output
else:
# Otherwise, calculate self-supervised loss
self_sup_output = self.self_supervised_loss(
batch['rgb_original'], batch['rgb_context_original'],
output['inv_depths'], output['poses'], batch['intrinsics'],
return_logs=return_logs, progress=progress)
# Return loss and metrics
return {
'loss': self_sup_output['loss'],
**merge_outputs(output, self_sup_output),
}
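# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a single self-supervised
# training step. The batch keys follow the packnet_sfm conventions referenced
# above; the depth/pose network construction is omitted and everything below is
# an assumption for illustration, not a verified API.
#
#   model = SelfSupModel_dest(**model_kwargs)    # wraps a depth net and a pose net
#   model.train()
#   batch = {
#       'rgb': rgb,                              # [B,3,H,W] augmented target frame
#       'rgb_original': rgb_original,            # [B,3,H,W] un-augmented target frame
#       'rgb_context': rgb_context,              # list of [B,3,H,W] context frames
#       'rgb_context_original': rgb_context_og,  # same context frames, un-augmented
#       'intrinsics': intrinsics,                # [B,3,3] camera intrinsics
#   }
#   output = model(batch)                        # depth + pose + photometric loss
#   output['loss'].backward()
# ---------------------------------------------------------------------------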
|
DL4AGX-master
|
DEST/models/SelfSupModel_dest.py
|
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
import torch
from packnet_sfm.models.SelfSupModel_dest import SfmModel_dest, SelfSupModel_dest
from packnet_sfm.losses.supervised_loss import SupervisedLoss
from packnet_sfm.models.model_utils import merge_outputs
from packnet_sfm.utils.depth import depth2inv
class SemiSupModel_dest(SelfSupModel_dest):
"""
Model that inherits a depth and pose networks, plus the self-supervised loss from
SelfSupModel and includes a supervised loss for semi-supervision.
Parameters
----------
supervised_loss_weight : float
Weight for the supervised loss
kwargs : dict
Extra parameters
"""
def __init__(self, supervised_loss_weight=0.9, **kwargs):
# Initializes SelfSupModel
super().__init__(**kwargs)
# If supervision weight is 0.0, use SelfSupModel directly
assert 0. < supervised_loss_weight <= 1., "Model requires (0, 1] supervision"
# Store weight and initializes supervised loss
self.supervised_loss_weight = supervised_loss_weight
self._supervised_loss = SupervisedLoss(**kwargs)
# Pose network is only required if there is self-supervision
if self.supervised_loss_weight == 1:
self._network_requirements.remove('pose_net')
# GT depth is only required if there is supervision
if self.supervised_loss_weight > 0:
self._train_requirements.append('gt_depth')
@property
def logs(self):
"""Return logs."""
return {
**super().logs,
**self._supervised_loss.logs
}
def supervised_loss(self, inv_depths, gt_inv_depths,
return_logs=False, progress=0.0):
"""
Calculates the supervised loss.
Parameters
----------
inv_depths : torch.Tensor [B,1,H,W]
Predicted inverse depth maps from the original image
gt_inv_depths : torch.Tensor [B,1,H,W]
Ground-truth inverse depth maps from the original image
return_logs : bool
True if logs are stored
progress :
Training progress percentage
Returns
-------
output : dict
            Dictionary containing a "loss" scalar and a "metrics" dictionary
"""
return self._supervised_loss(
inv_depths, gt_inv_depths,
return_logs=return_logs, progress=progress)
def forward(self, batch, return_logs=False, progress=0.0):
"""
Processes a batch.
Parameters
----------
batch : dict
Input batch
return_logs : bool
True if logs are stored
progress :
Training progress percentage
Returns
-------
output : dict
Dictionary containing a "loss" scalar and different metrics and predictions
for logging and downstream usage.
"""
if not self.training:
# If not training, no need for self-supervised loss
            return SfmModel_dest.forward(self, batch)
else:
if self.supervised_loss_weight == 1.:
# If no self-supervision, no need to calculate loss
                self_sup_output = SfmModel_dest.forward(self, batch)
loss = torch.tensor([0.]).type_as(batch['rgb'])
else:
# Otherwise, calculate and weight self-supervised loss
                self_sup_output = SelfSupModel_dest.forward(self, batch)
loss = (1.0 - self.supervised_loss_weight) * self_sup_output['loss']
# Calculate and weight supervised loss
sup_output = self.supervised_loss(
self_sup_output['inv_depths'], depth2inv(batch['depth']),
return_logs=return_logs, progress=progress)
loss += self.supervised_loss_weight * sup_output['loss']
# Merge and return outputs
return {
'loss': loss,
**merge_outputs(self_sup_output, sup_output),
}
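# ---------------------------------------------------------------------------
# Hedged numeric illustration (not part of the original file) of the loss
# weighting used above: with supervised_loss_weight = w, the total loss is
# (1 - w) * self-supervised + w * supervised. Numbers are arbitrary examples.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    w = 0.9
    self_sup_loss = torch.tensor(0.25)
    sup_loss = torch.tensor(0.10)
    total = (1.0 - w) * self_sup_loss + w * sup_loss
    print(total)  # tensor(0.1150)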
|
DL4AGX-master
|
DEST/models/SemiSupModel_dest.py
|
DL4AGX-master
|
DEST/models/__init__.py
|
|
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
import random
import torch.nn as nn
from packnet_sfm.geometry.pose import Pose
from packnet_sfm.models.base_model import BaseModel
from packnet_sfm.models.model_utils import flip_batch_input, flip_output, upsample_output
from packnet_sfm.utils.misc import filter_dict
class SfmModel_dest(BaseModel):
"""
Model class encapsulating a pose and depth networks.
Parameters
----------
depth_net : nn.Module
Depth network to be used
pose_net : nn.Module
Pose network to be used
rotation_mode : str
Rotation mode for the pose network
flip_lr_prob : float
Probability of flipping when using the depth network
upsample_depth_maps : bool
True if depth map scales are upsampled to highest resolution
kwargs : dict
Extra parameters
"""
def __init__(self, depth_net=None, pose_net=None,
rotation_mode='euler', flip_lr_prob=0.0,
upsample_depth_maps=False, **kwargs):
super().__init__()
self.depth_net = depth_net
self.pose_net = pose_net
self.rotation_mode = rotation_mode
self.flip_lr_prob = flip_lr_prob
self.upsample_depth_maps = upsample_depth_maps
self.mse_loss = nn.MSELoss(reduction='mean')
self._network_requirements = [
'depth_net',
'pose_net',
]
def add_depth_net(self, depth_net):
"""Add a depth network to the model"""
self.depth_net = depth_net
def add_pose_net(self, pose_net):
"""Add a pose network to the model"""
self.pose_net = pose_net
def depth_net_flipping(self, batch, flip):
"""
Runs depth net with the option of flipping
Parameters
----------
batch : dict
Input batch
flip : bool
True if the flip is happening
Returns
-------
output : dict
Dictionary with depth network output (e.g. 'inv_depths' and 'uncertainty')
"""
# Which keys are being passed to the depth network
batch_input = {key: batch[key] for key in filter_dict(batch, self._input_keys)}
if flip:
# Run depth network with flipped inputs
output = self.depth_net(**flip_batch_input(batch_input))
# Flip output back if training
output = flip_output(output)
else:
# Run depth network
output = self.depth_net(**batch_input)
return output
def compute_depth_net(self, batch, force_flip=False):
"""Computes inverse depth maps from single images"""
# Randomly flip and estimate inverse depth maps
flag_flip_lr = random.random() < self.flip_lr_prob if self.training else force_flip
output = self.depth_net_flipping(batch, flag_flip_lr)
# If upsampling depth maps at training time
if self.training and self.upsample_depth_maps:
output = upsample_output(output, mode='nearest', align_corners=None)
# Return inverse depth maps
return output
def compute_pose_net(self, image, contexts):
"""Compute poses from image and a sequence of context images"""
pose_vec = self.depth_net.pose(image, contexts)
# print('pose_vec.shape', pose_vec.size())
return [Pose.from_vec(pose_vec[:, i], self.rotation_mode)
for i in range(pose_vec.shape[1])]
def forward(self, batch, return_logs=False, force_flip=False):
"""
Processes a batch.
Parameters
----------
batch : dict
Input batch
return_logs : bool
True if logs are stored
force_flip : bool
If true, force batch flipping for inverse depth calculation
Returns
-------
output : dict
Dictionary containing the output of depth and pose networks
"""
# Generate inverse depth predictions
depth_output = self.compute_depth_net(batch, force_flip=force_flip)
# Generate pose predictions if available
pose_output = None
if 'rgb_context' in batch :
pose_output = self.compute_pose_net(batch['rgb'], batch['rgb_context'])
# Return output dictionary
return {
**depth_output,
'poses': pose_output,
}
|
DL4AGX-master
|
DEST/models/SfmModel_dest.py
|
DL4AGX-master
|
DEST/networks/__init__.py
|
|
import math
import torch
import torch.nn as nn
from packnet_sfm.networks.DEST.DEST_EncDec import DEST_Pose, SimpleTR_B0, SimpleTR_B1, SimpleTR_B2, SimpleTR_B3, SimpleTR_B4, SimpleTR_B5
class InvDepth(nn.Module):
"""Inverse depth layer"""
def __init__(self, in_channels, out_channels=1, min_depth=0.5):
"""
Initializes an InvDepth object.
Parameters
----------
in_channels : int
Number of input channels
out_channels : int
Number of output channels
min_depth : float
Minimum depth value to calculate
"""
super().__init__()
self.min_depth = min_depth
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1)
self.pad = nn.ConstantPad2d([1] * 4, value=0)
self.activ = nn.Sigmoid()
self.scale = torch.tensor([self.min_depth])
def forward(self, x):
"""Runs the InvDepth layer."""
x = self.conv1(self.pad(x))
return self.activ(x) / self.min_depth
class DESTNet(nn.Module):
def __init__(self, model='B3', nb_ref_imgs=2, img_size=(192, 640)):
"""
Defines the size of DEST model
Parameters
----------
model : string
The size of DEST can be selected: 'B0' | 'B1' | 'B2' | 'B3' | 'B4' | 'B5'
nb_ref_imgs : int
Number of reference images for Pose-Net
img_size : tuple
Input image size (H, W)
"""
super().__init__()
self.nb_ref_imgs = nb_ref_imgs
self.connectivity = True
if model == 'B0':
self.num_out_ch, self.dest = SimpleTR_B0(img_size=img_size)
elif model == 'B1':
self.num_out_ch, self.dest = SimpleTR_B1(img_size=img_size)
elif model == 'B2':
self.num_out_ch, self.dest = SimpleTR_B2(img_size=img_size)
elif model == 'B3':
self.num_out_ch, self.dest = SimpleTR_B3(img_size=img_size)
elif model == 'B4':
self.num_out_ch, self.dest = SimpleTR_B4(img_size=img_size)
elif model == 'B5':
self.num_out_ch, self.dest = SimpleTR_B5(img_size=img_size)
self.disp1_layer = InvDepth(self.dest.dims[-4])
self.disp2_layer = InvDepth(self.dest.dims[-4])
self.disp3_layer = InvDepth(self.dest.dims[-3])
self.disp4_layer = InvDepth(self.dest.dims[-2])
num_out_ch, self.dest_pose = DEST_Pose(dims=self.dest.dest_encoder.embed_dims, channels=16,
num_layers=self.dest.dest_encoder.depths,
reduction_ratio=self.dest.dest_encoder.sr_ratios,
connectivity=self.connectivity)
self.pose_pred = nn.Sequential(nn.Conv2d(self.dest.dest_encoder.embed_dims[3], 6 * self.nb_ref_imgs, kernel_size=1, padding=0))
self.channel_reduction_pose = nn.Sequential(nn.Conv2d(9, 16, kernel_size=3, padding=0),
nn.BatchNorm2d(16),
nn.Tanh())
    def measure_Complexity(self, input_size=(3, 192, 640), mode='Depth'):
        # Note: this utility relies on `get_model_complexity_info` (presumably from the
        # ptflops package) and on a `Dummy_net_depth` wrapper, neither of which is
        # imported or defined in this file; it only works where both are in scope.
        input_shape = input_size
        if mode == 'Depth':
            model = Dummy_net_depth(self.dest, self.disp1_layer).eval()
macs, params = get_model_complexity_info(model.cpu(),
input_shape, as_strings=True,
print_per_layer_stat=False, verbose=False)
print('{:<30} {:<8}'.format('%sNet Computational complexity: ' % mode, macs))
print('{:<30} {:<8}'.format('%sNet Number of parameters: ' % mode, params))
def forward(self, rgb):
"""
Runs the network and returns inverse depth maps
(4 scales if training and 1 if not).
"""
out, _, self.ref_feat = self.dest(rgb)
x = self.disp1_layer(out[0])
        if self.training:
            x2 = self.disp2_layer(out[1])
            x3 = self.disp3_layer(out[2])
            x4 = self.disp4_layer(out[3])
            return {'inv_depths': [x, x2, x3, x4]}
        else:
            return {'inv_depths': x}
def pose(self, image, context):
assert (len(context) == self.nb_ref_imgs)
input_ = [image]
input_.extend(context)
input_ = torch.cat(input_, 1)
return self._poseNet(input_)
def _poseNet(self, x_src):
x_src = self.channel_reduction_pose(x_src)
if self.connectivity:
x = self.dest_pose(self.ref_feat, x_src)
else :
x = self.dest_pose(x_src)
pose = self.pose_pred(x)
pose = pose.mean(3).mean(2)
pose = 0.01 * pose.view(pose.size(0), self.nb_ref_imgs, 6)
return pose
def reshape(self, ref_featss, b_):
for i, e_dim in enumerate(self.dest.dest_encoder.embed_dims):
ref_featss[i] = ref_featss[i].reshape(b_, -1, e_dim).repeat((1+self.nb_ref_imgs), 1, 1)
return ref_featss
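# ---------------------------------------------------------------------------
# Hedged smoke test (not part of the original file): InvDepth is the only
# self-contained head in this module, so it is exercised on a dummy feature
# map. The sigmoid output divided by min_depth bounds the predicted inverse
# depth by 1 / min_depth. Channel count and spatial size are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    layer = InvDepth(in_channels=32, min_depth=0.5)
    inv_depth = layer(torch.randn(1, 32, 48, 160))
    print(inv_depth.shape)                        # torch.Size([1, 1, 48, 160])
    print(float(inv_depth.max()) <= 1.0 / 0.5)    # True: bounded by 1 / min_depth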
|
DL4AGX-master
|
DEST/networks/DESTNet.py
|
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
import math
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv1d(in_features, hidden_features, 1)
self.dwconv = DWConv(hidden_features)
self.act = nn.ReLU()
self.fc2 = nn.Conv1d(hidden_features, out_features, 1)
self.drop = nn.Dropout(drop)
self.norm1 = nn.BatchNorm1d(hidden_features)
self.norm2 = nn.BatchNorm1d(hidden_features)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv1d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x, H, W):
x = self.fc1(x)
x = self.norm1(x)
x = self.dwconv(x, H, W)
x = self.norm2(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention_MaxPool(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Conv1d(dim, dim, 1, bias=qkv_bias)
self.k = nn.Conv1d(dim, dim, 1, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv1d(dim, dim, 1)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.BatchNorm1d(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Conv1d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x, H, W):
B, C, N = x.shape
q = self.q(x)
q = q.reshape(B, self.num_heads, C // self.num_heads, N)
q = q.permute(0, 1, 3, 2)
if self.sr_ratio > 1:
x_ = x.reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1)
x_ = self.norm(x_)
k = self.k(x_).reshape(B, self.num_heads, C // self.num_heads, -1)
else:
k = self.k(x).reshape(B, self.num_heads, C // self.num_heads, -1)
v = torch.mean(x, 2, True).repeat(1, 1, self.num_heads).transpose(-2, -1)
attn = (q @ k) * self.scale
attn, _ = torch.max(attn, -1)
out = (attn.transpose(-2, -1) @ v)
out = out.transpose(-2, -1)
out = self.proj(out)
return out
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.ReLU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = nn.BatchNorm1d(dim)
self.norm2 = nn.BatchNorm1d(dim)
self.attn = Attention_MaxPool(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
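
# A minimal, hypothetical sketch of a complete Block (illustrative only): BatchNorm,
# max-pool attention, and the convolutional Mlp, each wrapped in a residual connection,
# so the (B, C, N) shape is preserved.
def _example_block():
    blk = Block(dim=64, num_heads=2, mlp_ratio=4., sr_ratio=2)
    tokens = torch.randn(2, 64, 14 * 14)   # (B, C, N) with H = W = 14
    out = blk(tokens, 14, 14)
    assert out.shape == tokens.shape
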
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=(224,224), patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = img_size[0] // patch_size[0] * img_size[1] // patch_size[1]
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.BatchNorm2d(embed_dim)
self.H = (img_size[0] - patch_size[0] + 2 * (patch_size[0] // 2)) / stride + 1
self.W = (img_size[1] - patch_size[1] + 2 * (patch_size[1] // 2)) / stride + 1
self.feat_shape = (int(self.H), int(self.W))
self.N = int(self.feat_shape[0] * self.feat_shape[1])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = self.norm(x)
x = x.flatten(2)
return x, H, W
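
# A minimal, hypothetical sketch of OverlapPatchEmbed (illustrative only). With patch_size=7
# and stride=4, a 224x224 image becomes a 56x56 grid of overlapping patch embeddings,
# returned flattened as (B, embed_dim, H * W) together with the grid size.
def _example_patch_embed():
    embed = OverlapPatchEmbed(img_size=(224, 224), patch_size=7, stride=4, in_chans=3, embed_dim=64)
    img = torch.randn(2, 3, 224, 224)
    tokens, H, W = embed(img)
    assert (H, W) == embed.feat_shape == (56, 56)
    assert tokens.shape == (2, 64, 56 * 56)
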
class SimplifiedTransformer(nn.Module):
def __init__(self, img_size=(224,224), patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]
):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.embed_dims = embed_dims
self.sr_ratios = sr_ratios
self.num_layers = depths
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=(img_size[0] // 4, img_size[1] // 4), patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=(img_size[0] // 8, img_size[1] // 8), patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=(img_size[0] // 16, img_size[1] // 16), patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'}
def get_classifier(self):
return self.head
    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        # The optional classification head takes the last stage's embedding width.
        self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
outs = []
ref_feat = {'1': [], '2': [], '3': [], '4': [],}
# stage 1
x, H, W = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x = blk(x, H, W)
ref_feat['1'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
# stage 2
x, H, W = self.patch_embed2(x)
for i, blk in enumerate(self.block2):
x = blk(x, H, W)
ref_feat['2'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
# stage 3
x, H, W = self.patch_embed3(x)
for i, blk in enumerate(self.block3):
x = blk(x, H, W)
ref_feat['3'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
# stage 4
x, H, W = self.patch_embed4(x)
for i, blk in enumerate(self.block4):
x = blk(x, H, W)
ref_feat['4'].append(x)
x = x.reshape(B, -1, H, W).contiguous()
outs.append(x)
return outs, ref_feat
def forward(self, x):
x, ref_feat = self.forward_features(x)
return x, ref_feat
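
# A minimal, hypothetical end-to-end sketch of the backbone (illustrative only, with a small
# configuration). forward returns the four pyramid feature maps at strides 4, 8, 16, and 32,
# plus the per-block token features collected in ref_feat.
def _example_backbone():
    model = SimplifiedTransformer(img_size=(224, 224), embed_dims=[32, 64, 160, 256],
                                  num_heads=[1, 2, 5, 8], depths=[1, 1, 1, 1],
                                  sr_ratios=[8, 4, 2, 1])
    img = torch.randn(2, 3, 224, 224)
    outs, ref_feat = model(img)
    assert [o.shape[1] for o in outs] == [32, 64, 160, 256]   # channels per stage
    assert [o.shape[-1] for o in outs] == [56, 28, 14, 7]     # spatial size per stage
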
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, C, N = x.shape
x = x.reshape(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2)
return x
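
# A minimal, hypothetical sketch of DWConv (illustrative only): tokens are folded back into a
# (B, C, H, W) map, filtered by a 3x3 depthwise convolution (groups == C), and flattened
# again, so both the channel count and the sequence length are unchanged.
def _example_dwconv():
    dw = DWConv(dim=64)
    tokens = torch.randn(2, 64, 14 * 14)   # (B, C, N) with H = W = 14
    out = dw(tokens, 14, 14)
    assert out.shape == tokens.shape
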
|
DL4AGX-master
|
DEST/networks/DEST/simplified_attention.py
|