python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
"""Lock access to a file using atomic property of link(2).
>>> lock = LinkLockFile('somefile')
>>> lock = LinkLockFile('somefile', threaded=False)
"""
def acquire(self, timeout=None):
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed("failed to create %s" % self.unique_name)
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout
else:
raise AlreadyLocked
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
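# --- Illustrative sketch (not part of the original module) ---
# Hedged standalone illustration of the primitive this class relies on:
# os.link() either succeeds atomically or raises OSError, so winning the
# hard-link race is equivalent to acquiring the lock. The names tmpdir,
# unique and target below are invented for the example.
if __name__ == "__main__":
    import tempfile
    tmpdir = tempfile.mkdtemp()
    unique = os.path.join(tmpdir, "holder")   # plays the role of unique_name
    target = os.path.join(tmpdir, "lock")     # plays the role of lock_file
    open(unique, "wb").close()
    try:
        os.link(unique, target)               # atomic: only one process can win
        print("lock acquired")
        os.unlink(target)                     # release
    except OSError:
        print("lock already held")
    finally:
        os.unlink(unique)
        os.rmdir(tmpdir)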
| DeepBind-master | code/libs/deepity/deepity/_lockfile/linklockfile.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True):
"""
>>> lock = MkdirLockFile('somefile')
>>> lock = MkdirLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded)
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
self.tname,
self.pid))
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout
else:
# Someone else has the lock.
raise AlreadyLocked
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
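# --- Illustrative sketch (not part of the original module) ---
# Hedged standalone illustration of the primitive used above: os.mkdir()
# either creates the directory atomically or raises OSError(EEXIST).
# The names parent and lockdir are invented for the example.
if __name__ == "__main__":
    import tempfile
    parent = tempfile.mkdtemp()
    lockdir = os.path.join(parent, "demo.lock")
    try:
        os.mkdir(lockdir)                     # atomic create == acquire
        print("lock acquired")
        os.rmdir(lockdir)                     # release
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            print("lock already held")
        else:
            raise
    os.rmdir(parent)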
| DeepBind-master | code/libs/deepity/deepity/_lockfile/mkdirlockfile.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
# -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import os
import sys
import errno
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False)
dirname = os.path.dirname(self.lock_file)
basename = os.path.split(self.path)[-1]
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError, exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout
else:
raise AlreadyLocked
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
raise LockFailed
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked
if not self.i_am_locking():
raise NotMyLock
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
"""
pid = None
try:
pidfile = open(pidfile_path, 'r')
except IOError:
pass
else:
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character.
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
pass
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
line = "%(pid)d\n" % vars()
pidfile.write(line)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError, exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
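# --- Illustrative sketch (not part of the original module) ---
# Hedged round-trip of the helper functions above: write this process's PID
# to a scratch path, read it back, then remove the file. demo_path is an
# invented name, and the block only runs if the package-relative imports at
# the top of this module resolve.
if __name__ == "__main__":
    import tempfile
    demo_dir = tempfile.mkdtemp()
    demo_path = os.path.join(demo_dir, "demo.pid")
    write_pid_to_pidfile(demo_path)
    assert read_pid_from_pidfile(demo_path) == os.getpid()
    remove_existing_pidfile(demo_path)
    os.rmdir(demo_dir)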
| DeepBind-master | code/libs/deepity/deepity/_lockfile/pidlockfile.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from .. import globals
from ..node import supernode
class chain(supernode):
"""
A chain of nodes, automatically connected in sequence, like a neural net.
Any remaining unconnected plug gets exported to the 'surface' of this supernode.
"""
def __init__(self,children,name=None):
# First connect together each node in a chain, so that the supernode
# only exposes the remaining (unconnected) plugs.
for prev,next in zip(children[:-1],children[1:]):
prev >> next
super(chain,self).__init__(children,name)
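# --- Illustrative note (not part of the original module) ---
# A hedged sketch of how a chain might be assembled from the std nodes defined
# elsewhere in this package (the constructor arguments are assumptions):
#
#   net = chain([full(100), bias(), relu(), full(1), bias()])
#
# The loop above wires full >> bias >> relu >> ... in order, and the plugs
# left unconnected (the first node's X, the last node's Z) are exposed on the
# resulting supernode.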
| DeepBind-master | code/libs/deepity/deepity/std/chain.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from elemwise import *
from softmax import *
from loss import *
from full import *
from chain import *
from trainable import *
| DeepBind-master | code/libs/deepity/deepity/std/__init__.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import numpy as np
from ..node import node
from .. import globals
from .. import _ext
from smat import *
class full(node):
"""
A fully-connected affine function Z = dot(X,W).
Also outputs a cost value cost = decay*sum(abs(W))
ishape is used to visualize weights as if entering this layer (e.g. set to (28,28) for layer connected to MNIST inputs)
oshape is used to visualize weights as if exiting this layer (e.g. set to (28,28) for layer connected to auto-encoder output on MNIST)
"""
def __init__(self, num_units, weight_decay=None, init_scale=None, init_bias=None, ishape=None, oshape=None):
super(full,self).__init__(["X","W"],["Z","cost"])
self.size = num_units
self.init = init_scale if init_scale is not None else 0.001
self.init_mu = init_bias if init_bias is not None else 0.0
self.decay = weight_decay
self.ishape = ishape
self.oshape = oshape
self.zero_cost = None
self.W.inst_axis = 0 # the ROWS of W grow when there are multiple instances
def getfilters(self):
if not self.W.shape:
return None
if self.ishape:
F = self.W.fpval.asnumpy().T
F = np.require(F,requirements=['C'])
F = F.reshape((self.size,) + self.ishape)
return F
if self.oshape:
F = self.W.fpval.asnumpy()
F = np.require(F,requirements=['C'])
F = F.reshape((self.W.shape[0],) + self.oshape)
return F
def _slice_inst(self,i):
if self.W.shape:
self.W.fpval = self.W.fpval[i*self.W.shape[0]:(i+1)*self.W.shape[0],:].copy()
def _fprop(self,X,W):
if X is None:
return (None,0)
Z = _ext.blockwise_dot(X, W, self.ninst)
cost = self._fprop_cost(W)
return (Z,cost)
def _bprop(self, X, W, dZ):
dX = _ext.blockwise_dot_nt(dZ, W, self.ninst) if (self.X.has_upstream() or ("want_bprop_inputs" in globals.flags)) else None
dW = _ext.blockwise_dot_tn(X, dZ, self.ninst, W) if (self.W.has_upstream() or ("want_bprop_inputs" in globals.flags)) else None
self._bprop_cost(W, dW)
return (dX,dW)
def _fprop_cost(self,W):
# Now compute 'cost', only used when evaluating cost function.
# If we're in the middle of a gradient computation, then
# we don't need cost to be forward propagated. However, if
# we're doing a feed-forward test mode computation, then
# we do need a cost to be fprop'd
if isinstance(self.decay,np.ndarray):
self.decay = asarray(self.decay,dtype=W.dtype) if self.decay.size > 1 else np.asscalar(self.decay)
if self.zero_cost is None:
self.zero_cost = zeros((1,self.ninst))
cost = self.zero_cost
if (self.decay is not None) and ("bprop_mode" not in globals.flags) and (globals.flags.get("weight_decay_start",0) <= globals.flags.get("step",0)):
if self.ninst == 1:
cost = sum(abs(W)) * self.decay
else:
C = W.reshape((self.ninst,-1)) # Put each separate weight matrix into its own row.
C = sum(abs(C),axis=1) # Sum the absolute values across each row.
cost = C.T*self.decay # Turn into row vector of costs, weighted by decay coefficient.
return cost
def _bprop_cost(self, W, dW):
# Backprop weight decay to dW, if any
if (self.decay is not None) and (globals.flags.get("weight_decay_start",0) <= globals.flags.get("step",0)):
if self.ninst == 1:
if dW is not None:
dW += float(self.decay)*sign(W)
else:
# Add a separate decay for each instance
_ext.madd_bcast(sign(W),self.decay,W.size/self.ninst,dW)
def _calc_shapes(self,X,W,Z):
# First make sure (X.ncol) = (W.nrow)
if X._shape and W._shape: assert X._shape[1] == W._shape[0]
elif X._shape and not W._shape: W._shape = (X._shape[1],self.size) # W is (num inputs x num outputs)
elif W._shape and not X._shape: X._shape = (None,W._shape[0])
# Output dimension is determined by 'size' of this node (number of hidden units)
Z._shape = (None,self.size)
class combine(full):
"""
Effectively splices the input matrices X0...Xk and then implements
a fully-connected layer between those stacked matrices.
However, the matrices are never explicitly stacked, and instead the
matrix multiply is broken down into blocks, so all operations are
in-place without the extra copying/temporary memory.
"""
def __init__(self, num_inputs, size, decay=None, init=None, init_mu=None, start_training=None, ishape=None, oshape=None):
# Create input attributes X0..Xk for k=num_sources-1
super(full,self).__init__(["X%d"%i for i in range(num_inputs)] + ["W"],["Z","cost"])
self.num_inputs = num_inputs
self.size = size
self.init = init if init is not None else 0.001
self.init_mu = init_mu if init_mu is not None else 0.0
self.decay = decay
self.start_training = start_training
self.ishape = ishape
self.oshape = oshape
self.zero_cost = None
self.Xsizes = None
self.W.inst_axis = 0 # the ROWS of W grow when there are multiple instances
def _fprop(self, W):
X = [p.fpval for p in self.iplugs[:self.num_inputs]]
Xbroadcast = [self.ninst > 1 and (X[t].shape[1] == self.iplugs[t].shape[1] if X[t] is not None else True) for t in range(len(X))]
Z = _ext.blockwise_dot_combined(X, W, self.ninst, Xbroadcast)
cost = self._fprop_cost(W)
return (Z,cost)
def _bprop(self, W, dZ):
# TODO: avoid backprop to a specific Xi plug if it has no trainable nodes connected upstream
X = [p.fpval for p in self.iplugs[:self.num_inputs]]
Xbroadcast = [self.ninst > 1 and (X[t].shape[1] == self.iplugs[t].shape[1] if X[t] is not None else True) for t in range(len(X))]
# If we aren't allowed to start training a particular component, then kill its gradient for the time being.
dWmask = None
if self.start_training:
assert len(X) == len(self.start_training)
dWmask = [start < globals.flags.get("step",0) for start in self.start_training]
dX = _ext.blockwise_dot_nt_combined(X, dZ, W, self.ninst, Xbroadcast)
dW = _ext.blockwise_dot_tn_combined(X, dZ, W, self.ninst, Xbroadcast, dWmask)
self._bprop_cost(W, dW)
return tuple(dX) + (dW,)
def _calc_shapes(self,W,Z):
# First make sure sum([X.ncol for X in iplugs]) = (W.nrow)
Xsize_total = 0
for X in self.iplugs[:self.num_inputs]:
assert X.srcs, "%s has no input plugs; cannot determine shape" % X.name
X.srcs[0]._calc_shape([self]) # Calculate upstream shapes without recursively visiting ourselves by mistake
X._shape = X.srcs[0]._shape
#assert X._shape, "%s's shape was not defined; combine layer cannot backward-propagate shape to its inputs." % X.name
if not X._shape:
X._shape = (None,0) # Treat it as an empty matrix
Xsize_total += X._shape[1]
if W._shape: assert Xsize_total == W._shape[0]
else: W._shape = (Xsize_total,self.size) # W is (num inputs x num outputs)
# Output dimension is determined by 'size' of this node (number of hidden units)
Z._shape = (None,self.size)
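# --- Illustrative sketch (not part of the original module) ---
# A hedged NumPy reference for the per-instance L1 weight-decay cost computed
# in _fprop_cost: each instance's weight matrix occupies a block of rows in W,
# and its cost is decay * sum(abs(W_block)). The shapes in __main__ are made up.
def _l1_decay_cost_reference(W, ninst, decay):
    import numpy as np
    C = np.abs(W.reshape((ninst, -1))).sum(axis=1)   # one L1 norm per instance
    return (C * decay).reshape((1, -1))              # row vector of costs

if __name__ == "__main__":
    import numpy as np
    W = np.random.randn(6, 4)                        # e.g. two stacked 3x4 weight matrices
    print(_l1_decay_cost_reference(W, ninst=2, decay=1e-3))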
| DeepBind-master | code/libs/deepity/deepity/std/full.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from ..node import node
from smat import *
from .. import _ext
from .. import globals
class loss_node(node):
"""Calculate a loss function applied to several instances."""
def __init__(self, ngroup=1):
super(loss_node,self).__init__(["Z","Y","Ymask"],["loss","Zmask"]) # Zmask is used to kill a certain prediction's contribution to the gradient, e.g. if reverse complement mode is enabled then half the rows will be disabled. It is a computed value, an output.
self.ngroup = ngroup
self.Y.trainable = False
self.Ymask.trainable = False
self.batchmean = True
def _calc_loss(self, Z, Y):
raise NotImplementedError()
def _fprop(self, Z, Y, Ymask):
Zmask = self._calc_Zmask(Z)
L = self._calc_loss(Z,Y)
if Ymask is not None:
#ratio = asarray(sum(M,axis=0),dtype=float32) / M.shape[0]
#print ratio
_ext.maskout(Ymask,L)
if Zmask is not None:
_ext.maskout(Zmask,L)
L = mean(L,axis=0) # Mean loss along each column
if self.ngroup == 1:
# Output a single overall mean loss associated with each instance.
# Coming into this if-statement, loss matrix L will be a concatenation
# of ninst row vectors:
# [ z_1, ..., z_k ]
# where each z_k is a row vector of outputs for model instance k.
# So, we need to reshape it to be
# [ z_1 ;
# ...
# z_k ]
# then take the sum along columns to get a column vector, and then
# transpose that back into a row vector with 'ninst' values.
assert L.shape[1]//self.ninst == self.Y.shape[1]
L = L.reshape((-1,self.Y.shape[1]))
L = self._scale_loss(sum(L,axis=1).reshape((1,-1))) # Mean of (sum of output component losses) over all cases in minibatch
return (L, Zmask)
raise NotImplementedError("Not finished")
# Multiple-output case 1: one column per output
m = Z.ncol
if m == self.ngroup:
return (self._scale_loss(L), Zmask) # Just return current row-vector of errors
# Multiple-output case 2: several columns per output
L = L.reshape((-1,m/self.ngroup)) # Make enough columns for just this group
L = sum(L,axis=1) # Calculate sum in those columns
L = L.reshape((1,-1)) # Convert back to row-vector.
L = self._scale_loss(L)/self.ninst
return (L, Zmask)
def _bprop(self, Z, Y, Ymask):
if self.ngroup == 1:
self.Zmask._fpval = self._calc_Zmask(Z)
dZ = self._calc_dZ(Z,Y)
if Ymask is not None:
_ext.maskout(Ymask,dZ)
if self.Zmask._fpval is not None:
_ext.maskout(self.Zmask._fpval,dZ)
if self.batchmean:
dZ *= (1./Z.nrow)
return dZ
raise NotImplementedError("Not finished")
def _calc_dZ(self, Z, Y):
return Z-Y
def _calc_Zmask(self, Z):
Zmask = None
if "reverse_complement" in globals.flags:
Zmask = zeros(Z.shape, bool)
_ext.calc_Zmask(Z, Zmask)
return Zmask
def _calc_shapes(self, Z, Y, loss):
# Make sure Z and Y have same number of columns
if Z._shape and Y._shape: assert Z._shape[1] == Y._shape[1]
elif Z._shape and not Y._shape: Y._shape = (None,Z._shape[1])
elif Y._shape and not Z._shape: Z._shape = (None,Y._shape[0])
# Output dimension is always scalar
loss._shape = (1,1)
class mse(loss_node):
"""Mean squared error."""
def __init__(self,ngroup=1):
super(mse,self).__init__(ngroup)
def _calc_loss(self,Z,Y): return (Z-Y)**2 # Elementwise squared errors
def _scale_loss(self,loss):
return 0.5*loss # Divide by 2
class nll(loss_node):
"""Negative log-likelihood of a softmax or logistic layer."""
def __init__(self,ngroup=1):
super(nll,self).__init__(ngroup)
def _calc_loss(self,Z,Y):
# If only a single output, then treat it as probability of class label 1
if Y.shape[1] == self.ninst:
return log(maximum(1e-15,Z))*Y + log(maximum(1e-15,1-Z))*(1-Y)
# Otherwise, treat it as a multiclass problem
return log(maximum(1e-15,Z))*Y # Elementwise negative log-likelihood, with max(eps,Z) to avoid NaNs
def _scale_loss(self,loss): return -loss # Negate
class hinge(loss_node):
"""Bidirectional hinge loss. Penalizes case "Z>0,Y=0" by cost Z, and case "Z<1,Y=1" by cost 1-Z."""
def __init__(self):
super(hinge,self).__init__(1)
def _calc_loss(self,Z,Y):
return maximum(0, (1-Y)*Z+Y*(1-Z))
def _calc_dZ(self, Z, Y):
L = (1-Y)*Z+Y*(1-Z)
dZ = 1-2*Y
_ext.maskout(L>0,dZ)
return dZ
def _scale_loss(self,loss):
return loss
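# --- Illustrative sketch (not part of the original module) ---
# A hedged NumPy reference for the bidirectional hinge loss above:
# loss = max(0, (1-Y)*Z + Y*(1-Z)) with gradient dZ = 1-2*Y, zeroed wherever
# the hinge is inactive. The toy arrays are made up for the example.
if __name__ == "__main__":
    import numpy as np
    Z = np.array([[0.3, -0.2, 1.4]])
    Y = np.array([[1.0,  0.0, 1.0]])
    L = np.maximum(0.0, (1 - Y) * Z + Y * (1 - Z))
    dZ = np.where(L > 0, 1 - 2 * Y, 0.0)
    print(L)    # per-element hinge losses
    print(dZ)   # gradient, masked where the hinge is inactive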
| DeepBind-master | code/libs/deepity/deepity/std/loss.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from .. import globals
from ..node import node
import smat as sm
import numpy as np
from .. import _ext
class elemwise(node):
def __init__(self,iplugs,oplugs):
super(elemwise,self).__init__(iplugs,oplugs)
def _calc_shapes(self,X,Z):
if X._shape: Z._shape = X._shape # All elemwise functions have same output dim as input dim
elif Z._shape: X._shape = Z._shape
class linear(elemwise):
"""Linear node. Computes Z = X"""
def __init__(self): super(linear,self).__init__(["X"],["Z"])
def _fprop(self,X): return X
def _bprop(self,dZ): return dZ
class exp(elemwise):
"""Exponential node. Computes Z = exp(X)"""
def __init__(self): super(exp,self).__init__(["X"],["Z"])
def _fprop(self,X): return sm.exp(X) if X is not None else None
def _bprop(self,dZ,Z): return dZ*Z if dZ is not None else None # Z = exp(X)
class sqr(elemwise):
"""Square node. Computes Z = X**2"""
def __init__(self): super(sqr,self).__init__(["X"],["Z"])
def _fprop(self,X): return sm.square(X) if X is not None else None
def _bprop(self,dZ,X): return 2*dZ*X if dZ is not None else None
class relu(elemwise):
"""Rectified-linear node. Computes Z = max(0,X)"""
def __init__(self): super(relu,self).__init__(["X"],["Z"])
def _fprop(self,X):
if "disable_relu" in globals.flags:
return X
return sm.maximum(0,X) if X is not None else None
def _bprop(self,dZ,Z): return dZ*sm.sign(Z) if dZ is not None else None # sign(Z) will be 0 or +1, never -1
class rectify(relu):
def __init__(self): super(rectify,self).__init__()
class wrelu(elemwise):
"""Weakly rectified-linear node. Computes Z = max(slope*X,X) for some 0<slope<1"""
def __init__(self,slope=0.1):
super(wrelu,self).__init__(["X"],["Z"])
self.slope = slope
def _fprop(self,X):
return sm.maximum(self.slope*X,X) if X is not None else None
def _bprop(self,dZ,Z,X):
if dZ is None:
return None
S = sm.sign(X)
S = sm.maximum(-self.slope,S) # S is -slope where X<0, +1 where X>0, so the gradient is dZ*slope or dZ*1
return dZ*abs(S)
class tanh(elemwise):
"""Tanh node. Computes Z = tanh(X)"""
def __init__(self): super(tanh,self).__init__(["X"],["Z"])
def _fprop(self,X): return sm.tanh(X) if X is not None else None
def _bprop(self,dZ,Z): return dZ*(1-Z**2) if dZ is not None else None # tanh'(x) = 1-tanh(x)^2
class logistic(elemwise):
"""Logistic sigmoid node. Computes Z = logistic(X)."""
def __init__(self): super(logistic,self).__init__(["X"],["Z"])
def _fprop(self,X): return sm.logistic(X) if X is not None else None
def _bprop(self,dZ,Z): return dZ*(Z-Z**2) if dZ is not None else None # logistic'(x) = logistic(x)-logistic(x)^2
class dropout(elemwise):
"""
Dropout node.
If the global "train_mode" flag is set, computes Z = X*M where M is a bernoulli mask with p=(1-rate).
Otherwise, computes Z = X*(1-rate).
"""
def __init__(self, rate=0.5, activerange=None):
super(dropout,self).__init__(["X"],["Z"])
self.M = None
self.rate = rate
self.activerange = activerange
def _fprop(self,X):
if X is None:
return None
if np.isscalar(self.rate):
if self.rate == 0:
return X
self.rate = sm.asarray([self.rate for i in range(self.ninst)], dtype=X.dtype)
elif not isinstance(self.rate, sm.sarray):
self.rate = sm.asarray(self.rate,dtype=X.dtype)
if "train_mode" in globals.flags:
Z,self.M = _ext.dropout_fp_train(X, self.rate, "reverse_complement" in globals.flags)
else:
Z = _ext.dropout_fp_test(X, self.rate)
return Z
def _bprop(self,dZ):
if np.isscalar(self.rate) and self.rate == 0:
return dZ
if "train_mode" in globals.flags:
dX = _ext.dropout_bp_tr(dZ,self.M)
self.M = None
else:
dX = _ext.dropout_bp_te(dZ,self.rate)
return dX
def _slice_inst(self,i):
assert self.rate.shape[0] % self.ninst == 0
chunksize = self.rate.shape[0] // self.ninst
self.rate = self.rate[i*chunksize:(i+1)*chunksize].copy()
################ trainable nodes #################
class bias(elemwise):
"""Bias node. Computes Z = X + b"""
def __init__(self, init=0.0, init_mu=0, negdecay=None, viz=True, start_training=0):
super(bias ,self).__init__(["X","b"], ["Z"])
self.init = init
self.init_mu = init_mu
self.viz = viz
self.negdecay = negdecay
self.start_training = start_training
def _fprop(self,X,b): return X + b if X is not None else None # broadcast row-vector b
def _bprop(self,dZ):
if dZ is None:
return (None,0)
db = sm.sum(dZ,axis=0)
if self.negdecay:
db += self.negdecay
if self.start_training > globals.flags.get("step",0):
db *= 0
return (dZ,db) # (dX,db)
def _calc_shapes(self,X,Z,b):
# First make sure X and b have equal number of columns
if X._shape and b._shape: assert X._shape[1] == b._shape[1]
elif X._shape and not b._shape: b._shape = (1,X._shape[1])
elif b._shape and not X._shape: X._shape = (None,b._shape[1])
# Then set the shape of Z to match the shape of X
elemwise._calc_shapes(self,X,Z)
def _slice_inst(self,i):
self.b.fpval = self.b.fpval[0,i*self.b.shape[1]:(i+1)*self.b.shape[1]].copy()
#print "bias: ", self.b.fpval
def getfilters(self):
if self.viz:
F = self.b.fpval.asnumpy().reshape((-1,1,1))
if self.X.srcs[0].node.__class__.__name__ == "corr1ord":
# HACK: since corr1ord node subtracts a constant to ensure
# each filter column has mean zero, we need to add
# the total "visualization bias" to our own value,
# so that the visualization is still showing a
# correct (equivalent) model
filter_biases = self.X.srcs[0].node.getfilters(want_bias=True)
if filter_biases is not None:
filter_biases = filter_biases.sum(axis=1)
F += filter_biases.reshape((-1,1,1))
return F
class scale(elemwise):
"""Scale node. Computes Z = X*w"""
def __init__(self, init=0.0, init_mu=1.0, viz=False):
super(scale, self).__init__(["X","w"], ["Z"])
self.init = init
self.init_mu = init_mu
self.viz = viz
def _fprop(self,X,w): return X*w if X is not None else None # broadcast row-vector w
def _bprop(self,X,w,Z,dZ): return (dZ*w,sm.sum(dZ*X,axis=0)) # (dX,dw)
def _calc_shapes(self,X,Z,w):
# First make sure X and w have equal number of columns
if X._shape and w._shape: assert X._shape[1] == w._shape[1]
elif X._shape and not w._shape: w._shape = (1,X._shape[1])
elif w._shape and not X._shape: X._shape = (None,w._shape[1])
# Then set the shape of Z to match the shape of X
elemwise._calc_shapes(self,X,Z)
def _slice_inst(self,i):
self.w.fpval = self.w.fpval[0,i*self.w.shape[1]:(i+1)*self.w.shape[1]].copy()
#print "scale: ", self.w.fpval
def getfilters(self):
if self.viz:
F = self.w.fpval.asnumpy().reshape((-1,1,1))
return F
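# --- Illustrative sketch (not part of the original module) ---
# A hedged NumPy picture of what the dropout node computes: at train time,
# multiply by a Bernoulli keep-mask with p(keep) = 1-rate; at test time,
# scale by (1-rate) so the expected activation matches. This mirrors
# _ext.dropout_fp_train / dropout_fp_test only in spirit.
if __name__ == "__main__":
    import numpy as np
    rate = 0.5
    X = np.random.randn(4, 3)
    M = np.random.rand(4, 3) >= rate   # keep mask
    print(X * M)                       # train-mode output
    print(X * (1.0 - rate))            # test-mode output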
| DeepBind-master | code/libs/deepity/deepity/std/elemwise.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from ..node import node
from ..plug import plug,plug_null
class trainable(node):
"""
A trainable node should be initialized with two things:
1. 'P' gets forward propagated to downstream plugs
2. 'dP' accumulates the backprop'd gradient contributions to 'P'
A training algorithm can connect trainable nodes to a dependency graph,
and have the backprop'd values accumulate in a giant contiguous vector
of parameters (i.e. dP is a view into a larger gradient vector).
"""
def __init__(self,P,dP=None):
super(trainable,self).__init__([],["Z"])
self.P = P
self.dP = dP
def _fprop(self):
return self.P
def enforce_constraints(self):
dstnode = self.Z.dsts[0].origin().node
if hasattr(dstnode,"enforce_constraints"):
dstnode.enforce_constraints(self.P)
def _bprop(self,dZ):
self.dP[:] = dZ
self.Z._bpval = plug_null
def _calc_shapes(self,Z):
pass
| DeepBind-master | code/libs/deepity/deepity/std/trainable.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from ..node import node
from smat import *
from .. import globals
class softmax(node):
"""
Softmax node.
Computes Z = softmax(X) where softmax is applied row-wise.
If ngroup > 1, then each row is broken down into 'ngroup'
softmax computations (X.ncol must be divisible by ngroup).
"""
def __init__(self,ngroup=1):
super(softmax,self).__init__(["X"],["Z"])
self.ngroup = ngroup
def _fprop(self,X):
if globals.flags.get("disable_softmax",False):
return X
# Compute softmax on entire rows.
# If ngroup > 1, then softmax is applied separately to
# groups of elements within each row.
nchunk = self.ngroup * self.ninst
if nchunk == X.shape[1]:
# One output per target, which means we're doing logistic regression
# and can just pass each value through the logistic function.
Z = logistic(X)
elif nchunk == 1:
Z = exp(X-max(X,axis=1)) # Subtract max for numerical stability.
Z /= sum(Z,axis=1) # Normalize
else:
assert X.ncol % nchunk == 0, "Number of columns in X must be divisible by ngroup * ninst."
A = X.reshape((-1,X.ncol//nchunk))
Z = exp(A-max(A,axis=1)) # Subtract max for numerical stability.
Z /= sum(Z,axis=1) # Normalize
Z = Z.reshape(X.shape) # Put back in proper shape.
return Z
def _requirements(self):
return { "target" : "logistic" }
def _bprop(self,dZ):
return dZ # softmax must be used at final output, and does not alter the backpropagated error
def _calc_shapes(self,X,Z):
if X._shape: Z._shape = X._shape # All elemwise functions have same output dim as input dim
elif Z._shape: X._shape = Z._shape
softmaxnode = softmax
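# --- Illustrative sketch (not part of the original module) ---
# A hedged NumPy reference for the row-wise softmax computed in _fprop,
# including the max subtraction used for numerical stability.
if __name__ == "__main__":
    import numpy as np
    X = np.array([[1.0, 2.0, 3.0],
                  [0.0, 0.0, 0.0]])
    E = np.exp(X - X.max(axis=1, keepdims=True))
    Z = E / E.sum(axis=1, keepdims=True)
    print(Z)   # each row sums to 1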
| DeepBind-master | code/libs/deepity/deepity/std/softmax.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from deepity_smat import *
| DeepBind-master | code/libs/deepity/deepity/_ext/__init__.py |
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import numpy as np
from smat import *
from smat import smat_dll
from os.path import abspath,join,dirname
from ctypes import *
###################################################################
# Declare some useful ctypes based on the C++ types
c_isize_t = smat_dll.c_isize_t
c_usize_t = smat_dll.c_usize_t
c_smat_p = smat_dll.c_smat_p
###################################################################
# Now create the public 'dll' object exposed to smat.py, with all the methods
# exported by the DLL available for calling
#
_ext_dll = None
def ext_dll():
global _ext_dll
if _ext_dll is None:
_ext_dll = load_extension("deepity_smat")
_ext_dll.api_gradstep.declare( None, [c_smat_p,c_smat_p,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_gradstep_nesterov1.declare(None, [c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_gradstep_nesterov2.declare(None, [c_smat_p,c_smat_p,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_madd_bcast.declare( None, [c_smat_p,c_smat_p,c_usize_t,c_smat_p])
_ext_dll.api_maskout.declare( None, [c_smat_p,c_smat_p])
_ext_dll.api_calc_zmask.declare( None, [c_smat_p,c_smat_p])
_ext_dll.api_dropout_fp_tr.declare( None, [c_smat_p,c_smat_p,c_smat_p,c_smat_p,c_int])
_ext_dll.api_dropout_fp_te.declare( None, [c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_dropout_bp_tr.declare( None, [c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_dropout_bp_te.declare( None, [c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_blockwise_dot.declare( None, [c_int,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_blockwise_dot_nt.declare( None, [c_int,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_blockwise_dot_tn.declare( None, [c_int,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_blockwise_dot_combined.declare( None, [c_int,c_int,c_int,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_blockwise_dot_nt_combined.declare( None, [c_int,c_int,c_int,c_smat_p,c_smat_p,c_smat_p])
_ext_dll.api_blockwise_dot_tn_combined.declare( None, [c_int,c_int,c_int,c_smat_p,c_smat_p,c_smat_p])
return _ext_dll
#######################################################################
def gradstep(P,dP,drate,mP,mrate,grad,nesterov=False):
"""
Performs a gradient update step on parameters P,
using gradient dP with learning rate (drate), and
momentum vector mP with momentum rate (mrate).
grad() must be a function that computes:
dP[:] = gradient at current P
where 'grad' is assumed to have references to
P and to dP.
If nesterov is False, the computation is:
grad()
mP[:] = drate*dP + mrate*mP
P[:] = P + mP
If nesterov is True, the computation is:
P[:] = P + mrate*mP
grad()
mP[:] = drate*dP + mrate*mP
P[:] = P + drate*dP
"""
assert callable(grad)
if nesterov:
# P[:] += mrate*mP
ext_dll().api_gradstep_nesterov1(P._ptr,mP._ptr,mrate._ptr)
# dP[:] = gradient at P + mrate*mP
grad()
# mP[:] = drate*dP + mrate*mP
# P[:] += drate*dP
ext_dll().api_gradstep_nesterov2(P._ptr,dP._ptr,drate._ptr,mP._ptr,mrate._ptr)
else:
# dP[:] = gradient at P
grad()
# mP[:] = drate*dP + mrate*mP
# P[:] = P + mP
ext_dll().api_gradstep(P._ptr,dP._ptr,drate._ptr,mP._ptr,mrate._ptr)
return
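# --- Illustrative sketch (not part of the original module) ---
# A hedged plain-Python transcription of the two update schedules documented
# in the gradstep docstring; the real work happens inside the fused
# api_gradstep* kernels. P, dP and mP are assumed to be array-like buffers
# (e.g. NumPy arrays) and grad() fills dP in place, as described above.
def _momentum_step_reference(P, dP, mP, drate, mrate, grad, nesterov=False):
    if nesterov:
        P += mrate * mP                    # look-ahead step
        grad()                             # dP[:] = gradient at the look-ahead point
        mP[:] = drate * dP + mrate * mP
        P += drate * dP
    else:
        grad()                             # dP[:] = gradient at current P
        mP[:] = drate * dP + mrate * mP
        P += mP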
#######################################################################
def madd_bcast(A,b,k,dst):
"""
Equivalent to dst[i] += A[i] * b[(i/k) % b.size]
where dst, A and b are all treated as 1D vectors.
"""
if np.isscalar(b):
dst += A*b
else:
if isinstance(b,np.ndarray):
b = asarray(b,dtype=A.dtype)
ext_dll().api_madd_bcast(A._ptr,b._ptr,k,dst._ptr)
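# --- Illustrative sketch (not part of the original module) ---
# A hedged NumPy reference for the broadcasted multiply-add described in the
# madd_bcast docstring, treating A, b and dst as flat 1-D vectors.
def _madd_bcast_reference(A, b, k, dst):
    import numpy as np
    idx = (np.arange(A.size) // k) % b.size
    dst += A * b[idx]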
#######################################################################
def maskout(M,A):
"""
Equivalent to A[i] = M[i] ? A[i] : 0 where M is of dtype bool.
Notice that this replaces NaN with zero, unlike A *= M
"""
ext_dll().api_maskout(M._ptr,A._ptr)
#######################################################################
def dropout_fp_train(X,rate,matchrows):
# If matchrows=True, then every pair of rows will have the same mask.
# Used for dropout with reverse complement enabled.
Z = empty_like(X)
M = empty(X.shape,dtype=bool)
ext_dll().api_dropout_fp_tr(X._ptr,rate._ptr,Z._ptr,M._ptr,matchrows)
return Z,M
def dropout_fp_test(X,rate):
Z = empty_like(X)
ext_dll().api_dropout_fp_te(X._ptr,rate._ptr,Z._ptr)
return Z
def dropout_bp_tr(dZ,M):
dX = empty_like(dZ)
ext_dll().api_dropout_bp_tr(dZ._ptr,M._ptr,dX._ptr)
return dX
def dropout_bp_te(dZ,rate):
dX = empty_like(dZ)
ext_dll().api_dropout_bp_te(dZ._ptr,rate._ptr,dX._ptr)
return dX
#######################################################################
def blockwise_dot(X, W, nblock):
"""
Computes Z[:,i] = dot(X[:,i],W[i,:]) for each submatrix indexed here by i.
Special case: if X.shape[1]*nblock == W.shape[0] then
the computation is dot(X,W[i,:]) each time.
"""
if nblock == 1:
return dot(X, W)
Z = empty((X.shape[0],W.shape[1]*nblock), X.dtype)
ext_dll().api_blockwise_dot(nblock, X._ptr, W._ptr, Z._ptr)
return Z
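# --- Illustrative sketch (not part of the original module) ---
# A hedged NumPy reference for the block-diagonal product above, assuming the
# i-th column block of X pairs with the i-th row block of W (per-instance
# weight matrices stacked along rows, matching W.inst_axis = 0 in full.py).
def _blockwise_dot_reference(X, W, nblock):
    import numpy as np
    k = W.shape[0] // nblock                     # inputs per block
    m = W.shape[1]                               # outputs per block
    Z = np.empty((X.shape[0], m * nblock), dtype=X.dtype)
    for i in range(nblock):
        Z[:, i*m:(i+1)*m] = X[:, i*k:(i+1)*k].dot(W[i*k:(i+1)*k, :])
    return Z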
def blockwise_dot_nt(dZ, W, nblock):
"""
Computes dX[:,i] = dot_nt(dZ[:,i],W[i,:]) for each submatrix indexed here by i.
"""
if nblock == 1:
return dot_nt(dZ, W)
dX = empty((dZ.shape[0],W.shape[0]), dZ.dtype)
ext_dll().api_blockwise_dot_nt(nblock, dZ._ptr, W._ptr, dX._ptr)
return dX
def blockwise_dot_tn(X, dZ, nblock, W):
"""
Computes dW[i,:] = dot_tn(X[:,i],dZ[:,i]) for each submatrix indexed here by i.
"""
if nblock == 1:
return dot_tn(X, dZ)
dW = empty_like(W)
ext_dll().api_blockwise_dot_tn(nblock, X._ptr, dZ._ptr, dW._ptr)
return dW
def blockwise_dot_combined(X, W, nblock, Xbroadcast):
"""
A version of blockwise_dot that works by combining a LIST of X matrices,
each containing its own blocks of columns, rather than a single X matrix.
"""
Z = empty((X[0].shape[0], W.shape[1]*nblock), X[0].dtype)
Xoffset = 0
for Xindex in range(len(X)):
if X[Xindex] is not None:
ext_dll().api_blockwise_dot_combined(nblock, Xoffset, Xbroadcast[Xindex], X[Xindex]._ptr, W._ptr, Z._ptr)
Xoffset += X[Xindex].shape[1] // (1 if Xbroadcast[Xindex] else nblock)
return Z
def blockwise_dot_nt_combined(X, dZ, W, nblock, Xbroadcast):
"""
A version of blockwise_dot_nt that generates a LIST of output dX matrices
each containing its own blocks of columns.
"""
dX = []
Xoffset = 0
for Xindex in range(len(X)):
if X[Xindex] is not None:
if not Xbroadcast[Xindex]:
dX.append(zeros_like(X[Xindex]))
ext_dll().api_blockwise_dot_nt_combined(nblock, Xoffset, Xbroadcast[Xindex], dX[-1]._ptr, W._ptr, dZ._ptr)
else:
dX.append(None) # Can't currently backpropagate to a broadcasted input
Xoffset += X[Xindex].shape[1] // (1 if Xbroadcast[Xindex] else nblock)
else:
            dX.append(None) # No input was provided for this slot, so there is no gradient to return
return dX
def blockwise_dot_tn_combined(X, dZ, W, nblock, Xbroadcast, dWmask=None):
"""
A version of blockwise_dot_tn that fills the rows of the return value dW
using a LIST of input X matrices, each containing its own blocks of columns.
"""
dW = empty_like(W)
if dWmask is not None:
assert len(dWmask) == len(X)
dW[:] = 0
Xoffset = 0
for Xindex in range(len(X)):
if X[Xindex] is not None:
if dWmask is None or dWmask[Xindex]:
ext_dll().api_blockwise_dot_tn_combined(nblock, Xoffset, Xbroadcast[Xindex], X[Xindex]._ptr, dW._ptr, dZ._ptr)
Xoffset += X[Xindex].shape[1] // (1 if Xbroadcast[Xindex] else nblock)
return dW
def calc_Zmask(Z, Zmask):
ext_dll().api_calc_zmask(Z._ptr, Zmask._ptr)
|
DeepBind-master
|
code/libs/deepity/deepity/_ext/deepity_smat.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
# -*- coding: Latin-1 -*-
"""Graphviz's dot language Python interface.
This module provides a full interface to create, handle, modify
and process graphs in Graphviz's dot language.
References:
pydot Homepage: http://code.google.com/p/pydot/
Graphviz: http://www.graphviz.org/
DOT Language: http://www.graphviz.org/doc/info/lang.html
Programmed and tested with Graphviz 2.26.3 and Python 2.6 on OSX 10.6.4
Copyright (c) 2005-2011 Ero Carrera <ero.carrera@gmail.com>
Distributed under MIT license [http://opensource.org/licenses/mit-license.html].
"""
__revision__ = "$LastChangedRevision: 28 $"
__author__ = 'Ero Carrera'
__version__ = '1.0.%d' % int( __revision__[21:-2] )
__license__ = 'MIT'
import os
import re
import subprocess
import tempfile
import copy
GRAPH_ATTRIBUTES = set( ['Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
# for subgraphs
'rank' ] )
EDGE_ATTRIBUTES = set( ['URL', 'arrowhead', 'arrowsize', 'arrowtail',
'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
'rank' ] )
NODE_ATTRIBUTES = set( ['URL', 'color', 'colorscheme', 'comment',
'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
'target', 'tooltip', 'vertices', 'width', 'z',
# The following are attributes dot2tex
'texlbl', 'texmode' ] )
CLUSTER_ATTRIBUTES = set( ['K', 'URL', 'bgcolor', 'color', 'colorscheme',
'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip'] )
#
# Extented version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/414283
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError, "A frozendict cannot be modified."
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k, v in arg.iteritems():
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append( frozendict(elm) )
else:
v_.append( elm )
arg[k] = tuple(v_)
args_.append( arg )
else:
args_.append( arg )
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.iteritems())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
dot_keywords = ['graph', 'subgraph', 'digraph', 'node', 'edge', 'strict']
id_re_alpha_nums = re.compile('^[_a-zA-Z][a-zA-Z0-9_,]*$', re.UNICODE)
id_re_alpha_nums_with_ports = re.compile('^[_a-zA-Z][a-zA-Z0-9_,:\"]*[a-zA-Z0-9_,\"]+$', re.UNICODE)
id_re_num = re.compile('^[0-9,]+$', re.UNICODE)
id_re_with_port = re.compile('^([^:]*):([^:]*)$', re.UNICODE)
id_re_dbl_quoted = re.compile('^\".*\"$', re.S|re.UNICODE)
id_re_html = re.compile('^<.*>$', re.S|re.UNICODE)
def needs_quotes( s ):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in dot_keywords:
return False
chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s):
return True
for test_re in [id_re_alpha_nums, id_re_num, id_re_dbl_quoted, id_re_html, id_re_alpha_nums_with_ports]:
if test_re.match(s):
return False
m = id_re_with_port.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True
def quote_if_necessary(s):
if isinstance(s, bool):
if s is True:
return 'True'
return 'False'
if not isinstance( s, basestring ):
return s
if not s:
return s
if needs_quotes(s):
replace = {'"' : r'\"',
"\n" : r'\n',
"\r" : r'\r'}
for (a,b) in replace.items():
s = s.replace(a, b)
return '"' + s + '"'
return s
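# Illustrative examples (added for clarity; not part of the original module):
#   quote_if_necessary('node_1')   -> 'node_1'       (valid ID, left unquoted)
#   quote_if_necessary('my node')  -> '"my node"'    (the space forces quoting)
#   quote_if_necessary('graph')    -> 'graph'        (reserved keyword; the caller must add quotes manually)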
def graph_from_dot_data(data):
"""Load graph as defined by data in DOT format.
The data is assumed to be in DOT format. It will
be parsed and a Dot class will be returned,
representing the graph.
"""
return dot_parser.parse_dot_data(data)
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
"""
fd = file(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data)
def graph_from_edges(edge_list, node_prefix='', directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
    The graph is undirected by default; pass directed=True
    to create a directed graph instead.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
if isinstance(edge[1], str):
dst = node_prefix + edge[1]
else:
dst = node_prefix + str(edge[1])
e = Edge( src, dst )
graph.add_edge(e)
return graph
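# Illustrative usage (added; not part of the original module):
#   g = graph_from_edges([(1, 2), (2, 3)], node_prefix='n', directed=True)
#   g.write_svg('chain.svg')    # requires the Graphviz executables to be installed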
def graph_from_adjacency_matrix(matrix, node_prefix= u'', directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip+1
for e in r:
if e:
graph.add_edge(
                    Edge( node_prefix + str(node_orig),
                        node_prefix + str(node_dest)) )
node_dest += 1
node_orig += 1
return graph
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c*node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
                Edge( node_prefix + str(abs(nodes[0])),
                    node_prefix + str(nodes[1]) ))
if not directed:
graph.set_simplify(True)
return graph
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': '', 'sfdp': ''}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if os.path.isdir(path) :
for prg in progs.iterkeys():
if progs[prg]:
continue
if os.path.exists( os.path.join(path, prg) ):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg) + '"'
else:
progs[prg] = os.path.join(path, prg)
success = True
elif os.path.exists( os.path.join(path, prg + '.exe') ):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"'
else:
progs[prg] = os.path.join(path, prg + '.exe')
success = True
if success:
return progs
else:
return None
# The multi-platform version of this 'find_graphviz' function was
# contributed by Peter Cock
#
def find_graphviz():
"""Locate Graphviz's executables in the system.
Tries three methods:
First: Windows Registry (Windows only)
This requires Mark Hammond's pywin32 is installed.
Secondly: Search the path
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
Thirdly: Default install location (Windows only)
It will look for 'dot', 'twopi' and 'neato' in the default install
location under the "Program Files" directory.
It will return a dictionary containing the program names as keys
and their paths as values.
If this fails, it returns None.
"""
# Method 1 (Windows only)
#
if os.sys.platform == 'win32':
HKEY_LOCAL_MACHINE = 0x80000002
KEY_QUERY_VALUE = 0x0001
RegOpenKeyEx = None
RegQueryValueEx = None
RegCloseKey = None
try:
import win32api, win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegQueryValueEx = win32api.RegQueryValueEx
RegCloseKey = win32api.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
#
pass
try:
import ctypes
def RegOpenKeyEx(key, subkey, opt, sam):
result = ctypes.c_uint(0)
ctypes.windll.advapi32.RegOpenKeyExA(key, subkey, opt, sam, ctypes.byref(result))
return result.value
def RegQueryValueEx( hkey, valuename ):
data_type = ctypes.c_uint(0)
data_len = ctypes.c_uint(1024)
data = ctypes.create_string_buffer( 1024 )
res = ctypes.windll.advapi32.RegQueryValueExA(hkey, valuename, 0,
ctypes.byref(data_type), data, ctypes.byref(data_len))
return data.value
RegCloseKey = ctypes.windll.advapi32.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
#
pass
if RegOpenKeyEx is not None:
# Get the GraphViz install path from the registry
#
hkey = None
potentialKeys = [
"SOFTWARE\\ATT\\Graphviz",
"SOFTWARE\\AT&T Research Labs\\Graphviz",
]
for potentialKey in potentialKeys:
try:
hkey = RegOpenKeyEx( HKEY_LOCAL_MACHINE,
potentialKey, 0, KEY_QUERY_VALUE )
if hkey is not None:
path = RegQueryValueEx( hkey, "InstallPath" )
RegCloseKey( hkey )
                        # The registry variable might exist, left by old installations
# but with no value, in those cases we keep searching...
if not path:
continue
# Now append the "bin" subdirectory:
#
path = os.path.join(path, "bin")
progs = __find_executables(path)
if progs is not None :
#print "Used Windows registry"
return progs
except Exception, excp:
#raise excp
pass
else:
break
# Method 2 (Linux, Windows etc)
#
if os.environ.has_key('PATH'):
for path in os.environ['PATH'].split(os.pathsep):
progs = __find_executables(path)
if progs is not None :
#print "Used path"
return progs
# Method 3 (Windows only)
#
if os.sys.platform == 'win32':
# Try and work out the equivalent of "C:\Program Files" on this
# machine (might be on drive D:, or in a different language)
#
if os.environ.has_key('PROGRAMFILES'):
# Note, we could also use the win32api to get this
# information, but win32api may not be installed.
path = os.path.join(os.environ['PROGRAMFILES'], 'ATT', 'GraphViz', 'bin')
else:
#Just in case, try the default...
path = r"C:\Program Files\att\Graphviz\bin"
progs = __find_executables(path)
if progs is not None :
#print "Used default install location"
return progs
for path in (
'/usr/bin', '/usr/local/bin',
'/opt/local/bin',
'/opt/bin', '/sw/bin', '/usr/share',
'/Applications/Graphviz.app/Contents/MacOS/' ):
progs = __find_executables(path)
if progs is not None :
#print "Used path"
return progs
# Failed to find GraphViz
#
return None
class Common:
"""Common information to several classes.
Should not be directly used, several classes are derived from
this one.
"""
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def __get_attribute__(self, attr):
"""Look for default attributes for this node"""
attr_val = self.obj_dict['attributes'].get(attr, None)
if attr_val is None:
# get the defaults for nodes/edges
default_node_name = self.obj_dict['type']
# The defaults for graphs are set on a node named 'graph'
if default_node_name in ('subgraph', 'digraph', 'cluster'):
default_node_name = 'graph'
g = self.get_parent_graph()
if g is not None:
defaults = g.get_node( default_node_name )
else:
return None
# Multiple defaults could be set by having repeated 'graph [...]'
# 'node [...]', 'edge [...]' statements. In such case, if the
# same attribute is set in different statements, only the first
# will be returned. In order to get all, one would call the
# get_*_defaults() methods and handle those. Or go node by node
# (of the ones specifying defaults) and modify the attributes
# individually.
#
if not isinstance(defaults, (list, tuple)):
defaults = [defaults]
for default in defaults:
attr_val = default.obj_dict['attributes'].get(attr, None)
if attr_val:
return attr_val
else:
return attr_val
return None
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
def get_parent_graph(self):
return self.obj_dict.get('parent_graph', None)
def set(self, name, value):
"""Set an attribute value by name.
Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
set_'name'(value)
which are defined for all the existing attributes.
"""
self.obj_dict['attributes'][name] = value
def get(self, name):
"""Get an attribute value by name.
Given an attribute 'name' it will get its value.
There's always the possibility of using the methods:
get_'name'()
which are defined for all the existing attributes.
"""
return self.obj_dict['attributes'].get(name, None)
def get_attributes(self):
""""""
return self.obj_dict['attributes']
def set_sequence(self, seq):
self.obj_dict['sequence'] = seq
def get_sequence(self):
return self.obj_dict['sequence']
def create_attribute_methods(self, obj_attributes):
#for attr in self.obj_dict['attributes']:
for attr in obj_attributes:
# Generate all the Setter methods.
#
self.__setattr__( 'set_'+attr, lambda x, a=attr : self.obj_dict['attributes'].__setitem__(a, x) )
# Generate all the Getter methods.
#
self.__setattr__('get_'+attr, lambda a=attr : self.__get_attribute__(a))
class Error(Exception):
"""General error handling class.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InvocationException(Exception):
"""To indicate that a ploblem occurred while running any of the GraphViz executables.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class Node(object, Common):
"""A graph node.
This class represents a graph's node with all its attributes.
node(name, attribute=value, ...)
name: node's name
All the attributes defined in the Graphviz dot language should
be supported.
"""
def __init__(self, name = '', obj_dict = None, **attrs):
#
# Nodes will take attributes of all other types because the defaults
# for any GraphViz object are dealt with as if they were Node definitions
#
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict[ 'attributes' ] = dict( attrs )
self.obj_dict[ 'type' ] = 'node'
self.obj_dict[ 'parent_graph' ] = None
self.obj_dict[ 'parent_node_list' ] = None
self.obj_dict[ 'sequence' ] = None
# Remove the compass point
#
port = None
if isinstance(name, basestring) and not name.startswith('"'):
idx = name.find(':')
if idx > 0 and idx+1 < len(name):
name, port = name[:idx], name[idx:]
if isinstance(name, (long, int)):
name = str(name)
self.obj_dict['name'] = quote_if_necessary( name )
self.obj_dict['port'] = port
self.create_attribute_methods(NODE_ATTRIBUTES)
def set_name(self, node_name):
"""Set the node's name."""
self.obj_dict['name'] = node_name
def get_name(self):
"""Get the node's name."""
return self.obj_dict['name']
def get_port(self):
"""Get the node's port."""
return self.obj_dict['port']
def add_style(self, style):
styles = self.obj_dict['attributes'].get('style', None)
if not styles and style:
styles = [ style ]
else:
styles = styles.split(',')
styles.append( style )
self.obj_dict['attributes']['style'] = ','.join( styles )
def to_string(self):
"""Returns a string representation of the node in dot language.
"""
# RMF: special case defaults for node, edge and graph properties.
#
node = quote_if_necessary(self.obj_dict['name'])
node_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
node_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
node_attr.append( attr )
        # No point in having nodes setting any defaults if they don't set
# any attributes...
#
if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
return ''
node_attr = ', '.join(node_attr)
if node_attr:
node += ' [' + node_attr + ']'
return node + ';'
class Edge(object, Common ):
"""A graph edge.
This class represents a graph's edge with all its attributes.
edge(src, dst, attribute=value, ...)
src: source node's name
dst: destination node's name
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_label, set_fontname
or directly by using the instance's special dictionary:
Edge.obj_dict['attributes'][attribute name], i.e.
edge_instance.obj_dict['attributes']['label']
edge_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, src='', dst='', obj_dict=None, **attrs):
if isinstance(src, (list, tuple)) and dst == '':
src, dst = src
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict[ 'attributes' ] = dict( attrs )
self.obj_dict[ 'type' ] = 'edge'
self.obj_dict[ 'parent_graph' ] = None
self.obj_dict[ 'parent_edge_list' ] = None
self.obj_dict[ 'sequence' ] = None
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
points = ( quote_if_necessary( src) , quote_if_necessary( dst) )
self.obj_dict['points'] = points
self.create_attribute_methods(EDGE_ATTRIBUTES)
def get_source(self):
"""Get the edges source node name."""
return self.obj_dict['points'][0]
def get_destination(self):
"""Get the edge's destination node name."""
return self.obj_dict['points'][1]
def __hash__(self):
return hash( hash(self.get_source()) + hash(self.get_destination()) )
def __eq__(self, edge):
"""Compare two edges.
        If the parent graph is directed, edges are compared
        by source and destination, so A->B != B->A.
If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A
"""
if not isinstance(edge, Edge):
            raise Error, "Can't compare an edge to a non-edge object."
if self.get_parent_graph().get_top_graph_type() == 'graph':
# If the graph is undirected, the edge has neither
# source nor destination.
#
if ( ( self.get_source() == edge.get_source() and self.get_destination() == edge.get_destination() ) or
( edge.get_source() == self.get_destination() and edge.get_destination() == self.get_source() ) ):
return True
else:
if self.get_source()==edge.get_source() and self.get_destination()==edge.get_destination() :
return True
return False
def parse_node_ref(self, node_str):
if not isinstance(node_str, str):
return node_str
if node_str.startswith('"') and node_str.endswith('"'):
return node_str
node_port_idx = node_str.rfind(':')
if node_port_idx>0 and node_str[0]=='"' and node_str[node_port_idx-1]=='"':
return node_str
if node_port_idx>0:
a = node_str[:node_port_idx]
b = node_str[node_port_idx+1:]
node = quote_if_necessary(a)
node += ':'+quote_if_necessary(b)
return node
return node_str
def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref( self.get_source() )
dst = self.parse_node_ref( self.get_destination() )
if isinstance(src, frozendict):
edge = [ Subgraph(obj_dict=src).to_string() ]
elif isinstance(src, (int, long)):
edge = [ str(src) ]
else:
edge = [ src ]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph' ):
edge.append( '->' )
else:
edge.append( '--' )
if isinstance(dst, frozendict):
edge.append( Subgraph(obj_dict=dst).to_string() )
elif isinstance(dst, (int, long)):
edge.append( str(dst) )
else:
edge.append( dst )
edge_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
edge_attr.append( attr )
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append( ' [' + edge_attr + ']' )
return ' '.join(edge) + ';'
class Graph(object, Common):
"""Class representing a graph in Graphviz's dot language.
This class implements the methods to work on a representation
of a graph in Graphviz's dot language.
graph( graph_name='G', graph_type='digraph',
strict=False, suppress_disconnected=False, attribute=value, ...)
graph_name:
the graph's name
graph_type:
can be 'graph' or 'digraph'
suppress_disconnected:
defaults to False, which will remove from the
graph any disconnected nodes.
simplify:
if True it will avoid displaying equal edges, i.e.
            only one edge between two nodes, removing the
duplicated ones.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Graph.obj_dict['attributes'][attribute name], i.e.
graph_instance.obj_dict['attributes']['label']
graph_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='G', obj_dict=None, graph_type='digraph', strict=False,
suppress_disconnected=False, simplify=False, **attrs):
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
self.obj_dict['attributes'] = dict(attrs)
if graph_type not in ['graph', 'digraph']:
raise Error, 'Invalid type "%s". Accepted graph types are: graph, digraph, subgraph' % graph_type
self.obj_dict['name'] = quote_if_necessary(graph_name)
self.obj_dict['type'] = graph_type
self.obj_dict['strict'] = strict
self.obj_dict['suppress_disconnected'] = suppress_disconnected
self.obj_dict['simplify'] = simplify
self.obj_dict['current_child_sequence'] = 1
self.obj_dict['nodes'] = dict()
self.obj_dict['edges'] = dict()
self.obj_dict['subgraphs'] = dict()
self.set_parent_graph(self)
self.create_attribute_methods(GRAPH_ATTRIBUTES)
def get_graph_type(self):
return self.obj_dict['type']
def get_top_graph_type(self):
parent = self
while True:
parent_ = parent.get_parent_graph()
if parent_ == parent:
break
parent = parent_
return parent.obj_dict['type']
def set_graph_defaults(self, **attrs):
self.add_node( Node('graph', **attrs) )
def get_graph_defaults(self, **attrs):
graph_nodes = self.get_node('graph')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_node_defaults(self, **attrs):
self.add_node( Node('node', **attrs) )
def get_node_defaults(self, **attrs):
graph_nodes = self.get_node('node')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_edge_defaults(self, **attrs):
self.add_node( Node('edge', **attrs) )
def get_edge_defaults(self, **attrs):
graph_nodes = self.get_node('edge')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_simplify(self, simplify):
"""Set whether to simplify or not.
If True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
duplicated ones.
"""
self.obj_dict['simplify'] = simplify
def get_simplify(self):
"""Get whether to simplify or not.
Refer to set_simplify for more information.
"""
return self.obj_dict['simplify']
def set_type(self, graph_type):
"""Set the graph's type, 'graph' or 'digraph'."""
self.obj_dict['type'] = graph_type
def get_type(self):
"""Get the graph's type, 'graph' or 'digraph'."""
return self.obj_dict['type']
def set_name(self, graph_name):
"""Set the graph's name."""
self.obj_dict['name'] = graph_name
def get_name(self):
"""Get the graph's name."""
return self.obj_dict['name']
def set_strict(self, val):
"""Set graph to 'strict' mode.
This option is only valid for top level graphs.
"""
self.obj_dict['strict'] = val
def get_strict(self, val):
"""Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.
"""
return self.obj_dict['strict']
def set_suppress_disconnected(self, val):
"""Suppress disconnected nodes in the output graph.
This option will skip nodes in the graph with no incoming or outgoing
edges. This option works also for subgraphs and has effect only in the
current graph/subgraph.
"""
self.obj_dict['suppress_disconnected'] = val
def get_suppress_disconnected(self, val):
"""Get if suppress disconnected is set.
Refer to set_suppress_disconnected for more information.
"""
return self.obj_dict['suppress_disconnected']
def get_next_sequence_number(self):
seq = self.obj_dict['current_child_sequence']
self.obj_dict['current_child_sequence'] += 1
return seq
def add_node(self, graph_node):
"""Adds a node object to the graph.
It takes a node object as its only argument and returns
None.
"""
if not isinstance(graph_node, Node):
raise TypeError('add_node() received a non node class object: ' + str(graph_node))
node = self.get_node(graph_node.get_name())
if not node:
self.obj_dict['nodes'][graph_node.get_name()] = [ graph_node.obj_dict ]
#self.node_dict[graph_node.get_name()] = graph_node.attributes
graph_node.set_parent_graph(self.get_parent_graph())
else:
self.obj_dict['nodes'][graph_node.get_name()].append( graph_node.obj_dict )
graph_node.set_sequence(self.get_next_sequence_number())
def del_node(self, name, index=None):
"""Delete a node from the graph.
Given a node's name all node(s) with that same name
will be deleted if 'index' is not specified or set
to None.
If there are several nodes with that same name and
'index' is given, only the node in that position
will be deleted.
'index' should be an integer specifying the position
of the node to delete. If index is larger than the
number of nodes with that name, no action is taken.
If nodes are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(name, Node):
name = name.get_name()
if self.obj_dict['nodes'].has_key(name):
if index is not None and index < len(self.obj_dict['nodes'][name]):
del self.obj_dict['nodes'][name][index]
return True
else:
del self.obj_dict['nodes'][name]
return True
return False
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
if self.obj_dict['nodes'].has_key(name):
match.extend( [ Node( obj_dict = obj_dict ) for obj_dict in self.obj_dict['nodes'][name] ])
return match
def get_nodes(self):
"""Get the list of Node instances."""
return self.get_node_list()
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
"""
node_objs = list()
for node, obj_dict_list in self.obj_dict['nodes'].iteritems():
node_objs.extend( [ Node( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return node_objs
def add_edge(self, graph_edge):
"""Adds an edge object to the graph.
        It takes an edge object as its only argument and returns
None.
"""
if not isinstance(graph_edge, Edge):
raise TypeError('add_edge() received a non edge class object: ' + str(graph_edge))
edge_points = ( graph_edge.get_source(), graph_edge.get_destination() )
if self.obj_dict['edges'].has_key(edge_points):
edge_list = self.obj_dict['edges'][edge_points]
edge_list.append(graph_edge.obj_dict)
else:
self.obj_dict['edges'][edge_points] = [ graph_edge.obj_dict ]
graph_edge.set_sequence( self.get_next_sequence_number() )
graph_edge.set_parent_graph( self.get_parent_graph() )
def del_edge(self, src_or_list, dst=None, index=None):
"""Delete an edge from the graph.
Given an edge's (source, destination) node names all
matching edges(s) will be deleted if 'index' is not
specified or set to None.
If there are several matching edges and 'index' is
given, only the edge in that position will be deleted.
'index' should be an integer specifying the position
of the edge to delete. If index is larger than the
number of matching edges, no action is taken.
If edges are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance( src_or_list, (list, tuple)):
if dst is not None and isinstance(dst, (int, long)):
index = dst
src, dst = src_or_list
else:
src, dst = src_or_list, dst
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
if self.obj_dict['edges'].has_key( (src, dst) ):
if index is not None and index < len(self.obj_dict['edges'][(src, dst)]):
del self.obj_dict['edges'][(src, dst)][index]
return True
else:
del self.obj_dict['edges'][(src, dst)]
return True
return False
def get_edge(self, src_or_list, dst=None):
"""Retrieved an edge from the graph.
Given an edge's source and destination the corresponding
Edge instance(s) will be returned.
If one or more edges exist with that source and destination
a list of Edge instances is returned.
An empty list is returned otherwise.
"""
if isinstance( src_or_list, (list, tuple)) and dst is None:
edge_points = tuple(src_or_list)
edge_points_reverse = (edge_points[1], edge_points[0])
else:
edge_points = (src_or_list, dst)
edge_points_reverse = (dst, src_or_list)
match = list()
if self.obj_dict['edges'].has_key( edge_points ) or (
self.get_top_graph_type() == 'graph' and self.obj_dict['edges'].has_key( edge_points_reverse )):
edges_obj_dict = self.obj_dict['edges'].get(
edge_points,
self.obj_dict['edges'].get( edge_points_reverse, None ))
for edge_obj_dict in edges_obj_dict:
match.append( Edge( edge_points[0], edge_points[1], obj_dict = edge_obj_dict ) )
return match
def get_edges(self):
return self.get_edge_list()
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for edge, obj_dict_list in self.obj_dict['edges'].iteritems():
edge_objs.extend( [ Edge( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return edge_objs
def add_subgraph(self, sgraph):
"""Adds an subgraph object to the graph.
It takes a subgraph object as its only argument and returns
None.
"""
if not isinstance(sgraph, Subgraph) and not isinstance(sgraph, Cluster):
raise TypeError('add_subgraph() received a non subgraph class object:' + str(sgraph))
if self.obj_dict['subgraphs'].has_key(sgraph.get_name()):
sgraph_list = self.obj_dict['subgraphs'][ sgraph.get_name() ]
sgraph_list.append( sgraph.obj_dict )
else:
self.obj_dict['subgraphs'][ sgraph.get_name() ] = [ sgraph.obj_dict ]
sgraph.set_sequence( self.get_next_sequence_number() )
sgraph.set_parent_graph( self.get_parent_graph() )
def get_subgraph(self, name):
"""Retrieved a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if self.obj_dict['subgraphs'].has_key( name ):
sgraphs_obj_dict = self.obj_dict['subgraphs'].get( name )
for obj_dict_list in sgraphs_obj_dict:
#match.extend( Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list )
match.append( Subgraph( obj_dict = obj_dict_list ) )
return match
def get_subgraphs(self):
return self.get_subgraph_list()
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph, obj_dict_list in self.obj_dict['subgraphs'].iteritems():
sgraph_objs.extend( [ Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return sgraph_objs
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
for obj_list in self.obj_dict['nodes'].itervalues():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['edges'].itervalues():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['subgraphs'].itervalues():
for obj in obj_list:
Graph(obj_dict=obj).set_parent_graph(parent_graph)
def to_string(self):
"""Returns a string representation of the graph in dot language.
        It will return the graph and all its subelements in string form.
"""
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self==self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if 'show_keyword' in self.obj_dict and self.obj_dict['show_keyword']:
graph.append( 'subgraph {\n' )
else:
graph.append( '{\n' )
else:
graph.append( '%s %s {\n' % (self.obj_dict['type'], self.obj_dict['name']) )
for attr in self.obj_dict['attributes'].iterkeys():
if self.obj_dict['attributes'].get(attr, None) is not None:
val = self.obj_dict['attributes'].get(attr)
if val is not None:
graph.append( '%s=%s' % (attr, quote_if_necessary(val)) )
else:
graph.append( attr )
graph.append( ';\n' )
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].itervalues():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = zip( *[obj['points'] for obj in edge_obj_dicts] )
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].itervalues():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].itervalues():
sgraph_obj_dicts.extend(sg)
obj_list = [ (obj['sequence'], obj) for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts) ]
obj_list.sort()
for idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append( node.to_string()+'\n' )
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append( edge.to_string() + '\n' )
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append( sgraph.to_string()+'\n' )
graph.append( '}\n' )
return ''.join(graph)
class Subgraph(Graph):
"""Class representing a subgraph in Graphviz's dot language.
This class implements the methods to work on a representation
of a subgraph in Graphviz's dot language.
subgraph(graph_name='subG', suppress_disconnected=False, attribute=value, ...)
graph_name:
the subgraph's name
suppress_disconnected:
defaults to false, which will remove from the
subgraph any disconnected nodes.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Subgraph.obj_dict['attributes'][attribute name], i.e.
subgraph_instance.obj_dict['attributes']['label']
subgraph_instance.obj_dict['attributes']['fontname']
"""
# RMF: subgraph should have all the attributes of graph so it can be passed
# as a graph to all methods
#
def __init__(self, graph_name='', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
class Cluster(Graph):
"""Class representing a cluster in Graphviz's dot language.
This class implements the methods to work on a representation
of a cluster in Graphviz's dot language.
cluster(graph_name='subG', suppress_disconnected=False, attribute=value, ...)
graph_name:
the cluster's name (the string 'cluster' will be always prepended)
suppress_disconnected:
defaults to false, which will remove from the
cluster any disconnected nodes.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_color, set_fontname
or using the instance's attributes:
Cluster.obj_dict['attributes'][attribute name], i.e.
cluster_instance.obj_dict['attributes']['label']
cluster_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='subG', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
self.obj_dict['name'] = 'cluster_'+graph_name
self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
def __init__(self, *argsl, **argsd):
Graph.__init__(self, *argsl, **argsd)
self.shape_files = list()
self.progs = None
self.formats = ['canon', 'cmap', 'cmapx', 'cmapx_np', 'dia', 'dot',
'fig', 'gd', 'gd2', 'gif', 'hpgl', 'imap', 'imap_np', 'ismap',
'jpe', 'jpeg', 'jpg', 'mif', 'mp', 'pcl', 'pdf', 'pic', 'plain',
'plain-ext', 'png', 'ps', 'ps2', 'svg', 'svgz', 'vml', 'vmlz',
'vrml', 'vtx', 'wbmp', 'xdot', 'xlib' ]
self.prog = 'dot'
# Automatically creates all the methods enabling the creation
# of output in any of the supported formats.
for frmt in self.formats:
self.__setattr__(
'create_'+frmt,
lambda f=frmt, prog=self.prog : self.create(format=f, prog=prog))
f = self.__dict__['create_'+frmt]
f.__doc__ = '''Refer to the docstring accompanying the 'create' method for more information.'''
for frmt in self.formats+['raw']:
self.__setattr__(
'write_'+frmt,
lambda path, f=frmt, prog=self.prog : self.write(path, format=f, prog=prog))
f = self.__dict__['write_'+frmt]
f.__doc__ = '''Refer to the docstring accompanying the 'write' method for more information.'''
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
If the graph needs graphic objects to be used as shapes or otherwise
those need to be in the same folder as the graph is going to be rendered
from. Alternatively the absolute path to the files can be specified when
including the graphics in the graph.
The files in the location pointed to by the path(s) specified as arguments
to this method will be copied to the same temporary location where the
graph is going to be rendered.
"""
if isinstance( file_paths, basestring ):
self.shape_files.append( file_paths )
if isinstance( file_paths, (list, tuple) ):
self.shape_files.extend( file_paths )
def set_prog(self, prog):
"""Sets the default program.
Sets the default program in charge of processing
the dot file into a graph.
"""
self.prog = prog
def set_graphviz_executables(self, paths):
"""This method allows to manually specify the location of the GraphViz executables.
The argument to this method should be a dictionary where the keys are as follows:
{'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': ''}
and the values are the paths to the corresponding executable, including the name
of the executable itself.
"""
self.progs = paths
def write(self, path, prog=None, format='raw'):
"""Writes a graph to a file.
Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object and in the format specified by
'format'.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
The output can be processed by any of graphviz tools, defined
in 'prog', which defaults to 'dot'
Returns True or False according to the success of the write
operation.
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
"""
if prog is None:
prog = self.prog
dot_fd = file(path, "w+b")
if format == 'raw':
data = self.to_string()
if isinstance(data, basestring):
if not isinstance(data, unicode):
try:
data = unicode(data, 'utf-8')
except:
pass
try:
data = data.encode('utf-8')
except:
pass
dot_fd.write(data)
else:
dot_fd.write(self.create(prog, format))
dot_fd.close()
return True
def create(self, prog=None, format='ps'):
"""Creates and returns a Postscript representation of the graph.
create will write the graph to a temporary dot file and process
        it with the program given by 'prog' (which defaults to 'dot'),
        reading the Postscript output and returning it as a string if the
        operation is successful.
On failure None is returned.
There's also the preferred possibility of using:
create_'format'(prog='program')
which are automatically defined for all the supported formats.
[create_ps(), create_gif(), create_dia(), ...]
        If 'prog' is a list instead of a string the first item is expected
to be the program name, followed by any optional command-line
arguments for it:
[ 'twopi', '-Tdot', '-s10' ]
"""
if prog is None:
prog = self.prog
if isinstance(prog, (list, tuple)):
prog, args = prog[0], prog[1:]
else:
args = []
if self.progs is None:
self.progs = find_graphviz()
if self.progs is None:
raise InvocationException(
'GraphViz\'s executables not found' )
if not self.progs.has_key(prog):
raise InvocationException(
'GraphViz\'s executable "%s" not found' % prog )
if not os.path.exists( self.progs[prog] ) or not os.path.isfile( self.progs[prog] ):
raise InvocationException(
'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self.progs[prog] )
tmp_fd, tmp_name = tempfile.mkstemp()
os.close(tmp_fd)
self.write(tmp_name)
tmp_dir = os.path.dirname(tmp_name )
# For each of the image files...
#
for img in self.shape_files:
# Get its data
#
f = file(img, 'rb')
f_data = f.read()
f.close()
# And copy it under a file with the same name in the temporary directory
#
f = file( os.path.join( tmp_dir, os.path.basename(img) ), 'wb' )
f.write(f_data)
f.close()
cmdline = [self.progs[prog], '-T'+format, tmp_name] + args
p = subprocess.Popen(
cmdline,
cwd=tmp_dir,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stderr = p.stderr
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
stdout_output = ''.join(stdout_output)
if not stderr.closed:
stderr_output = list()
while True:
data = stderr.read()
if not data:
break
stderr_output.append(data)
stderr.close()
if stderr_output:
stderr_output = ''.join(stderr_output)
#pid, status = os.waitpid(p.pid, 0)
status = p.wait()
if status != 0 :
raise InvocationException(
'Program terminated with status: %d. stderr follows: %s' % (
status, stderr_output) )
elif stderr_output:
print stderr_output
# For each of the image files...
#
for img in self.shape_files:
# remove it
#
os.unlink( os.path.join( tmp_dir, os.path.basename(img) ) )
os.unlink(tmp_name)
return stdout_output
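# Illustrative end-to-end usage sketch (added for clarity; not part of pydot):
#   g = Dot(graph_type='digraph')
#   g.add_node(Node('a', shape='box'))
#   g.add_node(Node('b'))
#   g.add_edge(Edge('a', 'b', label='x'))
#   print g.to_string()       # dump the DOT source
#   g.write_png('ab.png')     # requires the Graphviz 'dot' executable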
|
DeepBind-master
|
code/libs/deepity/deepity/_io_/pydot.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from load import load,deferred_load
from write_svg import write_svg
|
DeepBind-master
|
code/libs/deepity/deepity/_io_/__init__.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from . import pydot
##########################################################################
def write_svg(filename,root_node):
nodes = []
edges = []
_collect_dot_graph(root_node,nodes,edges)
g = pydot.Dot(splines="ortho")
g_nodes = [pydot.Node(name=node.__class__.__name__,
shape="box")
for node in nodes]
for n in g_nodes:
g.add_node(n)
for tail,head in edges:
g_tail = g_nodes[nodes.index(tail.node)]
g_head = g_nodes[nodes.index(head.node)]
g.add_edge(pydot.Edge(g_tail,g_head,
taillabel=tail.name,
headlabel=head.name,
sametail=tail.name,
samehead=head.name,
fontname="courier",
fontsize=10,
arrowsize=0.4,
dir="both",
arrowtail="box",
arrowhead="obox"))
g.write_svg(filename)
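# Illustrative usage (added; the variable name below is hypothetical): render
# the dependency graph of a deepity model rooted at some node instance, e.g.
#   write_svg("model.svg", model_root_node)
# This relies on the Graphviz executables being available to pydot.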
def _collect_dot_graph(node,nodes,edges):
if node in nodes:
return
nodes.append(node)
for head in node.iplugs:
for tail in head.srcs:
            _collect_dot_graph(tail.node,nodes,edges)
for tail in node.oplugs:
for head in tail.dsts:
edge = (tail,head)
if edge not in edges:
edges.append(edge)
                _collect_dot_graph(head.node,nodes,edges)
|
DeepBind-master
|
code/libs/deepity/deepity/_io_/write_svg.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from ..node import node
from ..trainer import trainer
from .. import std
import numpy
import re
def _hpsearch_not_found_error(*args, **kwargs):
raise ImportError("Cannot create hpsearch object; the hpsearch module could not be imported.")
def _all_subclasses(basetype):
subtypes = basetype.__subclasses__()
subtypes += sum([_all_subclasses(st) for st in subtypes],[])
return subtypes
def convert_cfg_to_instance(cfg, cfg_globals):
# hack that totally breaks any last semblance of 'modularity' but nonetheless makes config files simpler-looking
if not isinstance(cfg, list):
return cfg
is_after_pooling = False
for i in range(len(cfg)):
if type(cfg[i]).__name__ in ("allpool", "maxpool", "avgpool"):
is_after_pooling = True
if type(cfg[i]).__name__ in ("full"):
combiner_layer = i
break
convnet = std.chain(cfg[:combiner_layer], name="seq")
combiner = cfg[combiner_layer]
outputnet = std.chain(cfg[combiner_layer+1:])
return cfg_globals["sequencenet"]([convnet], outputnet,
combiner_size = combiner.size,
combiner_decay = combiner.decay,
combiner_init = combiner.init,
)
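# Illustrative sketch (added; the layer names below are hypothetical) of the
# kind of config list this helper rewrites: everything up to the single 'full'
# layer becomes the "seq" convolution chain, the 'full' layer supplies the
# combiner size/decay/init hyperparameters, and the remaining layers become
# the output chain, e.g.
#   cfg = [convseq(...), maxpool(), full(size=32, decay=1e-4, init=1e-3), logistic(...)]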
##########################################################################
def load(filename, *args):
assert type(filename)==str, "Expected a string filename."
# Set up the namespace for config files, including all possible
# node types, and (if available) all possible hyperparameter types
load_cfg_locals = {}
load_cfg_globals = {}
load_cfg_globals.update({ ntype.__name__ : ntype for ntype in _all_subclasses(node)})
load_cfg_globals.update({ ttype.__name__ : ttype for ttype in _all_subclasses(trainer)})
load_cfg_globals.update({'numpy' : numpy })
try:
from .. import hpsearch
hparam_types = { name : getattr(hpsearch,name) for name in dir(hpsearch)
if type(getattr(hpsearch,name)) == type and issubclass(getattr(hpsearch,name),hpsearch.paramdef) }
load_cfg_globals.update(hparam_types)
except:
load_cfg_globals.update({ name : _hpsearch_not_found_error for name in ("choice","uniform","loguniform")})
try:
with open(filename) as f:
cfg_code = f.read()
# Convert "return model" to "__result=model"
retmatch = re.search("^return", cfg_code, re.MULTILINE)
if retmatch:
cfg_code = cfg_code[:retmatch.start(0)] + "__result=" + cfg_code[retmatch.end(0):]
# Execute the config file as a python script
exec cfg_code in load_cfg_globals,load_cfg_locals
# Extract the return value, either through a single __result or through specified 'args'
if "__result" in load_cfg_locals:
cfg_inst = load_cfg_locals["__result"] # If there was a return statement, return that object
elif len(args) > 0:
            cfg_inst = tuple(load_cfg_locals[arg] for arg in args) # Return just the requested objects, as a tuple
else:
cfg_inst = load_cfg_locals # Otherwise, return the complete dictionary of locals that were created
return convert_cfg_to_instance(cfg_inst, load_cfg_globals)
except Exception as err:
print "ERROR while parsing config file \"%s\"." % filename
raise
class deferred_load(object):
"""
A deferred_load object stores a filename and, at some future point,
will load the object from the file. If the file defines multiple objects,
e.g. a "model" and a "trainer", then use objectname to specify which object
should be constructed.
"""
def __init__(self,filename,objectname=None):
self.filename = filename
self.objectname = objectname
def create(self):
return load(self.filename)[self.objectname]
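# Usage sketch (hypothetical file/object names, illustration only):
#
#   loader = deferred_load("mymodel.cfg", objectname="model")
#   ...                       # nothing is parsed or constructed yet
#   model = loader.create()   # load("mymodel.cfg") runs now and its "model" local is returned
#
# Note that create() indexes the loaded result by objectname, so it assumes the
# config file defines the named object as a local rather than returning it.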
|
DeepBind-master
|
code/libs/deepity/deepity/_io_/load.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import numpy as np
import argparse
from smat import *
parser = argparse.ArgumentParser(description="Train a 784-600-400-10 neural net on MNIST and print out the error rates.")
parser.add_argument("--activation",type=str,dest="activation",metavar="[logistic|tanh|relu]",default="relu",help="Activation function to use. Default is relu.")
parser.add_argument("--device",type=int,default=None,help="Device # to use (which GPU). Default is 0.")
parser.add_argument("--f64",action="store_true",default=False,help="Use float64, if supported by the GPU. Default is float32.")
args = parser.parse_args()
if args.activation == "logistic":
def func(A): return 1./(1+exp(-A)) # logistic sigmoid activation function, returns Z=logistic(A)
def dfunc(Z): return Z-Z**2 # derivative d/dx(logistic)(x) = logistic(x)-logistic(x)^2
elif args.activation == "tanh":
def func(A): return tanh(A) # tanh sigmoid activation function, returns Z=tanh(A)
def dfunc(Z): return 1-Z**2 # derivative d/dx(tanh)(x) = 1-tanh(x)^2
elif args.activation == "relu":
def func(A): return maximum(0, A) # 'rectified linear' (relu) activation function, returns Z=max(0,A)
def dfunc(Z): return sign(Z) # derivative d/dx(relu)(x) = sign(max(0,x))
else:
quit("Unrecognized activation function \"%s\"." % args.activation)
if args.device is not None:
set_backend_options(device=args.device)
dt = float64 if args.f64 else float32
set_default_dtype(dt)
print "Using device", get_backend_info().device
##############################################################################
# Functions for loading DATA
##############################################################################
# Load the data as pairs (X,Y), where:
# X is a (batchsize x inputsize) matrix of inputs
# Y is a (batchsize x outputsize) matrix of corresponding targets
# MNIST has 60000 training examples total.
with np.load("data/mnist/mnist_train.npz") as mnist_file:
Xtrain = asarray(mnist_file['X'], dtype=dt)/255*2-1 # Load 60000 x 784 matrix of training inputs, scaled to range [-1,1]
Ytrain = asarray(mnist_file['Y'], dtype=dt) # Load 60000 x 10 matrix of training targets
with np.load("data/mnist/mnist_test.npz") as mnist_file:
Xtest = asarray(mnist_file['X'], dtype=dt)/255*2-1 # Load 10000 x 784 matrix of testing inputs, scaled to range [-1,1]
Ytest = asarray(mnist_file['Y'], dtype=dt) # Load 10000 x 10 matrix of testing targets
##############################################################################
# Script for TRAINING
##############################################################################
# Generate minibatches out of the full dataset.
trainsize = Xtrain.shape[0]
batchsize = 150 ; assert trainsize % batchsize == 0 # Make sure we use all training examples
batches = [(Xtrain[i:i+batchsize],Ytrain[i:i+batchsize]) for i in range(0,trainsize,batchsize)]
# Size of each neural net layer
inputsize = 28*28 # MNIST dataset is 28x28 pixel images, so 784 inputs
layersize1 = 600 # Number of neurons in first layer (filters)
layersize2 = 400 # Number of neurons in second layer
outputsize = 10 # 10 classes representing digits 0..9
# Parameters of the network
def randweights(n, m):
return rand(n, m)*0.002-0.001 # initialize to small values [-0.001,0.001]
W1 = randweights(inputsize, layersize1); b1 = randweights(1, layersize1)
W2 = randweights(layersize1, layersize2); b2 = randweights(1, layersize2)
W3 = randweights(layersize2, outputsize); b3 = randweights(1, outputsize)
# Evaluate our 3-layer neural network using weights W1,W2,W3 above.
# Returns final outputs and, if targets Y are given, returns gradients as well.
def nnet_eval(X, Y=None):
global W1,W2,W3,b1,b2,b3
# Forward propagate minibatch inputs X to generate predictions Z3
Z1 = func(dot( X, W1) + b1); # Z1 = outputs for layer 1
Z2 = func(dot(Z1, W2) + b2); # Z2 = outputs for layer 2
Z3 = softmax(dot(Z2, W3) + b3); # Z3 = outputs for layer 3 (final output)
if Y is None:
return Z3 # If no gradient requested, just return the predictions
# Backward propagate error between Z3 and targets Y
D3 = (Z3-Y)/batchsize # Backprop prediction error as delta to layer 3
D2 = dfunc(Z2)*dot_nt(D3, W3) # Backprop layer 3 deltas to layer 2
D1 = dfunc(Z1)*dot_nt(D2, W2) # Backprop layer 2 deltas to layer 1
# Compute gradient of training error w.r.t. network weights
dW3 = dot_tn(Z2, D3); db3 = sum(D3, axis=0) # Gradient w.r.t. W3
dW2 = dot_tn(Z1, D2); db2 = sum(D2, axis=0) # Gradient w.r.t. W2
dW1 = dot_tn( X, D1); db1 = sum(D1, axis=0) # Gradient w.r.t. W1
return Z3,dW1,dW2,dW3,db1,db2,db3 # Return predictions and gradients
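# Note on the backprop above (a restatement of the code, not new functionality):
# with Z_k = f(A_k), A_k = Z_{k-1} W_k + b_k and Z_0 = X, the chain rule gives
#   D3  = (Z3 - Y) / batchsize                       (softmax + cross-entropy residual, averaged)
#   D_k = f'(A_k) * (D_{k+1} W_{k+1}^T)  for k = 2,1 (elementwise product)
#   dW_k = Z_{k-1}^T D_k,   db_k = column sums of D_k
# which is exactly what dot_nt/dot_tn compute, with f' evaluated through
# dfunc(Z_k) because each derivative here can be written in terms of the output Z_k.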
##############################################################################
# Functions for PRINTING
##############################################################################
def error_rate(X, Y):
Z = nnet_eval(X).asnumpy()
num_errors = np.sum( Z[np.where(Y.asnumpy()==1)] != Z.max(axis=1) )
return 100.*num_errors/X.shape[0]
def print_status(epoch=None):
update_interval = 10
    if epoch is None or (epoch+1) % update_interval == 0: # Only print status every 'update_interval' epochs.
time_per_epoch = toc() / update_interval
train_error = error_rate(Xtrain, Ytrain)
test_error = error_rate(Xtest, Ytest)
status_msg = "start" if epoch is None else ("epoch[%d]"% (epoch+1))
time_msg = "(%.2fs/epoch)" % time_per_epoch if epoch is not None else ""
print "%s: %.2f%% train err, %.2f%% test err %s" % (status_msg, train_error, test_error, time_msg)
tic()
tic()
print_status()
##############################################################################
# TRAINING LOOP
##############################################################################
# Parameters of SGD training
num_epoch = 50
learn_rate = 0.02
momentum = 0.90
# Current momentum of the weights
mW1 = zeros_like(W1); mb1 = zeros_like(b1)
mW2 = zeros_like(W2); mb2 = zeros_like(b2)
mW3 = zeros_like(W3); mb3 = zeros_like(b3)
tic("training time")
# Start training!
for epoch in range(num_epoch):
for X,Y in batches:
# Generate predictions Z, along with per-layer gradient based on targets Y
Z,dW1,dW2,dW3,db1,db2,db3 = nnet_eval(X, Y)
# Gradient step with very basic momentum
for P,dP,mP in zip(( W1, W2, W3, b1, b2, b3),
(dW1, dW2, dW3, db1, db2, db3),
(mW1, mW2, mW3, mb1, mb2, mb3)):
dP *= -learn_rate
mP *= momentum
mP += dP
P += mP
# Print current classification error on training data
print_status(epoch)
print "Total training time = %.1fs" % toc("training time")
|
DeepBind-master
|
code/libs/smat/py/demo_nnet.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import numpy
import numpy.random
import smat
import smat.util
import argparse
import scipy.optimize
parser = argparse.ArgumentParser(description="Train a 784-1000-1000-10 neural net on MNIST and print out the error rates.")
parser.add_argument("-d","--device",type=int,default=None,help="The device to use, e.g. CUDA device.")
parser.add_argument("-m","--method",type=str,default="L-BFGS-B",help="The optimization algorithm to use. Valid values are COBYLA and L-BFGS-B.")
args = parser.parse_args()
if args.device is not None:
smat.set_backend_options(device=args.device)
print "Using device",smat.get_backend_info().device
print "Using method",args.method,"with float64"
# Load some sample bio data. Specifically this is a subset of the
# RNAcompete protein binding affinities from Ray et al., Nature, 2013.
y = numpy.load('data/rnac/rnac_subset.npz')['y']
n,m = y.shape
def objective_function(x,y,lib):
# The test objective function below happens to be that corresponding to
# "Variance Stabilization" (Huber et al., Bioinformatics, 2002).
# The specific objective is not important.
# The point is that the parameters can be sent to the GPU,
# evaluated, pulled back, and STILL be much faster than CPU.
# Shorthand for some functions that we're getting from lib=smat/numpy
asarray,arcsinh,sqrt,mean,log,sum = lib.asarray,lib.arcsinh,lib.sqrt,lib.mean,lib.log,lib.sum
# Push coefficients to GPU and get separate views to 'a' and 'b'
a,b = asarray(x).reshape((2,-1))
# Calculate h(y) and h'(y); see Huber et al., equation (6)
y = a+y*b
h = arcsinh(y)
hprime = b/sqrt(y**2+1)
# Calculate negative log-likelihood of current variance distribution; see Huber et al., equation (13)
hmean = mean(h,axis=1).reshape((-1,1))
term1 = log(sum((h-hmean)**2))
term2 = sum(log(hprime))
variance_nll = (.5*n*m)*term1 - term2
# Pull final objective value back from GPU
return float(variance_nll)
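# Sanity-check sketch (illustration only, not called anywhere): hprime above is
# the analytic derivative d/dy arcsinh(a + b*y) = b / sqrt((a + b*y)**2 + 1),
# which a quick finite-difference check with plain numpy confirms.
def _check_hprime(a=0.3, b=2.0, eps=1e-6):
    yv = numpy.linspace(0.1, 5.0, 9)
    numeric  = (numpy.arcsinh(a + b*(yv+eps)) - numpy.arcsinh(a + b*(yv-eps))) / (2*eps)
    analytic = b / numpy.sqrt((a + b*yv)**2 + 1)
    assert numpy.allclose(numeric, analytic, atol=1e-6)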
def run_minimize(y,method,lib):
print "\nOptimizing with %s..." % lib.__name__
# Push y to GPU ahead of time, in the case of smat
y = lib.asarray(y)
# Set up initial parameter vector x=[a;b]
a = numpy.zeros((1,m))
b = numpy.ones((1,m))
x = numpy.vstack([a,b]).ravel()
# Set up bounds for vector a (unbounded) and vector b (positive)
bounds = [(None,None) for i in range(m)] + [(1e-5,None) for i in range(m)]
# Call scipy to do the optimization
if method == "COBYLA": maxiter = 1000
elif method == "L-BFGS-B": maxiter = 5
else: quit("Unsupported \"method\".")
time = 0
print " iter 0: objective = %.1f at start" % (objective_function(x,y,lib))
for t in range(5):
smat.util.tic()
x = scipy.optimize.minimize(objective_function,x,args=(y,lib),bounds=bounds,method=method,options={'maxiter':maxiter},tol=1e-20).x
time += smat.util.toc()
print " iter %3d: objective = %.1f, time elapsed = %.1fs" % ((t+1)*maxiter,objective_function(x,y,lib),time)
run_minimize(y,args.method,smat)
run_minimize(y,args.method,numpy)
|
DeepBind-master
|
code/libs/smat/py/demo_minimize.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import argparse
import smat
import smat.tests
smat.set_backend_options(device=0)
parser = argparse.ArgumentParser(description="Run the smat unit tests and/or performance tests.")
parser.add_argument("-p","--perf",action="store_true",default=False,help="Run performance tests instead of unit tests.")
parser.add_argument("-d","--device",type=int,default=None,help="The device to use, e.g. CUDA device.")
parser.add_argument("-b","--backend",type=str,default=None,help="The backend to use. Currently only \"cuda\" is supported.")
args = parser.parse_args()
if args.backend is not None:
smat.set_backend(args.backend)
if args.device is not None:
smat.set_backend_options(device=args.device)
print smat.get_backend_info()
print smat.get_heap_status()
if args.perf:
smat.tests.perftest()
else:
smat.tests.unittest()
|
DeepBind-master
|
code/libs/smat/py/run_tests.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import sys, os
import numpy as np
import argparse
from smat import *
#os.environ["PYTHONUNBUFFERED"] = "1" # Disable output buffering
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
parser = argparse.ArgumentParser(description="Train a convolutional neural net on MNIST and print out the error rates.")
parser.add_argument("--device",type=int,default=None,help="Device # to use (which GPU). Default is 0.")
parser.add_argument("--show_filters",action="store_true",default=False,help="Plot the filters after each update, showing in a popup window.")
parser.add_argument("--f64",action="store_true",default=False,help="Use float64, if supported by the GPU. Default is float32.")
args = parser.parse_args()
if args.device is not None:
set_backend_options(device=args.device)
dt = float64 if args.f64 else float32
set_default_dtype(dt)
print "Using device", get_backend_info().device
print "Checking cuDNN ...",
try: cudnn_dll()
except: quit("Failed to load cuDNN shared library. Quitting.");
print "OK."
##############################################################################
# Functions for loading DATA
##############################################################################
# Load the data as pairs (X,Y), where:
# X is a (batchsize x inputsize) matrix of inputs
# Y is a (batchsize x outputsize) matrix of corresponding targets
# MNIST has 60000 training examples total.
with np.load("data/mnist/mnist_train.npz") as mnist_file:
inputs_train = asarray(mnist_file['X'], dtype=dt)/255 # Load 60000 x 784 matrix of training inputs, scaled to range [0,1]
targets_train = asarray(mnist_file['Y'], dtype=dt) # Load 60000 x 10 matrix of training targets
with np.load("data/mnist/mnist_test.npz") as mnist_file:
inputs_test = asarray(mnist_file['X'], dtype=dt)/255 # Load 10000 x 784 matrix of testing inputs, scaled to range [0,1]
targets_test = asarray(mnist_file['Y'], dtype=dt) # Load 10000 x 10 matrix of testing targets
# Generate minibatches out of the full dataset.
trainsize = len(inputs_train)
testsize = len(inputs_test)
batchsize = 100 ; assert trainsize % batchsize == 0 # Make sure we use all training examples
batches_train = [(inputs_train[i:i+batchsize], targets_train[i:i+batchsize]) for i in range(0, trainsize, batchsize)]
batches_test = [(inputs_test [i:i+batchsize], targets_test [i:i+batchsize]) for i in range(0, testsize, batchsize)]
##############################################################################
# CONVNET CONFIGURATION
##############################################################################
# Configuration of neural net layers (number of filters, hidden units, etc)
input_w = 28 # Width of MNIST image
input_h = 28 # Height of MNIST image
input_c = 1 # Number of input channels for MNIST
# Layer 1 is convolution, so call it C1
C1_filter_c = 32 # Number of filters in C1
C1_filter_w = 5 # Width of filters in C1
C1_filter_h = 5 # Height of filters in C1
C1_stride = 1 # Stride of C1
C1_w = (input_w-C1_filter_w)//C1_stride + 1 # Width of C1 output featuremap
C1_h = (input_h-C1_filter_h)//C1_stride + 1 # Height of C1 output featuremap
# Layer 2 is pooling, so call it P1
P1_mode = "max" # Pooling type ("max" or "avg")
P1_window_w = 3 # Width of pooling windows in P1
P1_window_h = 3 # Height of pooling windows in P1
P1_stride = 2 # Stride of P1
P1_w = (C1_w-P1_window_w)//P1_stride + 1 # Width of P1 output featuremap
P1_h = (C1_h-P1_window_h)//P1_stride + 1 # Height of P1 output featuremap
# Layer 3 is fully connected, so call it F1
F1_size = 1000 # Number of neurons in F1
F1_dropout_rate = 0.5 # Dropout rate for F1
# Layer 4 is fully connected softmax, so call it F2
F2_size = 10 # 10 classes representing digits 0..9
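# Worked sizes for the configuration above (plain arithmetic from the formulas used below):
#   C1: (28 - 5)//1 + 1 = 24, so the conv featuremap is 24 x 24 per filter
#   P1: (24 - 3)//2 + 1 = 11, so the pooled featuremap is 11 x 11 per filter
# hence the flattened input to F1 has 32 * 11 * 11 = 3872 features per example.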
##############################################################################
# CONVNET PARAMETER ALLOCATION and INITIALIZATION
##############################################################################
# Count how many parameters there are in total for each layer,
# so that we can allocate one big vector P for all parameters
num_params = { 'C1_weights' : C1_filter_c*C1_filter_w*C1_filter_h * (input_c),
'C1_bias' : C1_filter_c,
'F1_weights' : F1_size * (C1_filter_c*P1_w*P1_h),
'F1_bias' : F1_size,
'F2_weights' : F2_size * (F1_size),
'F2_bias' : F2_size }
num_params_total = sum(num_params.values())
P = zeros(num_params_total, dt)
P_grad = zeros_like(P)
# Slice the param vector P into parameters for each layer.
_ = 0 # temp counter
C1_weights = P[_:_+num_params['C1_weights']].reshape((C1_filter_c, input_c*C1_filter_h*C1_filter_w)); _ += C1_weights.size;
C1_bias = P[_:_+num_params['C1_bias' ]]; _ += C1_bias.size;
F1_weights = P[_:_+num_params['F1_weights']].reshape((C1_filter_c*P1_w*P1_h, F1_size)); _ += F1_weights.size;
F1_bias = P[_:_+num_params['F1_bias' ]].reshape((1, F1_size)); _ += F1_bias.size;
F2_weights = P[_:_+num_params['F2_weights']].reshape((F1_size, F2_size)); _ += F2_weights.size;
F2_bias = P[_:_+num_params['F2_bias' ]].reshape((1, F2_size)); _ += F2_bias.size;
assert _ == num_params_total
# Slice the gradient vector P_grad into parameters for each layer.
_ = 0
C1_weights_grad = P_grad[_:_+num_params['C1_weights']].reshape((C1_filter_c, input_c*C1_filter_h*C1_filter_w)); _ += C1_weights_grad.size;
C1_bias_grad = P_grad[_:_+num_params['C1_bias' ]]; _ += C1_bias_grad.size;
F1_weights_grad = P_grad[_:_+num_params['F1_weights']].reshape((C1_filter_c*P1_w*P1_h, F1_size)); _ += F1_weights_grad.size;
F1_bias_grad = P_grad[_:_+num_params['F1_bias' ]].reshape((1, F1_size)); _ += F1_bias_grad.size;
F2_weights_grad = P_grad[_:_+num_params['F2_weights']].reshape((F1_size, F2_size)); _ += F2_weights_grad.size;
F2_bias_grad = P_grad[_:_+num_params['F2_bias' ]].reshape((1, F2_size)); _ += F2_bias_grad.size;
assert _ == num_params_total
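# Why a single flat P works (a minimal numpy analogue, illustration only; smat
# slices are assumed to behave like numpy views for this purpose): each layer's
# weight matrix is a reshaped view into P, so updating P in one vectorized step,
# as the SGD loop below does, updates every layer's parameters at once.
def _example_flat_param_views():
    P_demo = np.zeros(6)
    W_view = P_demo[0:4].reshape((2, 2))   # a view sharing memory with P_demo
    b_view = P_demo[4:6]
    P_demo += 1.0                          # one flat, vectorized update...
    assert W_view[0, 0] == 1.0 and b_view[0] == 1.0  # ...is visible through every view
    return W_view, b_view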
# Initialize parameters in P for each layer with a different random scale.
def set_rand(target, scale):
target.ravel()[:] = randn(target.size) * scale
set_rand(C1_weights, 0.01); C1_bias += 0.0001; # Initialize biases as small positive values
set_rand(F1_weights, 0.01); F1_bias += 0.0001;
set_rand(F2_weights, 0.01);
##############################################################################
# CONVNET FORWARDPROP / BACKPROP
##############################################################################
# Send input mini-batch through our convnet.
# Returns final outputs and, if targets are given, returns gradients as well.
def eval_convnet(inputs, targets=None):
# Network parameters stored in global variables, for simplicity of this demo
global C1_weights, C1_bias
global F1_weights, F1_bias
global F2_weights, F2_bias
# Gradients of parameters also stored in global variables, for simplicity
global C1_weights_grad, C1_bias_grad
global F1_weights_grad, F1_bias_grad
global F2_weights_grad, F2_bias_grad
# Forward propagate C1
C1_hidden = relu(conv2(inputs, input_w, input_h, C1_weights, C1_filter_w, C1_filter_h, bias=C1_bias, stride=C1_stride))
# Forward propagate P1
P1_hidden = pool2(P1_mode, C1_hidden, C1_w, C1_h, P1_window_w, P1_window_h, stride=P1_stride)
# Forward propagate F1
F1_hidden = relu(dot(P1_hidden, F1_weights) + F1_bias)
F1_hidden, F1_mask = dropout(F1_hidden, F1_dropout_rate, test_mode=(targets is None))
# Forward propagate F2
F2_hidden = softmax(dot(F1_hidden, F2_weights) + F2_bias)
# If no targets provided (no gradient requested), just return the predictions of final layer
if targets is None:
return F2_hidden
# Compute residuals
F2_delta = F2_hidden - targets
# Backward propagate F2_delta
F2_bias_grad[:] = sum(F2_delta, axis=0)
F2_weights_grad[:] = dot_tn(F1_hidden, F2_delta)
F1_delta = dot_nt(F2_delta, F2_weights)
# Backward propagate F1_delta
F1_delta = dropout_grad(F1_delta, F1_mask) # Backprop through dropout after F1 layer
F1_delta *= relu_grad(F1_hidden) # Backprop through relu after F1 layer
F1_bias_grad[:] = sum(F1_delta, axis=0)
F1_weights_grad[:] = dot_tn(P1_hidden, F1_delta)
P1_delta = dot_nt(F1_delta, F1_weights)
# Backward propagate P1_delta
C1_delta = pool2_grad(P1_mode, C1_hidden, C1_w, C1_h, P1_window_w, P1_window_h, P1_hidden, P1_delta, stride=P1_stride)
# Backward propagate C1_delta
C1_delta *= relu_grad(C1_hidden) # Backprop through relu after C1 layer
conv2_biasgrad(C1_bias, C1_delta, C1_bias_grad)
conv2_filtersgrad(inputs, input_w, input_h, C1_filter_w, C1_filter_h, C1_delta, C1_weights_grad, stride=C1_stride)
##############################################################################
# Functions for PRINTING
##############################################################################
def make_filter_grid(filter_weights, filter_w, filter_h, max_cols=8):
    # Determine a symmetric value range [vmin, vmax] (centered at zero) to map onto [0, 255]
vmin = float(filter_weights.min())
vmax = float(filter_weights.max())
vmax, vmin = max(vmax, -vmin), min(-vmax, vmin)
# Scale all filters to range [0, 255] and reshape to a single (n, width, height) array
images = ((filter_weights.asnumpy() - vmin) / (vmax - vmin) * 255).astype(np.uint8).reshape((-1, filter_h, filter_w))
n = len(images)
# Create a big image to store the filters, then copy each filter into its slot in the grid
num_cols = min(n, max_cols)
num_rows = (n + num_cols - 1) // num_cols
grid = np.zeros((num_rows*(filter_h+1)+1, num_cols*(filter_w+1)+1), np.uint8)
for col in range(num_cols):
for row in range(num_rows):
i = row*num_cols + col
if i < len(images):
grid[1+row*(filter_h+1):(row+1)*(filter_h+1),
1+col*(filter_w+1):(col+1)*(filter_w+1)] = images[i]
return grid
def error_rate(batches):
predictions = np.vstack([eval_convnet(inputs).asnumpy() for inputs, targets in batches])
targets = np.vstack([targets.asnumpy() for inputs, targets in batches])
num_errors = np.sum( predictions[np.where(targets==1)] != predictions.max(axis=1) )
return 100.*num_errors/len(predictions)
filter_plot_img = None # Global variable to hold reference to the filter image currently being plotted in a pyplot window
def plot_filter_grid():
global filter_plot_img
import matplotlib.pyplot as plt
import matplotlib.cm as cm
filter_grid = make_filter_grid(C1_weights, C1_filter_w, C1_filter_h)
if filter_plot_img is None:
# The first time we plot the filters, use imshow and pop up a new window
        filter_plot_img = plt.imshow(filter_grid, cmap=cm.Greys_r, interpolation="nearest")
plt.show(block=False)
else:
# When we want to just update the filters, replace the data and give pyplot event loop a chance to draw
filter_plot_img.set_data(filter_grid)
plt.pause(0.001)
def print_status(epoch=None):
update_interval = 5
if epoch is not None:
print ".",
if epoch is None or (epoch+1) % update_interval == 0: # Only print status every 5 epochs.
time_per_epoch = toc() / update_interval
train_error = error_rate(batches_train)
test_error = error_rate(batches_test)
status_msg = "start" if epoch is None else ("epoch[%d]"% (epoch+1))
time_msg = "(%.2fs/epoch)" % time_per_epoch if epoch is not None else ""
print "\n%s: %.2f%% train err, %.2f%% test err %s " % (status_msg, train_error, test_error, time_msg),
if args.show_filters:
plot_filter_grid()
tic()
tic()
print_status()
##############################################################################
# SGD TRAINING LOOP
##############################################################################
# Parameters of SGD training
num_epoch = 50
learn_rate = 0.02
momentum = 0.90
# Allocate array to store momentum of every parameter; updated during training
P_momentum = zeros_like(P)
tic("training time")
# Start training!
for epoch in range(num_epoch):
for inputs, targets in batches_train:
        # Compute the per-layer gradients based on the targets
eval_convnet(inputs, targets)
# Gradient step with very basic momentum
P_grad *= -learn_rate/batchsize
P_momentum *= momentum
P_momentum += P_grad
P += P_momentum
# Print current classification error on training data
print_status(epoch)
print "\nTotal training time = %.1fs" % toc("training time")
|
DeepBind-master
|
code/libs/smat/py/demo_convnet.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import math,string
import numpy as np
import time
import __builtin__
#
# MATLAB-like tic/toc for convenience
#
_tics = {None: 0.0}
def tic(id=None):
global _tics
now = time.time()
_tics[id] = now
return now
def toc(id=None):
global _tics
now = time.time()
return now - _tics[id]
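# Usage sketch (illustration only): named timers are tracked independently, so
# an outer timer can wrap a repeatedly-reset default timer.
#
#   tic("outer")
#   for _ in range(3):
#       tic()               # default timer, restarted each iteration
#       ...                 # do work
#       step_time = toc()   # seconds since the matching tic()
#   total_time = toc("outer")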
##################################################
_int2dtype = { 0 : np.bool,
1 : np.int8,
2 : np.uint8,
3 : np.int16,
4 : np.uint16,
5 : np.int32,
6 : np.uint32,
7 : np.int64,
8 : np.uint64,
9 : np.float32,
10: np.float64 }
_dtype2int = { np.bool : 0,
np.int8 : 1,
np.uint8 : 2,
np.int16 : 3,
np.uint16 : 4,
np.int32 : 5,
np.uint32 : 6,
np.int64 : 7,
np.uint64 : 8,
np.float32 : 9,
np.float64 : 10 }
_arg2dtype = { "bool" : np.bool, np.dtype('bool') : np.bool, np.bool : np.bool, __builtin__.bool : np.bool,
"int8" : np.int8, np.dtype('int8') : np.int8, np.int8 : np.int8, __builtin__.chr : np.int8,
"uint8" : np.uint8, np.dtype('uint8') : np.uint8, np.uint8 : np.uint8,
"int16" : np.int16, np.dtype('int16') : np.int16, np.int16 : np.int16,
"uint16" : np.uint16, np.dtype('uint16') : np.uint16, np.uint16 : np.uint16,
"int32" : np.int32, np.dtype('int32') : np.int32, np.int32 : np.int32, __builtin__.int : np.int32,
"uint32" : np.uint32, np.dtype('uint32') : np.uint32, np.uint32 : np.uint32,
"int64" : np.int64, np.dtype('int64') : np.int64, np.int64 : np.int64, __builtin__.long : np.int64,
"uint64" : np.uint64, np.dtype('uint64') : np.uint64, np.uint64 : np.uint64,
"float32": np.float32, np.dtype('float32'): np.float32, np.float32: np.float32,
"float64": np.float64, np.dtype('float64'): np.float64, np.float64: np.float64, __builtin__.float: np.float64 }
# copied from http://code.activestate.com/recipes/578323-human-readable-filememory-sizes-v2/
def format_bytecount(val,fmt=".2cM"):
""" define a size class to allow custom formatting
format specifiers supported :
em : formats the size as bits in IEC format i.e. 1024 bits (128 bytes) = 1Kib
eM : formats the size as Bytes in IEC format i.e. 1024 bytes = 1KiB
sm : formats the size as bits in SI format i.e. 1000 bits = 1kb
sM : formats the size as bytes in SI format i.e. 1000 bytes = 1KB
cm : format the size as bit in the common format i.e. 1024 bits (128 bytes) = 1Kb
cM : format the size as bytes in the common format i.e. 1024 bytes = 1KB
"""
if val == 0:
return "0"
# work out the scale, suffix and base
factor, suffix = (8, "b") if fmt[-1] in string.lowercase else (1,"B")
base = 1024 if fmt[-2] in ["e","c"] else 1000
# Add the i for the IEC format
suffix = "i"+ suffix if fmt[-2] == "e" else suffix
mult = ["","K","M","G","T","P"]
val = float(val) * factor
i = 0 if val < 1 else int(math.log(val, base))+1
v = val / math.pow(base,i)
v,i = (v,i) if v > 0.5 else (v*base,i-1)
# Identify if there is a width and extract it
width = "" if fmt.find(".") == -1 else fmt[:fmt.index(".")]
precis = fmt[:-2] if width == "" else fmt[fmt.index("."):-2]
# do the precision bit first, so width/alignment works with the suffix
t = ("{0:{1}f}"+mult[i]+suffix).format(v, precis)
return "{0:{1}}".format(t,width) if width != "" else t
|
DeepBind-master
|
code/libs/smat/py/smat/util.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from smat import *
#import util
|
DeepBind-master
|
code/libs/smat/py/smat/__init__.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from smat_dll import *
from ctypes import *
from exceptions import *
import numpy as np
import string,atexit,__builtin__
from copy import copy as _copy
from copy import deepcopy as _deepcopy
import cPickle as pickle
import util
import gc
bool = np.bool
int8 = np.int8
uint8 = np.uint8
int16 = np.int16
uint16 = np.uint16
int32 = np.int32
uint32 = np.uint32
int64 = np.int64
uint64 = np.uint64
float32= np.float32
float64= np.float64
index = int32 # should be same as c_index_t
uindex = uint32 # should be same as c_uindex_t
tic = util.tic
toc = util.toc
_int2dtype = util._int2dtype
_dtype2int = util._dtype2int
_arg2dtype = util._arg2dtype
def int2dtype(i):
return _int2dtype[i]
def dtype2int(dt):
if type(dt) == type(None):return -1
return _dtype2int[arg2dtype(dt)]
def dtype2str(dtype):
    dtype = _int2dtype[dll.actual_dtype(_dtype2int[dtype])]
return dtype.__name__
def arg2dtype(s):
# Can't just test s == None due to some weird bug where "np.dtype('float64') == None" returns true
if type(s) == type(None): return _int2dtype[dll.api_get_default_dtype()]
return _arg2dtype[s]
_integral_types = (__builtin__.chr,__builtin__.int,__builtin__.long,int8,int16,int32,int64)
_py_max = __builtin__.max
_py_min = __builtin__.min
_py_sum = __builtin__.sum
_py_all = __builtin__.all
_py_any = __builtin__.any
_smat_exiting = False
_span = c_slice_t(0,c_slice_end)
_axis2int = { None: -1, 0:1, 1:0 } # Only x and y axes supported right now
##############################################################
class sarray(object):
'''
A numpy-like wrapper around a streaming-mode matrix (smat) object.
'''
def __init__(self,ptr):
self._ptr = ptr # Just wrap a pointer to the C++ smat instance.
self._attr = None # List of "extra" attributes that have been attached to this smat instance, if any
def __del__(self):
if dll != None and not _smat_exiting: # dll may have already been unloaded if exiting application
dll.api_delete(self._ptr)
@property
def size(self): return dll.api_size(self._ptr)
@property
def shape(self):
s = c_shape_t(0,0,0)
dll.api_shape(self._ptr,byref(s))
return (s.y,s.x)
@property
def nrow(self): return dll.api_nrow(self._ptr) # number of rows
@property
def ncol(self): return dll.api_ncol(self._ptr) # number of cols
@property
def ndim(self): return 2 # TODO: support variable dimension arrays
@property
def dtype(self): return np.dtype(_int2dtype[dll.api_dtype(self._ptr)])
@property
def T(self): return transpose(self)
def setattr(self,name,val):
if self._attr is None:
self._attr = set()
self._attr.add(name)
self.__setattr__(name,val)
def getattr(self,name,default=None):
return getattr(self,name) if self.hasattr(name) else default
def hasattr(self,name):
return self._attr is not None and name in self._attr
def clearattr(self):
if self._attr:
for attr in _copy(self._attr):
delattr(self,attr)
def copyattr(self,A,deep=False):
if A is self:
return
self.clearattr()
if A._attr:
for attr in A._attr:
val = A.getattr(attr)
if deep:
val = _deepcopy(val)
self.setattr(attr,val)
def copy(self): return _deepcopy(self)
def __delattr__(self,name):
object.__delattr__(self,name)
if self._attr is not None:
self._attr.remove(name)
# These provided for the standard library copy.copy() and copy.deepcopy() functions
def __deepcopy__(self,memo):
A = empty(self.shape,self.dtype) # deep copy, including all data and extra attributes
A[:] = self
sync()
if self._attr is not None:
for attr in self._attr:
A.setattr(attr,_deepcopy(getattr(self,attr),memo))
return A
def __copy__(self): # shallow copy, where things like shape can be modified separately from the original, but shares the same underlying data array
A = self[:]
sync()
if self._attr is not None:
for attr in self._attr:
A.setattr(attr,getattr(self,attr))
return A
def astype(self,dtype,copy=False):
return as_sarray(self,dtype,copy)
def asnumpy(self,async=False,out=None):
if out == None: out = np.empty(self.shape,dtype=self.dtype,order='C')
elif not isinstance(out,np.ndarray): raise ValueError("Keyword argument 'out' must be a numpy array.\n")
        elif out.shape != self.shape: raise ValueError("Keyword argument 'out' must have matching dimensions.\n")
if out.ndim > 1:
rstride = out.strides[0]
cstride = out.strides[1]
else:
cstride = out.strides[0]
            rstride = cstride*self.size
# Looks good, so issue the copy operation.
dll.api_copy_to(self._ptr,out.ctypes.data_as(c_void_p),rstride,cstride)
if not async:
sync() # Wait for writes to finish.
return out
def isscalar(self):
return self.size == 1
# Do NOT provide len() and iteration, because it gives numpy a way to
# silently cause terrible performance problems when mixed with sarrays.
def __len__(self):
#raise NotImplementedError("An sarray does not support __len__, to prevent accidental operations mixing numpy ndarrays.")
return self.nrow
def __iter__(self):
#raise NotImplementedError("An sarray does not support __iter__, to prevent accidental operations mixing numpy ndarrays.")
for row in xrange(self.shape[0]): # Iterate over rows
yield self[row]
############################### PICKLING ##############################
def __getstate__(self):
data = self.asnumpy()
attrdict = {name : pickle.dumps(self.getattr(name)) for name in self._attr} if self._attr is not None else None
return (data,attrdict)
def __setstate__(self,state):
data,attrdict = state
self._ptr = dll.api_empty(_make_shape_p(data.shape),dtype2int(data.dtype))
dll.api_copy_from(self._ptr,data.ctypes.data_as(c_void_p),data.strides[0],data.strides[1]) # Copy from data
self._attr = None
if attrdict:
for name,value in attrdict.items():
self.setattr(name,pickle.loads(value))
############################### RESHAPING ##############################
def reshape(self,shape):
return sarray(dll.api_reshape(self._ptr,_make_shape_p(shape)))
def ravel(self):
return sarray(dll.api_reshape(self._ptr,_make_shape_p((-1,1))))
############################### SLICING ################################
def __getitem__(self,i):
ti = type(i)
if ti != tuple:
# One slice dimension
if _is_full_slice(i):
return self
rows = _make_gettable_slice_p(i)
cols = _span
else:
# Two slice dimensions
if len(i) != 2:
raise IndexError("Too many indices.\n")
rows = _make_gettable_slice_p(i[0])
cols = _make_gettable_slice_p(i[1]) if i[1] != slice(None) else _make_gettable_slice_p(slice(0,self.ncol))
if type(rows) == c_slice_t and type(cols) == c_slice_t:
# Contiguous row indices, contiguous col indices
return sarray(dll.api_slice(self._ptr,byref(rows),byref(cols)))
elif type(rows) == c_slice_t and type(cols) == np.ndarray:
# Contiguous row indices, list of individual col indices
raise NotImplementedError("List-based slicing not implemented")
else:
raise NotImplementedError("Gettable slicing only supports slice-, integer-, or list-based indexing.")
def __setitem__(self,i,val):
ti = type(i)
if ti != tuple:
# One slice dimension
if _is_full_slice(i):
self.assign(val)
return
rows = _make_settable_slice_p(i)
cols = _span
else:
# Two slice dimensions
if len(i) != 2:
raise IndexError("Too many indices.\n")
rows = _make_settable_slice_p(i[0])
cols = _make_settable_slice_p(i[1]) if i[1] != slice(None) else _make_settable_slice_p(slice(0,self.ncol))
if type(rows) == c_slice_t and type(cols) == c_slice_t:
# Contiguous row indices, contiguous col indices
view = sarray(dll.api_slice(self._ptr,byref(rows),byref(cols)))
view.assign(val) # use .assign to avoid recursion
else:
raise NotImplementedError("Settable slicing only supports slice- or integer-based indexing.")
def assign(self,val):
val = as_sarray(val)
dll.api_assign(self._ptr,val._ptr)
return self
######################## COMPARISON OPERATIONS #########################
def __eq__(self,other):
if isscalar(other): other = _scalar2smat(other)
if type(other) == sarray: return sarray(dll.api_eq(self._ptr,other._ptr))
return False
def __ne__(self,other): return self.__eq__(other).__invert__()
def __lt__(self,other): other = _scalar2smat(other); return sarray(dll.api_lt(self._ptr,other._ptr))
def __le__(self,other): other = _scalar2smat(other); return sarray(dll.api_le(self._ptr,other._ptr))
def __gt__(self,other): other = _scalar2smat(other); return sarray(dll.api_gt(self._ptr,other._ptr))
def __ge__(self,other): other = _scalar2smat(other); return sarray(dll.api_ge(self._ptr,other._ptr))
######################## LOGICAL/BITWISE OPERATIONS #########################
def __or__(self,other): other = _scalar2smat(other); return sarray(dll.api_or (self._ptr,other._ptr))
def __xor__(self,other): other = _scalar2smat(other); return sarray(dll.api_xor(self._ptr,other._ptr))
def __and__(self,other): other = _scalar2smat(other); return sarray(dll.api_and(self._ptr,other._ptr))
def __ror__(self,other): other = _scalar2smat(other); return sarray(dll.api_or (other._ptr,self._ptr))
def __rxor__(self,other): other = _scalar2smat(other); return sarray(dll.api_xor(other._ptr,self._ptr))
def __rand__(self,other): other = _scalar2smat(other); return sarray(dll.api_and(other._ptr,self._ptr))
def __ior__(self,other): other = _scalar2smat(other); dll.api_ior (self._ptr,other._ptr); return self
def __ixor__(self,other): other = _scalar2smat(other); dll.api_ixor(self._ptr,other._ptr); return self
def __iand__(self,other): other = _scalar2smat(other); dll.api_iand(self._ptr,other._ptr); return self
########################## UNARY OPERATORS ##########################
def __neg__(self): return sarray(dll.api_neg(self._ptr))
def __abs__(self): return sarray(dll.api_abs(self._ptr))
def __invert__(self):return sarray(dll.api_not(self._ptr))
def __nonzero__(self):
if self.size != 1:
raise ValueError("Truth value of matrix is ambiguous; use all() or any().")
return self.asnumpy().__nonzero__() # must pull back from device
def __int__(self): return int(self.asnumpy())
def __long__(self): return long(self.asnumpy())
def __float__(self): return float(self.asnumpy())
########################## ARITHMETIC OPERATORS #########################
def __add__(self,other): other = _scalar2smat(other); return sarray(dll.api_add(self._ptr,other._ptr))
def __sub__(self,other): other = _scalar2smat(other); return sarray(dll.api_sub(self._ptr,other._ptr))
def __mul__(self,other): other = _scalar2smat(other); return sarray(dll.api_mul(self._ptr,other._ptr))
def __div__(self,other): other = _scalar2smat(other); return sarray(dll.api_div(self._ptr,other._ptr))
def __mod__(self,other): other = _scalar2smat(other); return sarray(dll.api_mod(self._ptr,other._ptr))
def __pow__(self,other): other = _scalar2smat(other); return sarray(dll.api_pow(self._ptr,other._ptr))
def __radd__(self,other): other = _scalar2smat(other); return sarray(dll.api_add(other._ptr,self._ptr))
def __rsub__(self,other): other = _scalar2smat(other); return sarray(dll.api_sub(other._ptr,self._ptr))
def __rmul__(self,other): other = _scalar2smat(other); return sarray(dll.api_mul(other._ptr,self._ptr))
def __rdiv__(self,other): other = _scalar2smat(other); return sarray(dll.api_div(other._ptr,self._ptr))
def __rmod__(self,other): other = _scalar2smat(other); return sarray(dll.api_mod(other._ptr,self._ptr))
def __rpow__(self,other): other = _scalar2smat(other); return sarray(dll.api_pow(other._ptr,self._ptr))
def __iadd__(self,other): other = _scalar2smat(other); dll.api_iadd(self._ptr,other._ptr); return self
def __isub__(self,other): other = _scalar2smat(other); dll.api_isub(self._ptr,other._ptr); return self
def __imul__(self,other): other = _scalar2smat(other); dll.api_imul(self._ptr,other._ptr); return self
def __idiv__(self,other): other = _scalar2smat(other); dll.api_idiv(self._ptr,other._ptr); return self
def __imod__(self,other): other = _scalar2smat(other); dll.api_imod(self._ptr,other._ptr); return self
def __ipow__(self,other): other = _scalar2smat(other); dll.api_ipow(self._ptr,other._ptr); return self
########################## REDUCE OPERATIONS ##########################
def max(self,axis=None): return sarray(dll.api_max( self._ptr,_axis2int[axis])) #.reshape((1,-1)) # mimick numpy's conversion to 1d row vector ? Naaaah, it's too annoying; pretend keep_dim is on by default
def min(self,axis=None): return sarray(dll.api_min( self._ptr,_axis2int[axis]))
def sum(self,axis=None): return sarray(dll.api_sum( self._ptr,_axis2int[axis]))
def mean(self,axis=None): return sarray(dll.api_mean(self._ptr,_axis2int[axis]))
def nnz(self,axis=None): return sarray(dll.api_nnz( self._ptr,_axis2int[axis]))
def any(self,axis=None): return sarray(dll.api_any( self._ptr,_axis2int[axis]))
def all(self,axis=None): return sarray(dll.api_all( self._ptr,_axis2int[axis]))
########################## REPEAT OPERATORS ##########################
def _rep_op(self,n,axis,op):
if axis not in (None,0,1): raise ValueError("Axis must be None, 0 or 1.")
A = self
if isinstance(n,(tuple,list)):
if axis is not None: raise ValueError("Axis must be None if n is a tuple")
if len(n) == 1:
n = (n[0],1) if axis == 0 else (1,n[0])
else:
if axis is None:
A = self.ravel() # emulate numpy flattening on axis=None
n = (n,1) if axis == 0 else (1,n)
B = sarray(op(A._ptr,_make_shape_p(n)))
return B if axis is not None else B.reshape((-1,1))
def repeat(self,n,axis=None): return self._rep_op(n,axis,dll.api_repeat)
def tile(self,n,axis=None): return self._rep_op(n,axis,dll.api_tile)
########################## OTHER OPERATORS ##########################
def __repr__(self):
max_device_rows = 512 if self.shape[1] > 16 else 2048
if True or self.shape[0] <= max_device_rows:
A = self.asnumpy()
else:
# If this is a huge matrix, only copy the start and end of the matrix to the host,
# so that printing is faster, and so that interactive debuggers like Visual Studio
# are faster (otherwise have to wait for huge memory transfers at each breakpoint,
# to update the variable values in Visual Studio's Locals window).
# For now, just handle the case when there are many rows.
A = np.empty((max_device_rows,)+self.shape[1:],self.dtype)
A[:max_device_rows/2] = self[:max_device_rows/2].asnumpy()
A[max_device_rows/2:] = self[-max_device_rows/2:].asnumpy()
txt = A.__repr__().replace('array(', 'sarray(').replace(' [',' [')
if txt.find("dtype=") == -1:
txt = txt[:-1] + (",dtype=%s)" % A.dtype)
return txt
def __str__(self): return self.asnumpy().__str__()
##############################################################
def empty(shape,dtype=None): return sarray(dll.api_empty(_make_shape_p(shape),dtype2int(dtype)))
def zeros(shape,dtype=None): return sarray(dll.api_zeros(_make_shape_p(shape),dtype2int(dtype)))
def ones (shape,dtype=None): return sarray(dll.api_ones (_make_shape_p(shape),dtype2int(dtype)))
def empty_like(A,dtype=None):return sarray(dll.api_empty_like(A._ptr,dtype2int(dtype)))
def zeros_like(A,dtype=None):return sarray(dll.api_zeros_like(A._ptr,dtype2int(dtype)))
def ones_like (A,dtype=None):return sarray(dll.api_ones_like (A._ptr,dtype2int(dtype)))
def eye (n,m=None,k=0,dtype=None):
if _dtype2int.has_key(m): dtype = m; m = None
if m != None and n != m: raise NotImplementedError("Non-square identity matrices not supported.\n")
if k != 0: raise NotImplementedError("Off-diagonal identity matrices not supported.\n")
return sarray(dll.api_eye(n,dtype2int(dtype)))
def identity(n,dtype=None):
return sarray(dll.api_eye(n,dtype2int(dtype)))
def arange(*args,**kwargs):
if len(args) == 0: raise ValueError("Not enough arguments.\n")
if len(args) == 1: start = 0; stop = args[0]
if len(args) == 2: start = args[0]; stop = args[1]
return sarray(dll.api_arange(start,stop,dtype2int(kwargs.get("dtype",None))))
def rand(n,m=1,dtype=None): return sarray(dll.api_rand( _make_shape_p((int(n),int(m))),dtype2int(dtype)))
def randn(n,m=1,dtype=None): return sarray(dll.api_randn( _make_shape_p((int(n),int(m))),dtype2int(dtype)))
def bernoulli(shape,p,dtype=None): return sarray(dll.api_bernoulli(_make_shape_p(shape),p,dtype2int(dtype)))
def rand_seed(seed): dll.api_set_rand_seed(seed)
def sync(): dll.api_sync()
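# Usage sketch (illustration only; assumes a working smat backend has been loaded):
#
#   A = rand(4, 3, dtype=float32)   # 4x3 uniform-random matrix held on the device
#   B = zeros_like(A)
#   B += A                          # elementwise ops stay on the device
#   col_sums = B.sum(axis=0)        # reduction (keeps dims, numpy-like axis)
#   host = B.asnumpy()              # copy the result back as a numpy ndarray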
###############################################################
# Maps python/numpy type to corresponding smat scalar constructor.
_smat_const_lookup = {
__builtin__.bool: (dll.api_const_b8, c_bool),
bool: (dll.api_const_b8, c_bool),
__builtin__.chr: (dll.api_const_i8, c_byte),
int8: (dll.api_const_i8, c_byte),
uint8: (dll.api_const_u8, c_ubyte),
int16: (dll.api_const_i16,c_short),
uint16: (dll.api_const_u16,c_ushort),
__builtin__.int: (dll.api_const_i32,c_int),
int32: (dll.api_const_i32,c_int),
uint32: (dll.api_const_u32,c_uint),
__builtin__.long: (dll.api_const_i64,c_longlong),
int64: (dll.api_const_i64,c_longlong),
uint64: (dll.api_const_u64,c_ulonglong),
float32:(dll.api_const_f32,c_float),
float64:(dll.api_const_f64,c_double),
__builtin__.float: (dll.api_const_f64,c_double)
}
def as_sarray(A,dtype=None,copy=False,force=False):
if isinstance(A,sarray):
# Type SARRAY (smat)
if dtype is None or A.dtype == dtype:
return A if not copy else A.copy() # Return a reference to A, or a direct copy
B = empty(A.shape,dtype)
B[:] = A # Return type-converted copy of A
return B
if isinstance(A,list):
if dtype is None:
if type(A[0]) == float or (isinstance(A[0],(list,tuple)) and type(A[0][0]) == float):
dtype = get_default_dtypef() # Convert to "float32" or "float64" depending on current default for floats
A = np.asarray(A,dtype=dtype) # Let numpy do the dirty work of rearranging the data, and fall through to the next if statement.
if isinstance(A,np.ndarray):
# Type NDARRAY (numpy)
if dtype != None and dtype != A.dtype:
A = np.require(A,dtype=dtype) # Implicit conversion first, since simultaneous copy-and-convert is not supported by smat.
if A.ndim > 2:
raise NotImplementedError("Only 1- or 2-D sarrays are supported.")
if not A.flags['C_CONTIGUOUS']:
if not force:
raise TypeError("Expected C-contiguous ndarray, but received F-contiguous; use force=True to allow automatic conversion.")
A = np.require(A,requirements=["C_CONTIGUOUS"])
if A.ndim > 1:
rstride = A.strides[0]
cstride = A.strides[1]
else:
rstride = A.strides[0]
cstride = rstride
B = empty(A.shape,A.dtype)
dll.api_copy_from(B._ptr,A.ctypes.data_as(c_void_p),rstride,cstride) # Return copy of A
return B
if np.isscalar(A):
# Type SCALAR; convert to a 1x1 sarray of the appropriate type
func,ctype = _smat_const_lookup[type(A) if dtype is None else arg2dtype(dtype)]
b = sarray(func(ctype(A))) # Return scalar wrapped in an smat
return b
raise TypeError("Unrecognized type '%s'.\n" % str(type(A)))
asarray = as_sarray
array = as_sarray
def index_array(A): return as_sarray(A,dtype=index)
def uindex_array(A): return as_sarray(A,dtype=uindex)
def asnumpy(A,async=False,out=None):
try:
if isinstance(A,list): return list(as_numpy(item) for item in A)
if isinstance(A,tuple): return tuple(as_numpy(item) for item in A)
if isinstance(A,sarray): return A.asnumpy(async,out)
if out != None: raise ValueError("Keyword argument 'out' only supported when input is of type sarray.")
# If not an SARRAY, pass it along to the regular numpy asarray() function
return np.asarray(A) if A is not None else None
    except MemoryError:
        print ("OUT OF MEMORY in asnumpy() with A of shape %s (%d elements)" % (str(A.shape),A.size))
raise
as_numpy = asnumpy
def as_numpy_array(A): return as_numpy(A) # gnumpy calls it as_numpy_array
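# Illustrative usage sketch (added for exposition; not part of the original smat API).
# It shows the numpy <-> sarray round trip via as_sarray()/asnumpy(); the shape and
# values below are arbitrary assumptions.
def _example_asarray_roundtrip():
    X  = np.arange(6, dtype=np.float32).reshape((2, 3))  # host-side numpy array
    _X = as_sarray(X)            # upload to the backend as an sarray
    _Y = _X*2 + 1                # computed on the backend
    return asnumpy(_Y)           # download the result back into a numpy array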
def isarray(x): return type(x) == sarray
def isscalar(x):
if type(x) == sarray: return x.isscalar()
if type(x) == str: return False # for some reason np.isscalar returns true for strings
return np.isscalar(x)
def sign(A): return sarray(dll.api_sign(A._ptr)) if isinstance(A,sarray) else np.sign(A)
def signbit(A): return sarray(dll.api_signbit(A._ptr)) if isinstance(A,sarray) else np.signbit(A,out=np.empty(A.shape,A.dtype)) # force numpy to use input dtype instead of bool
def sqrt(A): return sarray(dll.api_sqrt(A._ptr)) if isinstance(A,sarray) else np.sqrt(A)
def square(A): return sarray(dll.api_square(A._ptr)) if isinstance(A,sarray) else np.square(A)
def sin(A): return sarray(dll.api_sin(A._ptr)) if isinstance(A,sarray) else np.sin(A)
def cos(A): return sarray(dll.api_cos(A._ptr)) if isinstance(A,sarray) else np.cos(A)
def tan(A): return sarray(dll.api_tan(A._ptr)) if isinstance(A,sarray) else np.tan(A)
def arcsin(A): return sarray(dll.api_arcsin(A._ptr)) if isinstance(A,sarray) else np.arcsin(A)
def arccos(A): return sarray(dll.api_arccos(A._ptr)) if isinstance(A,sarray) else np.arccos(A)
def arctan(A): return sarray(dll.api_arctan(A._ptr)) if isinstance(A,sarray) else np.arctan(A)
def sinh(A): return sarray(dll.api_sinh(A._ptr)) if isinstance(A,sarray) else np.sinh(A)
def cosh(A): return sarray(dll.api_cosh(A._ptr)) if isinstance(A,sarray) else np.cosh(A)
def tanh(A): return sarray(dll.api_tanh(A._ptr)) if isinstance(A,sarray) else np.tanh(A)
def arcsinh(A): return sarray(dll.api_arcsinh(A._ptr)) if isinstance(A,sarray) else np.arcsinh(A)
def arccosh(A): return sarray(dll.api_arccosh(A._ptr)) if isinstance(A,sarray) else np.arccosh(A)
def arctanh(A): return sarray(dll.api_arctanh(A._ptr)) if isinstance(A,sarray) else np.arctanh(A)
def exp(A): return sarray(dll.api_exp(A._ptr)) if isinstance(A,sarray) else np.exp(A)
def exp2(A): return sarray(dll.api_exp2(A._ptr)) if isinstance(A,sarray) else np.exp2(A)
def log(A): return sarray(dll.api_log(A._ptr)) if isinstance(A,sarray) else np.log(A)
def log2(A): return sarray(dll.api_log2(A._ptr)) if isinstance(A,sarray) else np.log2(A)
def logistic(A): return sarray(dll.api_logistic(A._ptr)) if isinstance(A,sarray) else 1/(1+np.exp(-A))
def round(A): return sarray(dll.api_round(A._ptr)) if isinstance(A,sarray) else np.round(A)
def floor(A): return sarray(dll.api_floor(A._ptr)) if isinstance(A,sarray) else np.floor(A)
def ceil(A): return sarray(dll.api_ceil(A._ptr)) if isinstance(A,sarray) else np.ceil(A)
def clip(A,lo=0.,hi=1.):return sarray(dll.api_clip(A._ptr,lo,hi)) if isinstance(A,sarray) else np.clip(A,lo,hi)
def isinf(A): return sarray(dll.api_isinf(A._ptr)) if isinstance(A,sarray) else np.isinf(A)
def isnan(A): return sarray(dll.api_isnan(A._ptr)) if isinstance(A,sarray) else np.isnan(A)
def transpose(A): return sarray(dll.api_trans(A._ptr)) if isinstance(A,sarray) else np.transpose(A)
def dot(A,B,out=None):
if not isinstance(A,sarray) or not isinstance(B,sarray):
assert not isinstance(A,sarray) and not isinstance(B,sarray), "Cannot perform product on sarray and numpy array."
return np.dot(A,B,out=out)
if out is None:
return sarray(dll.api_dot(A._ptr,B._ptr))
assert isinstance(out,sarray), "Output must be an sarray."
dll.api_dot_out(A._ptr,B._ptr,out._ptr)
def dot_tn(A,B,out=None):
if not isinstance(A,sarray) or not isinstance(B,sarray):
assert not isinstance(A,sarray) and not isinstance(B,sarray), "Cannot perform product on sarray and numpy array."
return np.dot(A.T,B,out=out)
if out is None:
return sarray(dll.api_dot_tn(A._ptr,B._ptr))
assert isinstance(out,sarray), "Output must be an sarray."
dll.api_dot_tn_out(A._ptr,B._ptr,out._ptr)
def dot_nt(A,B,out=None):
if not isinstance(A,sarray) or not isinstance(B,sarray):
assert not isinstance(A,sarray) and not isinstance(B,sarray), "Cannot perform product on sarray and numpy array."
return np.dot(A,B.T,out=out)
if out is None:
return sarray(dll.api_dot_nt(A._ptr,B._ptr))
assert isinstance(out,sarray), "Output must be an sarray."
dll.api_dot_nt_out(A._ptr,B._ptr,out._ptr)
def dot_tt(A,B,out=None):
if not isinstance(A,sarray) or not isinstance(B,sarray):
assert not isinstance(A,sarray) and not isinstance(B,sarray), "Cannot perform product on sarray and numpy array."
return np.dot(A.T,B.T,out=out)
if out is None:
return sarray(dll.api_dot_tt(A._ptr,B._ptr))
assert isinstance(out,sarray), "Output must be an sarray."
dll.api_dot_tt_out(A._ptr,B._ptr,out._ptr)
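# Illustrative sketch (added for exposition; not part of the original smat API).
# The dot_tn/dot_nt/dot_tt helpers multiply transposed operands without forming
# the transpose explicitly; e.g. dot_tn(A,B) matches dot(transpose(A),B).
# The shapes below are arbitrary assumptions.
def _example_dot_variants():
    A = randn(4, 3)
    B = randn(4, 5)
    C1 = dot_tn(A, B)              # (3x4)*(4x5) -> 3x5
    C2 = dot(transpose(A), B)      # same product, with an explicit transpose
    return allclose(C1, C2)        # expected true up to floating-point error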
def relu(A): return maximum(0,A) if isinstance(A,sarray) else np.maximum(0,A) # Returns Z = relu(A)
def relu_grad(Z): return sign(Z) if isinstance(Z,sarray) else np.sign(Z) # Returns d/dA(relu)(A) = sign(Z) where Z = relu(A)
def _binary_elemwise(sop,nop,A,B,*args):
if type(A) == sarray and np.isscalar(B): B = as_sarray(B,dtype=A.dtype)
if type(B) == sarray and np.isscalar(A): A = as_sarray(A,dtype=B.dtype)
if type(A) == sarray and type(B) == sarray: return sarray(sop(A._ptr,B._ptr))
if nop is not None: return nop(A,B,*args)
    raise TypeError("Both arguments should be of type sarray.")
def maximum(A,B): return _binary_elemwise(dll.api_maximum,np.maximum,A,B)
def minimum(A,B): return _binary_elemwise(dll.api_minimum,np.minimum,A,B)
def isclose(A,B,rtol=None,atol=None):
if rtol == None: rtol = _default_rtol(A.dtype)
if atol == None: atol = _default_atol(A.dtype)
return _binary_elemwise(dll.api_isclose,None,A,B,rtol,atol)
def allclose(A,B,rtol=None,atol=None):
if rtol == None: rtol = _default_rtol(A.dtype)
if atol == None: atol = _default_atol(A.dtype)
return _binary_elemwise(dll.api_allclose,np.allclose,A,B,rtol,atol)
def _reduce_op(A,axis,sop,nop,pyop):
if isinstance(A,sarray): return sop(A,axis)
if isinstance(A,np.ndarray): return nop(A,axis)
if pyop == None: raise TypeError("Invalid type for reduce operation.")
if isinstance(A,list) and axis==None: return pyop(A)
return pyop(A,axis) # A is first item, axis is second item (e.g. call __builtin__.min(A,axis))
def max(A,axis=None): return _reduce_op(A,axis,sarray.max,np.ndarray.max,_py_max)
def min(A,axis=None): return _reduce_op(A,axis,sarray.min,np.ndarray.min,_py_min)
def sum(A,axis=None): return _reduce_op(A,axis,sarray.sum,np.ndarray.sum,_py_sum)
def mean(A,axis=None): return _reduce_op(A,axis,sarray.mean,np.ndarray.mean,None)
def nnz(A,axis=None): return A.nnz(axis) if isinstance(A,sarray) else (np.count_nonzero(A) if axis == None else np.sum(A!=0,axis))
def all(A,axis=None): return _reduce_op(A,axis,sarray.all,np.ndarray.all,_py_all)
def any(A,axis=None): return _reduce_op(A,axis,sarray.any,np.ndarray.any,_py_any)
def count_nonzero(A): return A.nnz() if isinstance(A,sarray) else np.count_nonzero(A)
def repeat(A,n,axis=None):
if isinstance(A,sarray):
return A.repeat(n,axis)
return np.repeat(A,n,axis)
def tile(A,n,axis=None):
if isinstance(A,sarray):
return A.tile(n,axis)
assert axis is None
return np.tile(A,n)
def diff(A,n=1,axis=1):
if not isinstance(A,sarray): return np.diff(A,n,axis)
if n <= 0: return A
B = diff(A,n-1,axis)
return sarray(dll.api_diff(B._ptr,_axis2int[axis]))
def softmax(A,axis=1): return sarray(dll.api_softmax(A._ptr,_axis2int[axis]))
def apply_mask(A,mask): dll.api_apply_mask(A._ptr,mask._ptr)
def logical_not(A): return sarray(dll.api_lnot(A._ptr)) if isinstance(A,sarray) else np.logical_not(A)
def logical_or(A,B): return sarray(dll.api_lor(A._ptr,B._ptr)) if isinstance(A,sarray) and isinstance(B,sarray) else np.logical_or(A,B)
def logical_and(A,B): return sarray(dll.api_land(A._ptr,B._ptr)) if isinstance(A,sarray) and isinstance(B,sarray) else np.logical_and(A,B)
###############################################################
# These extra global functions are provided so that there's an
# easy, named function available for all smat operations.
def eq(A,B): return A == B
def ne(A,B): return A != B
def lt(A,B): return A < B
def le(A,B): return A <= B
def gt(A,B): return A > B
def ge(A,B): return A >= B
def _or(A,B): return A | B
def _xor(A,B): return A ^ B
def _and(A,B): return A & B
def _abs(A): return abs(A)
def invert(A): return ~A
def reciprocal(A): return 1./A
def negative(A): return -A
def add(A,B): return A+B
def subtract(A,B): return A-B
def multiply(A,B): return A*B
def divide(A,B): return A/B
def mod(A,B): return A%B
def power(A,B): return A**B
def max_x(A): return A.max(axis=1)
def max_y(A): return A.max(axis=0)
def min_x(A): return A.min(axis=1)
def min_y(A): return A.min(axis=0)
def sum_x(A): return A.sum(axis=1)
def sum_y(A): return A.sum(axis=0)
def mean_x(A): return A.mean(axis=1)
def mean_y(A): return A.mean(axis=0)
def nnz_x(A): return A.nnz(axis=1)
def nnz_y(A): return A.nnz(axis=0)
def any_x(A): return A.any(axis=1)
def any_y(A): return A.any(axis=0)
def all_x(A): return A.all(axis=1)
def all_y(A): return A.all(axis=0)
def diff_x(A): return A.diff(axis=1)
def diff_y(A): return A.diff(axis=0)
def repeat_x(A,n): return A.repeat(n,axis=1)
def repeat_y(A,n): return A.repeat(n,axis=0)
def tile_x(A,n): return A.tile(n,axis=1)
def tile_y(A,n): return A.tile(n,axis=0)
def softmax_x(A): return softmax(A,axis=1)
def softmax_y(A): return softmax(A,axis=0)
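# Illustrative sketch (added for exposition; not part of the original smat API).
# The _x-suffixed helpers reduce along axis=1 (one value per row) and the
# _y-suffixed helpers along axis=0 (one value per column); the shape below is an
# arbitrary assumption.
def _example_axis_suffixes():
    A = randn(3, 4)
    row_sums = sum_x(A)   # same as A.sum(axis=1); one entry per row
    col_sums = sum_y(A)   # same as A.sum(axis=0); one entry per column
    return row_sums, col_sums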
###############################################################
def _as_tuple(x): return x if type(x) == tuple else (x,)
def _is_full_slice(x): return type(x) == slice and x.start == None and x.stop == None
def _scalar2smat(x):
if type(x) == sarray: return x
if not np.isscalar(x): raise TypeError("Type %s not directly supported in this operation.\n" % str(type(x)))
func,ctype = _smat_const_lookup[type(x)]
b = sarray(func(ctype(x))) # Return scalar wrapped in an smat
return b
def _make_settable_slice_p(x):
tx = type(x)
if tx == slice:
if not x.step in (None, 1):
raise NotImplementedError("Settable slicing is only supported for contiguous ranges.\n")
return c_slice_t(x.start or 0L, x.stop if x.stop != None else c_slice_end)
if tx in _integral_types:
return c_slice_t(x,x+1)
if tx == sarray:
if x.dtype == bool: raise NotImplementedError("Logical slicing not yet implemented.\n")
else: raise NotImplementedError("Settable list-based slicing not yet implemented.\n")
raise NotImplementedError("Settable index must be integral or contiguous slice.\n")
def _make_gettable_slice_p(x):
tx = type(x)
if tx == slice:
if not x.step in (None, 1):
return np.arange(x.start,x.stop,x.step,dtype=index)
return c_slice_t(x.start or 0L, x.stop if x.stop != None else c_slice_end)
if tx in _integral_types:
return c_slice_t(x,x+1)
if tx == list or tx == tuple:
x = np.asarray(x)
tx = np.ndarray
if tx == np.ndarray:
x = as_sarray(x)
tx = sarray
if tx == sarray:
if x.dtype == bool: raise NotImplementedError("Logical slicing not yet implemented.\n")
if x.ndim != 1: raise NotImplementedError("List-based slicing must use 1-dimensional vector.")
return x
raise NotImplementedError("Gettable index must be integral, slice, or list.\n")
def _make_shape_p(shape):
if isinstance(shape,int): return byref(c_shape_t(1,shape,1))
if not isinstance(shape,tuple) or not len(shape) in [1,2]:
raise ValueError("Shape must be a tuple of length 1 or 2.\n")
if len(shape) == 1: return byref(c_shape_t(1,shape[0],1))
return byref(c_shape_t(shape[1],shape[0],1))
def _kwargs2argv(kwargs):
    as_str = lambda v: str(v) if not isinstance(v,list) else ",".join([str(u) for u in v])
args = [key + '=' + as_str(val) for key,val in kwargs.items()]
argv = (c_char_p * len(args))() # convert list into ctype array of char*
argv[:] = args # make each char* item point to the corresponding string in 'args'
return argv
###############################################################
def set_backend(name,**kwargs):
gc.collect()
argv = _kwargs2argv(kwargs)
return dll.api_set_backend(c_char_p(name),len(argv),argv)
def set_backend_options(**kwargs):
gc.collect()
argv = _kwargs2argv(kwargs)
return dll.api_set_backend_options(len(argv),argv)
def reset_backend(**kwargs):
gc.collect()
argv = _kwargs2argv(kwargs)
return dll.api_reset_backend(len(argv),argv)
def get_backend_name(): return get_backend_info().name
def get_supported_dtypes(): return [_int2dtype[dt] for dt in _dtype2int.values() if dll.api_is_dtype_supported(dt)]
def set_default_dtype(dt): dll.api_set_default_dtype(dtype2int(dt))
def set_default_dtypef(dt): dll.api_set_default_dtypef(dtype2int(dt))
def get_default_dtype(): return int2dtype(dll.api_get_default_dtype())
def get_default_dtypef(): return int2dtype(dll.api_get_default_dtypef())
def get_dtype_size(dt): return int(dll.api_dtype_size(dtype2int(dt)))
def get_backend_info():
info = c_backend_info()
dll.api_get_backend_info(byref(info))
return backend_info(info)
class backend_info(object):
def __init__(self,info):
self.uuid = int(info.uuid)
self.name = str(info.name)
self.version = str(info.version)
self.device = str(info.device)
def __repr__(self):
return "%s (v%s) using %s\n" % (self.name,self.version,self.device)
def get_heap_status():
info = c_heap_status()
dll.api_get_heap_status(byref(info))
return heap_status(info)
class heap_status(object):
def __init__(self,info):
self.host_total = long(info.host_total)
self.host_avail = long(info.host_avail)
self.host_used = long(info.host_used)
self.device_total = long(info.device_total)
self.device_avail = long(info.device_avail)
self.device_used = long(info.device_used)
self.device_committed = long(info.device_committed)
def __repr__(self):
string = ''
for name in ['host_total','host_avail','host_used','device_total','device_avail','device_used','device_committed']:
string += '%s: %s\n' % (name,util.format_bytecount(self.__dict__[name],fmt="2.2cM"))
return string
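# Illustrative sketch (added for exposition; not part of the original smat API).
# Queries the currently active backend and its memory usage; the values printed
# depend entirely on which backend was loaded.
def _example_backend_query():
    info = get_backend_info()
    heap = get_heap_status()
    print info   # e.g. "<name> (v<version>) using <device>"
    print heap   # host/device memory counters, formatted by heap_status.__repr__
    return info, heap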
def autotune():
dll.api_autotune_backend()
def destroy_backend(force=False):
"""
Destroys the backend, including any device resources associated with the current thread.
    If there are outstanding handles to memory allocations (e.g. an sarray instance still
holding on to memory used by the backend) then the call will fail; use force=True to override,
though the program may later crash due to those objects holding invalid pointers.
"""
gc.collect()
dll.api_destroy_backend(force)
#########################################################################
# dropout functions
def dropout(X, rate, test_mode=False):
"""
    Applies dropout to input matrix X using the given dropout rate in [0,1).
    If test_mode is False, returns the pair (Z, M) where Z = M * X and M is a
    matrix of Bernoulli trials (M.dtype is bool).
    If test_mode is True, returns the pair (Z, None) where Z = (1-rate)*X.
"""
if test_mode:
Z = (1-rate)*X
return Z, None
else:
Z = empty_like(X)
M = empty(X.shape,dtype=bool)
dll.api_dropout_fp_tr(X._ptr, rate, Z._ptr, M._ptr)
return Z, M
def dropout_grad(dZ, M=None, rate=None):
"""
    Backpropagates differentials dZ at the output of the dropout operation,
    returning the differentials dX at the input of that operation.
    If M is specified, the return value is dX = M * dZ.
    If rate is specified, it is assumed you are backpropagating through the
    mean network (e.g. for backprop-to-input on an already trained model),
    and so the return value is dX = (1-rate)*dZ.
"""
if M is not None:
dX = empty_like(dZ)
dll.api_dropout_bp_tr(dZ._ptr, M._ptr, dX._ptr)
return dX
else:
dX = (1-rate)*dZ
return dX
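# Illustrative training-step sketch (added for exposition; not part of the original
# smat API). The shape, the 0.5 rate, and the all-ones upstream gradient are
# arbitrary assumptions.
def _example_dropout_usage():
    X = randn(8, 16)
    Z, M = dropout(X, 0.5)                       # training mode: Z = M*X, M is a bool mask
    dZ = ones_like(Z)                            # pretend upstream gradient
    dX = dropout_grad(dZ, M=M)                   # dX = M*dZ
    Ztest, _ = dropout(X, 0.5, test_mode=True)   # test mode: Ztest = 0.5*X
    return dX, Ztest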
def maskout(X, M):
"""
    In-place masking of X with mask M.
    Equivalent to X[i] = X[i] if M[i] else 0, where M is of dtype bool.
    Note that this replaces NaN entries with zero, unlike X *= M.
    """
    dll.api_maskout(X._ptr, M._ptr)
#########################################################################
# CUDNN functions
def featuremap_bias(fmaps, dims, bias, cpu_check=False):
"""
Adds a separate bias to each featuremap generated by a convolution,
where 'dims' is the size of each feature map, either (wd, ht) or wd*ht.
The operation is in-place, so it modifies the existing fmaps array.
Let
n = number of images
c = number of feature maps
d = number of elements in each feature map (e.g. width * height)
Then
fmaps must be (n) x (c*d)
bias must be (c) x (1)
The computation adds bias[i] to all elements of featuremap i,
across all images:
for i in range(c):
fmaps[:][i*d:(i+1)*d] += bias[i]
    If cpu_check is True, the result of featuremap_bias will be compared to a
    simple CPU implementation to make sure the absolute and relative error is
    below a certain internal threshold; used for unit tests.
"""
cfg = c_featuremap_bias_cfg_t(int(np.prod(dims)), True, cpu_check)
cudnn_dll().api_featuremap_bias(fmaps._ptr, bias._ptr, byref(cfg))
def featuremap_bias_grad(fmapsgrad, dims, biasgrad=None, accumulate=False, cpu_check=False):
"""
Computes the bias gradient (biasgrad) from the given source featuremap
gradients (fmapsgrad).
Let
n = number of images
c = number of feature maps
d = number of elements in each feature map (e.g. width * height)
Then
fmapsgrad must be (n) x (c*d)
biasgrad must be (c) x (1)
    The computation accumulates all elements of fmapsgrad stored in featuremap i
    into the scalar stored in biasgrad[i]:
for i in range(c):
biasgrad[i] = sum( fmapsgrad[:][i*d:(i+1)*d] )
If biasgrad is None, a new array of the correct size will be created and returned.
"""
if biasgrad is None:
c = fmapsgrad.shape[1] // int(np.prod(dims))
biasgrad = (zeros if accumulate else empty)((c, 1), fmapsgrad.dtype)
cfg = c_featuremap_bias_cfg_t(int(np.prod(dims)), accumulate, cpu_check)
cudnn_dll().api_featuremap_bias_grad(fmapsgrad._ptr, biasgrad._ptr, byref(cfg))
return biasgrad
def conv2(src, src_w, src_h, filters, filter_w, filter_h, dst=None, bias=None, stride=1, accumulate=False, cpu_check=False):
"""
Convolves a set of 2D filters (filters) across a mini-batch of 2D images (src),
to generate a batch of 2D feature maps (dst). If a bias is given (bias != None),
also adds a separate bias for each feature map.
Let
n = number of src images
c = number of channels per src image
k = number of filters
Then
src must be (n) x (c*src_h*src_w)
dst must be (n) x (k*dst_h*dst_w)
filters must be (k) x (c*filter_h*filter_w)
bias must be (k) x (1)
where dst_w = (src_w-filter_w)//stride + 1
dst_h = (src_h-filter_h)//stride + 1
The memory layouts of each array are, in C-order notation,
src[image][in_channel][pixel_y][pixel_x]
dst[image][out_channel][pixel_y][pixel_x]
filters[out_channel][in_channel][filter_y][filter_x]
bias[out_channel]
If dst is None, a new array of the correct size will be created and returned.
If accumulate is True, the output will be added to the current value of dst.
    If cpu_check is True, the result of conv2 will be compared to a
    simple CPU implementation to make sure the absolute and relative error is
    below a certain internal threshold; used for unit tests.
    For the user's convenience, the "dst" instance will contain
    attributes dst.w = dst_w and dst.h = dst_h so that the output
    size can be retrieved, rather than re-computed, by the user.
"""
dst_h = (src_h-filter_h)//stride + 1
dst_w = (src_w-filter_w)//stride + 1
if dst is None:
k = len(filters);
n = len(src);
dst = (zeros if accumulate else empty)((n, k*dst_h*dst_w), src.dtype)
cfg = c_conv2cfg_t(src_w, src_h, filter_w, filter_h, stride, accumulate, cpu_check)
cudnn_dll().api_conv2(src._ptr, filters._ptr, dst._ptr, byref(cfg))
# If bias was specified, add it to our final feature maps
if bias is not None:
featuremap_bias(dst, dst_w*dst_h, bias, cpu_check)
# For convenience return the width and height
setattr(dst, "w", dst_w)
setattr(dst, "h", dst_h)
return dst
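# Illustrative sketch (added for exposition; not part of the original smat API).
# It convolves a small batch of single-channel 8x8 "images" with four 3x3 filters;
# it requires the smat_cudnn extension to be loadable, and all sizes are arbitrary
# assumptions.
def _example_conv2_forward():
    n, c, k = 2, 1, 4
    src_w = src_h = 8
    filter_w = filter_h = 3
    src     = randn(n, c*src_h*src_w)
    filters = randn(k, c*filter_h*filter_w)
    bias    = zeros((k, 1))
    dst = conv2(src, src_w, src_h, filters, filter_w, filter_h, bias=bias)
    # dst is (n) x (k*dst.h*dst.w), with dst.w == dst.h == 6 for stride 1
    return dst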
def conv2_srcgrad(src_w, src_h, filters, filter_w, filter_h, dstgrad, srcgrad=None, stride=1, accumulate=False, cpu_check=False):
"""
Computes gradient differentials at src (srcgrad) of a convolution
using gradient differentials given at dst (dstgrad). The shape and
memory layout of each array correspond to conv2.
If srcgrad is None, a new array of the correct size will be created and returned.
If accumulate is True, the output will be added to the current value of srcgrad.
"""
if srcgrad is None:
c = filters.shape[1] // (filter_h*filter_w);
n = len(dstgrad);
srcgrad = (zeros if accumulate else empty)((n, c*src_h*src_w), dstgrad.dtype)
cfg = c_conv2cfg_t(src_w, src_h, filter_w, filter_h, stride, accumulate, cpu_check)
cudnn_dll().api_conv2_srcgrad(srcgrad._ptr, filters._ptr, dstgrad._ptr, byref(cfg))
return srcgrad
def conv2_filtersgrad(src, src_w, src_h, filter_w, filter_h, dstgrad, filtersgrad=None, stride=1, accumulate=False, cpu_check=False):
"""
Computes gradient differentials for filters (filtersgrad) of a convolution
using gradient differentials given at dst (dstgrad) and original inputs (src).
The shape and memory layout of each array correspond to conv2.
If filtersgrad is None, a new array of the correct size will be created and returned.
If accumulate is True, the output will be added to the current value of filtersgrad.
"""
if filtersgrad is None:
dst_h = (src_h-filter_h)//stride+1
dst_w = (src_w-filter_w)//stride+1
k = dstgrad.shape[1] // (dst_h*dst_w);
c = src.shape[1] // (src_h*src_w);
filtersgrad = (zeros if accumulate else empty)((k, c*filter_h*filter_w), src.dtype)
cfg = c_conv2cfg_t(src_w, src_h, filter_w, filter_h, stride, accumulate, cpu_check)
cudnn_dll().api_conv2_filtersgrad(src._ptr, filtersgrad._ptr, dstgrad._ptr, byref(cfg))
return filtersgrad
def conv2_biasgrad(bias, dstgrad, biasgrad=None, accumulate=False, cpu_check=False):
"""
Computes gradient differentials at bias (biasgrad) of a convolution
using gradient differentials given at dst (dstgrad). The shape and
memory layout of each array correspond to conv2.
If biasgrad is None, a new array of the correct size will be created and returned.
If accumulate is True, the output will be added to the current value of biasgrad.
"""
dims = dstgrad.shape[1] // bias.size
return featuremap_bias_grad(dstgrad, dims, biasgrad, accumulate, cpu_check)
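# Illustrative backward-pass sketch (added for exposition; not part of the original
# smat API; it assumes the same setup as the forward example above). Given upstream
# gradients dstgrad, it computes gradients for the inputs, the filters and the bias.
def _example_conv2_backward(src, filters, bias, dstgrad, src_w, src_h, filter_w, filter_h):
    srcgrad     = conv2_srcgrad(src_w, src_h, filters, filter_w, filter_h, dstgrad)
    filtersgrad = conv2_filtersgrad(src, src_w, src_h, filter_w, filter_h, dstgrad)
    biasgrad    = conv2_biasgrad(bias, dstgrad)
    return srcgrad, filtersgrad, biasgrad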
def pool2(mode, src, src_w, src_h, window_w, window_h, dst=None, stride=1, accumulate=False, cpu_check=False):
"""
Pools a set of 2D regions across a batch of 2D images (src)
to generate a batch of 2D smaller feature maps (dst).
Let
n = number of src images
c = number of channels (feature maps) in src and dst
Then
src must be (n) x (c*src_h*src_w)
dst must be (n) x (c*dst_h*dst_w)
where dst_w = (src_w-window_w)//stride + 1
dst_h = (src_h-window_h)//stride + 1
The memory layouts of each array are, in C-order notation,
src[image][channel][pixel_y][pixel_x]
dst[image][channel][pixel_y][pixel_x]
If mode is "max", the pooling will take the maximum value over the region.
If mode is "avg", the pooling will compute the average over the region.
If dst is None, a new array of the correct size will be created and returned.
If accumulate is True, the output will be added to the current value of dst.
    If cpu_check is True, the result of pool2 will be compared to a
    simple CPU implementation to make sure the absolute and relative error is
    below a certain internal threshold; used for unit tests.
    For the user's convenience, the "dst" instance will contain
    attributes dst.w = dst_w and dst.h = dst_h so that the output
    size can be retrieved, rather than re-computed, by the user.
"""
if mode == "max": mode_int = 0
elif mode == "avg": mode_int = 1
else: raise ValueError("Unrecognized mode '%s'" % mode)
dst_h = (src_h-window_h)//stride + 1
dst_w = (src_w-window_w)//stride + 1
if dst is None:
c = src.shape[1] // (src_w*src_h);
n = len(src);
dst = (zeros if accumulate else empty)((n, c*dst_h*dst_w), src.dtype)
cfg = c_pool2cfg_t(mode_int, src_w, src_h, window_w, window_h, stride, accumulate, cpu_check)
cudnn_dll().api_pool2(src._ptr, dst._ptr, byref(cfg))
# For convenience return the width and height
setattr(dst, "w", dst_w)
setattr(dst, "h", dst_h)
return dst
def pool2_grad(mode, src, src_w, src_h, window_w, window_h, dst, dstgrad, srcgrad=None, stride=1, accumulate=False, cpu_check=False):
"""
Backpropagates a gradient through a pool2 operation. The original src and dst for the forward pool2 operation
are needed, as well as the incoming gradients for the pooling outputs (dstgrad).
The function sets the outgoing gradients for the pooling inputs (srcgrad).
Let
n = number of images in all arrays
c = number of channels (feature maps) in all arrays
Then
src and srcgrad must be (n) x (c*src_h*src_w)
dst and dstgrad must be (n) x (c*dst_h*dst_w)
where dst_w = (src_w-window_w)//stride + 1
dst_h = (src_h-window_h)//stride + 1
The memory layouts of each array are, in C-order notation,
src [image][channel][pixel_y][pixel_x]
srcgrad[image][channel][pixel_y][pixel_x]
dst [image][channel][pixel_y][pixel_x]
dstgrad[image][channel][pixel_y][pixel_x]
If mode is "max", each dstgrad value will be backpropagated to (accumulated to) srcgrad
at the position corresponding to the maximum element of its corresponding window in 'src'.
If mode is "avg", each dstgrad value will be backpropagated to (accumulated to) srcgrad
at all the positions in the corresponding window in 'src'.
If srcgrad is None, a new array of the correct size will be created and returned.
    If accumulate is True, the output will be added to the current value of srcgrad.
    If cpu_check is True, the result of pool2_grad will be compared to a
    simple CPU implementation to make sure the absolute and relative error is
    below a certain internal threshold; used for unit tests.
"""
if mode == "max": mode_int = 0
elif mode == "avg": mode_int = 1
else: raise ValueError("Unrecognized mode '%s'" % mode)
if srcgrad is None:
srcgrad = (zeros_like if accumulate else empty_like)(src)
cfg = c_pool2cfg_t(mode_int, src_w, src_h, window_w, window_h, stride, accumulate, cpu_check)
cudnn_dll().api_pool2_grad(src._ptr, srcgrad._ptr, dst._ptr, dstgrad._ptr, byref(cfg))
return srcgrad
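# Illustrative sketch (added for exposition; not part of the original smat API).
# 2x2 max-pooling with stride 2 over a small batch of feature maps, followed by the
# corresponding gradient backpropagation; it requires the smat_cudnn extension, and
# all sizes are arbitrary assumptions.
def _example_pool2_usage():
    n, c = 2, 3
    src_w = src_h = 6
    window_w = window_h = 2
    src = randn(n, c*src_h*src_w)
    dst = pool2("max", src, src_w, src_h, window_w, window_h, stride=2)   # dst.w == dst.h == 3
    dstgrad = ones_like(dst)
    srcgrad = pool2_grad("max", src, src_w, src_h, window_w, window_h,
                         dst, dstgrad, stride=2)
    return dst, srcgrad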
|
DeepBind-master
|
code/libs/smat/py/smat/smat.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import os,platform,warnings,sys
from os.path import abspath,join,dirname
from ctypes import *
dllconfig = "release"
#dllconfig = "debug"
__all__ = ["dll","safe_dll","load_cdll","cudnn_dll",
"load_extension", "unload_extension",
"c_shape_t","c_index_t","c_isize_t","c_slice_t","c_axis_t","c_slice_end",
"c_dtype_t","c_char_p_p",
"c_heap_status","c_backend_info",
"c_smat_p",
"c_conv2cfg_t","c_conv2cfg_p",
"c_pool2cfg_t","c_pool2cfg_p",
"c_featuremap_bias_cfg_t","c_featuremap_bias_cfg_p",
]
###################################################################
# Declare some useful ctypes based on the C++ types
c_char_p_p = POINTER(c_char_p)
c_index_t = c_int
c_uindex_t = c_uint
c_isize_t = c_int
c_usize_t = c_uint
c_dtype_t = c_int
c_vtype_t = c_int
c_axis_t = c_int
class c_shape_t(Structure):
_fields_ = [("x", c_isize_t),
("y", c_isize_t),
("z", c_isize_t)]
c_shape_p = POINTER(c_shape_t)
class c_coord_t(Structure):
_fields_ = [("x", c_index_t),
("y", c_index_t),
("z", c_index_t)]
c_coord_p = POINTER(c_coord_t)
c_strides_t = c_coord_t
c_strides_p = c_coord_p
class c_slice_t(Structure):
_fields_ = [("first", c_index_t),
("last", c_index_t)]
c_slice_p = POINTER(c_slice_t)
c_slice_end = c_isize_t(0x7f000000) # this constant from types.h
class c_smat(Structure):
_fields_ = [("vtype", c_vtype_t),
("dtype", c_dtype_t),
("shape", c_shape_t),
("strides", c_strides_t),
] # NOT a complete list of members within an smat object
# -- do NOT allocate c_smat instances directly in Python!
c_smat_p = POINTER(c_smat)
class c_heap_status(Structure):
_fields_ = [("host_total", c_size_t),
("host_avail", c_size_t),
("host_used" , c_size_t),
("device_total", c_size_t),
("device_avail", c_size_t),
("device_used", c_size_t),
("device_committed", c_size_t)]
c_heap_status_p = POINTER(c_heap_status)
class c_backend_info(Structure):
_fields_ = [("uuid", c_int),
("name", c_char*32),
("version", c_char*32),
("device", c_char*128)]
c_backend_info_p = POINTER(c_backend_info)
###################################################################
###################################################################
# Load smat.dll
def load_cdll(dllname,search_dirs=None):
module_dir = dirname(abspath(__file__)) # .../smat/py/smat
parent_dir = dirname(dirname(dirname(module_dir))) # .../smat
devbin_dir = join(parent_dir,"smat","build",dllconfig,"bin") # .../smat/build/{release|debug}/bin (C++ development smat, standalone)
instbin_dir = join(module_dir,"bin") # site-packages/smat/bin (C++ installed smat, as python package)
if search_dirs is None:
search_dirs = []
search_dirs += [devbin_dir,instbin_dir]
# First determine the platform-specific file name of the dll
dllfiles = { "Windows": "%s.dll"%dllname,
"Linux" : "lib%s.so"%dllname,
"Unix" : "lib%s.so"%dllname }
dllfile = dllfiles.get(platform.system(),None)
if dllfile is None:
raise NotImplementedError("Platform not yet supported by smat")
# Then try to find it in one of the standard search paths.
for search_dir in search_dirs:
        dllpath = join(search_dir,dllfile) # e.g. .../smat/build/{release|debug}/bin/smat.dll
try:
os.environ["PATH"] += os.pathsep + search_dir
if os.environ.get("LD_LIBRARY_PATH","") == "":
os.environ["LD_LIBRARY_PATH"] = search_dir
else:
os.environ["LD_LIBRARY_PATH"] += os.pathsep + search_dir
dll = cdll.LoadLibrary(dllpath)
_smat_load_err = None
break
except OSError as err:
_smat_load_err = err
if _smat_load_err is not None:
print "**** Failed to load %s from:" % dllfile
for search_dir in search_dirs:
print ' ',search_dir
raise _smat_load_err
return dll
class SmatException(Exception):
pass
# safe_dll_func
# Each instance of safe_dll_func wraps a callable function
# on a ctypes.CDLL or WinDLL object.
# The difference is that, after each call, safe_dll_func checks
# _get_last_error for an error message recorded by the DLL and,
# if one is present, raises it as a SmatException.
#
class safe_dll_func(object):
def __init__(self,name,func,get_last_error,clear_last_error):
self._name = name
self._func = func
self._get_last_error = get_last_error
self._clear_last_error = clear_last_error
def __call__(self,*args):
rval = self._func(*args)
# If the dll threw an exception, _get_last_error() should get the error message
msg = self._get_last_error()
if msg is None:
return rval
self._clear_last_error()
msg = "%s(...) raised an exception\n%s" % (str(self._name.replace("api_","")), msg)
raise SmatException(msg)
def declare(self,restype,argtypes):
self._func.restype = restype
self._func.argtypes = argtypes
def __repr__(self): return "smat_dll.%s(...)" % self._name
# safe_dll
# Simply wraps a ctypes.CDLL so that function calls to that DLL
# are all called through safe_dll_func objects.
#
class safe_dll(object):
def __init__(self,dll,get_last_error,clear_last_error):
self._dll = dll
self._get_last_error = get_last_error
self._clear_last_error = clear_last_error
self._funcs = {}
def __getattr__(self,name):
if not self._funcs.has_key(name):
func = safe_dll_func(name,self._dll.__getattr__(name),self._get_last_error,self._clear_last_error)
self._funcs[name] = func
return func
return self._funcs[name]
def load_extension(dllname,search_dirs=None):
dll.api_sync()
handle = dll.api_load_extension(dllname)
ext_dll = CDLL(dllname,handle=handle)
ext_dll = safe_dll(ext_dll,dll.api_get_last_error,dll.api_clear_last_error)
return ext_dll
def unload_extension(ext_dll):
dll.api_sync()
dll.api_unload_extension(ext_dll._dll._handle)
del ext_dll._dll
# Now create the public 'dll' object exposed to smat.py, with all the methods
# exported by the DLL available for calling
#
smat_cdll = load_cdll('smat')
smat_cdll.api_get_last_error.restype = c_char_p
dll = safe_dll(smat_cdll,smat_cdll.api_get_last_error,smat_cdll.api_clear_last_error)
###################################################################
# Configure function prototypes exported from smat.dll
# dtypes.cpp
dll.api_set_default_dtype.declare( None, [c_dtype_t])
dll.api_set_default_dtypef.declare( None, [c_dtype_t])
dll.api_get_default_dtype.declare( c_dtype_t,[])
dll.api_get_default_dtypef.declare( c_dtype_t,[])
dll.api_dtype_size.declare( c_int, [c_dtype_t])
# context.cpp
dll.api_set_backend.declare( c_bool, [c_char_p,c_int,c_char_p_p])
dll.api_set_backend_options.declare(None, [c_int,c_char_p_p])
dll.api_get_backend_info.declare( None, [c_backend_info_p])
dll.api_reset_backend.declare( None, [c_int,c_char_p_p])
dll.api_destroy_backend.declare( None, [c_bool])
dll.api_get_heap_status.declare( None, [c_heap_status_p])
dll.api_is_dtype_supported.declare(c_bool,[c_dtype_t])
dll.api_load_extension.declare( c_size_t, [c_char_p])
dll.api_unload_extension.declare( None, [c_size_t])
dll.api_set_rand_seed.declare( None, [c_size_t])
# smat.cpp
dll.api_get_last_error.declare(c_char_p, [])
dll.api_clear_last_error.declare(None, [])
dll.api_set_debug_break.declare(None, [c_bool])
dll.api_empty_like.declare(c_smat_p, [c_smat_p,c_dtype_t])
dll.api_zeros_like.declare(c_smat_p, [c_smat_p,c_dtype_t])
dll.api_ones_like.declare( c_smat_p, [c_smat_p,c_dtype_t])
dll.api_empty.declare( c_smat_p, [c_shape_p,c_dtype_t])
dll.api_zeros.declare( c_smat_p, [c_shape_p,c_dtype_t])
dll.api_ones.declare( c_smat_p, [c_shape_p,c_dtype_t])
dll.api_eye.declare( c_smat_p, [c_isize_t,c_dtype_t])
dll.api_arange.declare( c_smat_p, [c_index_t,c_index_t,c_dtype_t])
dll.api_rand.declare( c_smat_p, [c_shape_p,c_dtype_t])
dll.api_randn.declare( c_smat_p, [c_shape_p,c_dtype_t])
dll.api_bernoulli.declare( c_smat_p, [c_shape_p,c_float,c_dtype_t])
dll.api_const_b8.declare( c_smat_p, [c_bool])
dll.api_const_i8.declare( c_smat_p, [c_byte])
dll.api_const_u8.declare( c_smat_p, [c_ubyte])
dll.api_const_i16.declare( c_smat_p, [c_short])
dll.api_const_u16.declare( c_smat_p, [c_ushort])
dll.api_const_i32.declare( c_smat_p, [c_int])
dll.api_const_u32.declare( c_smat_p, [c_uint])
dll.api_const_i64.declare( c_smat_p, [c_longlong])
dll.api_const_u64.declare( c_smat_p, [c_ulonglong])
dll.api_const_f32.declare( c_smat_p, [c_float])
dll.api_const_f64.declare( c_smat_p, [c_double])
dll.api_delete.declare( None, [c_smat_p])
dll.api_nrow.declare( c_isize_t, [c_smat_p])
dll.api_ncol.declare( c_isize_t, [c_smat_p])
dll.api_size.declare( c_size_t, [c_smat_p])
dll.api_shape.declare( None, [c_smat_p,c_shape_p])
dll.api_reshape.declare( c_smat_p, [c_smat_p,c_shape_p])
dll.api_dtype.declare( c_int, [c_smat_p])
dll.api_slice.declare( c_smat_p, [c_smat_p,c_slice_p,c_slice_p])
dll.api_assign.declare( None, [c_smat_p,c_smat_p])
dll.api_copy_from.declare(None, [c_smat_p,c_void_p,c_isize_t,c_isize_t])
dll.api_copy_to.declare( None, [c_smat_p,c_void_p,c_isize_t,c_isize_t])
dll.api_sync.declare( None, [])
dll.api_add.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_sub.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_mul.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_div.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_mod.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_pow.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_iadd.declare( None, [c_smat_p,c_smat_p])
dll.api_isub.declare( None, [c_smat_p,c_smat_p])
dll.api_imul.declare( None, [c_smat_p,c_smat_p])
dll.api_idiv.declare( None, [c_smat_p,c_smat_p])
dll.api_imod.declare( None, [c_smat_p,c_smat_p])
dll.api_ipow.declare( None, [c_smat_p,c_smat_p])
dll.api_dot.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_dot_tn.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_dot_nt.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_dot_tt.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_dot_out.declare( None, [c_smat_p,c_smat_p,c_smat_p])
dll.api_dot_tn_out.declare( None, [c_smat_p,c_smat_p,c_smat_p])
dll.api_dot_nt_out.declare( None, [c_smat_p,c_smat_p,c_smat_p])
dll.api_dot_tt_out.declare( None, [c_smat_p,c_smat_p,c_smat_p])
dll.api_eq.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_ne.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_lt.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_le.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_gt.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_ge.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_not.declare( c_smat_p, [c_smat_p])
dll.api_or.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_xor.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_and.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_lnot.declare( c_smat_p, [c_smat_p])
dll.api_lor.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_land.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_ior.declare( None, [c_smat_p,c_smat_p])
dll.api_ixor.declare( None, [c_smat_p,c_smat_p])
dll.api_iand.declare( None, [c_smat_p,c_smat_p])
dll.api_neg.declare( c_smat_p, [c_smat_p])
dll.api_abs.declare( c_smat_p, [c_smat_p])
dll.api_sign.declare( c_smat_p, [c_smat_p])
dll.api_signbit.declare( c_smat_p, [c_smat_p])
dll.api_sin.declare( c_smat_p, [c_smat_p])
dll.api_cos.declare( c_smat_p, [c_smat_p])
dll.api_tan.declare( c_smat_p, [c_smat_p])
dll.api_arcsin.declare( c_smat_p, [c_smat_p])
dll.api_arccos.declare( c_smat_p, [c_smat_p])
dll.api_arctan.declare( c_smat_p, [c_smat_p])
dll.api_sinh.declare( c_smat_p, [c_smat_p])
dll.api_cosh.declare( c_smat_p, [c_smat_p])
dll.api_tanh.declare( c_smat_p, [c_smat_p])
dll.api_arcsinh.declare( c_smat_p, [c_smat_p])
dll.api_arccosh.declare( c_smat_p, [c_smat_p])
dll.api_arctanh.declare( c_smat_p, [c_smat_p])
dll.api_exp.declare( c_smat_p, [c_smat_p])
dll.api_exp2.declare( c_smat_p, [c_smat_p])
dll.api_log.declare( c_smat_p, [c_smat_p])
dll.api_log2.declare( c_smat_p, [c_smat_p])
dll.api_logistic.declare( c_smat_p, [c_smat_p])
dll.api_sqrt.declare( c_smat_p, [c_smat_p])
dll.api_square.declare( c_smat_p, [c_smat_p])
dll.api_round.declare( c_smat_p, [c_smat_p])
dll.api_floor.declare( c_smat_p, [c_smat_p])
dll.api_ceil.declare( c_smat_p, [c_smat_p])
dll.api_clip.declare( c_smat_p, [c_smat_p,c_double,c_double])
dll.api_isinf.declare( c_smat_p, [c_smat_p])
dll.api_isnan.declare( c_smat_p, [c_smat_p])
dll.api_isclose.declare( c_smat_p, [c_smat_p,c_smat_p,c_double,c_double])
dll.api_allclose.declare( c_smat_p, [c_smat_p,c_smat_p,c_double,c_double])
dll.api_maximum.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_minimum.declare( c_smat_p, [c_smat_p,c_smat_p])
dll.api_max.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_min.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_sum.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_mean.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_nnz.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_all.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_any.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_diff.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_repeat.declare( c_smat_p, [c_smat_p,c_shape_p])
dll.api_tile.declare( c_smat_p, [c_smat_p,c_shape_p])
dll.api_trace.declare( c_smat_p, [c_smat_p])
dll.api_trans.declare( c_smat_p, [c_smat_p])
dll.api_softmax.declare( c_smat_p, [c_smat_p,c_axis_t])
dll.api_apply_mask.declare(None, [c_smat_p,c_smat_p])
dll.api_dropout_fp_tr.declare( None, [c_smat_p,c_double,c_smat_p,c_smat_p])
dll.api_dropout_bp_tr.declare( None, [c_smat_p,c_smat_p,c_smat_p])
####################################################################
dll.api_set_debug_break(False) # disable debug break events so that, within an integrated debugger,
# the error of interest can immediately propagate up to Python rather
# than stopping the debugger at the C++ breakpoint.
###################################################################
# CUDNN extension
###################################################################
# Declare some useful ctypes based on the C++ types
class c_conv2cfg_t(Structure):
_fields_ = [("src_w", c_int), ("src_h", c_int),
("filter_w", c_int), ("filter_h", c_int),
("stride", c_int),
("accumulate", c_int),
("cpu_check", c_int),]
c_conv2cfg_p = POINTER(c_conv2cfg_t)
class c_featuremap_bias_cfg_t(Structure):
_fields_ = [("dims", c_int),
("accumulate", c_int),
("cpu_check", c_int),]
c_featuremap_bias_cfg_p = POINTER(c_featuremap_bias_cfg_t)
class c_pool2cfg_t(Structure):
_fields_ = [("mode", c_int),
("src_w", c_int), ("src_h", c_int),
("window_w", c_int), ("window_h", c_int),
("stride", c_int),
("accumulate", c_int),
("cpu_check", c_int),]
c_pool2cfg_p = POINTER(c_pool2cfg_t)
_cudnn_dll = None
def cudnn_dll():
"""Get handle to smat_cudnn.dll, loading it if necessary."""
global _cudnn_dll
if _cudnn_dll is None:
path_sep = ";" if platform.system()=="Windows" else ":"
path_vars = os.environ['PATH'].split(path_sep)
cudnn_path = os.environ.get('CUDNN_PATH')
if cudnn_path and cudnn_path not in path_vars:
path_vars += [cudnn_path]
os.environ['PATH'] = path_sep.join(path_vars)
_cudnn_dll = load_extension("smat_cudnn")
_cudnn_dll.api_conv2.declare( None, [c_smat_p, c_smat_p, c_smat_p, c_conv2cfg_p])
_cudnn_dll.api_conv2_srcgrad.declare( None, [c_smat_p, c_smat_p, c_smat_p, c_conv2cfg_p])
_cudnn_dll.api_conv2_filtersgrad.declare(None, [c_smat_p, c_smat_p, c_smat_p, c_conv2cfg_p])
_cudnn_dll.api_featuremap_bias.declare( None, [c_smat_p, c_smat_p, c_featuremap_bias_cfg_p])
_cudnn_dll.api_featuremap_bias_grad.declare( None, [c_smat_p, c_smat_p, c_featuremap_bias_cfg_p])
_cudnn_dll.api_pool2.declare( None, [c_smat_p, c_smat_p, c_pool2cfg_p])
_cudnn_dll.api_pool2_grad.declare( None, [c_smat_p, c_smat_p, c_smat_p, c_smat_p, c_pool2cfg_p])
return _cudnn_dll
|
DeepBind-master
|
code/libs/smat/py/smat/smat_dll.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from testutil import *
import numpy as np
import smat # want module name too
from smat import *
import cPickle as pickle
import os,os.path
####################################################
# GLOBAL VARIABLES USED IN EACH TEST (as read-only)
n,m = 123,21
Z,_Z = None,None # Z = numpy.zeros(n,m), _Z = smat.zeros(n,m)
O,_O = None,None # ones
I,_I = None,None # identity
A,_A = None,None     # random matrix 1 (arbitrary values)
B,_B = None,None     # random matrix 2 (arbitrary values)
C,_C = None,None     # random matrix 3 has non-zero values (good as a denominator)
W,_W = None,None # row vector taken from ravel'd C
def clear_test_matrices():
global Z, O, I, A, B, C, W
global _Z,_O,_I,_A,_B,_C,_W
Z, O, I, A, B, C, W = None, None, None, None, None, None, None
_Z,_O,_I,_A,_B,_C,_W= None, None, None, None, None, None, None
def alloc_test_matrices(dt):
global n,m
global Z, O, I, A, B, C, W
global _Z,_O,_I,_A,_B,_C,_W
Z = np.zeros((n,m),dt); _Z = zeros((n,m),dt);
O = np.ones((n,m),dt); _O = ones((n,m),dt);
I = np.eye(n,dtype=dt); _I = eye(n,dtype=dt);
A = make_rand(n,m,dt); A = A.astype(dt)
B = make_rand(n,m,dt); B = abs(B); B = B.astype(dt)
C = make_rand(n,m,dt); C = abs(C); C[C==0] += 1; C = C.astype(dt)
W = C.ravel()[10:n+10].reshape((-1,1))
_A = as_sarray(A)
_B = as_sarray(B)
_C = as_sarray(C)
_W = as_sarray(W)
assert_eq(_Z,Z)
#####################################################
def test_create(dt):
# Machine-created matrices should match numpy versions
assert_eq(_Z,Z)
assert_eq(_O,O)
assert_eq(_I,I)
assert_eq(_A,A)
assert_eq(_B,B)
assert_eq(_C,C)
assert_eq(_W,W)
# Properties.
assert _Z.size == Z.size
assert _Z.ndim == Z.ndim
assert _Z.shape== Z.shape
assert _Z.dtype== Z.dtype
assert _Z.nrow == Z.shape[0]
assert _Z.ncol == Z.shape[1]
# Create range-valued array.
assert_eq(arange(5,67),np.arange(5,67))
# Use _like creation functions.
_X = empty_like(_Z)
assert _X.shape == _Z.shape
assert _X.dtype == _Z.dtype
_X = empty_like(_Z,uint8)
assert _X.shape == _Z.shape
assert _X.dtype == uint8
_X = zeros_like(_O)
assert_eq(_X,_Z)
_X = ones_like(_Z)
assert_eq(_X,_O)
# Create from list of values, make sure result is same as numpy
L = [0.1*i for i in range(300)]
_X = array(L, dt)
X = np.array(L, dt).reshape((-1,1))
assert_eq(_X, X)
#####################################################
def test_copy(dt):
# Upload and then download from machine
assert_eq(as_sarray(A), A)
_A_copy = _A.copy()
A_copy = A.copy()
assert_eq(_A_copy,A_copy)
_A_copy[5] = -123
A_copy[5] = -123
assert_ne(_A_copy,A)
assert_eq(_A_copy,A_copy)
# Type casting.
assert _A.astype(float32).dtype == float32
if int32 in get_supported_dtypes():
assert _A.astype(int32).dtype == int32
#####################################################
ext_demo_dll = None
class c_clamp_args_t(Structure): # This same structure is defined in cuda_ext_clamp.cu
_fields_ = [("lo", c_double),
("hi", c_double)]
c_clamp_args_p = POINTER(c_clamp_args_t)
def load_extension_demo():
global ext_demo_dll
ext_demo_dll = load_extension("smat_ext_demo")
ext_demo_dll.api_lerp.declare( c_smat_p, [c_smat_p,c_smat_p,c_double]) # C, [A,B,alpha]
ext_demo_dll.api_clamp.declare( None, [c_smat_p,c_clamp_args_p]) # [A,(lo,hi)]
def unload_extension_demo():
global ext_demo_dll
unload_extension(ext_demo_dll)
ext_demo_dll = None
def lerp(A,B,alpha):
C_ptr = ext_demo_dll.api_lerp(A._ptr,B._ptr,c_double(alpha))
return sarray(C_ptr)
def clamp(A,lo,hi):
args = c_clamp_args_t(lo,hi)
ext_demo_dll.api_clamp(A._ptr,byref(args))
def test_smat_extension(dt):
load_extension_demo()
    # Function lerp(A,B,alpha) computes (1-alpha)*A + alpha*B and returns the result
_X = lerp(_A,_B,0.25)
X = (1-0.25)*A + 0.25*B
assert_close(_X,X)
# Function clamp(A,lo,hi) computes A[:] = maximum(lo,minimum(hi,A)) inplace
_X = _A.copy(); clamp(_X,-0.5,0.5)
X = A.copy(); X = np.maximum(-0.5,np.minimum(0.5,X))
assert_eq(_X,X)
unload_extension_demo()
#####################################################
def test_random(dt):
# Bernoulli random numbers
_A1 = bernoulli((n,m),0.5,dt)
_A2 = bernoulli((n,m),0.5,dt)
_A3 = bernoulli((n,m),0.2,dt)
assert_ne(_A1,_A2) # pretty pathetic test of randomness, but whatever
assert_ne(_A2,_A3)
assert_any(_A1 == 0)
assert_any(_A1 == 1)
assert_all(logical_or(_A1 == 1,_A1 == 0))
assert_all(nnz(_A1) > nnz(_A3)*1.1)
#####################################################
def test_random_int(dt):
# Integral random numbers
_A1 = rand(n,m,dt)
_A2 = rand(n,m,dt)
_A3 = rand(n,m,dt)
assert_ne(_A1,_A2) # pretty pathetic test of randomness, but whatever
assert_ne(_A2,_A3)
rand_seed(1234)
_A4 = rand(n,m,dt)
assert_ne(_A1,_A4)
rand_seed(1234)
_A5 = rand(n,m,dt)
assert_eq(_A4,_A5) # same random seed should give same random stream
#######################################################################
def test_random_float(dt):
# Floating point random numbers
_A1 = randn(n,m,dt)
_A2 = randn(n,m,dt)
_A3 = randn(n,m,dt)
assert_ne(_A1,_A2) # pretty pathetic test of randomness, but whatever
assert_ne(_A2,_A3)
rand_seed(1234)
_A4 = randn(n,m,dt)
assert_ne(_A1,_A4)
rand_seed(1234)
_A5 = randn(n,m,dt)
assert_eq(_A4,_A5) # same random seed should give same random stream
#######################################################################
def test_closeness(dt):
A1 = np.require(make_rand(n,m,dt)*1e-5,dtype=float32)
_A1 = asarray(A1)
assert allclose(A1,A1*(1+1e-6),rtol=1e-5,atol=0)
assert not allclose(A1,A1*(1+1e-4),rtol=1e-5,atol=0)
assert allclose(A1,A1+1e-6,rtol=0,atol=1e-5)
assert not allclose(A1,A1+1e-4,rtol=0,atol=1e-5)
#####################################################
def test_attributes():
"""Test setattr and getattr functions."""
A = empty((5,5))
A.setattr("foo",1)
A.setattr("bar",10)
assert A.foo == 1
assert A.bar == 10
del A.foo
assert A.bar == 10
del A.bar
#######################################################################
def test_serialize(dt):
"""
Tests that an smat array of any type can be
serialized to disk, including its attributes.
"""
A1 = rand(30,10,dtype=dt)
X1 = rand(256,5,dtype=dt)
X1.setattr("A",A1)
fname = "smat_unittest_serialize.pkl"
with open(fname,"wb") as file:
pickle.dump(X1,file)
with open(fname,"rb") as file:
X2 = pickle.load(file)
os.remove(fname)
assert isinstance(X2,sarray)
assert_eq(X1,X2)
assert(X2.hasattr("A")) # Make sure that attributes are also serialized
A2 = X2.getattr("A")
assert_eq(A1,A2)
#####################################################
def test_slicing(dt):
# Row slicing.
assert_eq(_A[0], A[0])
assert_eq(_A[0,:], A[0,:])
assert_eq(_A[11], A[11])
assert_eq(_A[11,:], A[11,:])
assert_eq(_A[-1], A[-1])
assert_eq(_A[-1,:], A[-1,:])
assert_eq(_A[:], A[:])
assert_eq(_A[:,:], A[:,:])
assert_eq(_A[:21], A[:21])
assert_eq(_A[:21,:], A[:21,:])
assert_eq(_A[-21:], A[-21:])
assert_eq(_A[-21:-16],A[-21:-16:])
assert_eq(_A[-21:,:], A[-21:,:])
assert_eq(_A[21:-21], A[21:-21:])
assert_eq(_A[21:-21,:],A[21:-21,:])
# Row slicing on a row vector
_a,a = _A[3,:],A[3:4,:]
assert_eq(_a, a)
assert_eq(_a[0], a[0])
# Column slicing.
assert_eq(_A[:,0], A[:,0:1])
assert_eq(_A[:,1], A[:,1:2])
assert_eq(_A[:,:5], A[:,:5])
assert_eq(_A[:,-1], A[:,-1:])
assert_eq(_A[:,-5], A[:,-5:-4])
assert_eq(_A[:,-5:], A[:,-5:])
assert_eq(_A[:,-5:-1], A[:,-5:-1])
# Column slicing on a column vector
_a,a = _A[:,3],A[:,3:4]
assert_eq(_a, a)
assert_eq(_a[:,0], a[:,0:1])
# Row + Column slicing.
assert_eq(_A[5,5], A[5,5])
assert_eq(_A[:5,5], A[:5,5:6])
assert_eq(_A[2:5,5], A[2:5,5:6])
assert_eq(_A[2:5,5:7], A[2:5,5:7])
assert_eq(_A[-6:,-10:], A[-6:,-10:])
# Row-sliced assignments.
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = 789 ,789; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = _B[:] ,B[:]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[0] ,X[0] = _B[0] ,B[0]; assert_eq(_X,X) # Broadcast copy.
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = _B[0] ,B[0]; assert_eq(_X,X) # Broadcast copy.
_X,X = _A.copy(),A.copy(); _X[:] ,X[:] = _W ,W; assert_eq(_X,X) # Broadcast copy.
_X,X = _A.copy(),A.copy(); _X[-1] ,X[-1] = _B[-1] ,B[-1]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:11] ,X[:11] = _B[:11] ,B[:11]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[-11:],X[-11:] = _B[-11:],B[-11:]; assert_eq(_X,X)
# Col-sliced assignments.
# _X,X = _A.copy(),A.copy(); _X[:,0] ,X[:,0] = 789 ,789; assert_eq(_X,X) # Assigning const to column strided array not implemented
_X,X = _A.copy(),A.copy(); _X[:,0] ,X[:,0] = _B[:,0] ,B[:,0]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,1] ,X[:,1] = _B[:,1] ,B[:,1]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,:10] ,X[:,:10] = _B[:,:10] ,B[:,:10]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,:10] ,X[:,:10] = _B[:,:10] ,B[:,:10]; assert_eq(_X,X)
_X,X = _A.copy(),A.copy(); _X[:,10:-10] ,X[:,10:-10] = _B[:,10:-10] ,B[:,10:-10]; assert_eq(_X,X)
# Row+Col-sliced assignments.
_X,X = _A.copy(),A.copy(); _X[5:10,7:13] ,X[5:10,7:13] = _B[5:10,7:13] ,B[5:10,7:13]; assert_eq(_X,X)
#####################################################
def test_reshape(dt):
_X,X = _A[:45,:].copy(),A[:45,:].copy()
assert_eq(_X.reshape((7,135)),X.reshape((7,135)))
assert_eq(_X.reshape((-1,7)),X.reshape((-1,7)))
_Y,Y = _X[:9],X[:9]; _Y[:,:],Y[:,:] = 1,1
assert_eq(_Y,Y)
assert_eq(_X,X)
assert_eq(_X.reshape((7,135)),X.reshape((7,135)))
assert_eq(_X.reshape((135,-1)),X.reshape((135,-1)))
#####################################################
def test_transpose(dt):
assert_eq(transpose(_I),I)
assert_eq(transpose(_A.reshape((-1,1))),np.transpose(A.reshape((-1,1))))
assert_eq(transpose(_A.reshape((1,-1))),np.transpose(A.reshape((1,-1))))
assert_eq(transpose(_A.reshape((3,-1))),np.transpose(A.reshape((3,-1))))
assert_eq(transpose(_A.reshape((-1,3))),np.transpose(A.reshape((-1,3))))
assert_eq(transpose(_A),np.transpose(A))
assert_eq(transpose(_B),np.transpose(B))
assert_eq(_A.T, np.transpose(A))
assert_eq(_B.T, np.transpose(B))
#######################################################################
def test_dot(dt):
assert_eq(dot(_I,_I), I)
assert_close(dot(_A.reshape((1,-1)),_B.reshape((-1,1))), np.dot(A.reshape((1,-1)),B.reshape((-1,1))))
assert_close(dot(_A,_B.T) ,np.dot(A,B.T))
assert_close(dot_nt(_A,_B),np.dot(A,B.T))
assert_close(dot(_A.T,_B) ,np.dot(A.T,B))
assert_close(dot_tn(_A,_B),np.dot(A.T,B))
#######################################################################
def test_bitwise(dt):
# Bitwise and logical (NOT,AND,OR,XOR).
assert_eq(~_A, ~A)
assert_eq(_A | 0, A | 0)
assert_eq( 1 | _B, 1 | B)
assert_eq(_A | _B, A | B)
assert_eq(_A ^ 0, A ^ 0)
assert_eq( 1 ^ _B, 1 ^ B)
assert_eq(_A ^ _B, A ^ B)
assert_eq(_A & 0, A & 0)
assert_eq( 1 & _B, 1 & B)
assert_eq(_A & _B, A & B)
#######################################################################
def test_logical(dt):
# Logical operations (as opposed to bitwise)
assert_eq(logical_not(_A), np.logical_not(A))
assert_eq(logical_or(_A,_B), np.logical_or(A,B))
assert_eq(logical_and(_A,_B), np.logical_and(A,B))
#######################################################################
def test_modulo(dt):
_X,X = _A,A
_Y,Y = _C,C
if dt in dtypes_sint:
_X,X = abs(_X),abs(X) # cuda modulo for signed types differs from numpy,
_Y,Y = abs(_Y),abs(Y) # so don't compare that case
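    # (Presumably the CUDA kernels follow C's truncated modulo, where -7 % 3 == -1,
    # whereas numpy follows the sign of the divisor and gives 2; taking abs() above
    # sidesteps that difference for signed types.)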
assert_eq(_X % 7, (X % np.asarray(7,dtype=dt)).astype(dt))
assert_eq( 7 % _Y, (np.asarray(7,dtype=dt) % Y).astype(dt))
assert_eq(_X % _Y, (X % Y).astype(dt))
#######################################################################
def test_naninf(dt):
_X = _A.copy(); _X[3] = np.nan; _X[5] = np.inf
X = A.copy(); X[3] = np.nan; X[5] = np.inf
assert_eq(isnan(_X), np.isnan(X))
assert_eq(isinf(_X), np.isinf(X))
assert_eq(isinf(_A/0),np.ones(A.shape,dtype=bool))
assert_eq(isnan(0*_A/0),np.ones(A.shape,dtype=bool))
#######################################################################
def test_math_float(dt):
Amin = A.min()
Amax = A.max()
A2 = (2*( A-Amin)/(Amax-Amin)-1)*.999
_A2 = (2*(_A-Amin)/(Amax-Amin)-1)*.999
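    # A2 rescales A to lie strictly inside (-1, 1) (the extra *.999 keeps values off
    # the endpoints) so that arcsin/arccos/arctanh below stay within their domains.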
assert_eq(clip(_A,0,1),np.clip(A,0,1))
assert_eq(abs(_O), np.abs(O))
assert_eq(abs(_A), np.abs(A))
assert_eq(square(_A), np.square(A))
assert_eq(round(_A), np.round(A))
assert_eq(floor(_A), np.floor(A))
assert_eq(ceil(_A), np.ceil(A))
assert_close(sin(_A), np.sin(A))
assert_close(cos(_A), np.cos(A))
assert_close(tan(_A), np.tan(A))
assert_close(arcsin(_A2), np.arcsin(A2))
assert_close(arccos(_A2), np.arccos(A2))
assert_close(arctan(_A2), np.arctan(A2))
assert_close(sinh(_A), np.sinh(A))
assert_close(cosh(_A), np.cosh(A))
assert_close(tanh(_A), np.tanh(A))
assert_close(arcsinh(_A2), np.arcsinh(A2))
assert_close(arccosh(1+abs(_A2)), np.arccosh(1+np.abs(A2)))
assert_close(arctanh(_A2), np.arctanh(A2))
assert_close(exp(_C), np.exp(C))
assert_close(exp2(_C), np.exp2(C))
assert_close(log(_C), np.log(C))
assert_close(log2(_C), np.log2(C))
assert_close(logistic(_A), 1 / (1 + np.exp(-A)))
# Handle sign and sqrt separately...
if dt == bool:
assert_eq(sign(_O), np.sign(np.asarray(O,dtype=uint8))) # numpy doesn't support sign on type bool
assert_eq(sign(_A), np.sign(np.asarray(A,dtype=uint8)))
else:
assert_eq(sign(_O), np.sign(O))
assert_eq(sign(_I), np.sign(I))
if dt in (int8,int16,int32,int64,float32,float64):
assert_eq(sign(-_I), np.sign(-I))
assert_eq(sign(_A), np.sign(A))
assert_eq(signbit(_O), np.signbit(O,out=np.empty(O.shape,dtype=dt)))
assert_eq(signbit(_I), np.signbit(I,out=np.empty(I.shape,dtype=dt)))
if dt in (int8,int16,int32,int64,float32,float64):
assert_eq(signbit(-_I), np.signbit(-I,out=np.empty(I.shape,dtype=dt)))
assert_eq(signbit(_A), np.signbit(A,out=np.empty(A.shape,dtype=dt)))
if dt in dtypes_float:
assert_close(sqrt(abs(_A)),np.sqrt(np.abs(A))) # numpy converts integer types to float16/float32/float64, and we don't want that.
#######################################################################
def test_reduce(dt):
X = np.asarray([[12.5],[1]])
_X = as_sarray(X)
assert_eq(sum(_X,axis=1),np.sum(X,axis=1).reshape((-1,1)))
# Operations that reduce in one or more dimensions.
reducers = [(max,np.max,assert_eq),
(min,np.min,assert_eq),
(sum,np.sum,assert_close),
(mean,np.mean,assert_close),
(nnz,np.nnz,assert_eq),
(any,np.any,assert_eq),
(all,np.all,assert_eq),
]
shapes = [_A.shape,(-1,1),(3,-1),(-1,3),(-1,7),(1,-1),(7,-1)]
for shape in shapes:
for sreduce,nreduce,check in reducers:
_X = _A.reshape(shape).copy(); _X.ravel()[5:100] = 0;
X = A.reshape(shape).copy(); X.ravel()[5:100] = 0;
assert_eq(_X,X)
            check(sreduce(_X,axis=1), nreduce(X,axis=1).reshape((-1,1))) # reshape because we don't follow numpy's convention of collapsing reduced axes into a 1-D result
check(sreduce(_X,axis=0), nreduce(X,axis=0).reshape((1,-1)))
check(sreduce(_X), nreduce(X))
#######################################################################
def test_trace(dt):
#assert_eq(trace(_I), np.trace(I)) # not yet implemented
pass
#######################################################################
def test_diff(dt):
for axis in (0,1):
if axis == 1: continue # TODO: axis=1 not yet implemented
for n in range(5):
assert_eq(diff(_A,n,axis=axis), np.diff(A,n,axis=axis))
#######################################################################
def test_repeat(dt):
for n in range(5): assert_eq(repeat(_A,n,axis=1), np.repeat(A,n,axis=1))
for n in range(5): assert_eq(repeat(_A,n), np.repeat(A,n).reshape((-1,1)))
# TODO: axis=0 not yet implemented
#######################################################################
def test_tile(dt):
for n in range(5): assert_eq(tile(_A,n,axis=1), np.tile(A,(1,n)))
for n in range(5): assert_eq(tile(_A,n), np.tile(A.reshape((-1,1)),n).reshape((-1,1)))
# TODO: axis=0 not yet implemented
#######################################################################
def test_arithmetic(dt):
# Arithmetic operators (+,-,*,/)
_X,X = _A,A
_Y,Y = _B,B
_D,D = _C,C
if dt in dtypes_sint:
_Y,Y = abs(_Y),abs(Y) # cuda/numpy differ on how signed integer types
_D,D = abs(_D),abs(D) # are rounded under division, so skip that comparison
assert_eq(_X+_Y, X+Y)
assert_eq(_X+_Y[5,:], X+Y[5,:]) # test broadcast of row vector
assert_eq(_X[0,:]+_Y, X[0,:]+Y) # test broadcast of row vector
assert_eq(_X+_W, X+W) # test broadcast of col vector
assert_eq(_X+3 , np.asarray(X+3,dtype=dt))
assert_eq(3+_X , np.asarray(3+X,dtype=dt))
assert_eq(_X-_Y, X-Y)
assert_eq(_X-_Y[5,:], X-Y[5,:])
assert_eq(_X[0,:]-_Y, X[0,:]-Y)
assert_eq(_X-_W, X-W)
assert_eq(_X-3 , X-np.asarray(3,dtype=dt))
assert_eq(3-_X , np.asarray(3,dtype=dt)-X)
assert_eq(_X*_Y, X*Y)
assert_eq(_X*_Y[5,:], X*Y[5,:])
assert_eq(_X[0,:]*_Y, X[0,:]*Y)
assert_eq(_X*_W, X*W)
assert_eq(_X*3 , X*np.asarray(3,dtype=dt))
assert_eq(3*_X , np.asarray(3,dtype=dt)*X)
assert_close(_Y/_D[5,:], Y/D[5,:])
assert_close(_Y[0,:]/_D, Y[0,:]/D)
assert_close(_Y/_W, Y/W)
assert_close(_Y/_D, np.asarray(Y/D,dtype=dt))
assert_close(_Y/3 , np.asarray(Y/np.asarray(3,dtype=dt),dtype=dt))
assert_close(3/_D , np.asarray(np.asarray(3,dtype=dt)/D,dtype=dt))
if dt != bool:
_X = _A.copy(); X = A.copy(); _X += 2; X += 2; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X += _C; X += C; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X -= 2; X -= 2; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X -= _C; X -= C; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= 2; X *= 2; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= _C; X *= C; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= 0; X *= 0; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X *= 1; X *= 1; assert_eq(_X,X)
_X = _A.copy(); X = A.copy(); _X /= 1; X /= 1; assert_eq(_X,X)
#######################################################################
def test_elemwise_minmax(dt):
# Elementwise minimum/maximum
assert_eq(maximum(_A, 9),np.maximum(A,np.asarray(9,dtype=dt)).astype(dt))
assert_eq(maximum( 9,_B),np.maximum(np.asarray(9,dtype=dt),B).astype(dt))
assert_eq(maximum(_A,_B),np.maximum(A,B))
assert_eq(minimum(_A, 9),np.minimum(A,np.asarray(9,dtype=dt)).astype(dt))
assert_eq(minimum( 9,_B),np.minimum(np.asarray(9,dtype=dt),B).astype(dt))
assert_eq(minimum(_A,_B),np.minimum(A,B))
#######################################################################
def test_pow(dt):
    if dt in [int64,uint64]: # Currently does not work well with int64/uint64 on compute capability 1.2 (no doubles)
return
# Power (**).
_X,X = abs(_A),np.abs(A);
_Y,Y = (_I[:21,:].reshape((-1,21))+1.2).astype(dt),(I[:21,:].reshape((-1,21))+1.2).astype(dt)
assert_close(_X**_Y, X**Y)
assert_close(_X**_Y[0,:], X**Y[0,:]) # broadcast
assert_close(_X**2.1 , (X**np.asarray(2.1,dtype=dt)).astype(dt))
assert_close(7**_Y , np.asarray(7**Y,dtype=dt))
#######################################################################
def test_softmax(dt):
assert_close(softmax(_A,axis=0),numpy_softmax(A,axis=0))
assert_close(softmax(_A,axis=1),numpy_softmax(A,axis=1))
#######################################################################
def test_apply_mask(dt):
for _ in range(5):
# smat version
_X = _A.copy()
_M = bernoulli(_A.shape, 0.8, dtype=np.bool)
_X[5:7] = np.nan
_M[5:7] = False
apply_mask(_X, _M)
# numpy version
X = A.copy()
X[5:7] = 0
X *= _M.asnumpy()
X[np.where(X==-0.)] = 0
# compare
assert_eq(_X, X)
#######################################################################
def test_dropout(dt):
# Forward prop (training mode)
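    # Presumably each entry of the returned mask M is kept with probability (1-rate),
    # so training-mode output is X*M and test-mode output is the expectation X*(1-rate),
    # which is what the assertions below check.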
rate = 0.4
X = rand(100, 400, dt)
Z,M = dropout(X, rate)
assert np.sum(M.asnumpy().ravel()) > 0 # Almost zero probability of this failing
assert np.sum(M.asnumpy().ravel()) < M.size-1 # Almost zero probability of this failing
assert_eq(Z, X.asnumpy()*M.asnumpy())
# Back prop (training mode)
dZ = Z*(-.33)
dX = dropout_grad(dZ, M)
assert_eq(dX, dZ.asnumpy()*M.asnumpy())
# Forward prop (testing mode)
Ztest,_ = dropout(X, rate, test_mode=True)
assert_close(Ztest, X.asnumpy()*(1-rate))
#######################################################################
def test_conv2(dt):
# src must be (n) x (c*src_h*src_w)
# dst must be (n) x (k*dst_h*dst_w)
# filters must be (k) x (c*filter_h*filter_w)
# So here we make src (3) x (3*4*2) so that src[n,c,y,x] = n.cyx
# filters (2) x (3*2*2) so that filters[k,c,y,x] = k.cyx
src = array([[0.000, 0.001, 0.002, 0.010, 0.011, 0.012, # src[0][c,y,x]
0.100, 0.101, 0.102, 0.110, 0.111, 0.112,
0.200, 0.201, 0.202, 0.210, 0.211, 0.212,
0.300, 0.301, 0.302, 0.310, 0.311, 0.312,],
[1.000, 1.001, 1.002, 1.010, 1.011, 1.012, # src[1][c,y,x]
1.100, 1.101, 1.102, 1.110, 1.111, 1.112,
1.200, 1.201, 1.202, 1.210, 1.211, 1.212,
1.300, 1.301, 1.302, 1.310, 1.311, 1.312,],
[2.000, 2.001, 2.002, 2.010, 2.011, 2.012, # src[2][c,y,x]
2.100, 2.101, 2.102, 2.110, 2.111, 2.112,
2.200, 2.201, 2.202, 2.210, 2.211, 2.212,
2.300, 2.301, 2.302, 2.310, 2.311, 2.312,],
], dt)
filters = array([[0.000, 0.001, 0.010, 0.011, # filters[0][c,y,x]
0.100, 0.101, 0.110, 0.111,
0.200, 0.201, 0.210, 0.211,],
[1.000, 1.001, 1.010, 1.011, # filters[1][c,y,x]
1.100, 1.101, 1.110, 1.111,
1.200, 1.201, 1.210, 1.211,],
], dt)
sw, sh = 2, 4
fw, fh = 2, 2
# test conv2
dst1 = conv2(src, sw, sh, filters, fw, fh, cpu_check=True) # cpu_check=True will check against cpu implementation before returning
dst2 = conv2(src, sw, sh, filters, fw, fh, cpu_check=True)
conv2(src, sw, sh, filters, fw, fh, dst2, accumulate=True) # re-compute dst2 and add it to itself.
assert_close(2*dst1, dst2) # assert that accumulate=True worked
dst3 = conv2(src, sw, sh, filters, fw, fh, stride=2, cpu_check=True)
assert dst3.shape == (3,4) # check that stride=2 had expected effect on shape of dst3
assert dst3.w == 1
assert dst3.h == 2
# try large random matrices, compare to CPU implementation; throw away return value
conv2(randn(47,(23*21)*5,dtype=dt), 23, 21, randn(3,5*(4*3),dtype=dt), 4, 3, cpu_check=True)
# test conv2 with bias
bias1 = array([0.5,0.75], dt)
dst1b = conv2(src, sw, sh, filters, fw, fh, bias=bias1, cpu_check=True) # cpu_check=True will check against cpu implementation before returning
assert_close((dst1b-dst1)[:,:3], 0.5*ones_like(dst1b[:,:3]))
assert_close((dst1b-dst1)[:,3:], 0.75*ones_like(dst1b[:,:3]))
# test conv2_srcgrad
dstgrad = conv2(src, sw, sh, filters, fw, fh, cpu_check=True)*(-.33);
srcgrad1 = conv2_srcgrad(sw, sh, filters, fw, fh, dstgrad, cpu_check=True)
srcgrad2 = conv2_srcgrad(sw, sh, filters, fw, fh, dstgrad, cpu_check=True)
srcgrad2 = conv2_srcgrad(sw, sh, filters, fw, fh, dstgrad, srcgrad2, accumulate=True)
assert_close(2*srcgrad1, srcgrad2) # assert that accumulate=True worked
    # test conv2_filtersgrad
dstgrad = conv2(src, sw, sh, filters, fw, fh, cpu_check=True)*(-.33);
filtersgrad1 = conv2_filtersgrad(src, sw, sh, fw, fh, dstgrad, cpu_check=True)
filtersgrad2 = conv2_filtersgrad(src, sw, sh, fw, fh, dstgrad, cpu_check=True)
filtersgrad2 = conv2_filtersgrad(src, sw, sh, fw, fh, dstgrad, filtersgrad2, accumulate=True)
assert_close(2*filtersgrad1, filtersgrad2) # assert that accumulate=True worked
#######################################################################
def test_featuremap_bias(dt):
# fmaps must be (n) x (c*h*w)
# So here we make src (3) x (3*4*2) so that src[n,c,y,x] = n.cyx
src = array([[0.000, 0.001, 0.002, 0.010, 0.011, 0.012, # fmaps[0][c,y,x]
0.100, 0.101, 0.102, 0.110, 0.111, 0.112,
0.200, 0.201, 0.202, 0.210, 0.211, 0.212,
0.300, 0.301, 0.302, 0.310, 0.311, 0.312,],
[1.000, 1.001, 1.002, 1.010, 1.011, 1.012, # fmaps[1][c,y,x]
1.100, 1.101, 1.102, 1.110, 1.111, 1.112,
1.200, 1.201, 1.202, 1.210, 1.211, 1.212,
1.300, 1.301, 1.302, 1.310, 1.311, 1.312,],
[2.000, 2.001, 2.002, 2.010, 2.011, 2.012, # fmaps[2][c,y,x]
2.100, 2.101, 2.102, 2.110, 2.111, 2.112,
2.200, 2.201, 2.202, 2.210, 2.211, 2.212,
2.300, 2.301, 2.302, 2.310, 2.311, 2.312,],
], dt)
w, h = 2, 4
# Check that each bias is added to each element of its corresponding feature map
fmaps1 = src.copy()
bias = array([10., 20., 30.], dt)
featuremap_bias(fmaps1, (w, h), bias, cpu_check=True) # cpu_check=True will check against cpu implementation before returning
# Check that accumulation works
featuremap_bias(fmaps1, (w, h), bias, cpu_check=True)
fmaps2 = src.copy()
featuremap_bias(fmaps2, (w, h), 2*bias, cpu_check=True)
    assert_close(fmaps1, fmaps2) # adding bias twice should match adding 2*bias once
# Check that large matrix works
featuremap_bias(randn(17, (19*21*13), dt), (21, 13), randn(19,1,dt), cpu_check=True) # cpu_check=True will check against cpu implementation before returning
# Check that each element of feature map k is accumulated in bias unit k
fmapsgrad = src.copy()*(-0.33)
biasgrad = ones_like(bias) # make sure non-accumulate overwrites biasgrad
featuremap_bias_grad(fmapsgrad, (w, h), biasgrad, cpu_check=True)
return
def test_pool2(dt):
# src must be (n) x (c*h*w)
# So here we make src (3) x (3*4*2) so that src[n,c,y,x] = n.cyx
src = array([[0.000, 0.001, 0.002, 0.010, 0.011, 0.012, # src[0][c,y,x]
0.100, 0.101, 0.102, 0.110, 0.111, 0.112,
0.200, 0.201, 0.202, 0.210, 0.211, 0.212,
0.300, 0.301, 0.302, 0.310, 0.311, 0.312,],
[1.000, 1.001, 1.002, 1.010, 1.011, 1.012, # src[1][c,y,x]
1.100, 1.101, 1.102, 1.110, 1.111, 1.112,
1.200, 1.201, 1.202, 1.210, 1.211, 1.212,
1.300, 1.301, 1.302, 1.310, 1.311, 1.312,],
[2.000, 2.001, 2.002, 2.010, 2.011, 2.012, # src[2][c,y,x]
2.100, 2.101, 2.102, 2.110, 2.111, 2.112,
2.200, 2.201, 2.202, 2.210, 2.211, 2.212,
2.300, 2.301, 2.302, 2.310, 2.311, 2.312,],
], dt)
sw, sh = 2, 4
ww, wh = 2, 2
for mode in ("max", "avg"):
# Super simple 1-image, 1-channel input feature map
easy1 = array([[1, 7, 3, # easy[0][c,y,x]
4, 6, 1,
9, 3, 5]], dt)
dst1 = pool2(mode, easy1, 3, 3, 3, 2, cpu_check=True)
dst2 = pool2(mode, easy1, 3, 3, 2, 3, cpu_check=True)
dst3 = pool2(mode, easy1, 3, 3, 2, 2, cpu_check=True)
if mode == "max":
assert dst1[0,0] == 7
assert dst1[0,1] == 9
assert dst2[0,0] == 9
assert dst2[0,1] == 7
assert dst3[0,0] == 7
assert dst3[0,1] == 7
assert dst3[0,2] == 9
assert dst3[0,3] == 6
# Simple 1-image, 2-channel input feature map
easy2 = array([[1., 7., 3., # easy[0][0,y,x]
4., 6., 1.,
9., 3., 5.,
.1, .7, .3, # easy[0][1,y,x]
.4, .6, .1,
.9, .3, .5]], dt)
dst1 = pool2(mode, easy2, 3, 3, 3, 2, cpu_check=True)
dst2 = pool2(mode, easy2, 3, 3, 2, 3, cpu_check=True)
dst3 = pool2(mode, easy2, 3, 3, 2, 2, cpu_check=True)
if mode == "max":
assert dst1[0,0] == 7.
assert dst1[0,1] == 9.
assert dst1[0,2] == .7
assert dst1[0,3] == .9
assert dst2[0,0] == 9.
assert dst2[0,1] == 7.
assert dst2[0,2] == .9
assert dst2[0,3] == .7
assert dst3[0,0] == 7.
assert dst3[0,1] == 7.
assert dst3[0,2] == 9.
assert dst3[0,3] == 6.
assert dst3[0,4] == .7
assert dst3[0,5] == .7
assert dst3[0,6] == .9
assert dst3[0,7] == .6
# Simple 2-image, 1-channel input feature map
easy3 = array([[1., 7., 3., # easy[0][0,y,x]
4., 6., 1.,
9., 3., 5.],
[.1, .7, .3, # easy[1][0,y,x]
.4, .6, .1,
.9, .3, .5]], dt)
dst1 = pool2(mode, easy3, 3, 3, 3, 2, cpu_check=True)
dst2 = pool2(mode, easy3, 3, 3, 2, 3, cpu_check=True)
dst3 = pool2(mode, easy3, 3, 3, 2, 2, cpu_check=True)
if mode == "max":
assert dst1[0,0] == 7.
assert dst1[0,1] == 9.
assert dst1[1,0] == .7
assert dst1[1,1] == .9
assert dst2[0,0] == 9.
assert dst2[0,1] == 7.
assert dst2[1,0] == .9
assert dst2[1,1] == .7
assert dst3[0,0] == 7.
assert dst3[0,1] == 7.
assert dst3[0,2] == 9.
assert dst3[0,3] == 6.
assert dst3[1,0] == .7
assert dst3[1,1] == .7
assert dst3[1,2] == .9
assert dst3[1,3] == .6
        # Check pooling of the full multi-channel src, and that accumulate=True adds into dst
dst1 = pool2(mode, src, sw, sh, ww, wh, cpu_check=True) # cpu_check=True will check against cpu implementation before returning
dst2 = pool2(mode, src, sw, sh, ww, wh, cpu_check=True)
pool2(mode, src, sw, sh, ww, wh, dst2, accumulate=True) # re-compute dst2 and add it to itself.
assert_close(2*dst1, dst2) # assert that accumulate=True worked
dst3 = pool2(mode, src, sw, sh, ww, wh, stride=2, cpu_check=True)
assert dst3.shape == (3,6) # check that stride=2 had expected effect on shape of dst3
assert dst3.w == 1
assert dst3.h == 2
# try large random matrices, compare to CPU implementation
pool2(mode, randn(47,5*(23*21),dtype=dt), 23, 21, 4, 3, cpu_check=True)
dst4 = pool2(mode, randn(47,5*(23*21),dtype=dt), 23, 21, 23, 21, cpu_check=True)
        assert dst4.shape == (47,5) # 47 images, 5 channels
assert dst4.w == 1 # but only one giant pooling region, so only one pooling output
assert dst4.h == 1
# Test pool2_grad on a matrix same shape as src, and make sure accumulate=True works
dst = pool2(mode, src, sw, sh, ww, wh, cpu_check=True)
dstgrad = dst*(-.33)
srcgrad1 = pool2_grad(mode, src, sw, sh, ww, wh, dst, dstgrad, cpu_check=True)
srcgrad2 = pool2_grad(mode, src, sw, sh, ww, wh, dst, dstgrad, cpu_check=True)
srcgrad2 = pool2_grad(mode, src, sw, sh, ww, wh, dst, dstgrad, srcgrad2, accumulate=True)
assert_close(2*srcgrad1, srcgrad2) # assert that accumulate=True worked
return
#######################################################################
def test_memory_manager():
#reset_backend()
#reset_backend(verbose=1,log=["heap"]) # for debugging, if there's a problem
size = 10*1024*1024 # 10 million element chunks
m = 1024
n = size/m
status0 = get_heap_status()
Y = ones((n,m),dtype=float32)
status1 = get_heap_status()
Y = None
status2 = get_heap_status()
Y = ones((n,m),dtype=float32)
status3 = get_heap_status()
Y = None
status4 = get_heap_status()
Y = ones((n,3*m//4),dtype=float32)
status5 = get_heap_status()
Y = None
status6 = get_heap_status()
assert status1.device_used >= status0.device_used + n*m # use >= n*m instead of == n*m because sanity checks/alignment constraints might allocate a few extra bytes
assert status1.device_committed >= status0.device_committed
assert status2.device_used == status0.device_used
assert status2.device_committed == status1.device_committed
assert status3.device_used == status1.device_used
assert status3.device_committed == status1.device_committed
assert status4.device_used == status0.device_used
assert status4.device_committed == status1.device_committed
assert status5.device_used < status1.device_used # allocated smaller array, but should use same block
assert status5.device_committed == status1.device_committed
assert status6.device_used == status0.device_used
assert status6.device_committed == status1.device_committed
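    # In short, the assertions above check that freeing Y returns device_used to its
    # baseline, while device_committed stays at its high-water mark, presumably because
    # the allocator caches freed blocks for reuse (status5 reusing the same block).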
for i in range(2): # try to alloc and free all memory, several times
# Each trial allocates (and continues to reference)
# enough matrix data to nearly fill the available device memory,
# then syncs with the machine.
mem = get_heap_status()
X = []
Y = ones((n,m),dtype=float32)
        elem_to_alloc = int(mem.device_avail*0.9)/4   # target ~90% of free device memory, counted in float32 elements
        chunks_to_alloc = elem_to_alloc/size-2        # leave a couple of chunks of headroom
for j in range(chunks_to_alloc):
X.append(ones((n,m),dtype=float32))
Y = Y + X[-1]
sync()
X = None
Y = None
sync()
#reset_backend()
#######################################################################
def run_unittest(test,dtypes=None):
print rpad("%s..." % test.__name__.partition("_")[2],19),
    if dtypes is None:
test()
else:
supported = get_supported_dtypes()
for dt in [bool, int8, int16, int32, int64,
uint8,uint16,uint32,uint64,float32,float64]:
if not dt in supported:
continue
print ("%3s" % dtype_short_name[dt] if dt in dtypes else " "),
if dt in dtypes:
alloc_test_matrices(dt)
test(dt)
clear_test_matrices()
print
#######################################################################
def unittest(want_cudnn=False):
print '\n---------------------- UNIT TESTS -------------------------\n'
np.random.seed(42)
set_backend_options(randseed=42,verbose=0,sanitycheck=False)
run_unittest(test_memory_manager)
run_unittest(test_create ,dtypes_generic)
run_unittest(test_copy ,dtypes_generic)
run_unittest(test_random ,dtypes_generic)
run_unittest(test_random_int ,dtypes_integral)
run_unittest(test_random_float ,dtypes_float)
run_unittest(test_smat_extension,dtypes_float)
run_unittest(test_closeness ,dtypes_float)
run_unittest(test_attributes)
run_unittest(test_serialize ,dtypes_generic)
run_unittest(test_slicing ,dtypes_generic)
run_unittest(test_reshape ,dtypes_generic)
run_unittest(test_transpose ,dtypes_generic)
run_unittest(test_dot ,dtypes_float)
run_unittest(test_bitwise ,dtypes_integral)
run_unittest(test_logical ,dtypes_integral)
run_unittest(test_modulo ,dtypes_integral)
run_unittest(test_naninf ,dtypes_float)
run_unittest(test_math_float ,dtypes_float)
run_unittest(test_reduce ,dtypes_generic)
run_unittest(test_trace ,dtypes_generic)
run_unittest(test_diff ,dtypes_generic)
run_unittest(test_repeat ,dtypes_generic)
run_unittest(test_tile ,dtypes_generic)
run_unittest(test_arithmetic ,dtypes_generic)
run_unittest(test_elemwise_minmax,dtypes_generic)
run_unittest(test_pow ,dtypes_generic)
run_unittest(test_softmax ,dtypes_float)
run_unittest(test_apply_mask ,dtypes_float)
run_unittest(test_dropout ,dtypes_float)
#run_unittest(test_repmul_iadd ,dtypes_float)
if want_cudnn:
run_unittest(test_conv2, dtypes_float)
run_unittest(test_featuremap_bias, dtypes_float)
run_unittest(test_pool2, dtypes_float)
|
DeepBind-master
|
code/libs/smat/py/smat/tests/unittest.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from testutil import *
import numpy as np
import smat # want module name too
from smat import *
import timeit
import os,os.path
import matplotlib
matplotlib.use('Agg') # off-screen rendering
import matplotlib.pyplot as plt
#######################################################################
def _apply_unary(b,func,repeats,A,*args,**kwargs):
for i in range(repeats):
func(A,*args,**kwargs)
b.sync()
def apply_unary(dt,b,n,m,repeats,func,*args,**kwargs): # dtype to test, backend module to test
A = b.rand(n,m,dt)
_apply = lambda: _apply_unary(b,func,repeats,A,*args,**kwargs)
_apply() # run once to stabilize running time
b.sync()
trials = [timeit.timeit(_apply,number=1)/repeats for i in range(5)]
trials.sort()
return trials[0] # return best time
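# Example usage (a sketch; assumes the smat backend module is in scope as `smat`):
#   t = apply_unary(float32, smat, 128, 1000, 20, smat.logistic)
# returns the best per-call time in seconds over 5 trials of 20 repeats each.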
#######################################################################
def perftest_logistic(dt,b): return apply_unary(dt,b,128,1000,20,b.logistic),None
def perftest_exp(dt,b): return apply_unary(dt,b,128,1000,20,b.exp),None
def perftest_tanh(dt,b): return apply_unary(dt,b,128,1000,20,b.tanh),None
def perftest_softmax(dt,b): return apply_unary(dt,b,1000,10,20,b.softmax),None
def perftest_repeat_x(dt,b): return apply_unary(dt,b,512,256,20,b.repeat,16,axis=1),None
def perftest_tile_x(dt,b): return apply_unary(dt,b,512,256,20,b.tile,(1,16)),None
#######################################################################
def perftest_reduce_5Kx1(dt,b): return apply_unary(dt,b,5000,1,100,b.sum,axis=None),None
def perftest_reducex_5Kx10(dt,b): return apply_unary(dt,b,5000,10,100,b.sum,axis=1),None
def perftest_reducey_5Kx10(dt,b): return apply_unary(dt,b,5000,10,100,b.sum,axis=0),None
def perftest_reducex_10x5K(dt,b): return apply_unary(dt,b,10,5000,100,b.sum,axis=1),None
def perftest_reducey_10x5K(dt,b): return apply_unary(dt,b,10,5000,100,b.sum,axis=0),None
def perftest_reduce_1Mx1(dt,b): return apply_unary(dt,b,1000000,1,5,b.sum,axis=None),None
def perftest_reducex_1Mx10(dt,b): return apply_unary(dt,b,1000000,10,5,b.sum,axis=1),None
def perftest_reducey_1Mx10(dt,b): return apply_unary(dt,b,1000000,10,5,b.sum,axis=0),None
def perftest_reducex_10x1M(dt,b): return apply_unary(dt,b,10,1000000,5,b.sum,axis=1),None
def perftest_reducey_10x1M(dt,b): return apply_unary(dt,b,10,1000000,5,b.sum,axis=0),None
#######################################################################
def _apply_binary(b,func,repeats,A,B,*args,**kwargs):
for i in range(repeats):
func(A,B,*args,**kwargs)
b.sync()
def apply_binary(dt,b,n,m,p,q,repeats,func,*args,**kwargs): # dtype to test, backend module to test
A = b.rand(n,m,dt)
B = b.rand(p,q,dt)
_apply = lambda: _apply_binary(b,func,repeats,A,B,*args,**kwargs)
_apply() # run once to stabilize running time
b.sync()
trials = []
for i in range(5):
# push everything out of the cache, if any
#X = b.ones((1024*1024,1))
#X = None
# do the performance test
trials.append(timeit.timeit(_apply,number=1)/repeats)
b.sync()
trials.sort()
return trials[0] # return best time
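# Example usage (a sketch): time a 128x784 by 784x500 matrix product on backend b
# (either smat or np), 10 repeats per trial, best of 5 trials:
#   t = apply_binary(float32, b, 128, 784, 784, 500, 10, b.dot)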
#######################################################################
def mulsum(b,A,B):
#return
b.sum(A*B)
def perftest_mul(dt,b,N): return apply_binary(dt,b,1,2**N,1,2**N,10,b.multiply),2**N
def perftest_dot(dt,b): return apply_binary(dt,b,128,784,784,500,10,b.dot),128*784*500
def perftest_dot_nt(dt,b): return apply_binary(dt,b,128,784,500,784,10,b.dot_nt),128*784*500
def perftest_dot_tn(dt,b): return apply_binary(dt,b,784,128,784,500,10,b.dot_tn),128*784*500
def perftest_dot_tt(dt,b): return apply_binary(dt,b,784,128,500,784,10,b.dot_tt),128*784*500
def perftest_dot_nt_vec(dt,b): return apply_binary(dt,b,1,1024,1,1024,20,b.dot_nt),None
def perftest_mulsum_vec(dt,b): return apply_binary(dt,b,1,1024*1024,1,1024*1024,20,lambda A,B: mulsum(b,A,B)),None
#######################################################################
def perftest_bprop(dt,b):
    # Simulate training a 784-800-800-10 network on a subset of MNIST
trainsize = 2000
batchsize = 200
insize = 28*28
hiddensize = 800
outsize = 10
dt_X = uint8 if uint8 in get_supported_dtypes() else float32
times = {}
X = b.rand(trainsize,insize,dtype=dt_X)
Y = b.rand(trainsize,outsize,dt)
W1 = b.rand(insize,hiddensize,dt)
b1 = b.rand(1,hiddensize,dt)
W2 = b.rand(hiddensize,hiddensize,dt)
b2 = b.rand(1,hiddensize,dt)
W3 = b.rand(hiddensize,outsize,dt)
b3 = b.rand(1,outsize,dt)
eta = 0.001
num_epoch = 2
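    # The loop below is plain minibatch backprop with logistic hidden layers and a
    # softmax output. In matrix form (a sketch of what the code computes):
    #   D_out = (Z_out - Y)/trainsize,  dW_l = Z_{l-1}^T D_l,  db_l = sum_rows(D_l),
    #   D_{l-1} = Z_{l-1}*(1 - Z_{l-1}) * (D_l W_l^T)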
b.sync()
tic()
for epoch in range(num_epoch):
for i in range(trainsize/batchsize):
Z0 = X[i*batchsize:i*batchsize+batchsize].astype(dt)
Y0 = Y[i*batchsize:i*batchsize+batchsize]
# forward pass
A1 = b.dot(Z0,W1) + b1
Z1 = b.logistic(A1)
A2 = b.dot(Z1,W2) + b2
Z2 = b.logistic(A2)
A3 = b.dot(Z2,W3) + b3
A3 -= b.max(A3,axis=1).reshape((batchsize,1)) # for softmax stability
            Z3 = b.exp(A3)/b.sum(b.exp(A3),axis=1).reshape((batchsize,1)) # calculate softmax
# backward pass
D3 = (Z3-Y0)/trainsize
dW3 = b.dot_tn(Z2,D3)
            db3 = b.sum(D3,axis=0)
D2 = (Z2-Z2**2) * b.dot_nt(D3,W3)
dW2 = b.dot_tn(Z1,D2)
            db2 = b.sum(D2,axis=0)
D1 = (Z1-Z1**2) * b.dot_nt(D2,W2)
dW1 = b.dot_tn(Z0,D1)
            db1 = b.sum(D1,axis=0)
# Take gradient step
W3 -= eta*dW3
b3 -= eta*db3
W2 -= eta*dW2
b2 -= eta*db2
W1 -= eta*dW1
b1 -= eta*db1
b.sync()
return toc() / num_epoch, None
#######################################################################
class gridtest_reduce(object):
def __init__(self,name,reduce,axis):
self.name = name
self.reduce = reduce
self.A = None
self.b = None
self.axis = axis
self.nrepeat = 1
def configure(self,b,dt,n,m,nrepeat):
self.A = b.rand(n,m,dt)
self.b = b
self.nrepeat = nrepeat
def __call__(self):
#print self.A.shape
for i in range(self.nrepeat):
x = self.reduce(self.A,axis=self.axis)
'''
y = np.sum(as_numpy(self.A),axis=self.axis)
try:
assert_close(x,y)
except:
print x.ravel()
print y
quit()
'''
self.b.sync()
def nflop(self):
n,m = self.A.shape
if self.axis == 1:
return (m-1)*n
else:
return (n-1)*m
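# nflop() counts one addition per pair combined: reducing an n-by-m matrix along
# axis=1 takes (m-1) additions in each of the n rows, hence (m-1)*n; the (n-1)*m
# figure is used for axis=0 and as an approximation for a full reduction.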
#######################################################################
def run_perftest(log,dt,test,dtypes,argsets=None):
testname = test.__name__.partition("_")[2]
if dt not in dtypes:
log.write(testname+"\n")
return
if argsets is None:
argsets = [()]
for args in argsets:
print rpad("%s%s:%s..." % (testname,str(args),dtype_short_name[dt]),24),
backends = [smat,np]
best = { backend : np.inf for backend in backends }
for backend in backends:
flop = None
for trial in range(3):
runtime,flop = test(dt,backend,*args)
best[backend] = min(best[backend],runtime) # Take the best of three runs
if flop is None:
print(rpad("%s=%.4fms," % (backend.__package__,best[backend]*1000),17)), # print out the best milliseconds
else:
print(rpad("%s=%.3f GFLOPS," % (backend.__package__,flop/best[backend]/1e9),17)), # print out the best GFLOPS
if best[np] > best[smat]:
print("(%.1fx faster)" % (best[np]/best[smat]))
else:
print("(%.1fx SLOWER)" % (best[smat]/best[np]))
log.write( rpad(testname,16)
+rpad("%.6f" % best[smat],10)
+rpad("%.6f" % best[np],10)
+"\n")
def run_gridtest(log,dt,gridtest,dtypes):
if dt not in dtypes:
log.write(gridtest.name+"\n")
return
#backends = [(smat,"smat"),(np,"numpy")]
backends = [(smat,"smat")]
base = 5L
nsteps = 8
nrepeat = 3
max_size = 128*1024*1024
for b,bname in backends:
testname = "%s_%s_%s" % (bname,gridtest.name,dtype_short_name[dt])
print rpad("%s..." % testname,24),
gflops = np.zeros((nsteps,nsteps))
#flops[:] = np.nan
for mexp in range(nsteps):
for nexp in range(nsteps):
n,m = base**(nexp+1),base**(mexp+1)
if n*m > max_size:
continue
gridtest.configure(b,dt,n,m,nrepeat)
b.sync()
seconds = timeit.timeit(gridtest,number=1)/nrepeat
gflops[nexp,mexp] = gridtest.nflop()/seconds/1000/1000/1000
print
msg = ""
for row in gflops:
for val in row:
if not np.isnan(val):
msg += str(val)
msg += "\t"
            msg = msg.rstrip('\t')  # drop the trailing tab of this row
msg += "\n"
log.write( rpad(testname,16) + "\n")
log.write(msg)
plt.figure(dpi=60)
plt.title(testname + " performance (GFLOPS)")
plt.xlabel('shape.x')
plt.ylabel('shape.y')
img = plt.imshow(gflops.squeeze(),origin='lower') #Needs to be in row,col order
img.set_interpolation('nearest')
plt.xticks(np.arange(nsteps),[base**(i+1) for i in range(nsteps)])
plt.yticks(np.arange(nsteps),[base**(i+1) for i in range(nsteps)])
plt.colorbar()
#plt.show()
plt.savefig(os.path.join("log",testname+".png"))
#######################################################################
def perftest():
print '\n------------------- PERFORMANCE TESTS ----------------------\n'
np.random.seed(42)
set_backend_options(randseed=42,verbose=0,sanitycheck=False)
if not os.path.exists("log"):
os.makedirs("log")
for dt in [float32,float64,int32,bool]:
if dt not in get_supported_dtypes():
continue
# Record the performance results in a text file that can be
# imported into a spreadsheet if so desired.
perflog = os.path.join("log","smatperf-%s.txt" % dt.__name__)
print "----- Generating %s ------" % perflog
with open(perflog,"w") as log:
log.write( rpad("test",16)
+rpad("smat",10)
+rpad("numpy",10)
+"\n")
# Performance tests with dead code elimination disabled
reset_backend(sanitycheck=False,elimdeadcode=False) # ,verbose=1,log=["exec"]
#run_perftest(log,dt,perftest_mul,dtypes_float,((i,) for i in range(4,25)))
run_perftest(log,dt,perftest_mul,dtypes_float,((i,) for i in [5,10,20,26]))
'''
run_perftest(log,dt,perftest_logistic ,dtypes_float)
run_perftest(log,dt,perftest_exp ,dtypes_float)
run_perftest(log,dt,perftest_tanh ,dtypes_float)
run_perftest(log,dt,perftest_softmax ,dtypes_float)
run_perftest(log,dt,perftest_dot ,dtypes_float)
run_perftest(log,dt,perftest_dot_nt ,dtypes_float)
run_perftest(log,dt,perftest_dot_tn ,dtypes_float)
run_perftest(log,dt,perftest_dot_tt ,dtypes_float)
run_perftest(log,dt,perftest_dot_nt_vec ,dtypes_float)
'''
#run_perftest(log,dt,perftest_mulsum_vec ,dtypes_float)
'''
run_perftest(log,dt,perftest_repeat_x ,dtypes_generic)
run_perftest(log,dt,perftest_tile_x ,dtypes_generic)
run_perftest(log,dt,perftest_reduce_5Kx1 ,dtypes_generic)
run_perftest(log,dt,perftest_reducex_5Kx10,dtypes_generic)
run_perftest(log,dt,perftest_reducey_5Kx10,dtypes_generic)
run_perftest(log,dt,perftest_reducex_10x5K,dtypes_generic)
run_perftest(log,dt,perftest_reducey_10x5K,dtypes_generic)
run_perftest(log,dt,perftest_reduce_1Mx1 ,dtypes_generic)
run_perftest(log,dt,perftest_reducex_1Mx10,dtypes_generic)
run_perftest(log,dt,perftest_reducey_1Mx10,dtypes_generic)
run_perftest(log,dt,perftest_reducex_10x1M,dtypes_generic)
run_perftest(log,dt,perftest_reducey_10x1M,dtypes_generic)
# More performance tests, where dead code elimination is now allowed (the default)
reset_backend(elimdeadcode=True)
run_perftest(log,dt,perftest_bprop,dtypes_float)
reset_backend(elimdeadcode=True)
run_gridtest(log,dt,gridtest_reduce("sum",sum,None),dtypes_float)
run_gridtest(log,dt,gridtest_reduce("sum_y",sum,0),dtypes_float)
run_gridtest(log,dt,gridtest_reduce("sum_x",sum,1),dtypes_float)
'''
|
DeepBind-master
|
code/libs/smat/py/smat/tests/perftest.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from unittest import unittest
from perftest import perftest
|
DeepBind-master
|
code/libs/smat/py/smat/tests/__init__.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import numpy as np
import numpy.random as npr
import smat as sm
def rpad(string,minlen):
return "%s%s" % (string," "*(minlen-len(string)))
def numpy_softmax(M,axis=None):
M = np.exp(M - np.max(M,axis=axis,keepdims=True))
return M/np.sum(M,axis=axis,keepdims=True)
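# Subtracting the per-axis max before exponentiating keeps exp() in a numerically
# safe range; the shift cancels in the normalization, so the result is unchanged.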
np.rand = lambda n,m,dtype=None: np.asarray(npr.rand(n,m),dtype=dtype)
np.randn = lambda n,m,dtype=None: np.asarray(npr.randn(n,m),dtype=dtype)
np.logistic = lambda A: 1/(1+np.exp(-A))
np.dot_tn = lambda A,B: np.dot(A.T,B)
np.dot_nt = lambda A,B: np.dot(A,B.T)
np.dot_tt = lambda A,B: np.dot(A.T,B.T)
np.nnz = lambda X,axis=None: np.sum(X!=0,axis=axis) # numpy doesn't have a nnz that operates only along axes, so make a lambda for it.
np.softmax = numpy_softmax
np.sync = lambda: None
dtypes_logical = {np.bool}
dtypes_sint = {np.int8 ,np.int16 ,np.int32 ,np.int64}
dtypes_uint = {np.uint8,np.uint16,np.uint32,np.uint64}
dtypes_float = {np.float32,np.float64}
dtypes_signed = dtypes_sint | dtypes_float
dtypes_integral= dtypes_logical | dtypes_sint | dtypes_uint
dtypes_numeric = dtypes_sint | dtypes_uint | dtypes_float
dtypes_generic = dtypes_logical | dtypes_numeric
def supported(dts):
return dts.intersection(sm.get_supported_dtypes())
def assert_all(X): assert(np.all(sm.as_numpy(X)))
def assert_any(X): assert(np.any(sm.as_numpy(X)))
def assert_eq(X,Y): assert(np.all(sm.as_numpy(X) == sm.as_numpy(Y)))
def assert_ne(X,Y): assert(np.any(sm.as_numpy(X) != sm.as_numpy(Y)))
def assert_close(X,Y):
X = sm.as_numpy(X).ravel()
Y = sm.as_numpy(Y).ravel()
if X.dtype == np.float32: np.testing.assert_allclose(X,Y,rtol=1e-3,atol=np.inf)
elif X.dtype == np.float64: np.testing.assert_allclose(X,Y,rtol=1e-6,atol=np.inf)
else: assert_all(X == Y)
int_ranges = {np.bool: (0,2),
np.int8: (-7,8), np.uint8: (0,8),
np.int16: (-63,64), np.uint16: (0,64),
np.int32: (-255,256), np.uint32: (0,256),
np.int64: (-128**2+1,128**2), np.uint64: (0,128**2)}
dtype_short_name={np.bool: "b8",
np.int8: "i8", np.uint8: "u8",
np.int16: "i16", np.uint16: "u16",
np.int32: "i32", np.uint32: "u32",
np.int64: "i64", np.uint64: "u64",
np.float32:"f32",np.float64:"f64",
}
def make_rand(n,m,dt):
if dt in dtypes_float:
return npr.randn(n,m) # deliberately don't cast to dtype "dt" yet
else:
return npr.randint(int_ranges[dt][0],int_ranges[dt][1],size=(n,m))
|
DeepBind-master
|
code/libs/smat/py/smat/tests/testutil.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import os
import os.path
import copy
import time
import numpy as np
import numpy.random as npr
import smat as sm
import logging
import itertools
import deepity
import deepity.hpsearch
from . import _ext
from . import util
from . import globals
from .train import train, calibrate
from .predict import predict, load_modelinfos, load_model
import multiprocessing
class filtersampler(deepity.hpsearch.paramdef):
def __init__(self, filters, samplesize, name=None):
assert samplesize <= filters.shape[1]
# Take only columns (filters) where the max weight magnitude is
# at least 0.05 times the max weight magnitude of the whole filterbank.
colmax = np.max(abs(filters), axis=0)
allmax = colmax.max()
colmask = colmax >= allmax*0.05
while sum(colmask) < samplesize:
colmask[npr.randint(0,colmask.size)] = True
self.samplesize = samplesize
self.filters = filters[:, colmask].copy()
#self.filters = npr.randn(self.filters.shape[0],self.filters.shape[1])*1.1
self.subsets = list(itertools.combinations(range(self.filters.shape[1]), samplesize))
super(filtersampler,self).__init__(name, filters.dtype)
def sample(self, task_idx, sample_ids):
return np.asarray([self.filters[:,self.subsets[npr.randint(0,len(self.subsets))]]
for id in sample_ids])
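# Example (a sketch; the names here are hypothetical): given a trained filterbank W
# of shape (filter_len, nfilter), a hyperparameter that draws a random 8-filter
# subset for each sample id could be declared as
#   init = filtersampler(W, 8, name="filter_init")
# and init.sample(task_idx, ids) then returns an array of shape (len(ids), filter_len, 8).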
def _bind_filterinit_hyperparam(path, obj, model):
if not isinstance(obj, _ext.corr1ord):
return
convnode = getattr(model, "conv_" + obj.parent.name)[0]
obj.init = filtersampler(convnode.W.fpval.asnumpy(), obj.nfilter)
obj.fsize = convnode.fsize
def _bind_filterinit_hyperparam_main(args):
simple_model, modelinfo = args
model = load_model(modelinfo)
simple_model.visit(lambda path, obj: _bind_filterinit_hyperparam(path, obj, model))
return simple_model
def bind_filterinit_hyperparams(simple_model, modelinfo):
# Have to load the model and do substitutions in a subprocess because otherwise a CUDA context will get created
# in the MainProcess, which will screw everything up on the next fork() that follows
pool = multiprocessing.Pool(1)
bound_model = pool.map(_bind_filterinit_hyperparam_main, [(simple_model, modelinfo)])[0]
pool.close()
pool.join()
return bound_model
def simplify(simple_model, trainer, data, modeldir, ncalibration=18, calibration_steps=None, nfold=2, nsample=1, outdir=None):
# srcdir is the directory of previously trained models
if not outdir:
outdir = "out"
globals._set_default_logging(outdir)
# Generate predictions for each full model on the given input sequences.
predictions = predict(modeldir, data, outdir=modeldir)
# For each full model, train several simplified models and take the best
for modelname, modelinfo in load_modelinfos(modeldir, include=data.targetnames).iteritems():
# simple_model will be trained on *predictions* of the input data,
# so here we replace the targets of that data with predictions
simple_data = copy.copy(data)
simple_data.targetnames = [modelname]
simple_data.targets = predictions[modelname].copy()
simple_data.Y = predictions[modelname].copy()
simple_data.Ymask[:] = True
# Use the 'calibration' phase to select hyperparameters, where we
# have made "which subset of filters to initialize with" one of
# the hyperparameters
bound_model = bind_filterinit_hyperparams(simple_model, modelinfo)
calibrate_trainer = copy.copy(trainer)
if calibration_steps is not None:
calibrate_trainer.max_steps = calibration_steps
calibration = calibrate(bound_model, calibrate_trainer, simple_data, outdir=outdir, nfold=nfold, ncalibration=ncalibration)
# Train final model
train(bound_model, trainer, simple_data, calibration, outdir=outdir, nsample=nsample, nfold=nfold)
return
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/simplify.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
# gradmap.py
#
import os
import os.path
import sys
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from . import util
import deepity
import deepity.tape2logo
import matplotlib
matplotlib.rcParams.update({'font.size': 9,
'font.family': 'sans serif',
'text.usetex' : False})
if (not os.environ.has_key("DISPLAY")) and (not os.environ.has_key("HOMEDRIVE")):
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib import colors as colors
from matplotlib import cm
from matplotlib.figure import Figure
gridimg_pal_RedBlue = matplotlib.colors.LinearSegmentedColormap('RedBlue', {
'red': ((0.00, 0.0, 0.0),
(0.50, 1.0, 1.0),
(1.00, 1.0, 1.0)),
'green': ((0.00, 0.0, 0.0),
(0.50, 1.0, 1.0),
(1.00, 0.0, 0.0)),
'blue': ((0.00, 1.0, 1.0),
(0.50, 1.0, 1.0),
(1.00, 0.0, 0.0)),
},gamma =1.0)(np.arange(256))
_redbluepal = np.asarray(gridimg_pal_RedBlue[:,:3]*255, dtype=np.uint8)
gridimg_pal_Black = matplotlib.colors.LinearSegmentedColormap('Black', {
'red': ((0.00, 1.0, 1.0),
(0.50, 1.0, 1.0),
(1.00, 0.0, 0.0)),
'green': ((0.00, 1.0, 1.0),
(0.50, 1.0, 1.0),
(1.00, 0.0, 0.0)),
'blue': ((0.00, 1.0, 1.0),
(0.50, 1.0, 1.0),
(1.00, 0.0, 0.0)),
},gamma =1.0)(np.arange(256))
_blackpal = np.asarray(gridimg_pal_Black[:,:3]*255, dtype=np.uint8)
gridimg_pal_Gray = matplotlib.colors.LinearSegmentedColormap('Gray', {
'red': ((0.00, 0.0, 0.0),
(0.50, 0.0, 0.0),
(1.00, 1.0, 1.0)),
'green': ((0.00, 1.0, 1.0),
(0.50, 0.0, 0.0),
(1.00, 1.0, 1.0)),
'blue': ((0.00, 1.0, 1.0),
(0.50, 0.0, 0.0),
(1.00, 1.0, 1.0)),
},gamma =1.0)(np.arange(256))
_graypal = np.asarray(gridimg_pal_Gray[:,:3]*255, dtype=np.uint8)
_fixedwidth_font = ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/cour.ttf", 10)
#_fixedwidth_font = ImageFont.load_default()
import scipy
import scipy.misc
def _zoomimg(I, zoom, smooth=False):
if smooth:
img = Image.fromarray(I) if isinstance(I,np.ndarray) else I
img = img.resize((int(img.size[0]*zoom),int(img.size[1]*zoom)), Image.ANTIALIAS)
I = np.array(img) if isinstance(I,np.ndarray) else img
else:
# Zoom 3x3
if isinstance(I,np.ndarray):
I = np.repeat(I,zoom,0)
I = np.repeat(I,zoom,1)
else:
I = I.resize((int(I.size[0]*zoom),int(I.size[1]*zoom)), Image.NEAREST)
return I
def _gray2rgb(I, pal=None):
if pal is None:
pal = _redbluepal
# Convert to colour
return np.array(scipy.misc.toimage(I, pal=pal).convert("RGBA"))
def _array2img(A, vmax):
if A.ndim==1:
A = A.reshape((-1,1))
I = A.T.copy()
I -= -vmax
I /= 2*vmax
I = np.maximum(0,I)
I = np.minimum(1,I)
I *= 255
I = np.uint8(I)
return I
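# _array2img linearly maps values from [-vmax, vmax] onto grayscale [0, 255],
# clamping anything outside that range, and returns the transposed image as uint8.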
def _save_seqimg(filename, X, dX, vmax, zoom=1, style="grow", pal=None, complement=False, trim=(0,0)):
trim = slice(trim[0],len(X)-trim[1])
X = X[trim]
dX = dX[trim]
if style != "tape":
# Use the tape2logo script
dXlogo = deepity.tape2logo.tape2logo(dX.T, height=45*zoom, letterwidth=6*zoom, vmax=vmax, refseq=X, style=style, complement=complement)
scipy.misc.imsave(filename, dXlogo)
return
# Otherwise, create a ticker-tape representation
I = _array2img(dX, vmax)
I = _zoomimg(I, 3)
I = _zoomimg(I, 4, True)
I = _zoomimg(I, 0.5, True)
I = _gray2rgb(I, pal=pal)
if True:
# Publish-quality version
Ipub = _array2img(dX, vmax)
Ipub = _zoomimg(Ipub, 5)
Ipub = _zoomimg(Ipub, 6, True)
Ipub = _zoomimg(Ipub, 0.5, True)
Ipub = _gray2rgb(Ipub, pal=pal)
dXpub = deepity.tape2logo.tape2logo(dX.T, height=100, letterwidth=15, vmax=vmax, refseq=X, style="grow", complement=complement)
# Add 1 pixel white border so that interpolation goes to white at the edges
hb = np.zeros_like(Ipub[:,:2,:])+210
hb[:,:,3] = 255
Ipub= np.hstack([hb, Ipub, hb])
vb = np.zeros_like(Ipub[:2,:,:])+210
vb[:,:,3] = 255
Ipub= np.vstack([vb, Ipub, vb])
if complement:
Ipub = Ipub[::-1,:,:]
hb = np.zeros_like(dXpub[:,:2,:])+255
dXpub= np.hstack([hb, dXpub, hb])
vb = np.zeros_like(dXpub[:2,:,:])+255
dXpub= np.vstack([vb, dXpub, vb])
pub_img = np.vstack([Ipub, vb, dXpub] if complement else [dXpub, vb, Ipub])
hb = np.zeros_like(pub_img[:,:1,:])+255
pub_img= np.hstack([hb, pub_img, hb])
vb = np.zeros_like(pub_img[:1,:,:])+255
pub_img= np.vstack([pub_img, vb])
scipy.misc.imsave(os.path.splitext(filename)[0]+"_pub.png", pub_img)
Ivec = _array2img((abs(dX).max(axis=1)-abs(dX).min(axis=1)).reshape((-1,1)), vmax)
Ivec = _zoomimg(Ivec, 3)
Ivec = _zoomimg(Ivec, 4, True)
Ivec = _zoomimg(Ivec, 0.5, True)
Ivec = _gray2rgb(Ivec, pal=_blackpal)
# Add 1 pixel border
hb = np.zeros_like(I[:,:1,:])+192
I = np.hstack([hb, I, hb])
vb = np.zeros_like(I[:1,:,:])+192
I = np.vstack([vb, I, vb])
hb = np.zeros_like(Ivec[:,:1,:])
Ivec = np.hstack([hb+255, Ivec, hb+255])
#Ivec = Ivec[:4]
colors = { 'A' : ( 0,205, 0),
'C' : ( 0, 30,205),
'G' : (245,175, 0),
'T' : (205, 0, 0),
'N' : (128,128,128) }
for synonym, base in [("a","A"),("c","C"),("g","G"),("t","T"),("u","T"),("U","T"),(".","N")]:
colors[synonym] = colors[base]
I = np.vstack([Ivec,
255*np.ones((1,I.shape[1],4),np.uint8),
255*np.ones((12,I.shape[1],4),np.uint8),
I])
I = Image.fromarray(I) # convert to Image so that we can use ImageDraw
draw = ImageDraw.Draw(I)
for j in range(len(X)):
draw.text((j*6+1,Ivec.shape[0]+1),X[j],colors[X[j]],font=_fixedwidth_font)
I = np.array(I)
scipy.misc.imsave(filename, I)
def _save_vecimg(filename, X, dX, vmax):
# Convert to colour
X = _array2img(X, vmax)
dX = _array2img(dX, vmax)
X = _zoomimg(X, 3)
X = _zoomimg(X, 4, True)
X = _zoomimg(X, 0.5, True)
dX = _zoomimg(dX, 3)
dX = _zoomimg(dX, 4, True)
dX = _zoomimg(dX, 0.5, True)
X = _gray2rgb(X)
dX = _gray2rgb(dX)
# Add 1 pixel border
hb = np.zeros_like(X[:,:1,:])
X = np.hstack([hb, X, hb])
dX = np.hstack([hb, dX, hb])
vb = np.ones_like(X[:1,:,:])
I = np.vstack([vb*0,
X,
vb*0,
vb*255,
vb*255,
vb*255,
vb*0,
dX,
vb*0])
scipy.misc.imsave(filename, I)
def save_gradientmaps(data, predictions, outdir, maxrow=50, zoom=1, want_indices=False, apply_sigmoid=False, want_sort=True, trim=(0,0)):
if not want_indices:
# Create main index of all targets
util.makepath(os.path.join(outdir))
index_html = open(os.path.join(outdir, "index.html"), "w")
index_html.write("<html><head><title>Gradient maps</title></head><body>\n")
index_html.write("<table cellspacing=0 cellpadding=5 border=1>\n")
index_html.write("<tr><th>Name</th></tr>\n")
# For each target that has a ".gmaps" entry in predictions, generate a report and
# add it to the index_html
indices = {}
for targetname in predictions:
if targetname.endswith(".gmaps"):
continue
targetdata = data[targetname]
rowidx = targetdata.rowidx
Z = predictions[targetname]
print targetname, Z
if targetdata.targetnames:
Y = targetdata.Y
else:
Y = np.zeros_like(Z)
Y[:] = np.nan
nrow = len(Z)
maxrow = min(maxrow,nrow)
# Only use the first one
Y = Y[:,0]
Z = Z[:,0]
if want_sort:
roworder = np.argsort(-Z.ravel(), kind="mergesort")
else:
roworder = np.arange(len(Z))
if maxrow < nrow:
#roworder = roworder[:maxrow]
Ysum = np.nansum(Y)
if np.isnan(Ysum) or Ysum < 1:
Ysum = len(Z)
            nrow_pos = max(nrow//3,min(nrow,int(Ysum)))  # show at least a third of the rows, up to the estimated number of positives
roworder = [roworder[i] for i in range(0,nrow_pos,max(1,nrow_pos//maxrow))]
if want_indices:
indices[targetname] = roworder
continue
gmaps = predictions[targetname+".gmaps"]
inputnames = sorted(gmaps.keys())
# Add this target to the index_html
index_html.write("<tr><td><a href=\"%s/index.html\">%s</a></td></tr>\n" % (targetname, targetname))
# Determine the min/max gradmap value range, across all inputs
vmax = -np.inf
vmin = np.inf
for inputname in inputnames:
gmap_values = gmaps[inputname]
for row in roworder:
X,dX = gmap_values[row]
# First, for any sequences, subtract the channel mean to account for the
# fact that the sum of channel inputs must equal one
if isinstance(X,str):
#dX -= dX.mean(axis=1).reshape((-1,1)) # sense map
idx = util.acgt2ord(X).ravel()
for i,base in enumerate(idx):
if base in range(0,4):
dX[i,:] -= dX[i,base]
else:
dX[i,:] -= dX[i,:].mean()
# Now, take note of the absmax value
vmax = max(vmax, dX.max())
vmin = min(vmin, dX.min())
#vmin,vmax = 0.0, 1.0 # mutation map
# Create HTML report for this specific target
util.makepath(os.path.join(outdir, targetname))
target_html = open(os.path.join(outdir, targetname, "index.html"), "w")
target_html.write("<html><head><title>Gradient maps - %s</title>\n" % targetname)
target_html.write("""
<script language="javascript">
function show_logo()
{
var smaps = document.getElementsByClassName('sensitivitymap')
for (var i = 0; i < smaps.length; i++)
if (smaps[i].src.search("_tape") != -1)
smaps[i].src = smaps[i].src.replace("_tape","_logo").replace("_pfm","_logo");
}
function show_tape()
{
var smaps = document.getElementsByClassName('sensitivitymap')
for (var i = 0; i < smaps.length; i++)
if (smaps[i].src.search("_logo") != -1)
smaps[i].src = smaps[i].src.replace("_logo","_tape").replace("_pfm","_tape");
}
function show_pfm()
{
var smaps = document.getElementsByClassName('sensitivitymap')
for (var i = 0; i < smaps.length; i++)
if (smaps[i].src.search("_pfm") != -1)
smaps[i].src = smaps[i].src.replace("_logo","_pfm").replace("_tape","_pfm");
}
function onkeypress() {
var smaps = document.getElementsByClassName('sensitivitymap');
for (var i = 0; i < smaps.length; i++) {
if (smaps[i].src.search("_tape") != -1) {
show_logo();
break;
}
if (smaps[i].src.search("_logo") != -1) {
//show_pfm();
show_tape();
break;
}
//if (smaps[i].src.search("_pfm") != -1) {
// show_tape();
// break;
//}
}
}
document['onkeypress'] = onkeypress
</script></head><body>
""")
target_html.write("<h2>%s</h2><hr/>\n"%targetname)
target_html.write("<input type=\"button\" value=\"logo\" onclick=\"show_logo();\"/>")
target_html.write("<input type=\"button\" value=\"tape\" onclick=\"show_tape();\"/>")
target_html.write("range = [%.3f, %.3f]<br/>\n"%(vmin,vmax))
target_html.write("<table cellspacing=0 cellpadding=5 border=1 style=\"font-size:7pt\">\n")
target_html.write("<tr><th>Row#</th><th>Y</th><th>Z</th>%s</tr>\n" % "".join(["<th align=left>%s<br/>%s</th>"%(name[1:],name) for name in inputnames]))
np.savez(os.path.join(outdir, targetname, "X.npz"),
X=np.array([{"row" : rowidx[row]+1, "X" : gmaps["dX_seq"][row][0], "dX" : gmaps["dX_seq"][row][1], "Y" : Y[row], "Z" : Z[row] } for i,row in enumerate(roworder)], dtype=object))
# For each row in the data/predictions, write out a sequence and its corresponding gradient
for i,row in enumerate(roworder):
# For each row, loop over the inputnames
target_html.write("<tr><td>%d</td><td>%.4f</td><td>%.4f</td>"%(rowidx[row]+1, Y[row], Z[row]))
for inputname in inputnames:
X,dX = gmaps[inputname][row]
if isinstance(X,str):
# Format X like a sequence, in two styles
dXfilename_logo = "%06d_%s_logo.png" % (i, inputname)
dXfilename_tape = "%06d_%s_tape.png" % (i, inputname)
dXfilename = dXfilename_tape
dXsig = 1./(1+np.exp(-dX)) if apply_sigmoid else dX
complement = False
vrange = max(-vmin,vmax)
_save_seqimg(os.path.join(outdir, targetname, dXfilename_logo), X, dXsig, vrange, zoom=zoom, style="grow", complement=complement, trim=trim)
_save_seqimg(os.path.join(outdir, targetname, dXfilename_tape), X, dX, vrange, zoom=zoom, style="tape", complement=complement, trim=trim) # sensemap
else:
# Format X like a vector
dXfilename = "%06d_%s.png" % (i, inputname)
_save_vecimg(os.path.join(outdir, targetname, dXfilename), X, dX, vmax)
target_html.write("<td><img src=\"%s\" class=\"sensitivitymap\"/></td>" % (dXfilename))
target_html.write("</tr>\n")
target_html.close()
index_html.close()
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/gradmap.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import numpy as np
import time
import os
import os.path
#################################################
# GATC string <-> 0123 array conversion
_ord2acgt = ['N']*256; # lookup table for str.translate, so that 0123 => ACGT
_ord2acgt[0] = 'A';
_ord2acgt[1] = 'C';
_ord2acgt[2] = 'G';
_ord2acgt[3] = 'T';
_ord2acgt = "".join(_ord2acgt)
_acgt2ord = ['\xff']*256; # lookup table for str.translate, so that ACGT => 0123
_acgt2ord[ord('a')] = _acgt2ord[ord('A')] = '\x00';
_acgt2ord[ord('c')] = _acgt2ord[ord('C')] = '\x01';
_acgt2ord[ord('g')] = _acgt2ord[ord('G')] = '\x02';
_acgt2ord[ord('t')] = _acgt2ord[ord('T')] = '\x03';
_acgt2ord[ord('u')] = _acgt2ord[ord('U')] = '\x03';
_acgt2ord = "".join(_acgt2ord)
_acgtcomplement = ['\xff']*256; # lookup table for str.translate, so that ACGT => TGCA
_acgtcomplement[ord('a')] = _acgtcomplement[ord('A')] = 'T';
_acgtcomplement[ord('c')] = _acgtcomplement[ord('C')] = 'G';
_acgtcomplement[ord('g')] = _acgtcomplement[ord('G')] = 'C';
_acgtcomplement[ord('t')] = _acgtcomplement[ord('T')] = 'A';
_acgtcomplement[ord('u')] = _acgtcomplement[ord('U')] = 'A';
_acgtcomplement[ord('n')] = _acgtcomplement[ord('N')] = 'N';
_acgtcomplement = "".join(_acgtcomplement)
def acgt2ord(s):
"""
Convert a DNA/RNA string ("ACGT", with "U" treated as "T") into a numpy
row-vector of ordinals in range {0,1,2,3,255}, where 255 indicates "padding".
"""
x = s.translate(_acgt2ord)
return np.ndarray(shape=(1,len(x)),buffer=x,dtype=np.uint8)
def ord2acgt(x):
"""
Convert a vector of integral values in range {0,1,2,3}
to a DNA string ("ACGT"). Integers outside that range will
be translated to the "padding" character ("N").
"""
s = str(np.asarray(x,dtype=np.uint8).data).translate(_ord2acgt)
return s
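# Illustrative round trip between strings and ordinal arrays (outputs follow from the
# lookup tables above):
#   >>> acgt2ord("ACGT")
#   array([[0, 1, 2, 3]], dtype=uint8)
#   >>> ord2acgt([0, 1, 2, 3])
#   'ACGT'
#   >>> ord2acgt([0, 255, 3])    # out-of-range ordinals become the padding character 'N'
#   'ANT'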
def ord2mask(x):
"""
Convert a vector of length N with integral values in range {0,1,2,3}
into an Nx4 numpy array, where for example "2" is represented by
row [0,0,1,0].
"""
mask = np.zeros((x.size,4))
mask[np.arange(x.size),x] = 1
return mask
def acgt2mask(s):
"""
Convert a DNA/RNA string ("ACGT") of length N into an Nx4 numpy
array, where for example "G" is represented by row [0,0,1,0].
"""
return ord2mask(acgt2ord(s))
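# Illustrative one-hot encoding (rows follow the A,C,G,T column order used above):
#   acgt2mask("AG") is a 2x4 array with rows
#     [1, 0, 0, 0]   # 'A'
#     [0, 0, 1, 0]   # 'G'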
def acgtcomplement(s):
"""
Complement a DNA string ("ACGT" to "TGCA").
"""
return s.translate(_acgtcomplement)
def revcomp(s):
"""
Reverse complement a DNA string ("ATTGC" to "GCAAT").
"""
return s.translate(_acgtcomplement)[::-1]
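# Quick examples of the complement helpers:
#   acgtcomplement("ACGT")  -> "TGCA"
#   revcomp("ATTGC")        -> "GCAAT"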
##########################################
def str2intlist(arg):
if arg is None:
return None
ints = []
for part in arg.split(","):
if '-' in part: id0,id1 = part.split('-')
else: id0 = id1 = part
assert int(id0) >= 0 and int(id0) <= int(id1)
ints.extend(range(int(id0), int(id1)+1))
return sorted(set(ints))
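# Example: str2intlist("1,4-6,4") -> [1, 4, 5, 6]   (ranges are inclusive; duplicates are removed)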
def str2time(arg):
if arg.endswith("s"): return float(arg.rstrip("s"))
if arg.endswith("m"): return float(arg.rstrip("m"))*60.
if arg.endswith("h"): return float(arg.rstrip("h"))*60.*60.
if arg.endswith("d"): return float(arg.rstrip("d"))*60.*60.*24
raise ValueError("Could not parse time argument \"%s\". Must end in 's','m','h', or 'd'." % arg)
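# Examples: str2time("90s") -> 90.0,  str2time("2m") -> 120.0,  str2time("1.5h") -> 5400.0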
def makepath(dir):
if os.path.exists(dir):
return dir
retries = 8
while retries >= 0:
try:
time.sleep(0.001)
os.makedirs(dir)
retries = -1
except Exception, e:
if retries == 0:
raise
retries -= 1
return dir
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/util.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import os
import os.path
import sys
import time
import copy
import logging
import deepity
_logfile = None
_randseed = None
_devices = [0]
_allow_multiprocessing = True
flags = deepity.globals.flags
def set_devices(devices):
"""List of device IDs that can be used by worker processes."""
global _devices
_devices = devices
def set_randseed(seed):
"""Random seed used each time a training session begins."""
global _randseed
_randseed = seed
def set_multiprocessing(enabled):
global _allow_multiprocessing
_allow_multiprocessing = enabled
deepity.globals.set_multiprocessing(enabled)
def set_logging(outdir):
global _logfile
_logfile = os.path.join(outdir, "kangaroo.log")
deepity.set_logging(_logfile, level=1)
logging.debug("\n----------------------------- %s -----------------------------" % time.strftime("%y-%m-%d %H-%M-%S",time.localtime()))
def _set_default_logging(outdir):
global _logfile
if _logfile is None:
set_logging(outdir)
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/globals.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
# predict.py
#
import os
import os.path
import re
import gc
import glob
import signal
import logging
import traceback
import multiprocessing
import cPickle as cp
import deepity
import warnings
import numpy as np
import smat as sm
from . import globals
from . import util
def load_modelinfos(modeldir, include=None):
modelinfos = {}
pfmfiles = glob.glob(os.path.join(modeldir, "*.pfm"))
if pfmfiles:
# The directory is full of PFMs, so simply return the list of PFM files
for pfmfile in pfmfiles:
match = re.findall('(\w+)_AB\.pfm', os.path.basename(pfmfile))
if len(match) == 0:
match = re.findall('([-\w]+)\.pfm', os.path.basename(pfmfile))
if len(match) == 1:
modelid = match[0]
modelinfos[modelid] = { "PFM" : pfmfile }
else:
# The directory was generated by Kangaroo, so visit each subdirectory
# and identify the model it's associated with.
for dir in os.listdir(modeldir):
path = os.path.join(modeldir, dir)
if os.path.isdir(path) and os.path.isfile(os.path.join(path, "model.pkl")):
modelid = os.path.basename(dir)
modelinfos[modelid] = { "model" : path }
# Filter out any model ids that weren't explicitly mentioned in the "include" list.
if include:
# The user asked to include only models from a specific list,
# so parse the list and then filter out the model ids not mentioned.
if isinstance(include, str):
if os.path.isfile(include):
with open(include) as f:
include = [line.split("#")[0].rstrip() for line in f.readlines()] # Ignore comments "# ..."
else:
include = include.split(',')
for id in modelinfos.keys():
if id not in include:
modelinfos.pop(id)
if not modelinfos:
raise ValueError("Could not find any models that match criteria.")
return modelinfos
def load_model(modelinfo):
if "PFM" in modelinfo:
path = modelinfo["PFM"]
with open(path) as f:
f.readline() # Throw away first line
model = np.asarray([[float(x) for x in line.rstrip().split('\t')[1:]] for line in f.readlines()])
elif "model" in modelinfo:
path = modelinfo["model"]
with open(os.path.join(path, "model.pkl")) as f:
model = cp.load(f)
else:
raise NotImplementedError("Unrecognized modelinfo. Expected it to contain 'PFM' or 'model' value.")
return model
def gen_pfm_predictionmaps(pfm, data, windowsize=24, want_gmaps=False):
if len(data.sequencenames) > 1:
raise ValueError("Cannot apply PFMs to multi-sequence data.")
predictionmaps = []
gmaps = []
nseq = len(data)
strands = ["fwd", "rev"] if "reverse_complement" in globals.flags else ["fwd"]
for si in range(nseq):
pmaps = []
for strand in strands:
s = getattr(data, data._seqattrnames(0)[0])[si]
if strand == "rev":
s = util.revcomp(s)
k = len(pfm)
x = util.acgt2ord(("N"*(k-1)) + s + ("N"*(k-1))).ravel()
n = len(x)
idx = np.arange(k)
pfmN = np.hstack([pfm, 0.25*np.ones((len(pfm),1),pfm.dtype)])
x = np.minimum(x, 4)
pmap = np.array([np.prod(pfmN[idx, x[i:i+k]]) for i in range(max(1, n-k+1))], dtype=np.float32)
if strand == "rev":
pmap = pmap[::-1]
pmaps.append(pmap)
if len(pmaps) > 1:
pmaps[0] = pmaps[0]+pmaps[1]
predictionmaps.append(pmaps[0])
return (predictionmaps, gmaps) if want_gmaps else (predictionmaps, None)
def maskout_revcomp(Z):
Zmask = None
if "reverse_complement" in globals.flags:
Zcols = Z.reshape((-1,2))
Zmask = np.zeros_like(Zcols,np.bool)
if globals.flags["reverse_complement"] == "force":
Zmask[:,0] = False
Zmask[:,1] = True
# Hack to visualize TCF7L2/Rfx3 reverse strand on same color scale as forward strand
#n = len(Zcols)
#Zmask[0*n/4:1*n/4,0] = True
#Zmask[1*n/4:2*n/4,1] = True
#Zmask[2*n/4:3*n/4,0] = True
#Zmask[3*n/4:4*n/4,1] = True
else:
Zmask[:,0] = Zcols[:,0] >= Zcols[:,1]
Zmask[:,1] = Zcols[:,0] < Zcols[:,1]
Zmask = Zmask.reshape((-1,1))
Z = Z[Zmask.ravel()]
return Z, Zmask
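# Illustrative behavior when "reverse_complement" is enabled (rows alternate fwd/rev):
#   Z = [[0.9], [0.4], [0.1], [0.7]] reshapes to pairs (0.9, 0.4) and (0.1, 0.7); the mask
#   keeps the larger of each pair, so the returned Z is [[0.9], [0.7]].
#   Under the "force" setting, only the reverse-strand rows (here 0.4 and 0.7) are kept.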
def gen_convnet_predictions(model, data, want_gmaps=False):
# We must feed each sequence through the model several times
# by sliding a window along the sequence and applying the model at each position.
# That generates a prediction map, from which we can take max, sum, etc.
predictions = []
gmaps = {}
batches = data.asbatches(batchsize=2048, reshuffle=False)
for batch in batches:
args = batch.input_data()
args["want_bprop_inputs"] = bool(want_gmaps)
if isinstance(model.Z.origin().node,deepity.std.softmaxnode):
args["bprop_inputs_loss"] = deepity.std.nll()
else:
args["bprop_inputs_loss"] = deepity.std.mse()
globals.flags.push("collect_argmax",None)
outputs = model.eval(**args)
I = globals.flags.pop("collect_argmax")
Z = outputs['Z'].asnumpy()
Z, Zmask = maskout_revcomp(Z)
if Zmask is not None:
if "collect_Zmask" in globals.flags:
global_Zmask = globals.flags.pop("collect_Zmask")
if not isinstance(global_Zmask,np.ndarray):
global_Zmask = Zmask
else:
global_Zmask = np.vstack([global_Zmask, Zmask])
globals.flags.push("collect_Zmask", global_Zmask)
predictions.append(Z)
# If user wants gradientmaps, then for every sequence we need one
if want_gmaps:
for key in args:
dkey = "d"+key
if outputs.get(dkey,None) is not None:
X = args[key].asnumpy()
dX = outputs[dkey].asnumpy()
if X.dtype == np.uint8: # Is it a sequence of ordinals (bytes)?
pad = data.requirements.get("padding",0)
R = args["R"+key[1:]].asnumpy() # regions associated with X
if want_gmaps == "finite_diff":
is_rc = "reverse_complement" in globals.flags
#globals.flags.push("force_argmax",I)
rcindex = [3,2,1,0]
oldF = args['F']
# If user specifically asked for finite differences, not instantaneous gradient,
# then we need to explicitly mutate every position, generate predictions, and
# subtract the result from Z to find the actual delta for each base
Xlen = R[:,1]-R[:,0]
nbase = dX.shape[1]
for i in range(Xlen.max()):
for j in range(nbase):
mtX = X.copy()
mtF = args['F'].asnumpy().copy()
for k in range(len(R)):
a,b = R[k]
if i < b-a:
if (k % 2 == 0) or not is_rc:
mtX[pad+a+i] = j # mutate position i in sequence k (which starts at byte index a) to base j
else:
mtX[b-i-1] = rcindex[j]
mtF[k] = data._generate_dinuc_featurevec(mtX[pad+a:b])
args[key] = sm.asarray(mtX) # This time use the mutated X instead of the original
args['F'] = sm.asarray(mtF)
mtoutputs = model.eval(**args)
mtZ = mtoutputs['Z'].asnumpy()
mtZ, mtZmask = maskout_revcomp(mtZ)
dZ = mtZ-Z # output
dZ *= np.maximum(0,np.sign(np.maximum(Z,mtZ)))
for k in range(len(R)):
if (k % 2 == 0) or not is_rc:
a,b = R[k]
if i < b-a:
dX[pad+a+i,j] = dZ[(k//2) if is_rc else k]
#globals.flags.pop("force_argmax")
args['F'] = oldF
# Only include forward strand in finite_diff results
if is_rc:
dX = [(util.ord2acgt(X[a+pad:b]), dX[a+pad:b]) for a,b in R[np.arange(0,len(R),2)]]
else:
dX = [(util.ord2acgt(X[a+pad:b]), dX[a+pad:b]) for a,b in R]
else:
dX = [(util.ord2acgt(X[a+pad:b]), dX[a+pad:b]) for a,b in R]
if Zmask is not None:
dX = [dX[i] for i in range(len(dX)) if Zmask[i]]
else:
if Zmask is not None:
X = X[Zmask.ravel()]
dX = dX[Zmask.ravel()]
dX *= np.maximum(0,Z)
dX = [(X[i], dX[i]) for i in range(len(dX))]
if dkey not in gmaps:
gmaps[dkey] = []
gmaps[dkey] += dX
# Concatenate all numpy arrays if they're the same size
predictions = np.vstack(predictions)
return (predictions, gmaps) if want_gmaps else (predictions, None)
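# Note on the returned gmaps: each key is "d" + the input attribute name (e.g. "dX_seq" for a
# sequence column named "seq"). For sequence inputs, the value is a list with one
# (sequence_string, per-base gradient array) tuple per retained row, where the gradient array
# has one row per base and one column per channel (A,C,G,T); for plain feature inputs it is a
# list of (feature_vector, gradient_vector) tuples.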
def gen_convnet_predictionmaps(model, data, stride=1, windowsize=20, want_pmaps=False, want_gmaps=False):
# We must feed each sequence through the model several times
# by sliding a window along the sequence and applying the model at each position.
# That generates a prediction map, from which we can take max, sum, etc.
if len(data.sequencenames) > 1:
raise ValueError("Cannot currently use --scan on multi-sequence data.")
# Names of the attributes that we'll be manipulating to generate
# an artificial chunk of rows for gen_convnet_predictions.
Xname, Rname = data._seqattrnames(0)
# Each "chunk" will contain raw sequences from a subset of the data.
# For each of these sequences, a set of short sequences will then be generated
# by slicing out sliding windows from the raw sequence.
# Those new sequences will then be sent through gen_predictions, and
# we will take max/avg over appropriate sets of the resulting predictions.
predictionmaps = []
gradientmaps = []
max_chunksize = 32
nchunk = (len(data) + max_chunksize - 1) // max_chunksize
for ci in range(nchunk):
# Slice our data attributes row-wise, according to the chunk index
chunk = data[ci*max_chunksize:(ci+1)*max_chunksize]
chunksize = len(chunk)
rawseqs = [s for s in getattr(chunk, Xname)]
# Generate a list of subwindows along each sequence in this chunk
chunk_X = []
for rawseq in rawseqs:
padseq = "N"*(windowsize-1) + rawseq + "N"*(windowsize-1)
chunk_X.append([padseq[i:i+windowsize] for i in range(0,max(1,len(padseq)-windowsize+1), stride)])
setattr(chunk, Xname, sum(chunk_X,[])) # Append all the sublists into one giant list
nwindows = [ len(seqlist) for seqlist in chunk_X ]
for attrname in data.data_attrs() + ("rowidx","foldids","features"):
if attrname != Xname:
attr = getattr(chunk, attrname)
if attr is not None:
if isinstance(attr, np.ndarray):
setattr(chunk, attrname, np.vstack([np.repeat(attr[i,:], nwindows[i], axis=0).reshape((-1,attr.shape[1])) for i in range(chunksize)]))
else:
setattr(chunk, attrname, [attr[i] for i in range(chunksize) for j in range(nwindows[i])])
chunk.sequences = getattr(chunk, Xname)
chunk.targets = chunk.Y
chunk_predictions,chunk_gmaps = gen_convnet_predictions(model, chunk, want_gmaps=want_gmaps)
chunk_regions = np.cumsum([0]+nwindows)
for i in range(chunksize):
# Append a new prediction map. One prediction value per window position.
# All windows are subsequences of original sequence i.
pmap = chunk_predictions[chunk_regions[i]:chunk_regions[i+1]].ravel().copy()
predictionmaps.append(pmap)
if chunk_gmaps is not None:
# Build a new gradient map. This is done by aligning all the individual window gradient maps
# to their positions in the original sequence, and taking the average gradient value at each location.
gmap = np.zeros((len(rawseqs[i])+windowsize-1, 4), chunk_predictions[0].dtype)
denom = np.zeros_like(gmap)
dX = chunk_gmaps["dX_seq"] # TODO: this assumes one single sequence attribute called X_seq
start_window_idx = chunk_regions[i]
end_window_idx = chunk_regions[i+1]
j_pmap = np.argmax(pmap)
for j in range(0,end_window_idx-start_window_idx):
#if j != j_pmap: continue
dX_j = dX[start_window_idx+j][1][:min(windowsize, (end_window_idx-start_window_idx-j)*stride)]
gmap[ j*stride:j*stride+windowsize] += dX_j
denom[j*stride:j*stride+windowsize] += np.ones_like(dX_j)
gmap /= denom
gmap = np.nan_to_num(gmap)
gradientmaps.append(gmap)
return (predictionmaps, gradientmaps) if want_gmaps else (predictionmaps, None)
_workerid = 0 # This will end up being assigned a different value in each worker process, via the _predict_worker_init initializer
def is_pfm(model):
return isinstance(model, np.ndarray)
_predict_worker_inst = None
def _predict_worker_init(devices, global_flags):
global _predict_worker_inst
_predict_worker_inst = predict_worker(devices, global_flags)
def _predict_worker_main(params):
global _predict_worker_inst
return _predict_worker_inst(params)
def _predict_worker_delete():
global _predict_worker_inst
del _predict_worker_inst
_predict_worker_inst = None
gc.collect()
class predict_worker(object):
def __init__(self, devices, global_flags):
global _workerid
signal.signal(signal.SIGINT, signal.SIG_IGN) # Keyboard interrupts go up to main process
globals.set_devices(devices) # On windows we need to do this because we didn't actually fork
globals.flags.copy_from(global_flags)
process = multiprocessing.current_process()
if process.name == "MainProcess":
_workerid = 0
else:
process_type, process_id = process.name.split("-") # Get unique 1-based "process_id", which gets larger every time a new pool is created
_workerid = (int(process_id)-1) % len(devices) # Get unique 0-based "worker_id" index, always in range {0,...,nprocess-1}
# This function is the entry point of a worker process.
#logging.info("prediction %d on device %d" % (_workerid, globals._devices[_workerid]))
sm.set_backend_options(device=globals._devices[_workerid])
def __del__(self):
process = multiprocessing.current_process()
if process.name != "MainProcess":
sm.destroy_backend()
def __call__(self, params):
global _workerid
try:
modelid, modelinfo, scan, stride, data, outdir, verbose, want_pmaps, want_gmaps = params
# Let the user know what device is working on what modelid
if verbose:
print "%d:%s" % (_workerid, modelid)
# Load the first model
model = load_model(modelinfo)
if not is_pfm(model):
data.requirements = model.data_requirements()
data._reversecomplement = False
if not data.preprocessors:
data.load_preprocessors(modelinfo.values()[0])
predictionmaps = None
# Generate a prediction for every single sequence in the datasrc
if is_pfm(model):
#if want_gmaps:
# raise NotImplementedError("gradientmaps not supported for PFM models")
predictions_direct = np.repeat([[np.nan]], len(data), axis=0)
pmaps, gmaps = gen_pfm_predictionmaps(model, data, windowsize=scan, want_gmaps=want_gmaps)
else:
# Generate "direct" predictions and, if requested, also generate
# "prediction maps" by scanning the model
# along the sequence.
if scan:
predictions_direct, _ = gen_convnet_predictions(model, data)
pmaps, gmaps = gen_convnet_predictionmaps(model, data, windowsize=scan, stride=stride, want_gmaps=want_gmaps) # each sequence gets a gmap, and each gmap is itself a list, with one entry per window position along the corresponding sequence
else:
assert not want_pmaps, "In direct evaluation mode, it does not make sense to ask for a predictionmap (pmap)"
predictions_direct, gmaps = gen_convnet_predictions(model, data, want_gmaps=want_gmaps) # each sequence gets a gmaps, which is just a single array from directly applying the sequence
if scan or is_pfm(model):
# In scan mode, we generate several final predictions from the prediction map (take max, take avg etc)
predictions = {}
predictions[modelid+".direct"] = predictions_direct
predictions[modelid+".max"] = np.asarray([np.max(pmap) for pmap in pmaps])
predictions[modelid+".avg"] = np.asarray([np.mean(pmap) for pmap in pmaps])
predictions[modelid+".sum"] = np.asarray([np.sum(pmap) for pmap in pmaps])
if want_pmaps:
predictions[modelid+".pmaps"] = pmaps
if want_gmaps:
predictions[modelid+".gmaps"] = gmaps
else:
# In direct mode, we just report a single prediction
predictions = { modelid : predictions_direct }
if want_gmaps:
predictions[modelid+".gmaps"] = gmaps
return predictions
except Exception as err:
traceback_str = traceback.format_exc()
logging.info(err.message + "\n" + traceback_str) # Do not let the error propagate out of the worker; return it so the parent process can report it.
if not globals._allow_multiprocessing:
raise
return (err,traceback_str)
def _check_worker_result(result):
if isinstance(result, tuple) and isinstance(result[0], Exception):
worker_exception, traceback_str = result
quit("Error in Worker...\n" + worker_exception.message + "\n" + traceback_str)
return result
def predict(data, modeldir, outdir, include=None, scan=None, stride=1, verbose=False,
want_pmaps=False, want_gmaps=False):
if not isinstance(data,dict):
data = { id : data.astargets([id]) for id in data.targetnames }
if include is None:
include = []
include = include + data.keys()
globals._set_default_logging(outdir)
modelinfos = load_modelinfos(modeldir, include)
# Generate a process for each device we're allowed to use
nmodel = len(modelinfos)
nworker = len(globals._devices)
# Each worker is invoked with a model id, path to the model's pickle file, and a datasource with corresponding targets
workerargs = [(id, modelinfos[id], scan, stride, data[id], outdir, verbose, want_pmaps, want_gmaps) for id in sorted(modelinfos.keys())]
if globals._allow_multiprocessing:
pool = multiprocessing.Pool(nworker, initializer=_predict_worker_init, initargs=(globals._devices, globals.flags))
try:
predictions = {}
for worker_predictions in pool.map(_predict_worker_main, workerargs):
predictions.update(_check_worker_result(worker_predictions))
except:
pool.terminate()
pool.join()
raise
else:
pool.close()
pool.join()
else:
# For interactive debugging
_predict_worker_init([0],globals.flags)
predictions = {}
for workerarg in workerargs:
predictions.update(_check_worker_result(_predict_worker_main(workerarg)))
_predict_worker_delete()
return predictions
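# Minimal usage sketch (file names and model ids below are hypothetical placeholders, not
# part of this repository):
#   data  = kangaroo.data.datasource.fromtxt("sequences.tsv", targetfile="targets.tsv")
#   preds = predict(data, "out/models", "out/predict", scan=20)
#   preds["SomeModelId.direct"]   # prediction from applying the model to the full sequence
#   preds["SomeModelId.max"]      # per-sequence max over the scanned prediction map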
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/predict.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import os.path
import sys
_parentdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(_parentdir, "smat_py"))
sys.path.append(os.path.join(_parentdir, "deepity"))
from .data import datasource, loadtxt
from .model import sequencenet, loadcfg
from .train import train, calibrate, load_calib, save_calib
from .simplify import simplify
from .predict import predict, load_modelinfos
from .gradmap import save_gradientmaps
from .statistics import statistics
from .globals import set_randseed, set_logging, set_devices, flags
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/__init__.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import deepity
from . import _ext
import smat as sm
###############################################################
def loadcfg(cfgfile, *args):
return deepity.io.load(cfgfile, *args)
class sequencenet(deepity.supernode):
"""
A basic model where the input is:
- several sequences X0..Xk
- a single feature vector F
The output is computed by convolving a separate
convnet over each input sequence, then stacking the
output of each convnet along with F, and sending it through
the outputnet to generate prediction Z.
X0 -- convnet0 --+-- outputnet -> Z
... |
Xk -- convnetk --+
|
F -------------- +
"""
def __init__(self, convnets, outputnet, combiner_size=None, combiner_decay=None, combiner_init=None, combiner_init_mu=None, combiner_start=None, featurenet_size=32):
# By default, the combiner layer has as many hidden units as there are featuremaps
if not combiner_size:
combiner_size = sum([convnet.Z.shape[1] for convnet in convnets])
# Create a combiner node that does two things:
# 1. it implicitly concatenates all the input matrices,
# but does so in a way that supports self.ninst>1
# (i.e. it is aware that groups of columns of input matrices
# interleave separate model instances)
#
# 2. acts as a fully-connected layer between the concatenated
# inputs and the combiner's output.
#
self.combiner = deepity.std.combine(len(convnets)+1,
size = combiner_size,
ishape = (1,-1),
decay = combiner_decay,
init = combiner_init,
init_mu = combiner_init_mu,
start_training = combiner_start)
self.outputnet = outputnet
self.combiner.Z >> outputnet.X # Connect combiner node to the outputnet's input
# Connect the convnets to the combiner's inputs.
# Each output plug "convnet[i].Z" is connected to input plug "combiner.Xi"
for i,convnet in enumerate(convnets):
convnet_attrname = "conv"+("_"+convnet.name if convnet.name else str(i))
self.__dict__[convnet_attrname] = convnet
convnet.Z >> getattr(self.combiner, "X%d"%i) # Connect to the ith input attribute of combiner
# Create a linear node that:
# - Has an input plug X that will be renamed "F" and thereby
# connected to the separate "features" vector.
# - Forwards the features vector to the combiner's LAST input
# position (hence the len(convnets)+1)
#
self.featurenet = deepity.std.linear()
"""
self.featurenet = deepity.std.chain([
deepity.std.full(size = featurenet_size,
init = 0.005,
oshape = (1,featurenet_size)),
deepity.std.bias(viz = True),
deepity.std.relu()
])
self.featurenet[0].X.trainable = False
"""
self.featurenet.Z >> getattr(self.combiner, "X%d"%len(convnets))
# Call supernode constructor to create the (renumbered) public plugs.
super(sequencenet,self).__init__(convnets + [self.featurenet, self.combiner, self.outputnet])
# Finally, rename some of the plugs after their parent convnet node,
# so that the datasource's attributes will automatically
# be connected to the convnet with the matching plug name.
# Convnet i's input plug "convnet[i].X" will end up
# being named "Xi" on this supernode.
for i,convnet in enumerate(convnets):
Xp = getattr(self,"X%d"%i)
Rp = getattr(self,"R%d"%i,None) or getattr(self,"R")
assert convnet.path in Xp.origin().node.path
Xp.rename(("X_%s" % convnet.name) if convnet.name else ("X%d"%i))
Rp.rename(("R_%s" % convnet.name) if convnet.name else ("R%d"%i))
# Rename the featurenet's input attribute "F"
getattr(self,"X%d"%len(convnets)).rename("F")
return
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/model.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import os
import os.path
import copy
import glob
import time
import logging
import numpy as np
import shutil
import deepity
import deepity.hpsearch
from . import util
from . import globals
def getworkdir(outdir, targetname):
dir = "%s/%s" % (outdir, targetname)
return dir
def load_calib(filename):
return deepity.load_hparams_result(filename)
def save_calib(filename, calib):
deepity.save_hparams_result(filename, calib)
def load_calib_samples(filename):
# Save one calibration sample per line
samples = []
with open(filename) as f:
for line in f:
args = eval(line,{ "nan" : np.nan })
samples.append(deepity.hpsearch.sample(args[1], args[0]))
return samples
def save_calib_samples(filename, mode, samples):
# Save one calibration sample per line
util.makepath(os.path.dirname(filename))
with open(filename, mode) as f:
for sample in samples:
f.write(str([sample.metrics, sample.params])+"\n")
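# Each line of the calib file is the repr of [metrics, params]; an illustrative (made-up) line:
#   [{'loss': 0.412}, {'lr': 0.005, ':cfgname': 'cfgA'}]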
def calibrate(cfgs, data, outdir, nfold=1, allfolds=True, ncalib=10, auxfilter=None, append=False):
globals._set_default_logging(outdir)
if not append:
for targetname in data.targetnames:
workdir = getworkdir(outdir, targetname)
if os.path.exists(workdir+"/calib.all.txt"):
os.remove(workdir+"/calib.all.txt")
best_hparams = {}
for cfgname, cfg in cfgs.iteritems():
samples = deepity.hypertrain(cfg["model"],
cfg["trainer"],
data,
outdir=outdir,
nfold=nfold,
allfolds=allfolds,
nsample=ncalib/len(cfgs),
devices=globals._devices,
auxfilter=auxfilter,
)
for targetname in data.targetnames:
workdir = getworkdir(outdir, targetname)
target_samples = samples.get(targetname, []) # Default is for the case when the model/trainer configs contained no hyperparameters at all
for sample in target_samples:
sample.params[":cfgname"] = cfgname
save_calib_samples(workdir+"/calib.all.txt", "a", target_samples)
'''
# Remember which hparams was best
if (targetname not in best_hparams) or (target_hparams.result < best_hparams[targetname].result):
best_hparams[targetname] = target_hparams
best_hparams[targetname].params[":cfgname"] = cfgname
for targetname in data.targetnames:
workdir = getworkdir(outdir, targetname)
save_calib(workdir, best_hparams[targetname])'''
def train(cfgs, data, calibdir, outdir, nfold=1, ntrial=1, auxfilter=None, metric_key="loss"):
globals._set_default_logging(outdir)
for targetname in sorted(data.targetnames):
calib_workdir = getworkdir(calibdir, targetname)
samples = load_calib_samples(calib_workdir+"/calib.all.txt")
# Get the best sample for this specific model
# cfgbest = deepity.hpsearch.get_best_sample([_ for _ in samples if _.params[":cfgname"] == cfgname], "loss")
cfgbest = deepity.hpsearch.get_best_sample(samples, metric_key, wantmax="loss" not in metric_key)
cfgname = cfgbest.params[":cfgname"]
cfg = cfgs[cfgname]
outpattern = [outdir, ("target","%s")]
outpattern += [("trial", "trial%s")]
if nfold > 1:
outpattern += [("fold","/fold%s")]
deepity.train(cfg["model"], cfg["trainer"],
data.astargets([targetname]),
hparams={targetname : cfgbest}, hparams_metric=metric_key,
outdir=outpattern,
nfold=nfold,
nsample=ntrial,
devices=globals._devices,
auxfilter=auxfilter,
dumpviz=False,
)
# Collect the performance of each trial
performances = []
for trial in range(ntrial):
instdir = deepity.getinstdir(outpattern, targetname, trial, None)
with open(instdir+"/metrics.txt") as f:
header = f.readline().rstrip().split() # discard column headers
for line in f:
line = line.rstrip().split()
metricname, trainvalue = line[:2]
if metricname == metric_key:
performances.append(float(trainvalue))
break
# Find the trial with best performance
besttrial = np.argmin(performances) if "loss" in metric_key else np.argmax(performances)
print "trial metrics:", performances
# Copy the best trial into the parent directory, and delete all other trials
# to save space and to not drive btsync so crazy.
instdir = deepity.getinstdir(outpattern, targetname, besttrial, None)
files = glob.glob(instdir+"/*")
for file in files:
dst = os.path.dirname(os.path.dirname(file))+"/"+os.path.basename(file)
if os.path.isdir(file):
if os.path.exists(dst):
shutil.rmtree(dst, ignore_errors=True)
shutil.copytree(file, dst)
else:
shutil.copyfile(file, dst)
time.sleep(0.1) # rmtree sometimes fails if the folder is scanned by btsync; this seems to help a bit
for i in range(len(performances)):
shutil.rmtree(deepity.getinstdir(outpattern, targetname, i, None), ignore_errors=True)
deepity.call_dumpviz(os.path.dirname(instdir))
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/train.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
# predict.py
#
import os
import os.path
import numpy as np
import numpy.random as npr
import scipy
import scipy.misc
import scipy.stats
import scipy.stats.mstats
def is_binary(y):
return np.all(np.logical_or(y==0, y==1))
def _calc_auc(z, y, want_curve = False):
z = z.ravel()
y = y.ravel()
assert len(z) == len(y)
m = ~np.isnan(y)
y = y[m]
z = z[m]
assert is_binary(y), "Cannot calculate AUC for non-binary targets"
order = np.argsort(z,axis=0)[::-1].ravel() # Sort by decreasing order of prediction strength
z = z[order]
y = y[order]
npos = np.count_nonzero(y) # Total number of positives.
nneg = len(y)-npos # Total number of negatives.
if nneg == 0 or npos == 0:
return (np.nan,None) if want_curve else 1
n = len(y)
fprate = np.zeros((n+1,1))
tprate = np.zeros((n+1,1))
ntpos,nfpos = 0.,0.
for i,yi in enumerate(y):
if yi: ntpos += 1
else: nfpos += 1
tprate[i+1] = ntpos/npos
fprate[i+1] = nfpos/nneg
auc = float(np.trapz(tprate,fprate,axis=0))
if want_curve:
curve = np.hstack([fprate,tprate])
return auc, curve
return auc
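# Worked example: z = [0.9, 0.8, 0.2] and y = [1, 0, 1] give ROC points
# (0,0) -> (0,0.5) -> (1,0.5) -> (1,1), so _calc_auc returns 0.5
# (one positive is ranked above the negative, one below).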
def _bootstrap_auc(z, y, ntrial=20):
if ntrial <= 1:
return _calc_auc(z, y), np.nan
n = len(y)
aucs = np.zeros(ntrial)
for t in range(ntrial):
sample = npr.randint(0,n,n)
zt = z[sample].copy()
yt = y[sample].copy()
aucs[t] = _calc_auc(zt, yt)
return np.mean(aucs), np.std(aucs)
#########################################################################
def statistics(predictions, data, bootstrap=20, auc_zscore=4):
"""
Calculates correlations between predictions and the corresponding column of data.targets.
If the targets are binary, then also calculates AUCs.
The input 'predictions' should be a dictionary where each key is a target name and
each value is a Nx1 numpy array, where N is the number of rows in data.
If the targets are not binary, the AUC will be computed by assigning 1
to all targets with Z-score >= auc_zscore, and 0 to the others.
"""
stats = {}
for targetname, predict in predictions.iteritems():
if targetname not in data.targetnames:
continue
targetidx = data.targetnames.index(targetname)
targets = data.Y[:,targetidx]
targetmask = data.Ymask[:,targetidx]
pearson = scipy.stats.pearsonr(predict[targetmask].ravel(), targets[targetmask].ravel())
spearman = scipy.stats.spearmanr(predict[targetmask].ravel(), targets[targetmask].ravel())
stats[targetname] = { 'pearson' : { 'r' : pearson[0], 'p' : pearson[1] },
'spearman' : { 'r' : spearman[0], 'p' : spearman[1] },
}
if is_binary(targets):
labels = targets
else:
labels = targets.copy()
labels[targetmask] = (scipy.stats.mstats.zscore(targets[targetmask].ravel()) >= auc_zscore).astype(np.float)
auc_mean, auc_std = _bootstrap_auc(predict, labels, ntrial=bootstrap)
auc, auc_curve = _calc_auc(predict, labels, want_curve=True)
stats[targetname]['AUC'] = { 'mean' : auc_mean, 'std' : auc_std, 'value' : auc, 'curve' : auc_curve }
return stats
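# Illustrative use (the target name and arrays are hypothetical):
#   stats = statistics({"CTCF": z}, data)   # z is an Nx1 array of model outputs for target "CTCF"
#   stats["CTCF"]["pearson"]["r"], stats["CTCF"]["AUC"]["value"]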
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/statistics.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import re
import os
import csv
import copy
import time
import logging
import tempfile
import itertools
import smat as sm
import numpy as np
import numpy.random as npr
import deepity
import scipy
import scipy.stats
import gzip
import cPickle
from .util import acgt2ord,acgtcomplement,ord2acgt
from . import globals
from deepity.util import tic,toc
from os.path import join,basename,splitext,exists
from math import tanh
_dinucs = ["".join(dinuc) for dinuc in itertools.product(['A','C','G','T'],['A','C','G','T'])]
def dinuc_enrichment_features(s):
# Assumption: all kmers have same length
n = len(s)
k = len(_dinucs[0])
expected = float(n-k+1) / (4.**k)
feats = []
for dinuc in _dinucs:
count = sum(1 for _ in re.finditer('(?=%s)'%dinuc, s)) # count all overlapping occurrences of the kmer; str.count does not count overlapping kmers
#feats.append(count/expected-1.0)
feats.append(0)
return feats
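# Note: with the enrichment line commented out above, this returns a vector of sixteen zeros
# (one per dinucleotide); the disabled line would instead report count/expected - 1.0 for each
# dinucleotide's overlapping occurrence count.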
#########################################################################
class datasource(deepity.resident_datasource):
"""
A kangaroo datasource that serves input attributes:
- X_Sequence0...X_Sequencek: a list of "sequence columns",
where each column has the same size, and
is provided under the name X_SequenceName (where SequenceName
was taken from the column header in the sequencefile)
- F: a single table of features F, taken from the featuresfile.
and output attributes:
- Y: the targets, with one column per target
- Ymask: the mask of non-NaN elements in Y
"""
@staticmethod
def fromtxt(sequencefile, featurefile=None, targetfile=None, foldfilter=None, maxrows=None, targetcols=None, sequencenames=None, featurenames=None, targetnames=None, dinucfeatures=True, **kwargs):
# Load each text file, possible from cache
sequencenames, sequences = loadtxt(sequencefile, maxrows=maxrows, colnames=sequencenames)
featurenames, features = loadtxt(featurefile, maxrows=maxrows, colnames=featurenames)
targetnames, targets = loadtxt(targetfile, maxrows=maxrows, colnames=targetnames, usecols=targetcols)
# If the sequence file contained the targets, then split off that extra column
if targets is None and sequencenames[-1].lower() == "bound":
targetnames = [sequencenames.pop()]
targets = [row[-1] for row in sequences]
sequences = [row[:-1] for row in sequences]
rowidx = np.arange(len(sequences)).astype(np.uint32).reshape((-1,1))
# Keep only the rows whose fold ID (first column) is in foldfilter
if foldfilter:
idx = [i for i in range(len(sequences)) if sequences[i][0] in foldfilter]
sequences = [sequences[i] for i in idx]
rowidx = rowidx[idx]
if features is not None:
features = [features[i] for i in idx]
if targets is not None:
targets = [targets[i] for i in idx]
# Strip out the Fold ID and Event ID columns of the sequence array.
if sequencenames and sequencenames[0].lower() in ("fold","foldid","fold id"):
sequencenames = sequencenames[2:]
foldids = [row[0] for row in sequences]
sequences = [row[2:] for row in sequences]
else:
foldids = ["A" for i in range(len(sequences))]
# Automatically add dinucleotide frequency features for each input sequence
if dinucfeatures:
if not featurenames:
featurenames = []
features = [[] for row in sequences]
for seqname in sequencenames:
featurenames += [seqname+"."+dinuc for dinuc in _dinucs]
for rowfeats, rowseqs in zip(features, sequences):
for s in rowseqs:
rowfeats += dinuc_enrichment_features(s)
return datasource(sequencenames, sequences, featurenames, features, targetnames, targets, foldids, rowidx, **kwargs)
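# Illustrative call (file names are hypothetical; column layout follows the conventions above):
#   ds = datasource.fromtxt("train_sequences.tsv", targetfile="train_targets.tsv",
#                           foldfilter="AB", maxrows=100000)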
@staticmethod
def _generate_dinuc_featurevec(X):
return dinuc_enrichment_features(ord2acgt(X))
def __init__(self, sequencenames, sequences, featurenames, features, targetnames, targets, foldids, rowidx):
self.sequencenames = sequencenames
self.featurenames = featurenames if features is not None else []
self.targetnames = targetnames if targets is not None else []
nsequence = len(self.sequencenames)
seqattrs = sum([self._seqattrnames(i) for i in range(nsequence)],())
featattr = [("F",),("features",)] if features is not None else [(),()]
targattrs = [("Y","Ymask"),("targets",)] if targets is not None else [(),()]
foldattr = ("foldids",) if foldids is not None else ()
# Initialize the datasource superclass by telling it how many
# input attributes to expect, based on
super(datasource,self).__init__(input_attrs = seqattrs + featattr[0],
output_attrs = targattrs[0],
extra_attrs = ("rowidx","sequences") + featattr[1] + targattrs[1] + foldattr, # Attributes not batched or sent to the GPU
)
nrow = len(sequences)
self.rowidx = rowidx
self.sequences = sequences
self.features = np.asarray(features, dtype=np.float32).reshape((nrow,-1)) if features is not None else None
self.targets = np.asarray(targets, dtype=np.float32).reshape((nrow,-1)) if targets is not None else None
self.foldids = foldids
self._task_ids = sorted(self.targetnames)
self.preprocessors = {"features" : [], "targets" : []}
self.requirements = {}
self._create_attributes()
def extract_fold(self, foldid):
idx = np.asarray([i for i in range(len(self)) if self.foldids[i] == foldid])
return self[idx]
def add_requirements(self, reqs):
self.requirements.update(reqs)
def clamp_extremes(self, lo, hi):
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _clamp_extremes_preprocessor(self.Y, lo, hi)
self.targets = self.Y.copy()
self.Ymask = ~np.isnan(self.Y)
self.preprocessors["targets"].append(pp)
def logtransform_targets(self):
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _logtransform_preprocessor(self.Y)
self.preprocessors["targets"].append(pp)
def arcsinhtransform_targets(self):
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _arcsinhtransform_preprocessor(self.Y)
self.preprocessors["targets"].append(pp)
def normalize_targets(self, **requirements):
requirements.update(self.requirements)
if any([value == 'logistic' for value in requirements.values()]):
intercept_mode = "min"
else:
intercept_mode = "mean"
self.Y = self.Y.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.Ymask = self.Ymask.copy()
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _normalize_preprocessor(self.Y, intercept_mode)
self.preprocessors["targets"].append(pp)
def normalize_features(self):
if hasattr(self,"F"):
self.F = self.F.copy() # Make a copy in case we're looking at a row-slice of a larger datasource
self.preprocessors = copy.deepcopy(self.preprocessors)
pp = _normalize_preprocessor(self.F, "mean")
self.preprocessors["features"].append(pp)
def _create_attributes(self):
# Adds public attributes (X_*, R_*, F, Y, Ymask) derived from the raw sequences, features, and targets.
nrow = len(self)
nseq = len(self.sequencenames)
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
self.__dict__[Xname] = [row[i] for row in self.sequences]
self.__dict__[Rname] = np.zeros((nrow,1), np.uint32) # empty until set during asbatches()
if self.features is not None:
self.__dict__['F'] = self.features.copy()
if self.targets is not None:
self.__dict__['Y'] = self.targets.copy()
self.__dict__['Ymask'] = ~np.isnan(self.targets)
def _seqattrnames(self, index):
return ('X_%s'%self.sequencenames[index], 'R_%s'%self.sequencenames[index])
def __len__(self):
return len(self.rowidx)
def open(self):
return
def load_preprocessors(self, indir):
if not os.path.exists(join(indir, 'preprocessors.pkl')):
return
with open(join(indir, 'preprocessors.pkl'),'rb') as f:
assert not self.preprocessors['features'], "Cannot load preprocessors for a datasource with already-preprocessed features."
assert not self.preprocessors['targets'], "Cannot load preprocessors for a datasource with already-preprocessed targets."
self.preprocessors = cPickle.load(f)
for pp in self.preprocessors['features']:
self.F = self.F.copy()
pp.apply(self.F)
for pp in self.preprocessors['targets']:
self.Y = self.Y.copy()
self.Ymask = self.Ymask.copy()
pp.apply(self.Y)
def dump_preprocessors(self, outdir, cols=None):
if cols is None:
cols = slice(None)
preprocessors_sliced = { 'features' : self.preprocessors['features'],
'targets' : [pp.slice(cols) for pp in self.preprocessors['targets']] }
with open(join(outdir, 'preprocessors.pkl'), 'wb') as f:
cPickle.dump(preprocessors_sliced, f)
def _insert_reversecomplements(self):
if "reverse_complement" not in globals.flags:
return
nseq = len(self.sequencenames)
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
X = getattr(self, Xname)
rows = range(len(X))
Xrev = [acgtcomplement(x[::-1]) for x in X]
newX = [Xrev[i] if j else X[i] for i in rows for j in (0,1)]
setattr(self, Xname, newX)
# For all the other attributes, simply duplicate their rows.
duprows = np.repeat(np.arange(len(self)), 2)
if hasattr(self, "rowidx"):self.rowidx = self.rowidx[duprows,:]
if hasattr(self, "Y"): self.Y = self.Y[duprows,:]
if hasattr(self, "Ymask"): self.Ymask = self.Ymask[duprows,:]
if hasattr(self, "F"):
self.F = self.F[duprows,:]
# HACK: For dinuc statistic features, adjust columns.
fwdrows = np.arange(0,len(self.F),2)
revrows = np.arange(1,len(self.F),2)
for j in range(len(self.featurenames)):
fname = self.featurenames[j]
if "." in fname:
prefix, suffix = fname.rsplit(".",1)
if suffix in _dinucs:
rcsuffix = acgtcomplement(suffix[::-1])
k = self.featurenames.index(prefix+"."+rcsuffix)
self.F[revrows,k] = self.F[fwdrows,j]
return
def asbatches(self, batchsize=64, reshuffle=False):
n = len(self)
assert n > 0
nbatch = (n + batchsize - 1) // batchsize
nseq = len(self.sequencenames)
padding = self.requirements.get('padding',0)
batches = []
for i in range(nbatch):
# Slice our data attributes row-wise, according to the batch index
batch = self[np.arange(i*batchsize,min(n,(i+1)*batchsize))]
batch._insert_reversecomplements()
# Convert each sequence attribute from a list of strings ("GATC") to a
# single contiguous numpy array X (0..3), along with a list of
# regions R that identify the batch-relative offsets to the start/end
# of each individual sequence
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
batchX = getattr(batch, Xname)
batchR = np.asarray(np.cumsum([0]+[padding+len(x) for x in batchX]),np.uint32).reshape((-1,1))
batchR = np.hstack([batchR[:-1],batchR[1:]])
# Convert list of strings to giant contiguous array of integers 0..3,
# with padding values of 255 put between the individual sequences
batchX = acgt2ord(("."*padding).join([""]+[x for x in batchX]+[""])).reshape((-1,1))
# Convert each batch from numpy array to sarray,
# and then quickly forget about the numpy batch
batchX = sm.asarray(batchX)
batchR = sm.asarray(batchR)
setattr(batch, Xname, batchX)
setattr(batch, Rname, batchR)
setattr(batch, "regions", batchR)
batch._data_attrs = batch._data_attrs + ("regions",)
if hasattr(batch,"F") and batch.F is not None:
batch.F = sm.asarray(batch.F,sm.get_default_dtype())
if hasattr(batch,"Y") and batch.Y is not None:
batch.Y = sm.asarray(batch.Y,sm.get_default_dtype())
if isinstance(batch.Ymask,np.ndarray):
batch.Ymask = sm.asarray(batch.Ymask)
batches.append(batch)
return deepity.shuffled_repeat_iter(batches, reshuffle)
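# A small sketch of the batch layout built by asbatches() above (the padding
# value and the two sequences below are hypothetical):
#   padding = 2, batch sequences = ["ACGT", "GG"]
#   np.cumsum([0, 2+4, 2+2])  -> [0, 6, 10]
#   R                         -> [[0, 6], [6, 10]]   # batch-relative [start, end) offsets
#   X = acgt2ord("..ACGT..GG..")                     # one contiguous ordinal array, '.' padding between/around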
###################################################################################
class _preprocessor(object):
def apply(self, data): raise NotImplementedError("Subclass should implement this.")
def undo(self, data): raise NotImplementedError("Subclass should implement this.")
def slice(self, cols): return self # Do nothing by default
class _normalize_preprocessor(_preprocessor):
def __init__(self, data, intercept_mode):
self.scales = []
self.biases = []
# Preprocess each column to have unit variance and zero mean
ncol = data.shape[1]
for i in range(ncol):
col = data[:,i:i+1]
mask = ~np.isnan(col)
if intercept_mode == "mean":
bias = np.mean(col[mask].ravel())
scale = np.std(col[mask].ravel())
elif intercept_mode == "min":
bias = np.min(col[mask].ravel())
scale = np.max(col[mask].ravel()) - bias
else:
raise NotImplementedError()
# Save the scales and biases for later, in case we're asked to undo this transformation
self.scales.append(scale)
self.biases.append(bias)
self.scales = np.asarray(self.scales)
self.biases = np.asarray(self.biases)
self.apply(data)
def apply(self, data):
# Preprocess each column to have unit variance and zero mean
ncol = data.shape[1]
for i in range(ncol):
col = data[:,i:i+1]
mask = ~np.isnan(col)
# Basically assigns col[:] = (col-bias) / scale
col[mask] -= self.biases[i]
if self.scales[i]:
col[mask] /= self.scales[i]
def undo(self, data, colindex=None):
if colindex is None:
colindex = slice(None)
scales = self.scales[colindex]
biases = self.biases[colindex]
# Undo the preprocessing on each column of 'data', by scaling the variance back up and adding back the bias
ncol = data.shape[1]
assert len(scales) == ncol
assert len(biases) == ncol
for i in range(ncol):
col = data[:,i:i+1]
mask = ~np.isnan(col)
if scales[i]:
col[mask] *= scales[i]
col[mask] += biases[i]
def slice(self, cols):
other = copy.deepcopy(self)
other.scales = other.scales[cols]
other.biases = other.biases[cols]
return other
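# A small worked sketch of the "mean" intercept mode (numbers are hypothetical):
#   data = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
#   pp = _normalize_preprocessor(data, "mean")  # the constructor also applies the transform
#   # bias = mean = 2.0, scale = std ~= 0.816, so data becomes roughly [[-1.22], [0.0], [1.22]]
#   pp.undo(data)                               # restores (approximately) the original values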
class _clamp_extremes_preprocessor(_preprocessor):
def __init__(self, data, lo, hi):
# Preprocess each column by removing its highest/lowest values according to hi/lo percentiles.
ncol = data.shape[1]
for i in range(ncol):
col = data[:,i:i+1]
mask = ~np.isnan(col)
lo_i,hi_i = np.percentile(col[mask], [lo, hi])
# Convert everything below the "lo" threshold to lo
tmp = col[mask]
tmp[tmp < lo_i] = lo_i
col[mask] = tmp
# Convert everything above the "hi" threshold to hi
tmp = col[mask]
tmp[tmp > hi_i] = hi_i
col[mask] = tmp
def apply(self, data):
return # Do nothing. Should not apply this to new data.
def undo(self, data):
return # Do nothing. This operation cannot be meaningfully undone -- it only affects downstream steps like 'normalize'.
class _logtransform_preprocessor(_preprocessor):
def __init__(self, data):
mask = ~np.isnan(data)
lo = np.min(data[mask])
assert lo >= 0, "All data must be non-negative in order to apply log transform"
self.bias = 1. if lo == 0 else 0. # If min is exactly 0, then assume user wants log(1+data) instead of log(data)
def apply(self, data):
mask = ~np.isnan(data)
data[mask] = np.log(data[mask]+self.bias)
def undo(self, data, colindex=None):
mask = ~np.isnan(data)
data[mask] = np.exp(data[mask])-self.bias
class _arcsinhtransform_preprocessor(_preprocessor):
def __init__(self, data):
mask = ~np.isnan(data)
self.intercept = np.median(data[mask])
def apply(self, data):
mask = ~np.isnan(data)
data[mask] = np.arcsinh(data[mask]-self.intercept)
def undo(self, data, colindex=None):
mask = ~np.isnan(data)
data[mask] = np.sinh(data[mask])+self.intercept
###################################################################################
def _is_sequence(string):
return all((c in "ACGTUNacgtun") for c in string)
def _is_numeric(string):
return all((c in "+-0123456789.eE") for c in string) or string in ("nan", "NaN", "NAN")
def _dtype_of(colname, colvalue):
if colname.lower() in ("fold", "foldid", "fold id"):
return "a1" # Fold ID column gets a single char identifier
if colname.lower() in ("event", "eventid", "event id"):
return "a20" # Event ID column gets a fixed-length string
if _is_sequence(colvalue):
return object # Sequence columns get a variable-length string (object)
if _is_numeric(colvalue):
return "f4" # Numeric columns get a 32-bit float
raise ValueError("Could not recognize data type of value \"%s\" in column \"%s\"" % (colvalue, colname))
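# Illustrative calls (the column names and values below are hypothetical):
#   _dtype_of("Fold ID", "A")        -> "a1"
#   _dtype_of("Event ID", "peak_17") -> "a20"
#   _dtype_of("Seq", "ACGTN")        -> object
#   _dtype_of("Score", "-3.5e2")     -> "f4"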
# Helper function turns string "a.txt[:3]" into a pair ("a.txt", slice(None,3))
def _split_filename(s):
if s is None:
return None, None
match = re.findall('(.+)\[(.+)\]',s)
if len(match) != 1:
return s, slice(None)
filename, colslice = match[0]
if colslice and ":" in colslice:
class _slice_getter(object):
def __getitem__(self, i):
return i
colslice = eval("_slice_getter()[%s]" % colslice)
else:
colslice = slice(int(colslice), int(colslice)+1)
return filename, colslice
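# A minimal sketch of the expected behaviour (inputs are hypothetical):
#   _split_filename("targets.tsv")       -> ("targets.tsv", slice(None))
#   _split_filename("targets.tsv[:3]")   -> ("targets.tsv", slice(None, 3))
#   _split_filename("targets.tsv[2]")    -> ("targets.tsv", slice(2, 3))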
def loadtxt(txtfile, separator=None, usecols=None, maxrows=None, colnames=None):
"""
Reads txtfile and returns a tuple (colnames, rows), where rows is a list
with one entry per row. Each row is itself a list of the strings that
were found in the file.
To convert "rows" into a large numpy array, simply use
np.asarray(rows, dtype=np.float32)
"""
if txtfile is None:
return None, None
if usecols is None:
txtfile, usecols = _split_filename(txtfile)
if not os.path.exists(txtfile):
raise IOError("Could not open \"%s\"" % txtfile)
openfunc = gzip.open if txtfile.endswith(".gz") else open
with openfunc(txtfile,'rb') as f:
reader = csv.reader(f, delimiter='\t')
if colnames is None:
colnames = reader.next()
if not usecols:
usecols = range(len(colnames))
colnames = colnames[usecols] if isinstance(usecols,slice) else [colnames[i] for i in usecols]
if maxrows:
rows = []
for row in reader:
rows.append(row[usecols] if isinstance(usecols,slice) else [row[i] for i in usecols])
if len(rows) >= maxrows:
break
else:
rows = [row[usecols] if isinstance(usecols,slice) else [row[i] for i in usecols] for row in reader]
return colnames, rows
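# A hypothetical usage sketch of loadtxt (the file name and column indices are made up):
#   colnames, rows = loadtxt("targets.tsv", usecols=[0, 2], maxrows=1000)
#   Y = np.asarray(rows, dtype=np.float32)   # (nrow x 2) array of the two selected columns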
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/data.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from data import basic_datasource
from model import basic_model
from report import basic_report
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/basic/__init__.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import deepity
from .. import _ext
import smat as sm
###############################################################
class basic_model(deepity.supernode):
"""
A basic model where the input is:
- several sequences X0..Xk
- a single feature vector F
The output is computed by convolving a separate
convnet over each input sequence, then stacking the
output of each convnet along with F, and sending it through
the outputnet to generate prediction Z.
X0 -- convnet0 --+-- outputnet -> Z
... |
Xk -- convnetk --+
|
F -------------- +
"""
def __init__(self, convnets, outputnet, combiner_size=None, combiner_decay=None, combiner_init=None):
# By default, the combiner layer has as many hidden units as there are featuremaps
if not combiner_size:
combiner_size = sum([convnet.Z.shape[1] for convnet in convnets])
# Create a combiner node that does two things:
# 1. it implicitly concatenates all the input matrices,
# but does so in a way that supports self.ninst>1
# (i.e. it is aware that groups of columns of input matrices
# interleave separate model instances)
#
# 2. acts as a fully-connected layer between the concatenated
# inputs and the combiner's output.
#
self.combiner = deepity.std.combine(len(convnets)+1,
size = combiner_size,
oshape = (1,combiner_size),
decay = combiner_decay,
init = combiner_init)
self.outputnet = outputnet
self.combiner.Z >> outputnet.X # Connect combiner node to the outputnet's input
# Connect the convnets to the combiner's inputs.
# Each output plug "convnet[i].Z" is connected to input plug "combiner.Xi"
for i,convnet in enumerate(convnets):
convnet_attrname = "conv"+("_"+convnet.name if convnet.name else str(i))
self.__dict__[convnet_attrname] = convnet
convnet.Z >> getattr(self.combiner, "X%d"%i) # Connect to the ith input attribute of combiner
# Create a linear node that:
# - Has an input plug X that will be renamed "F" and thereby
# connected to the separate "features" vector.
# - Forwards the features vector to the combiner's LAST input
# position (hence the len(convnets)+1)
#
self.featurenet = deepity.std.linear()
self.featurenet.Z >> getattr(self.combiner, "X%d"%len(convnets))
self.featurenet.X.trainable = False
# Call supernode constructor to create the (renumbered) public plugs.
super(basic_model,self).__init__(convnets + [self.featurenet, self.combiner, self.outputnet])
# Finally, rename some of the plugs after their parent convnet node,
# so that the datasource's attributes will automatically
# be connected to the convnet with the matching plug name.
# Convnet i's input plug "convnet[i].X" will end up
# being named "Xi" on this supernode.
for i,convnet in enumerate(convnets):
Xp = getattr(self,"X%d"%i)
Rp = getattr(self,"R%d"%i,None) or getattr(self,"R")
assert convnet.path in Xp.origin().node.path
Xp.rename(("X_%s" % convnet.name) if convnet.name else ("X%d"%i))
Rp.rename(("R_%s" % convnet.name) if convnet.name else ("R%d"%i))
# Rename the featurenet's input attribute "F"
getattr(self,"X%d"%len(convnets)).rename("F")
return
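# A hypothetical construction sketch (the convnet and outputnet below are
# placeholders for nodes built elsewhere in kangaroo/deepity):
#   model = basic_model(convnets=[seq_convnet],   # one convnet per sequence column
#                       outputnet=output_net,     # e.g. a small fully-connected net
#                       combiner_size=32)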
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/basic/model.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import deepity
import deepity.report
class basic_report(deepity.report.training_report):
def __init__(self,logfile_pattern, task_ids, sample_ids):
super(basic_report,self).__init__(logfile_pattern, task_ids, sample_ids)
#def
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/basic/report.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import re
import os
import copy
import logging
import tempfile
import smat as sm
import numpy as np
import numpy.random as npr
import deepity
import scipy
import scipy.stats
import cPickle
from ..util import acgt2ord,acgtcomplement
from deepity.util import tic,toc
from os.path import join,basename,splitext,exists
def get_filename_and_cols(s):
if s is None:
return None, None
match = re.findall('(.+)\[(.+)\]',s)
if len(match) != 1:
return s, None
filename, colslice = match[0]
if colslice and ":" in colslice:
class _slice_getter(object):
def __getitem__(self, i):
return i
colslice = eval("_slice_getter()[%s]" % colslice)
else:
colslice = slice(int(colslice), int(colslice)+1)
return filename, colslice
#########################################################################
class basic_datasource(deepity.resident_datasource):
"""
A datasource that serves input attributes:
- X_Sequence0...X_Sequencek: a list of "sequence columns",
where each column has the same size, and
is provided under the name X_SequenceName (where SequenceName
was taken from the column header in the sequencefile)
- F: a single table of features, taken from the featurefile.
and output attributes:
- Y: the targets, with one column per target
- Ymask: the mask of non-NaN elements in Y
"""
def __init__(self, sequencefile, featurefile=None, targetfile=None, foldfilter=None,
requirements=None, maxrows=None, usecols=None, preprocess=None, reverse_complement=False):
# If sequencefile matches the pattern filename[slice],
# then separate the filename itself from the column slice
self._sequencefile, self._sequencefile_cols = get_filename_and_cols(sequencefile)
self._featurefile, self._featurefile_cols = get_filename_and_cols(featurefile)
self._targetfile, self._targetfile_cols = get_filename_and_cols(targetfile)
self.sequence_names = self._read_sequencenames()
self.feature_names = self._read_featurenames()
self.target_names = self._read_targetnames()
self._task_ids = sorted(self.target_names)
self._preprocess = preprocess or []
self._reverse_complement = reverse_complement
if usecols is not None:
for col in usecols:
assert col in self.target_names, "Column name \"%s\" does not exist in Targets file header." % col
self._task_ids = [name for name in self._task_ids if name in usecols]
nseq = len(self.sequence_names)
seq_attrs = sum([self._seqattrnames(i) for i in range(nseq)],())
feat_attr = ("F",) if self._featurefile else ()
# Initialize the datasource superclass by telling it which
# input attributes to expect, based on the sequence and feature columns.
super(basic_datasource,self).__init__(input_attrs = seq_attrs + feat_attr,
output_attrs = ("Y","Ymask"),
extra_attrs = ("rowidx",), # Attributes not batched or sent to the GPU
)
# Add some attributes that will be initialized when we load the actual data.
self._sequences = None
self._features = None
self._features_preprocess = None
self._targets = None
self._targets_preprocess = None
self.rowidx = None
self._requirements = requirements or {}
self._maxrows = maxrows if maxrows else (1L << 40L)
self._foldfilter = foldfilter if foldfilter is not None else "ABC"
def _read_sequencenames(self):
with open(self._sequencefile) as f:
header = f.readline().rstrip().split('\t') # read header
firstrow = f.readline().rstrip().split('\t') # read first non-header line
assert len(header) >= 3, "Sequences file must have at least 3 columns: \"Fold ID\", \"Event ID\", then at least one sequence column."
assert header[0] in ("Fold", "FoldID", "Fold ID"), "Sequences file must have first column titled \"Fold ID\"."
assert header[1] in ("Event", "EventID", "Event ID"), "Sequences file must have second column titled \"Event ID\"."
assert len(firstrow) == len(header), "Sequences file must have rows with same number of columns as header."
if self._sequencefile_cols:
header = header[self._sequencefile_cols]
return header[2:]
def _read_featurenames(self):
if self._featurefile is None:
return []
with open(self._featurefile) as f:
header = f.readline().rstrip().split('\t') # read header
firstrow = f.readline().rstrip().split('\t') # read first non-header line
assert len(header) >= 1, "Features file must have at least 1 column."
if self._featurefile_cols:
header = header[self._featurefile_cols]
return header
def _read_targetnames(self):
if self._targetfile is None:
return []
with open(self._targetfile) as f:
header = f.readline().rstrip().split('\t') # read header
firstrow = f.readline().rstrip().split('\t') # read first non-header line
assert len(header) >= 1, "Targets file must have at least 1 column."
if self._targetfile_cols:
header = header[self._targetfile_cols]
return header
def open(self):
# Called by a process when it's about to actually start pulling data
"""
Loads the sequences, features, and targets for this datasource (restricted
to the folds in foldfilter) and creates the corresponding public
attributes (X_*, R_*, F, Y, Ymask).
"""
self._sequences = None
self._features = None
self._features_preprocess = []
self._targets = None
self._targets_mask = None
self._targets_preprocess = []
self.rowidx = None
self._open_sequences()
self._open_features()
self._open_targets()
self._create_attributes()
def _open_sequences(self):
"""Loads the raw sequences, storing them as lists of strings."""
logging.info("loading %s ..." % basename(self._sequencefile))
tic()
# Read entire data file into one big string.
# Manually scanning the string for newlines and tabs is 3x faster than
# using readlines() and then calling split() on each line.
with open(self._sequencefile) as f:
f.readline() # discard header
txt = f.read()
assert txt[-1] == '\n', "Sequence file must end with a newline."
for name in self.sequence_names:
logging.info(" %s" % name)
foldfilter = self._foldfilter
maxrows = self._maxrows
seqloop = range(len(self.sequence_names)-1) # Used in innermost loop
revcomp = self._reverse_complement
# Store each column as its own list of sequences.
# Scan through the txt string until we've hit the end.
sequences = [[] for s in self.sequence_names]
rowidx = []
i,j = 0,txt.find('\n')
for row_index in xrange(len(txt)):
if j == -1 or len(rowidx) >= maxrows:
break
# Check FoldID is wanted (first char of any new line)
if txt[i] in foldfilter:
# Add each sequence in this row to its corresponding list
k = txt.find('\t', i+2) # k = index of first char of first sequence in this row
for s in seqloop:
i, k = k+1, txt.find('\t', k+1)
sequences[s].append(txt[i:k]) # Pull out column 's' sequence
i, k = k+1, txt.find('\t', k+1)
if k == -1 or k > j: # If the next tab is on the next line, then break at the newline
k = j
sequences[-1].append(txt[i:k]) # Pull out the last column's sequence
rowidx.append(row_index) # Also remember the original row index of this example.
# Advance so that txt[i:j] is the next line. The last character of the file must be a '\n'.
i,j = j+1,txt.find('\n',j+1)
txt = None # Release the gigantic string's memory immediately (helps debugger stability)
# Convert row indices to a numpy array for faster indexing when loading features/targets
self.rowidx = np.asarray(rowidx,np.uint32).reshape((-1,1))
self._sequences = sequences
logging.info("... load took %.2fs" % toc())
def _open_features(self):
if self._featurefile is None:
return
logging.info("loading %s ..." % basename(self._featurefile))
tic()
# Read the entire features file as one string, convert it to a numpy array, and slice
# out just the rows that we're using.
# It turns out this strategy is MUCH faster than using numpy.loadtxt:
# features = np.loadtxt(self._featurefile, np.float32,
# delimiter='\t', skiprows=1, ndmin=2)
with open(self._featurefile) as f:
f.readline() # discard header
txt = f.read()
for name in self.feature_names:
logging.info(" %s" % name)
nfeature = len(self.feature_names)
rowidx = self.rowidx
maxrows_to_read = rowidx[-1]+1
if self._featurefile_cols:
# np.fromstring is fast but doesn't support the presence of non-numeric columns
raise NotImplementedError("This code should work but has not been tested.")
features = np.asarray([[float(x) for x in line.split('\t')[self._featurefile_cols]]
for line in txt.split('\n',maxrows_to_read)[:-1]])
else:
features = np.fromstring(txt, np.float32, sep='\t',
count=nfeature*maxrows_to_read).reshape(-1, nfeature)
txt = None
if len(features) > len(rowidx):
features = features[rowidx.ravel(),:]
# Preprocess each feature by scaling its range to unit width and shifting its mean to 0
a,b = [],[]
for i in range(nfeature):
col = features[:,i:i+1]
mask = ~np.isnan(col)
lo = np.min(col[mask], axis=0)
hi = np.max(col[mask], axis=0)
if lo == hi:
hi += 1 # Avoid divide by zero for degenerate targets
meani = np.mean(col[mask])
ai = 1./(hi-lo)
bi = -meani*ai
col[mask] = ai*col[mask] + bi
a.append(ai)
b.append(bi)
self._feature_preprocess = [ ('normalize', np.asarray(a).reshape((1,-1)),
np.asarray(b).reshape((1,-1))) ]
nsequence = len(self._sequences[0])
assert len(features) == nsequence, "Number of rows in Features file must match number of rows in Sequences file."
self._features = features
logging.info("... load took %.2fs" % toc())
def _open_targets(self):
if self._targetfile is None:
return
_log_xform_warned = False
logging.info("loading %s ..." % basename(self._targetfile))
tic()
# Read the entire targets file as one string, convert it to a numpy array, and slice
# out just the rows that we're using.
# It turns out this strategy is MUCH faster than using numpy.loadtxt:
#    targets = np.loadtxt(self._targetfile, np.float32,
#                         delimiter='\t', skiprows=1, ndmin=2)
with open(self._targetfile) as f:
f.readline() # discard header
txt = f.read()
ntarget = len(self.target_names)
ntask = len(self._task_ids)
rowidx = self.rowidx
maxrows_to_read = rowidx[-1]+1
if self._targetfile_cols:
# np.fromstring is fast but doesn't support the presence of non-numeric columns
targets = np.asarray([[float(x) for x in line.split('\t')[self._targetfile_cols]]
for line in txt.split('\n',maxrows_to_read)[:-1]])
else:
targets = np.fromstring(txt, np.float32, sep='\t',
count=ntarget*maxrows_to_read).reshape(-1, ntarget)
txt = None
if len(targets) > len(rowidx):
targets = targets[rowidx.ravel(),:]
# Select columns using '_task_ids' no matter what, since the order
# might be different.
usecols = np.asarray([self.target_names.index(name) for name in self._task_ids]) # numpy array for faster column indexing below
targets = targets[:,usecols]
# Normalize targets by scaling min/max range to [0,1]
if targets.size > 0:
# OPTIONAL: clamp all originally negative values at zero
#targets = np.maximum(0, targets)
# For each individual column, get lo/hi percentile
# and then normalize the non-NaN values in that column
a,b = [],[]
for i in range(ntask):
target_i = targets[:,i]
mask_i = ~np.isnan(target_i)
is_boolean = np.all(np.logical_or(target_i[mask_i] == 0, target_i[mask_i] == 1))
if is_boolean:
# Automatically assume 0/1 classification target
logging.info(" %s \t(classification)" % self._task_ids[i])
ai,bi = 1,0
else:
# Automatically assume regression target
logging.info(" %s \t(regression)" % self._task_ids[i])
if "log" in self._preprocess:
if (not np.all(target_i[mask_i] >= 0)):
if not _log_xform_warned:
_log_xform_warned = True
print "Warning: log transform requires all original targets to be non-negative; biasing the data and proceeding anyway."
target_i[mask_i] -= target_i[mask_i].min()
target_i[mask_i] = np.log(1+target_i[mask_i])
elif "sqrt" in self._preprocess:
if (not np.all(target_i[mask_i] >= 0)):
if not _log_xform_warned:
_log_xform_warned = True
print "Warning: sqrt transform requires all original targets to be non-negative; biasing the data and proceeding anyway."
target_i[mask_i] -= target_i[mask_i].min()
target_i[mask_i] = np.sqrt(target_i[mask_i])
lo_i,hi_i = np.percentile(target_i[mask_i], [0.0, 1.0])
#lo_i,hi_i = np.percentile(target_i[mask_i], [0.05, 99.95])
if lo_i == hi_i:
hi_i += 1 # Avoid divide by zero for degenerate targets
# Convert everything below the "lo" threshold to NaNs
tmp = target_i[mask_i]
tmp[tmp < lo_i] = np.nan
target_i[mask_i] = tmp
mask_i = ~np.isnan(target_i)
# Convert everything above the "hi" threshold to NaNs
tmp = target_i[mask_i]
tmp[tmp > hi_i] = np.nan
target_i[mask_i] = tmp
mask_i = ~np.isnan(target_i)
# Clamp everything to the range [lo,hi]
#target_i[mask_i] = np.maximum(lo_i, target_i[mask_i])
#target_i[mask_i] = np.minimum(hi_i, target_i[mask_i]) # Assume anything above hi_i is a "large" outlier
# Subtract the mean
if self._requirements.get('target',None) == 'logistic':
intercept_i = lo_i
else:
intercept_i = np.mean(target_i[mask_i])
ai = 1./(hi_i-lo_i)
bi = -intercept_i*ai
target_i[mask_i] = ai*target_i[mask_i] + bi
#mask_pos = target_i[mask_i] > 0
#target_i[mask_i][mask_pos] **= 0.5
a.append(ai)
b.append(bi)
if "log" in self._preprocess:
self._targets_preprocess.append(('log',))
self._targets_preprocess.append(('normalize', np.asarray(a).reshape((1,-1)),
np.asarray(b).reshape((1,-1))) )
#targets[self._targets_mask] = np.maximum(0,targets[self._targets_mask])
#targets[self._targets_mask] = np.minimum(1,targets[self._targets_mask])
self._targets = targets
self._targets_mask = ~np.isnan(targets)
logging.info("... load took %.2fs" % toc())
def _create_attributes(self):
# Adds public attributes with names matching the loaded sequence/feature/target columns.
for i in range(len(self._sequences)):
Xname,Rname = self._seqattrnames(i)
self.__dict__[Xname] = self._sequences[i]
self.__dict__[Rname] = np.zeros((len(self._sequences[i]),1), np.uint32) # empty until set during asbatches()
if self._features is not None:
self.__dict__['F'] = self._features
if self._targets is not None:
self.__dict__['Y'] = self._targets
self.__dict__['Ymask'] = self._targets_mask
def _seqattrnames(self,index):
return ('X_%s'%self.sequence_names[index], 'R_%s'%self.sequence_names[index])
def __len__(self):
return len(self.rowidx)
def load_preprocessing(self, indir):
    # Mirror of dump_preprocessing: restore pickled feature preprocessing parameters.
    filename = join(indir, 'features_preprocess.pkl')
    if exists(filename):
        with open(filename, 'rb') as f:
            self._features_preprocess = cPickle.load(f)
def dump_preprocessing(self, outdir, cols=None):
if self._features_preprocess:
if cols is None:
cols = slice(None)
with open(join(outdir, 'features_preprocess.pkl'), 'w') as f:
ppsliced = []
for pp in self._features_preprocess:
if pp[0] == 'normalize':
ppsliced.append((pp[0], pp[1][cols], pp[2][cols]))
cPickle.dump(ppsliced, f)
def _apply_reverse_complement(self):
if not self._reverse_complement:
return
nseq = len(self.sequence_names)
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
X = getattr(self, Xname)
rows = range(len(X))
Xrev = [acgtcomplement(x[::-1]) for x in X]
newX = [Xrev[i] if j else X[i] for i in rows for j in (0,1)]
setattr(self, Xname, newX)
# For all the other attributes, simply duplicate their rows.
duprows = np.repeat(np.arange(len(self)), 2)
if hasattr(self, "rowidx"):self.rowidx = self.rowidx[duprows,:]
if hasattr(self, "F"): self.F = self.F[duprows,:]
if hasattr(self, "Y"): self.Y = self.Y[duprows,:]
if hasattr(self, "Ymask"): self.Ymask = self.Ymask[duprows,:]
def asbatches(self, batchsize=128, reshuffle=True):
n = len(self)
assert n > 0
nbatch = (n + batchsize - 1) // batchsize
nseq = len(self.sequence_names)
padding = self._requirements['padding']
batches = []
for i in range(nbatch):
# Slice our data attributes row-wise, according to the batch index
batch = self[np.arange(i*batchsize,min(n,(i+1)*batchsize))]
batch._apply_reverse_complement()
# Convert each sequence attribute from a list of strings ("GATC") to a
# single contiguous numpy array X (0..3), along with a list of
# regions R that identify the batch-relative offsets to the start/end
# of each individual sequence
for i in range(nseq):
Xname,Rname = self._seqattrnames(i)
batchX = getattr(batch, Xname)
batchR = np.asarray(np.cumsum([0]+[padding+len(x) for x in batchX]),np.uint32).reshape((-1,1))
batchR = np.hstack([batchR[:-1],batchR[1:]])
# Convert list of strings to giant contiguous array of integers 0..3,
# with padding values of 255 put between the individual sequences
batchX = acgt2ord(("."*padding).join([""]+[x for x in batchX]+[""])).reshape((-1,1))
# Convert each batch from numpy array to sarray,
# and then quickly forget about the numpy batch
batchX = sm.asarray(batchX)
batchR = sm.asarray(batchR)
setattr(batch, Xname, batchX)
setattr(batch, Rname, batchR)
if hasattr(batch,"F") and batch.F is not None:
batch.F = sm.asarray(batch.F,sm.get_default_dtype())
if hasattr(batch,"Y") and batch.Y is not None:
batch.Y = sm.asarray(batch.Y,sm.get_default_dtype())
if isinstance(batch.Ymask,np.ndarray):
batch.Ymask = sm.asarray(batch.Ymask)
batches.append(batch)
return deepity.shuffled_repeat_iter(batches, reshuffle)
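# A hypothetical end-to-end sketch of driving this datasource (the file names and
# keyword values below are illustrative only):
#   ds = basic_datasource(sequencefile="sequences.tsv",
#                         targetfile="targets.tsv",
#                         foldfilter="AB",
#                         requirements={"padding": 8})
#   ds.open()                               # load sequences/features/targets into memory
#   batches = ds.asbatches(batchsize=128)   # iterator of GPU-ready minibatches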
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/basic/data.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import deepity
import deepity._ext
import numpy as np
from smat import *
from . import kangaroo_smat
class dropoutord(deepity.std.elemwise):
"""
Dropout on ordinal sequence input.
"""
def __init__(self, rate):
super(dropoutord,self).__init__(["X","regions"],["Z"])
self.rate = rate
self.M = None
def _fprop(self,X,regions):
if X is None:
return None
if "train_mode" in deepity.globals.flags:
Z,self.M = kangaroo_smat.dropoutord_fp_train(X, self.rate)
if "reverse_complement" in deepity.globals.flags:
# TODO: should be done on GPU
_M = self.M.asnumpy()
_R = regions.asnumpy()
padsize = _M.size - _R[-1,-1]
for i in range(0,len(_R),2):
a,b = _R[i]
c,d = _R[i+1]
_M[c+padsize:d] = np.flipud(_M[a+padsize:b])
self.M = asarray(_M)
_Z = X.asnumpy()
_Z[~_M] = 254
Z = asarray(_Z)
pass
else:
Z = X
return Z
def _bprop(self,dZ):
self.M = None
return dZ
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/_ext/dropoutord.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from corr1ord import *
from poolrgn import *
from dropoutord import *
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/_ext/__init__.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import deepity
import deepity._ext
import numpy as np
from smat import *
from . import kangaroo_smat
from . import dropoutord
class corr1ord(deepity.node):
"""
A 1D correlation on an ordinal sequence.
"""
def __init__(self, nfilter, fsize=None, decay=0, init=1e-3, nchannel=4, name=None, constraint=None, start_training=0):
super(corr1ord,self).__init__(["X","W"],["Z","cost"],name)
self.nfilter = nfilter
self.fsize = fsize
self.decay = decay
self.init = init
self.nchannel = nchannel
self.constraint = constraint
self.start_training = start_training
def _slice_inst(self,i):
self.W.fpval = self.W.fpval[:,i*self.nfilter:(i+1)*self.nfilter]
def enforce_constraints(self, W):
if self.constraint is None:
return
if self.constraint in ("pfm", "psam"):
F = exp(W) # Each column will be 4 elements of a filter column
for i in range(self.fsize):
Fi = F[i*4:(i+1)*4,:]
if self.constraint == "pfm":
Fi[:] = Fi / sum(Fi,axis=0) # Normalize
else:
Fi[:] = Fi / max(Fi,axis=0) # Normalize
W[:] = log(F)
elif type(self.constraint) == tuple:
lo,hi = self.constraint
W[:] = minimum(W,hi)
W[:] = maximum(W,lo)
else:
raise NotImplementedError("constraint \"%s\" not implemented" % str(self.constraint))
def getfilters(self, want_bias=False):
F = self.W.fpval.asnumpy().T
F = np.require(F,requirements=['C'])
F = F.reshape((self.nfilter,self.fsize,self.nchannel))
if isinstance(self.constraint,str):
F = exp(F)
bias = None
else:
bias = F.mean(axis=2).reshape((self.nfilter,self.fsize,1)) # shift bias from filter positions to the actual bias, so that mean is always zero at each filter position (100% equivalent model, just a reparameterization)
if want_bias:
return bias
F -= bias
F2 = np.empty((self.nfilter,self.nchannel,self.fsize),F.dtype)
for i in range(self.nfilter):
F2[i,:,:] = F[i,:,:].T
return F2
def _fprop(self,X,W):
if isinstance(self.decay,np.ndarray):
self.decay = asarray(self.decay,dtype=W.dtype) if self.decay.size > 1 else np.asscalar(self.decay)
Z = zeros((X.shape[0],W.shape[1]))
kangaroo_smat.corr1ord(W,X,Z,self.nchannel)
if "train_mode" not in deepity.globals.flags:
upstream = self.X.srcs[0].node
if type(upstream) == dropoutord.dropoutord:
if upstream.rate > 0:
Z *= (1-upstream.rate)
cost = 0
if (self.decay is not None) and ("bprop_mode" not in deepity.globals.flags) and (deepity.globals.flags.get("weight_decay_start",0) <= deepity.globals.flags.get("step",0)):
C = sum(abs(W),axis=0) # Sum the absolute values across each row.
C = C.reshape((self.ninst,-1)) # Put each separate weight matrix into its own row.
cost = sum(C,axis=1).T*self.decay # Turn into row vector of costs, weighted by decay coefficient.
return (Z,cost)
def _bprop(self,X,W,dZ):
dW = zeros_like(W)
kangaroo_smat.corr1ord_bprop_W(dW,X,dZ,self.nchannel) # backprop to filters
# Add a separate decay for each instance
if (self.decay is not None) and (deepity.globals.flags.get("weight_decay_start",0) <= deepity.globals.flags.get("step",0)):
deepity._ext.madd_bcast(sign(W),self.decay,W.shape[0]/self.ninst,dW)
#assert not self.X.has_upstream() # backprop to dX not implemented
dX = None
if ("want_bprop_inputs" in deepity.globals.flags):
dX = zeros((X.size,4), W.dtype)
kangaroo_smat.corr1ord_bprop_X(dX,W,dZ,self.nchannel) # backprop to filters
if self.start_training > deepity.globals.flags.get("step",0):
dW *= 0
return (dX,dW)
def _requirements(self):
assert self.fsize is not None, "Must set corr1ord.fsize attribute before any operation that requires shape"
return { "sequence_padding" : self.fsize - 1 }
def _calc_shapes(self,X,W,Z):
assert self.fsize is not None, "Must set corr1ord.fsize attribute before any operation that requires shape"
# X must be a single 1D sequence of variable length
if not X._shape: X._shape = (None,1)
# W size is determined entirely by internal parameters
if not W._shape: W._shape = (self.fsize*self.nchannel,self.nfilter)
# Output dimension is determined by 'nfilter' of this node (number of feature maps)
Z._shape = (None,self.nfilter)
class motif_scan(corr1ord):
def __init__(self, num_motifs, motif_len=None, weight_decay=0, init_scale=1e-3):
super(motif_scan,self).__init__(nfilter=num_motifs,fsize=motif_len,decay=weight_decay,init=init_scale)
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/_ext/corr1ord.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
import deepity
import smat as sm
import numpy as np
from . import kangaroo_smat
class maxpool(deepity.node):
"""
Max pooling node.
X = input values (N x M)
R = input regions (P x 2)
Z = output values (P x M) where Z[i,j] = max(X[R[i,0]:R[i,1],j],axis=0)
where
M = number of feature maps,
N = number of input positions,
P = number of output positions
"""
def __init__(self, regions=None):
super(maxpool,self).__init__(["X","R"],["Z"])
self.I = None
self.regions = regions or [(0,None)]
def _fprop(self,X,R):
if deepity.globals.flags.get("collect_featuremaps",False):
old = deepity.globals.flags.pop("collect_featuremaps")
fmaps = old if isinstance(old, list) else []
fmaps += self._collect_featuremaps(X,R)
deepity.globals.flags.push("collect_featuremaps",fmaps)
Z,self.I = kangaroo_smat.poolrgn(X,R,ptype="max",
want_argmax = "bprop_mode" in deepity.globals.flags)
if "collect_argmax" in deepity.globals.flags:
deepity.globals.flags.pop("collect_argmax")
deepity.globals.flags.push("collect_argmax", self.I.asnumpy())
elif "force_argmax" in deepity.globals.flags:
_I = deepity.globals.flags.get("force_argmax")
_X = X.asnumpy()
_Z = _X.ravel()[_I.ravel()].reshape(Z.shape)
Z = sm.asarray(_Z)
return Z
def _bprop(self,X,R,dZ):
dX = sm.zeros_like(X)
kangaroo_smat.poolrgn_bprop(dX,R,dZ,self.I,ptype="max")
self.I = None
return (dX,None)
def _calc_shapes(self,X,R,Z):
# X.shape[1] must match Z.shape[1]
if X._shape and Z._shape: assert X._shape[1] == Z._shape[1]
if X._shape and not Z._shape: Z._shape = (None,X._shape[1])
if Z._shape and not X._shape: X._shape = (None,Z._shape[1])
def _requirements(self):
return { "sequence_pooling" : self.regions }
def _collect_featuremaps(self,X,R):
X = X.asnumpy()
R = R.asnumpy()
batch_fmaps = []
for i in range(len(R)):
fmaps = X[R[i,0]:R[i,1],:].T.copy()
batch_fmaps.append(fmaps)
return batch_fmaps
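# A tiny worked sketch of the pooling contract (values are hypothetical):
#   X = [[1, 5],
#        [3, 2],
#        [0, 9]]            # N=3 positions, M=2 feature maps
#   R = [[0, 2], [2, 3]]    # P=2 regions: rows 0..1 and row 2
#   Z = [[3, 5],
#        [0, 9]]            # per-region, per-featuremap maxima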
class avgpool(deepity.node):
"""
Average pooling node.
X = input values (N x M)
R = input regions (P x 2)
Z = output values (P x M) where Z[i,j] = mean(X[R[i,0]:R[i,1],j],axis=0)
where
M = number of feature maps,
N = number of input positions,
P = number of output positions
"""
def __init__(self, regions=None):
super(avgpool,self).__init__(["X","R"],["Z"])
self.regions = regions or [(0,None)]
def _fprop(self,X,R):
Z = kangaroo_smat.poolrgn(X,R,ptype="avg",want_argmax=False)
return Z
def _bprop(self,X,R,dZ):
dX = sm.zeros_like(X)
kangaroo_smat.poolrgn_bprop(dX,R,dZ,None,ptype="avg")
return (dX,None)
def _calc_shapes(self,X,R,Z):
# X.shape[1] must match Z.shape[1]
if X._shape and Z._shape: assert X._shape[1] == Z._shape[1]
if X._shape and not Z._shape: Z._shape = (None,X._shape[1])
if Z._shape and not X._shape: X._shape = (None,Z._shape[1])
def _requirements(self):
return { "sequence_pooling" : self.regions }
class allpool(deepity.node):
"""
Max *and* average pooling node.
X = input values (N x M)
R = input regions (P x 2)
Z = output values (P x 2M) where Z[i,2*j+0] = max( X[R[i,0]:R[i,1],j],axis=0)
and Z[i,2*j+1] = mean(X[R[i,0]:R[i,1],j],axis=0)
where
M = number of feature maps,
N = number of input positions,
P = number of output positions
"""
def __init__(self, regions=None):
super(allpool,self).__init__(["X","R"],["Z"])
self.I = None
self.regions = regions or [(0,None)]
def _fprop(self,X,R):
if deepity.globals.flags.get("collect_featuremaps",False):
old = deepity.globals.flags.pop("collect_featuremaps")
fmaps = old if isinstance(old, list) else []
fmaps += self._collect_featuremaps(X,R)
deepity.globals.flags.push("collect_featuremaps",fmaps)
Z,self.I = kangaroo_smat.poolrgn(X,R,ptype="all",
want_argmax = "bprop_mode" in deepity.globals.flags)
if "collect_argmax" in deepity.globals.flags:
deepity.globals.flags.pop("collect_argmax")
deepity.globals.flags.push("collect_argmax", self.I.asnumpy())
elif "force_argmax" in deepity.globals.flags:
_I = deepity.globals.flags.get("force_argmax")
_X = X.asnumpy()
_Zmax = _X.ravel()[_I.ravel()].reshape(_I.shape)
_Z = Z.asnumpy()
_Z[:,np.arange(0,Z.shape[1],2)] = _Zmax
Z = sm.asarray(_Z)
return Z
def _bprop(self,X,R,dZ):
dX = sm.zeros_like(X)
kangaroo_smat.poolrgn_bprop(dX,R,dZ,self.I,ptype="all")
self.I = None
return (dX,None)
def _calc_shapes(self,X,R,Z):
# Z.shape[1] must be 2*X.shape[1]
if X._shape and Z._shape: assert 2*X._shape[1] == Z._shape[1]
if X._shape and not Z._shape: Z._shape = (None,2*X._shape[1])
if Z._shape and not X._shape: X._shape = (None,Z._shape[1]/2)
def _requirements(self):
return { "sequence_pooling" : self.regions }
def _collect_featuremaps(self,X,R):
X = X.asnumpy()
R = R.asnumpy()
batch_fmaps = []
for i in range(len(R)):
fmaps = X[R[i,0]:R[i,1],:].T.copy()
batch_fmaps.append(fmaps)
return batch_fmaps
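# With the same hypothetical X and R as in the maxpool sketch above, allpool
# interleaves max and mean per feature map:
#   Z = [[3, 2, 5, 3.5],
#        [0, 0, 9, 9]]      # columns are [max0, mean0, max1, mean1]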
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/_ext/poolrgn.py
|
# Copyright (c) 2015, Andrew Delong and Babak Alipanahi All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author's note:
# This file was distributed as part of the Nature Biotechnology
# supplementary software release for DeepBind. Users of DeepBind
# are encouraged to instead use the latest source code and binaries
# for scoring sequences at
# http://tools.genes.toronto.edu/deepbind/
#
from smat import *
from smat import smat_dll
from os.path import abspath,join,dirname
from ctypes import *
import os
###################################################################
# Declare some useful ctypes based on the C++ types
c_padmode_t = c_int
c_pm_zero_output = 0
c_pooltype_t = c_int
c_pt_max = 0
c_pt_avg = 1
c_pt_sum = 2
c_pt_all = 3
c_isize_t = smat_dll.c_isize_t
c_usize_t = smat_dll.c_usize_t
c_smat_p = smat_dll.c_smat_p
class c_corr1ord_options_t(Structure):
_fields_ = [("padmode", c_int),
("nchannel", c_usize_t)]
c_corr1ord_options_p = POINTER(c_corr1ord_options_t)
class c_poolrgn_options_t(Structure):
_fields_ = [("ptype", c_pooltype_t)]
c_poolrgn_options_p = POINTER(c_poolrgn_options_t)
###################################################################
# Now create the public 'dll' object exposed to smat.py, with all the methods
# exported by the DLL available for calling
#
_ext_dll = None
def ext_dll():
global _ext_dll
if _ext_dll is None:
_ext_dll = load_extension("kangaroo_smat")
_ext_dll.api_corr1ord.declare( None, [c_smat_p,c_smat_p,c_smat_p,c_corr1ord_options_p])
_ext_dll.api_corr1ord_bprop_W.declare(None,[c_smat_p,c_smat_p,c_smat_p,c_corr1ord_options_p])
_ext_dll.api_corr1ord_bprop_X.declare(None,[c_smat_p,c_smat_p,c_smat_p,c_corr1ord_options_p])
_ext_dll.api_poolrgn.declare( None, [c_smat_p,c_smat_p,c_smat_p,c_smat_p,c_poolrgn_options_p])
_ext_dll.api_poolrgn_bprop.declare( None, [c_smat_p,c_smat_p,c_smat_p,c_smat_p,c_poolrgn_options_p])
_ext_dll.api_dropoutord_fp_tr.declare(None, [c_smat_p,c_smat_p,c_smat_p,c_float])
_ext_dll.api_autotune.declare( None, [])
return _ext_dll
str2pooltype = { "max" : c_pt_max, "avg" : c_pt_avg, "sum" : c_pt_sum, "all" : c_pt_all }
def corr1ord(W,X,Z,nchannel):
"""
Cross-correlates a set of 1D filters W with a vector of ordinals X.
    X is a 1xN uint8 matrix, where X[i] is in the range {0,...,nchannel-1} or the value 255 ("all ordinals").
W is a (M*nchannel)xP float matrix, where P = num filters, M = filter length.
Return value Z is an NxP matrix (P feature maps, each of length N).
NOTE: currently the final M-1 positions of Z will be left uninitialized,
so it is up to you to pad the vector with M-1 copies of value 255, if that is
the desired padding behaviour.
"""
options = c_corr1ord_options_t()
options.padmode = c_pm_zero_output
options.nchannel = nchannel
ext_dll().api_corr1ord(W._ptr,X._ptr,Z._ptr,byref(options))
def corr1ord_bprop_W(dW,X,dZ,nchannel):
"""
Backpropagates a matrix of feature map deltas dZ into filter deltas dW.
All the quantities are of the same dimensions as described in corr1ord.
"""
options = c_corr1ord_options_t()
options.padmode = c_pm_zero_output
options.nchannel = nchannel
ext_dll().api_corr1ord_bprop_W(dW._ptr,X._ptr,dZ._ptr,byref(options))
def corr1ord_bprop_X(dX,W,dZ,nchannel):
"""
Backpropagates a matrix of feature map deltas dZ into input sequence deltas dX.
All the quantities are of the same dimensions as described in corr1ord.
"""
options = c_corr1ord_options_t()
options.padmode = c_pm_zero_output
options.nchannel = nchannel
ext_dll().api_corr1ord_bprop_X(dX._ptr,W._ptr,dZ._ptr,byref(options))
def poolrgn(unpooledmaps,regions,ptype,want_argmax=False):
noutputs = 2 if ptype == "all" else 1
options = c_poolrgn_options_t()
options.ptype = str2pooltype[ptype]
nregion = regions.shape[0]
nfeaturemap = unpooledmaps.shape[1]
pooledmaps = empty((nregion,nfeaturemap*noutputs),dtype=unpooledmaps.dtype)
pooledmaps_argmax = empty((nregion,nfeaturemap),dtype=uindex) if (ptype in ("max","all") and want_argmax) else None
ext_dll().api_poolrgn(unpooledmaps._ptr,regions._ptr,pooledmaps._ptr,pooledmaps_argmax._ptr if (pooledmaps_argmax is not None) else None,byref(options))
if want_argmax:
return pooledmaps,pooledmaps_argmax
return pooledmaps,None
def poolrgn_bprop(unpooledmaps,regions,pooledmaps,pooledmaps_argmax,ptype):
options = c_poolrgn_options_t()
options.ptype = str2pooltype[ptype]
pooledmaps_argmax_ptr = pooledmaps_argmax._ptr if (pooledmaps_argmax is not None) and (ptype in ("max","all")) else None
ext_dll().api_poolrgn_bprop(unpooledmaps._ptr,regions._ptr,pooledmaps._ptr,pooledmaps_argmax_ptr,byref(options))
def dropoutord_fp_train(X,rate):
# If matchrows=True, then every pair of rows will have the same mask.
# Used for dropout with reverse complement enabled.
Z = empty_like(X)
M = empty(X.shape,dtype=bool)
ext_dll().api_dropoutord_fp_tr(X._ptr, Z._ptr, M._ptr, rate)
return Z,M
def autotune():
ext_dll().api_autotune()
|
DeepBind-master
|
code/libs/kangaroo/kangaroo/_ext/kangaroo_smat.py
|
from setuptools import setup, find_packages
setup(
name = 'timesformer-pytorch',
packages = find_packages(),
version = '0.4.1',
license='MIT',
description = 'TimeSformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/TimeSformer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'video classification',
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
TimeSformer-pytorch-main
|
setup.py
|
from timesformer_pytorch.timesformer_pytorch import TimeSformer
|
TimeSformer-pytorch-main
|
timesformer_pytorch/__init__.py
|
from math import log, pi
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rot_emb(q, k, rot_emb):
sin, cos = rot_emb
rot_dim = sin.shape[-1]
(q, q_pass), (k, k_pass) = map(lambda t: (t[..., :rot_dim], t[..., rot_dim:]), (q, k))
q, k = map(lambda t: t * cos + rotate_every_two(t) * sin, (q, k))
q, k = map(lambda t: torch.cat(t, dim = -1), ((q, q_pass), (k, k_pass)))
return q, k
class AxialRotaryEmbedding(nn.Module):
def __init__(self, dim, max_freq = 10):
super().__init__()
self.dim = dim
scales = torch.logspace(0., log(max_freq / 2) / log(2), self.dim // 4, base = 2)
self.register_buffer('scales', scales)
def forward(self, h, w, device):
scales = rearrange(self.scales, '... -> () ...')
scales = scales.to(device)
h_seq = torch.linspace(-1., 1., steps = h, device = device)
h_seq = h_seq.unsqueeze(-1)
w_seq = torch.linspace(-1., 1., steps = w, device = device)
w_seq = w_seq.unsqueeze(-1)
h_seq = h_seq * scales * pi
w_seq = w_seq * scales * pi
x_sinu = repeat(h_seq, 'i d -> i j d', j = w)
y_sinu = repeat(w_seq, 'j d -> i j d', i = h)
sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)
cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)
sin, cos = map(lambda t: rearrange(t, 'i j d -> (i j) d'), (sin, cos))
sin, cos = map(lambda t: repeat(t, 'n d -> () n (d j)', j = 2), (sin, cos))
return sin, cos
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freqs = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freqs', inv_freqs)
def forward(self, n, device):
seq = torch.arange(n, device = device)
freqs = einsum('i, j -> i j', seq, self.inv_freqs)
freqs = torch.cat((freqs, freqs), dim = -1)
freqs = rearrange(freqs, 'n d -> () n d')
return freqs.sin(), freqs.cos()
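# a minimal usage sketch, assuming a 4x4 feature grid and 64-dim attention heads
# (illustrative values only, not taken from the original repository):
if __name__ == '__main__':
    rot = AxialRotaryEmbedding(dim = 64)                # dim here is the per-head dimension
    sin, cos = rot(4, 4, device = torch.device('cpu'))  # rotary sin/cos for a 4x4 grid -> (1, 16, 64) each
    q = torch.randn(2, 16, 64)                          # (batch * heads, patches, dim_head)
    k = torch.randn(2, 16, 64)
    q, k = apply_rot_emb(q, k, (sin, cos))              # rotary-encoded queries / keys, shapes unchanged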
|
TimeSformer-pytorch-main
|
timesformer_pytorch/rotary.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from timesformer_pytorch.rotary import apply_rot_emb, AxialRotaryEmbedding, RotaryEmbedding
# helpers
def exists(val):
return val is not None
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, *args, **kwargs):
x = self.norm(x)
return self.fn(x, *args, **kwargs)
# time token shift
def shift(t, amt):
    if amt == 0:
return t
return F.pad(t, (0, 0, 0, 0, amt, -amt))
class PreTokenShift(nn.Module):
def __init__(self, frames, fn):
super().__init__()
self.frames = frames
self.fn = fn
def forward(self, x, *args, **kwargs):
f, dim = self.frames, x.shape[-1]
cls_x, x = x[:, :1], x[:, 1:]
x = rearrange(x, 'b (f n) d -> b f n d', f = f)
# shift along time frame before and after
dim_chunk = (dim // 3)
chunks = x.split(dim_chunk, dim = -1)
chunks_to_shift, rest = chunks[:3], chunks[3:]
shifted_chunks = tuple(map(lambda args: shift(*args), zip(chunks_to_shift, (-1, 0, 1))))
x = torch.cat((*shifted_chunks, *rest), dim = -1)
x = rearrange(x, 'b f n d -> b (f n) d')
x = torch.cat((cls_x, x), dim = 1)
return self.fn(x, *args, **kwargs)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
# attention
def attn(q, k, v, mask = None):
sim = einsum('b i d, b j d -> b i j', q, k)
if exists(mask):
max_neg_value = -torch.finfo(sim.dtype).max
sim.masked_fill_(~mask, max_neg_value)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
return out
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, einops_from, einops_to, mask = None, cls_mask = None, rot_emb = None, **einops_dims):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
q = q * self.scale
# splice out classification token at index 1
(cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, :1], t[:, 1:]), (q, k, v))
# let classification token attend to key / values of all patches across time and space
cls_out = attn(cls_q, k, v, mask = cls_mask)
# rearrange across time or space
q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))
# add rotary embeddings, if applicable
if exists(rot_emb):
q_, k_ = apply_rot_emb(q_, k_, rot_emb)
# expand cls token keys and values across time or space and concat
r = q_.shape[0] // cls_k.shape[0]
cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r = r), (cls_k, cls_v))
k_ = torch.cat((cls_k, k_), dim = 1)
v_ = torch.cat((cls_v, v_), dim = 1)
# attention
out = attn(q_, k_, v_, mask = mask)
# merge back time or space
out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
# concat back the cls token
out = torch.cat((cls_out, out), dim = 1)
# merge back the heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
# combine heads out
return self.to_out(out)
# main classes
class TimeSformer(nn.Module):
def __init__(
self,
*,
dim,
num_frames,
num_classes,
image_size = 224,
patch_size = 16,
channels = 3,
depth = 12,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_dropout = 0.,
rotary_emb = True,
shift_tokens = False
):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
num_positions = num_frames * num_patches
patch_dim = channels * patch_size ** 2
self.heads = heads
self.patch_size = patch_size
self.to_patch_embedding = nn.Linear(patch_dim, dim)
self.cls_token = nn.Parameter(torch.randn(1, dim))
self.use_rotary_emb = rotary_emb
if rotary_emb:
self.frame_rot_emb = RotaryEmbedding(dim_head)
self.image_rot_emb = AxialRotaryEmbedding(dim_head)
else:
self.pos_emb = nn.Embedding(num_positions + 1, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
ff = FeedForward(dim, dropout = ff_dropout)
time_attn = Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)
spatial_attn = Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)
if shift_tokens:
time_attn, spatial_attn, ff = map(lambda t: PreTokenShift(num_frames, t), (time_attn, spatial_attn, ff))
time_attn, spatial_attn, ff = map(lambda t: PreNorm(dim, t), (time_attn, spatial_attn, ff))
self.layers.append(nn.ModuleList([time_attn, spatial_attn, ff]))
self.to_out = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, video, mask = None):
b, f, _, h, w, *_, device, p = *video.shape, video.device, self.patch_size
assert h % p == 0 and w % p == 0, f'height {h} and width {w} of video must be divisible by the patch size {p}'
# calculate num patches in height and width dimension, and number of total patches (n)
hp, wp = (h // p), (w // p)
n = hp * wp
# video to patch embeddings
video = rearrange(video, 'b f c (h p1) (w p2) -> b (f h w) (p1 p2 c)', p1 = p, p2 = p)
tokens = self.to_patch_embedding(video)
# add cls token
cls_token = repeat(self.cls_token, 'n d -> b n d', b = b)
x = torch.cat((cls_token, tokens), dim = 1)
# positional embedding
frame_pos_emb = None
image_pos_emb = None
if not self.use_rotary_emb:
x += self.pos_emb(torch.arange(x.shape[1], device = device))
else:
frame_pos_emb = self.frame_rot_emb(f, device = device)
image_pos_emb = self.image_rot_emb(hp, wp, device = device)
# calculate masking for uneven number of frames
frame_mask = None
cls_attn_mask = None
if exists(mask):
mask_with_cls = F.pad(mask, (1, 0), value = True)
frame_mask = repeat(mask_with_cls, 'b f -> (b h n) () f', n = n, h = self.heads)
cls_attn_mask = repeat(mask, 'b f -> (b h) () (f n)', n = n, h = self.heads)
cls_attn_mask = F.pad(cls_attn_mask, (1, 0), value = True)
# time and space attention
for (time_attn, spatial_attn, ff) in self.layers:
x = time_attn(x, 'b (f n) d', '(b n) f d', n = n, mask = frame_mask, cls_mask = cls_attn_mask, rot_emb = frame_pos_emb) + x
x = spatial_attn(x, 'b (f n) d', '(b f) n d', f = f, cls_mask = cls_attn_mask, rot_emb = image_pos_emb) + x
x = ff(x) + x
cls_token = x[:, 0]
return self.to_out(cls_token)
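# a minimal usage sketch, assuming an 8-frame 224x224 clip and 10 classes
# (illustrative hyperparameters, not taken from the original repository):
if __name__ == '__main__':
    model = TimeSformer(
        dim = 512,
        image_size = 224,
        patch_size = 16,
        num_frames = 8,
        num_classes = 10,
        depth = 2,
        heads = 8
    )
    video = torch.randn(2, 8, 3, 224, 224)  # (batch, frames, channels, height, width)
    mask = torch.ones(2, 8).bool()          # marks which frames are valid
    preds = model(video, mask = mask)       # -> (2, 10) class logits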
|
TimeSformer-pytorch-main
|
timesformer_pytorch/timesformer_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'RQ-transformer',
packages = find_packages(exclude=[]),
version = '0.1.9',
license='MIT',
description = 'RQ Transformer - Autoregressive Transformer for Residual Quantized Codes',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/RQ-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention-mechanism',
'autoregressive',
],
install_requires=[
'einops>=0.4',
'einops-exts',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
RQ-Transformer-main
|
setup.py
|
from rq_transformer import HierarchicalCausalTransformer
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LEN = 100
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = HierarchicalCausalTransformer(
num_tokens = 256,
dim = 512,
depth = (4, 3, 3, 3),
max_seq_len = (4, 4, 8, 8)
).cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
        (loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime_inp = inp[:PRIME_LEN]
prime = decode_tokens(prime_inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(prime_inp[None, :])
sample = sample.flatten(1)
output_str = decode_tokens(sample[0][PRIME_LEN:])
print(output_str)
|
RQ-Transformer-main
|
train.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops_exts import rearrange_with_anon_dims
from einops import rearrange, reduce, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def remainder_to_mult(num, mult):
return (mult - num % mult) % mult
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# helper classes
def FeedForward(*, dim, mult = 4, dropout = 0.):
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.dropout = nn.Dropout(dropout)
self.norm = nn.LayerNorm(dim)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x):
h, device = self.heads, x.device
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
i, j = sim.shape[-2:]
mask_value = -torch.finfo(sim.dtype).max
mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(mask, mask_value)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
layers,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = nn.LayerNorm(dim)
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.norm(x)
# main class
class RQTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_spatial_seq_len,
depth_seq_len,
spatial_layers,
depth_layers,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0
):
super().__init__()
self.dim = dim
self.max_spatial_seq_len = max_spatial_seq_len
self.depth_seq_len = depth_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.spatial_start_token = nn.Parameter(torch.randn(dim))
self.spatial_pos_emb = nn.Embedding(max_spatial_seq_len + 1, dim) # account for a boundary case
self.depth_pos_emb = nn.Embedding(depth_seq_len, dim)
self.spatial_transformer = Transformer(
dim = dim,
layers = spatial_layers,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult
)
self.depth_transformer = Transformer(
dim = dim,
layers = depth_layers,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult
)
self.to_logits = nn.Linear(dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = self.depth_seq_len * self.max_spatial_seq_len
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
for _ in range(total_seq_len - seq.shape[-1]):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return rearrange(seq, 'b (s d) -> b s d', d = self.depth_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
spatial_tokens = repeat(self.spatial_start_token, 'd -> b 1 d', b = batch_size)
depth_tokens = self.spatial_transformer(spatial_tokens)
depth_tokens = self.depth_transformer(depth_tokens)
return self.to_logits(depth_tokens)
def forward(self, ids, return_loss = False):
assert ids.ndim in {2, 3}
flattened_dim = ids.ndim == 2
ids_orig_ndim = ids.ndim
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dim:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
seq_len = ids.shape[-1]
padding = remainder_to_mult(seq_len, self.depth_seq_len)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = rearrange(ids, 'b (s d) -> b s d', d = self.depth_seq_len)
else:
seq_len = ids.shape[1] * ids.shape[2]
b, space, depth, device = *ids.shape, ids.device
assert space <= (self.max_spatial_seq_len + 1), 'spatial dimension is greater than the max_spatial_seq_len set'
assert depth == self.depth_seq_len, 'depth dimension must be equal to depth_seq_len'
# get token embeddings
tokens = self.token_emb(ids)
spatial_pos = self.spatial_pos_emb(torch.arange(space, device = device))
depth_pos = self.depth_pos_emb(torch.arange(depth, device = device))
tokens_with_depth_pos = tokens + depth_pos
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
spatial_tokens = reduce(tokens_with_depth_pos, 'b s d f -> b s f', 'sum') + spatial_pos
spatial_tokens = torch.cat((
repeat(self.spatial_start_token, 'f -> b 1 f', b = b),
spatial_tokens
), dim = -2)
spatial_tokens = self.spatial_transformer(spatial_tokens)
spatial_tokens = rearrange(spatial_tokens, 'b s f -> b s 1 f')
# spatial tokens become the start tokens of the depth dimension
tokens_with_depth_pos = F.pad(tokens_with_depth_pos, (0, 0, 0, 0, 0, 1), value = 0.)
depth_tokens = torch.cat((spatial_tokens, tokens_with_depth_pos), dim = -2)
depth_tokens = rearrange(depth_tokens, '... n d -> (...) n d')
depth_tokens = self.depth_transformer(depth_tokens)
depth_tokens = rearrange(depth_tokens, '(b s) d f -> b s d f', b = b)
logits = self.to_logits(depth_tokens)
logits = rearrange(logits, 'b ... f -> b (...) f')
logits = logits[:, :(seq_len + 1)]
if not return_loss:
logits = logits[:, 1:]
if flattened_dim:
return rearrange(logits, 'b ... n -> b (...) n')
return logits
logits = logits[:, :-1]
preds = rearrange(logits, 'b ... c -> b c (...)')
labels = rearrange(ids, 'b s d -> b (s d)')
loss = F.cross_entropy(preds, labels, ignore_index = self.pad_id)
return loss
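# a minimal usage sketch, assuming 16 spatial positions with 4 residual-quantized
# codes each (illustrative hyperparameters, not taken from the original repository):
if __name__ == '__main__':
    model = RQTransformer(
        num_tokens = 256,
        dim = 512,
        max_spatial_seq_len = 16,
        depth_seq_len = 4,
        spatial_layers = 2,
        depth_layers = 2
    )
    ids = torch.randint(0, 256, (1, 16, 4))      # (batch, spatial positions, depth codes)
    loss = model(ids, return_loss = True)        # autoregressive cross-entropy over all codes
    loss.backward()
    sampled = model.generate(temperature = 0.9)  # -> (1, 16, 4) sampled code indices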
|
RQ-Transformer-main
|
rq_transformer/rq_transformer.py
|
from rq_transformer.rq_transformer import RQTransformer
from rq_transformer.hierarchical_causal_transformer import HierarchicalCausalTransformer
|
RQ-Transformer-main
|
rq_transformer/__init__.py
|
import math
import functools
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops_exts import rearrange_with_anon_dims
from einops import rearrange, reduce, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def remainder_to_mult(num, mult):
return (mult - num % mult) % mult
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def reduce_mult(nums):
return functools.reduce(lambda x, y: x * y, nums, 1)
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# positional bias
class Alibi(nn.Module):
def __init__(self, heads, **kwargs):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
def forward(self, i, j, device):
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :j]
bias = torch.arange(j, device = device)
bias = rearrange(bias, 'j -> 1 1 j')
bias = bias * self.slopes
self.register_buffer('bias', bias, persistent = False)
return self.bias
# norm
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
# helper classes
def FeedForward(*, dim, mult = 4, dropout = 0.):
return nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.dropout = nn.Dropout(dropout)
self.norm = RMSNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, attn_bias = None):
h, device = self.heads, x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
q = q * self.scale
sim = einsum('b h i d, b j d -> b h i j', q, k)
if exists(attn_bias):
sim = sim + attn_bias
i, j = sim.shape[-2:]
mask_value = -torch.finfo(sim.dtype).max
mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(mask, mask_value)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
layers,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
rel_pos_bias = True
):
super().__init__()
self.alibi = Alibi(heads = heads) if rel_pos_bias else None
self.layers = nn.ModuleList([])
for _ in range(layers):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout),
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
]))
self.norm = RMSNorm(dim)
def forward(self, x):
n = x.shape[-2]
attn_bias = self.alibi(n, n, device = x.device) if exists(self.alibi) else None
for attn, ff in self.layers:
x = attn(x, attn_bias = attn_bias) + x
x = ff(x) + x
return self.norm(x)
# main class
class HierarchicalCausalTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
max_seq_len,
dim_head = 64,
heads = 8,
attn_dropout = 0.,
ff_mult = 4,
ff_dropout = 0.,
pad_id = 0,
rel_pos_bias = True
):
super().__init__()
# simplified configuration for each stage of the hierarchy
# depth = (2, 2, 4) would translate to depth 2 at first stage, depth 2 second stage, depth 4 third
# max_seq_len = (16, 8, 4) would translate to max sequence length of 16 at first stage, length of 8 at second stage, length of 4 for last
assert isinstance(depth, tuple) and isinstance(max_seq_len, tuple)
assert len(depth) == len(max_seq_len)
self.stages = len(depth)
self.token_emb = nn.Embedding(num_tokens, dim)
self.start_tokens = nn.Parameter(torch.randn(dim))
self.max_seq_len = max_seq_len
self.pos_embs = nn.ModuleList([nn.Embedding(seq_len, dim) for seq_len in max_seq_len])
self.transformers = nn.ModuleList([])
for stage_depth in depth:
self.transformers.append(Transformer(
dim = dim,
layers = stage_depth,
dim_head = dim_head,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
ff_mult = ff_mult,
rel_pos_bias = rel_pos_bias
))
self.to_logits = nn.Linear(dim, num_tokens)
self.pad_id = pad_id
def generate(self, prime = None, filter_thres = 0.9, temperature = 1., default_batch_size = 1):
total_seq_len = reduce_mult(self.max_seq_len)
device = next(self.parameters()).device
if not exists(prime):
prime = torch.empty((default_batch_size, 0), dtype = torch.long, device = device)
seq = prime
for _ in range(total_seq_len - seq.shape[-1]):
logits = self.forward(seq)[:, -1]
logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(logits, dim = -1, temperature = temperature)
seq = torch.cat((seq, rearrange(sampled, 'b -> b 1')), dim = -1)
return rearrange_with_anon_dims(seq, 'b (...d) -> b ...d', d = self.max_seq_len)
def forward_empty(self, batch_size):
# take care of special case
# where you sample from input of 0 (start token only)
tokens = repeat(self.start_tokens, 'd -> b 1 d', b = batch_size)
for transformer in self.transformers:
tokens = transformer(tokens)
return self.to_logits(tokens)
def forward(self, ids, return_loss = False):
assert ids.ndim in {2, self.stages + 1}
flattened_dims = ids.ndim == 2
ids_orig_ndim = ids.ndim
if ids.numel() == 0:
return self.forward_empty(ids.shape[0])
if flattened_dims:
# allow for ids to be given in the shape of (batch, seq)
# in which case it will be auto-padded to the next nearest multiple of depth seq len
seq_len = ids.shape[-1]
multiple_of = reduce_mult(self.max_seq_len[1:])
padding = remainder_to_mult(seq_len, multiple_of)
ids = F.pad(ids, (0, padding), value = self.pad_id)
ids = rearrange_with_anon_dims(ids, 'b (l ...d) -> b l ...d', d = self.max_seq_len[1:])
b, *prec_dims, device = *ids.shape, ids.device
# check some dimensions
assert prec_dims[0] <= self.max_seq_len[0], 'the first dimension of your axial autoregressive transformer must be less than the first tuple element of max_seq_len (like any autoregressive transformer)'
assert tuple(prec_dims[1:]) == tuple(self.max_seq_len[1:]), 'all subsequent dimensions must match exactly'
# get token embeddings
tokens = self.token_emb(ids)
# get tokens for all hierarchical stages, reducing by appropriate dimensions
# and adding the absolute positional embeddings
tokens_at_stages = []
reduced_tokens = tokens
for ind, pos_emb in zip(range(len(prec_dims)), reversed(self.pos_embs)):
is_first = ind == 0
if not is_first:
reduced_tokens = reduce(reduced_tokens, 'b ... r d -> b ... d', 'sum')
positions = pos_emb(torch.arange(reduced_tokens.shape[-2], device = device))
tokens_with_position = reduced_tokens + positions
tokens_at_stages.insert(0, tokens_with_position)
# get start tokens and append to the coarsest stage
start_tokens = repeat(self.start_tokens, 'f -> b 1 f', b = b)
# spatial tokens is tokens with depth pos reduced along depth dimension + spatial positions
for ind, (stage_tokens, transformer) in enumerate(zip(tokens_at_stages, self.transformers)):
is_last = ind == (self.stages - 1)
stage_tokens = torch.cat((
start_tokens,
stage_tokens,
), dim = -2)
*prec_dims, _, _ = stage_tokens.shape
stage_tokens = rearrange(stage_tokens, '... n d -> (...) n d')
attended = transformer(stage_tokens)
attended = rearrange_with_anon_dims(attended, '(...b) n d -> ...b n d', b = prec_dims)
start_tokens = rearrange(attended[..., :-1, :], '... n d -> ... n 1 d')
logits = self.to_logits(attended)
logits = logits[..., 1:, :]
if not return_loss:
if flattened_dims:
logits = rearrange(logits, 'b ... n -> b (...) n')
logits = logits[:, :seq_len]
return logits
preds = rearrange(logits, 'b ... c -> b c (...)')
labels = rearrange(ids, 'b ... -> b (...)')
loss = F.cross_entropy(
preds[..., :-1],
labels[..., 1:],
ignore_index = self.pad_id
)
return loss
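# a minimal usage sketch with a two-stage hierarchy (8 coarse positions x 4 fine
# positions); the hyperparameters are illustrative, not from the original repository:
if __name__ == '__main__':
    model = HierarchicalCausalTransformer(
        num_tokens = 256,
        dim = 512,
        depth = (2, 2),          # transformer depth per stage
        max_seq_len = (8, 4)     # sequence length per stage, total length 8 * 4 = 32
    )
    ids = torch.randint(0, 256, (1, 32))   # flat token ids, reshaped internally to (8, 4)
    loss = model(ids, return_loss = True)
    loss.backward()
    sampled = model.generate()             # -> (1, 8, 4) sampled token ids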
|
RQ-Transformer-main
|
rq_transformer/hierarchical_causal_transformer.py
|
from all_normalization_transformer import TransformerLM
from all_normalization_transformer.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 3e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = TransformerLM(
num_tokens = 256,
dim = 512,
depth = 12,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
only_norm = True,
shared_kv = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:SEQ_LEN]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
|
all-normalization-transformer-master
|
train_enwik8.py
|
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def default(value, default):
return value if value is not None else default
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > 1.0 - thres
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = None, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = default(ignore_index, pad_value)
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('src_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, src_mask=input_mask, **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
out = torch.cat((out, sample[:, None]), dim=-1)
input_mask = F.pad(input_mask, (1, 0), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, *args, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
m = kwargs.pop('input_mask', None)
xi, xo = x[:, :-1], x[:, 1:]
if m is not None:
assert m.shape == x.shape[0:2], 'input mask must be the same shape as the input of the auto-regressive wrapper to automatically handle'
kwargs.update(input_mask = m[:, :-1])
out = self.net(xi, *args, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
|
all-normalization-transformer-master
|
all_normalization_transformer/autoregressive_wrapper.py
|
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helpers
def cum_mean(t):
device = t.device
running_num = torch.arange(t.shape[-1], device=t.device) + 1
return t.cumsum(dim=-1) / running_num
def normalize(t, eps=1e-8):
t -= t.mean(dim=-1, keepdim=True)
s = (t ** 2).mean(dim=-1, keepdim=True)
return t * torch.rsqrt(s + eps)
def causal_normalize(t, eps=1e-8):
t -= cum_mean(t).diagonal(dim1=-2, dim2=-1)[..., None]
s = cum_mean(t ** 2).diagonal(dim1=-2, dim2=-1)[..., None]
return t * torch.rsqrt(s + eps)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class PostNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.fn(x)
return self.norm(x)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.fn(x)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
self.net = nn.Sequential(
            nn.Linear(dim, dim * mult),
            nn.GELU(),
            nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, causal = False, shared_kv = False):
super().__init__()
self.causal = causal
self.heads = heads
self.scale = dim ** -0.5
self.shared_kv = shared_kv
self.num_qkv = 3 if not shared_kv else 2
self.to_qkv = nn.Linear(dim, dim * self.num_qkv, bias = False)
self.to_out = nn.Linear(dim, dim)
self.norm_g = nn.Parameter(torch.ones(1, heads, 1, 1))
self.norm_b = nn.Parameter(torch.zeros(1, heads, 1, 1))
def forward(self, x):
b, n, _, h, device = *x.shape, self.heads, x.device
qkv = self.to_qkv(x)
qkv = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv = self.num_qkv, h = h)
if self.shared_kv:
q, k = qkv
v = k
else:
q, k, v = qkv
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
if self.causal:
mask = torch.ones(n, n, device = device).triu_(1).bool()
dots.masked_fill_(mask, 0.)
normalize_fn = causal_normalize if self.causal else normalize
normed_attn = normalize_fn(dots)
attn = normed_attn * self.norm_g + self.norm_b
if self.causal:
attn.masked_fill_(mask, 0.)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads = 8, causal = False, only_norm = False, shared_kv = False):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PostNorm(dim, Attention(dim, heads, causal = causal, shared_kv = shared_kv))),
Residual(PreNorm(dim, FeedForward(dim))) if not only_norm else nn.Identity(),
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
class TransformerLM(nn.Module):
def __init__(self, *, num_tokens, dim, depth, max_seq_len, heads = 8, causal = False, only_norm = False, shared_kv = False):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.transformer = Transformer(dim, depth, heads, causal = causal, only_norm = only_norm, shared_kv = shared_kv)
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, x, **kwargs):
_, n = x.shape
x = self.token_emb(x)
x += self.pos_emb(torch.arange(n, device=x.device))
x = self.transformer(x)
x = self.to_logits(x)
return x
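# a minimal usage sketch of the normalization-only causal LM; the hyperparameters
# mirror the training script above but are illustrative, not prescriptive:
if __name__ == '__main__':
    model = TransformerLM(
        num_tokens = 256,
        dim = 512,
        depth = 2,
        max_seq_len = 1024,
        heads = 8,
        causal = True,
        only_norm = True,   # drop the feedforward blocks, keeping only the normalized attention
        shared_kv = True    # values reuse the key projection
    )
    x = torch.randint(0, 256, (1, 1024))
    logits = model(x)       # -> (1, 1024, 256)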
|
all-normalization-transformer-master
|
all_normalization_transformer/all_normalization_transformer.py
|
from all_normalization_transformer.all_normalization_transformer import TransformerLM
from all_normalization_transformer.autoregressive_wrapper import AutoregressiveWrapper
|
all-normalization-transformer-master
|
all_normalization_transformer/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'gsa-pytorch',
packages = find_packages(),
version = '0.2.2',
license='MIT',
description = 'Global Self-attention Network (GSA) - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/global-self-attention-network',
keywords = [
'artificial intelligence',
'attention mechanism',
'image recognition'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
global-self-attention-network-main
|
setup.py
|
from gsa_pytorch.gsa_pytorch import GSA
|
global-self-attention-network-main
|
gsa_pytorch/__init__.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from inspect import isfunction
# helpers
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def exists(val):
return val is not None
def calc_reindexing_tensor(l, L, device):
"""
Appendix B - (5)
"""
x = torch.arange(l, device = device)[:, None, None]
i = torch.arange(l, device = device)[None, :, None]
r = torch.arange(-(L - 1), L, device = device)[None, None, :]
mask = ((i - x) == r) & ((i - x).abs() <= L)
return mask.float()
# classes
class GSA(nn.Module):
def __init__(self, dim, *, rel_pos_length = None, dim_out = None, heads = 8, dim_key = 64, norm_queries = False, batch_norm = True):
super().__init__()
dim_out = default(dim_out, dim)
dim_hidden = dim_key * heads
self.heads = heads
self.dim_out = dim_out
self.rel_pos_length = rel_pos_length
self.norm_queries = norm_queries
self.to_qkv = nn.Conv2d(dim, dim_hidden * 3, 1, bias = False)
self.to_out = nn.Conv2d(dim_hidden, dim_out, 1)
self.rel_pos_length = rel_pos_length
if exists(rel_pos_length):
num_rel_shifts = 2 * rel_pos_length - 1
self.norm = nn.BatchNorm2d(dim_key) if batch_norm else None
self.rel_rows = nn.Parameter(torch.randn(num_rel_shifts, dim_key))
self.rel_columns = nn.Parameter(torch.randn(num_rel_shifts, dim_key))
def forward(self, img):
b, c, x, y, h, c_out, L, device = *img.shape, self.heads, self.dim_out, self.rel_pos_length, img.device
qkv = self.to_qkv(img).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) c (x y)', h = h), qkv)
k = k.softmax(dim = -1)
context = einsum('ndm,nem->nde', k, v)
content_q = q if not self.norm_queries else q.softmax(dim=-2)
content_out = einsum('nde,ndm->nem', context, content_q)
content_out = rearrange(content_out, 'n d (x y) -> n d x y', x = x, y = y)
# this largely follows the mathematical implementation details
# spelled out in appendix B (6) - (8)
if exists(self.rel_pos_length):
q, v = map(lambda t: rearrange(t, 'n c (x y) -> n c x y', x = x, y = y), (q, v))
Ix = calc_reindexing_tensor(x, L, device)
Px = einsum('xir,rd->xid', Ix, self.rel_rows)
Sx = einsum('ndxy,xid->nixy', q, Px)
Yh = einsum('nixy,neiy->nexy', Sx, v)
if exists(self.norm):
Yh = self.norm(Yh)
Iy = calc_reindexing_tensor(y, L, device)
Py = einsum('yir,rd->yid', Iy, self.rel_columns)
Sy = einsum('ndxy,yid->nixy', q, Py)
rel_pos_out = einsum('nixy,nexi->nexy', Sy, Yh)
content_out = content_out + rel_pos_out.contiguous()
content_out = rearrange(content_out, '(b h) c x y -> b (h c) x y', h = h)
return self.to_out(content_out)
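# a minimal usage sketch on a 32x32 feature map; the channel sizes and
# rel_pos_length below are assumed example values, not from the original repository:
if __name__ == '__main__':
    gsa = GSA(
        dim = 64,             # input channels
        dim_out = 64,
        dim_key = 32,
        heads = 8,
        rel_pos_length = 32   # max relative shift considered along each axis
    )
    fmap = torch.randn(1, 64, 32, 32)
    out = gsa(fmap)           # -> (1, 64, 32, 32)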
|
global-self-attention-network-main
|
gsa_pytorch/gsa_pytorch.py
|
from setuptools import setup, find_packages
setup(
name="protein-bert-pytorch",
packages=find_packages(),
version="0.1.0",
license="MIT",
description="ProteinBERT - Pytorch",
author="Phil Wang",
author_email="lucidrains@gmail.com",
url="https://github.com/lucidrains/protein-bert-pytorch",
keywords=[
"artificial intelligence",
"deep learning",
"attention mechanism",
"protein sequences",
"unsupervised learning"
],
install_requires=[
"einops>=0.3",
"torch>=1.6",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
|
protein-bert-pytorch-main
|
setup.py
|
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops.layers.torch import Rearrange, Reduce
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def max_neg_value(t):
return -torch.finfo(t.dtype).max
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class GlobalLinearSelfAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head,
heads
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, feats, mask = None):
h = self.heads
q, k, v = self.to_qkv(feats).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
if exists(mask):
mask = rearrange(mask, 'b n -> b () n ()')
k = k.masked_fill(~mask, -torch.finfo(k.dtype).max)
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
if exists(mask):
v = v.masked_fill(~mask, 0.)
context = einsum('b h n d, b h n e -> b h d e', k, v)
out = einsum('b h d e, b h n d -> b h n e', context, q)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class CrossAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_keys,
dim_out,
heads,
dim_head = 64,
qk_activation = nn.Tanh()
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.qk_activation = qk_activation
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_keys, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim_out)
self.null_key = nn.Parameter(torch.randn(dim_head))
self.null_value = nn.Parameter(torch.randn(dim_head))
def forward(self, x, context, mask = None, context_mask = None):
b, h, device = x.shape[0], self.heads, x.device
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
null_k, null_v = map(lambda t: repeat(t, 'd -> b h () d', b = b, h = h), (self.null_key, self.null_value))
k = torch.cat((null_k, k), dim = -2)
v = torch.cat((null_v, v), dim = -2)
q, k = map(lambda t: self.qk_activation(t), (q, k))
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
if exists(mask) or exists(context_mask):
i, j = sim.shape[-2:]
if not exists(mask):
mask = torch.ones(b, i, dtype = torch.bool, device = device)
if exists(context_mask):
context_mask = F.pad(context_mask, (1, 0), value = True)
else:
context_mask = torch.ones(b, j, dtype = torch.bool, device = device)
mask = rearrange(mask, 'b i -> b () i ()') * rearrange(context_mask, 'b j -> b () () j')
sim.masked_fill_(~mask, max_neg_value(sim))
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Layer(nn.Module):
def __init__(
self,
*,
dim,
dim_global,
narrow_conv_kernel = 9,
wide_conv_kernel = 9,
wide_conv_dilation = 5,
attn_heads = 8,
attn_dim_head = 64,
attn_qk_activation = nn.Tanh(),
local_to_global_attn = False,
local_self_attn = False,
glu_conv = False
):
super().__init__()
self.seq_self_attn = GlobalLinearSelfAttention(dim = dim, dim_head = attn_dim_head, heads = attn_heads) if local_self_attn else None
conv_mult = 2 if glu_conv else 1
self.narrow_conv = nn.Sequential(
nn.Conv1d(dim, dim * conv_mult, narrow_conv_kernel, padding = narrow_conv_kernel // 2),
nn.GELU() if not glu_conv else nn.GLU(dim = 1)
)
wide_conv_padding = (wide_conv_kernel + (wide_conv_kernel - 1) * (wide_conv_dilation - 1)) // 2
self.wide_conv = nn.Sequential(
nn.Conv1d(dim, dim * conv_mult, wide_conv_kernel, dilation = wide_conv_dilation, padding = wide_conv_padding),
nn.GELU() if not glu_conv else nn.GLU(dim = 1)
)
self.local_to_global_attn = local_to_global_attn
if local_to_global_attn:
self.extract_global_info = CrossAttention(
dim = dim,
dim_keys = dim_global,
dim_out = dim,
heads = attn_heads,
dim_head = attn_dim_head
)
else:
self.extract_global_info = nn.Sequential(
Reduce('b n d -> b d', 'mean'),
nn.Linear(dim_global, dim),
nn.GELU(),
Rearrange('b d -> b () d')
)
self.local_norm = nn.LayerNorm(dim)
self.local_feedforward = nn.Sequential(
Residual(nn.Sequential(
nn.Linear(dim, dim),
nn.GELU(),
)),
nn.LayerNorm(dim)
)
self.global_attend_local = CrossAttention(dim = dim_global, dim_out = dim_global, dim_keys = dim, heads = attn_heads, dim_head = attn_dim_head, qk_activation = attn_qk_activation)
self.global_dense = nn.Sequential(
nn.Linear(dim_global, dim_global),
nn.GELU()
)
self.global_norm = nn.LayerNorm(dim_global)
self.global_feedforward = nn.Sequential(
Residual(nn.Sequential(
nn.Linear(dim_global, dim_global),
nn.GELU()
)),
nn.LayerNorm(dim_global),
)
def forward(self, tokens, annotation, mask = None):
if self.local_to_global_attn:
global_info = self.extract_global_info(tokens, annotation, mask = mask)
else:
global_info = self.extract_global_info(annotation)
# process local (protein sequence)
global_linear_attn = self.seq_self_attn(tokens) if exists(self.seq_self_attn) else 0
conv_input = rearrange(tokens, 'b n d -> b d n')
if exists(mask):
conv_input_mask = rearrange(mask, 'b n -> b () n')
conv_input = conv_input.masked_fill(~conv_input_mask, 0.)
narrow_out = self.narrow_conv(conv_input)
narrow_out = rearrange(narrow_out, 'b d n -> b n d')
wide_out = self.wide_conv(conv_input)
wide_out = rearrange(wide_out, 'b d n -> b n d')
tokens = tokens + narrow_out + wide_out + global_info + global_linear_attn
tokens = self.local_norm(tokens)
tokens = self.local_feedforward(tokens)
# process global (annotations)
annotation = self.global_attend_local(annotation, tokens, context_mask = mask)
annotation = self.global_dense(annotation)
annotation = self.global_norm(annotation)
annotation = self.global_feedforward(annotation)
return tokens, annotation
# main model
class ProteinBERT(nn.Module):
def __init__(
self,
*,
num_tokens = 26,
num_annotation = 8943,
dim = 512,
dim_global = 256,
depth = 6,
narrow_conv_kernel = 9,
wide_conv_kernel = 9,
wide_conv_dilation = 5,
attn_heads = 8,
attn_dim_head = 64,
attn_qk_activation = nn.Tanh(),
local_to_global_attn = False,
local_self_attn = False,
num_global_tokens = 1,
glu_conv = False
):
super().__init__()
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_tokens, dim)
self.num_global_tokens = num_global_tokens
self.to_global_emb = nn.Linear(num_annotation, num_global_tokens * dim_global)
        self.layers = nn.ModuleList([Layer(dim = dim, dim_global = dim_global, narrow_conv_kernel = narrow_conv_kernel, wide_conv_dilation = wide_conv_dilation, wide_conv_kernel = wide_conv_kernel, attn_qk_activation = attn_qk_activation, local_to_global_attn = local_to_global_attn, local_self_attn = local_self_attn, glu_conv = glu_conv) for _ in range(depth)])
self.to_token_logits = nn.Linear(dim, num_tokens)
self.to_annotation_logits = nn.Sequential(
Reduce('b n d -> b d', 'mean'),
nn.Linear(dim_global, num_annotation)
)
def forward(self, seq, annotation, mask = None):
tokens = self.token_emb(seq)
annotation = self.to_global_emb(annotation)
annotation = rearrange(annotation, 'b (n d) -> b n d', n = self.num_global_tokens)
for layer in self.layers:
tokens, annotation = layer(tokens, annotation, mask = mask)
tokens = self.to_token_logits(tokens)
annotation = self.to_annotation_logits(annotation)
return tokens, annotation
# pretraining wrapper
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
max_masked = math.ceil(prob * seq_len)
num_tokens = mask.sum(dim=-1, keepdim=True)
mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
mask_excess = mask_excess[:, :max_masked]
rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
_, sampled_indices = rand.topk(max_masked, dim=-1)
sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
new_mask = torch.zeros((batch, seq_len + 1), device=device)
new_mask.scatter_(-1, sampled_indices, 1)
return new_mask[:, 1:].bool()
class PretrainingWrapper(nn.Module):
def __init__(
self,
model,
random_replace_token_prob = 0.05,
remove_annotation_prob = 0.25,
add_annotation_prob = 0.01,
remove_all_annotations_prob = 0.5,
seq_loss_weight = 1.,
annotation_loss_weight = 1.,
exclude_token_ids = (0, 1, 2) # for excluding padding, start, and end tokens from being masked
):
super().__init__()
assert isinstance(model, ProteinBERT), 'model must be an instance of ProteinBERT'
self.model = model
self.random_replace_token_prob = random_replace_token_prob
self.remove_annotation_prob = remove_annotation_prob
self.add_annotation_prob = add_annotation_prob
self.remove_all_annotations_prob = remove_all_annotations_prob
self.seq_loss_weight = seq_loss_weight
self.annotation_loss_weight = annotation_loss_weight
self.exclude_token_ids = exclude_token_ids
def forward(self, seq, annotation, mask = None):
batch_size, device = seq.shape[0], seq.device
seq_labels = seq
annotation_labels = annotation
if not exists(mask):
mask = torch.ones_like(seq).bool()
# prepare masks for noising sequence
excluded_tokens_mask = mask
for token_id in self.exclude_token_ids:
excluded_tokens_mask = excluded_tokens_mask & (seq != token_id)
random_replace_token_prob_mask = get_mask_subset_with_prob(excluded_tokens_mask, self.random_replace_token_prob)
# prepare masks for noising annotation
batch_mask = torch.ones(batch_size, device = device, dtype = torch.bool)
batch_mask = rearrange(batch_mask, 'b -> b ()')
remove_annotation_from_batch_mask = get_mask_subset_with_prob(batch_mask, self.remove_all_annotations_prob)
annotation_mask = annotation > 0
remove_annotation_prob_mask = get_mask_subset_with_prob(annotation_mask, self.remove_annotation_prob)
add_annotation_prob_mask = get_mask_subset_with_prob(~annotation_mask, self.add_annotation_prob)
remove_annotation_mask = remove_annotation_from_batch_mask & remove_annotation_prob_mask
# generate random tokens
random_tokens = torch.randint(0, self.model.num_tokens, seq.shape, device=seq.device)
for token_id in self.exclude_token_ids:
random_replace_token_prob_mask = random_replace_token_prob_mask & (random_tokens != token_id) # make sure you never substitute a token with an excluded token type (pad, start, end)
# noise sequence
noised_seq = torch.where(random_replace_token_prob_mask, random_tokens, seq)
# noise annotation
noised_annotation = annotation + add_annotation_prob_mask.type(annotation.dtype)
noised_annotation = noised_annotation * remove_annotation_mask.type(annotation.dtype)
# denoise with model
seq_logits, annotation_logits = self.model(noised_seq, noised_annotation, mask = mask)
# calculate loss
seq_logits = seq_logits[mask]
seq_labels = seq_labels[mask]
seq_loss = F.cross_entropy(seq_logits, seq_labels, reduction = 'sum')
annotation_loss = F.binary_cross_entropy_with_logits(annotation_logits, annotation_labels, reduction = 'sum')
return seq_loss * self.seq_loss_weight + annotation_loss * self.annotation_loss_weight
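# usage sketch: a minimal pretraining step, assuming illustrative model sizes
# (the dimensions and probabilities below are placeholders, not prescribed values)
if __name__ == '__main__':
    model = ProteinBERT(
        num_tokens = 26,
        num_annotation = 8943,
        dim = 512,
        dim_global = 256,
        depth = 6,
        narrow_conv_kernel = 9,
        wide_conv_kernel = 9,
        wide_conv_dilation = 5
    )
    learner = PretrainingWrapper(model, random_replace_token_prob = 0.05)
    seq = torch.randint(0, 21, (2, 512))                  # integer amino acid tokens
    annotation = torch.randint(0, 2, (2, 8943)).float()   # binary annotation vector
    loss = learner(seq, annotation)
    loss.backward()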
|
protein-bert-pytorch-main
|
protein_bert_pytorch/protein_bert_pytorch.py
|
from protein_bert_pytorch.protein_bert_pytorch import ProteinBERT, PretrainingWrapper
|
protein-bert-pytorch-main
|
protein_bert_pytorch/__init__.py
|
from setuptools import setup, find_packages
exec(open('audiolm_pytorch/version.py').read())
setup(
name = 'audiolm-pytorch',
packages = find_packages(exclude=[]),
version = __version__,
license='MIT',
description = 'AudioLM - Language Modeling Approach to Audio Generation from Google Research - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/audiolm-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'audio generation'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6.1',
'ema-pytorch>=0.2.2',
'encodec',
'fairseq',
'joblib',
'lion-pytorch',
'local-attention>=1.8.4',
'scikit-learn',
'sentencepiece',
'torch>=1.12',
'torchaudio',
'transformers',
'tqdm',
'vector-quantize-pytorch>=1.7.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
audiolm-pytorch-main
|
setup.py
|
__version__ = '1.4.1'
|
audiolm-pytorch-main
|
audiolm_pytorch/version.py
|
import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config
from beartype import beartype
from beartype.typing import Union, List
# fewer warning messages, since only the encoder is used
transformers.logging.set_verbosity_error()
# helper functions
def exists(val):
return val is not None
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config = config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
raise ValueError(f'unknown t5 name {name}')
return config.d_model
# encoding text
@beartype
def t5_encode_text(
texts: Union[str, List[str]],
name = DEFAULT_T5_NAME,
output_device = None
):
if isinstance(texts, str):
texts = [texts]
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = 'pt',
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.inference_mode():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask[..., None].bool()
if not exists(output_device):
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
encoded_text = encoded_text.to(output_device)
attn_mask = attn_mask.to(output_device)
encoded_text = encoded_text.masked_fill(~attn_mask, 0.)
return encoded_text
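# usage sketch: encoding a small batch of captions with the default checkpoint
# (running this downloads the pretrained T5 weights from the HuggingFace hub)
if __name__ == '__main__':
    embeds = t5_encode_text(['a dog barking in the distance', 'rain on a tin roof'])
    print(embeds.shape)                       # (2, seq_len, d_model), padded positions zeroed out
    print(get_encoded_dim(DEFAULT_T5_NAME))   # hidden size used when projecting the conditioning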
|
audiolm-pytorch-main
|
audiolm_pytorch/t5.py
|
from pathlib import Path
import torch
from torch import nn, einsum
from torchaudio.functional import resample
from einops import rearrange, repeat, pack, unpack
from audiolm_pytorch.utils import curtail_to_multiple
# suppress a few warnings
def noop(*args, **kwargs):
pass
import warnings
import logging
logging.root.setLevel(logging.ERROR)
warnings.warn = noop
# import fairseq and joblib for hubert
import joblib
import fairseq
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class HubertWithKmeans(nn.Module):
"""
checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
or you can train your own
"""
def __init__(
self,
checkpoint_path,
kmeans_path,
target_sample_hz = 16000,
seq_len_multiple_of = None,
output_layer = 9
):
super().__init__()
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = seq_len_multiple_of
self.output_layer = output_layer
model_path = Path(checkpoint_path)
kmeans_path = Path(kmeans_path)
assert model_path.exists(), f'path {checkpoint_path} does not exist'
assert kmeans_path.exists(), f'path {kmeans_path} does not exist'
checkpoint = torch.load(checkpoint_path)
load_model_input = {checkpoint_path: checkpoint}
model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
self.model = model[0]
self.model.eval()
kmeans = joblib.load(kmeans_path)
self.kmeans = kmeans
self.register_buffer(
'cluster_centers',
torch.from_numpy(kmeans.cluster_centers_)
)
@property
def groups(self):
return 1
@property
def codebook_size(self):
return self.kmeans.n_clusters
@property
def downsample_factor(self):
# todo: double check
return 320
@torch.inference_mode()
def forward(
self,
wav_input,
flatten = True,
input_sample_hz = None
):
batch, device = wav_input.shape[0], wav_input.device
if exists(input_sample_hz):
wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
if exists(self.seq_len_multiple_of):
wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
embed = self.model(
wav_input,
features_only = True,
mask = False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
output_layer = self.output_layer
)['x']
batched_cluster_centers = repeat(self.cluster_centers, 'c d -> b c d', b = embed.shape[0])
dists = -torch.cdist(embed, batched_cluster_centers, p = 2)
clusters = dists.argmax(dim = -1)
if flatten:
return clusters
return rearrange(clusters, 'b ... -> b (...)')
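# usage sketch: the paths below are placeholders for the HuBERT checkpoint and
# kmeans file linked in the class docstring above
if __name__ == '__main__':
    wav2vec = HubertWithKmeans(
        checkpoint_path = './hubert_base_ls960.pt',          # placeholder path
        kmeans_path = './hubert_base_ls960_L9_km500.bin'     # placeholder path
    )
    wav = torch.randn(1, 16000)     # roughly one second at the 16 kHz target rate
    semantic_ids = wav2vec(wav)     # per-frame kmeans cluster ids, shape (1, n)
    print(semantic_ids.shape, wav2vec.codebook_size)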
|
audiolm-pytorch-main
|
audiolm_pytorch/hubert_kmeans.py
|
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from audiolm_pytorch.audiolm_pytorch import AudioLM
from audiolm_pytorch.soundstream import SoundStream, AudioLMSoundStream, MusicLMSoundStream
from audiolm_pytorch.encodec import EncodecWrapper
from audiolm_pytorch.audiolm_pytorch import SemanticTransformer, CoarseTransformer, FineTransformer
from audiolm_pytorch.audiolm_pytorch import FineTransformerWrapper, CoarseTransformerWrapper, SemanticTransformerWrapper
from audiolm_pytorch.vq_wav2vec import FairseqVQWav2Vec
from audiolm_pytorch.hubert_kmeans import HubertWithKmeans
from audiolm_pytorch.trainer import SoundStreamTrainer, SemanticTransformerTrainer, FineTransformerTrainer, CoarseTransformerTrainer
from audiolm_pytorch.audiolm_pytorch import get_embeds
|
audiolm-pytorch-main
|
audiolm_pytorch/__init__.py
|
import functools
from itertools import cycle
from pathlib import Path
from functools import partial, wraps
from itertools import zip_longest
from typing import Optional
import torch
from torch import nn, einsum
from torch.autograd import grad as torch_grad
import torch.nn.functional as F
from torch.linalg import vector_norm
import torchaudio.transforms as T
from torchaudio.functional import resample
from einops import rearrange, reduce, pack, unpack
from vector_quantize_pytorch import GroupedResidualVQ
from local_attention import LocalMHA
from local_attention.transformer import FeedForward, DynamicPositionBias
from audiolm_pytorch.utils import curtail_to_multiple
from audiolm_pytorch.version import __version__
from packaging import version
parsed_version = version.parse(__version__)
import pickle
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(t, l = 1):
return ((t,) * l) if not isinstance(t, tuple) else t
def filter_by_keys(fn, d):
return {k: v for k, v in d.items() if fn(k)}
def map_keys(fn, d):
return {fn(k): v for k, v in d.items()}
# gan losses
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
def gradient_penalty(wave, output, weight = 10):
batch_size, device = wave.shape[0], wave.device
gradients = torch_grad(
outputs = output,
inputs = wave,
grad_outputs = torch.ones_like(output),
create_graph = True,
retain_graph = True,
only_inputs = True
)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((vector_norm(gradients, dim = 1) - 1) ** 2).mean()
# better sequential
def Sequential(*mods):
return nn.Sequential(*filter(exists, mods))
# discriminators
class MultiScaleDiscriminator(nn.Module):
def __init__(
self,
channels = 16,
layers = 4,
groups = (4, 16, 64, 256),
chan_max = 1024,
input_channels = 1
):
super().__init__()
self.init_conv = nn.Conv1d(input_channels, channels, 15, padding = 7)
self.conv_layers = nn.ModuleList([])
curr_channels = channels
for _, group in zip(range(layers), groups):
chan_out = min(curr_channels * 4, chan_max)
self.conv_layers.append(nn.Sequential(
nn.Conv1d(curr_channels, chan_out, 41, stride = 4, padding = 20, groups = group),
leaky_relu()
))
curr_channels = chan_out
self.final_conv = nn.Sequential(
nn.Conv1d(curr_channels, curr_channels, 5, padding = 2),
leaky_relu(),
nn.Conv1d(curr_channels, 1, 3, padding = 1),
)
def forward(
self,
x,
return_intermediates = False
):
x = self.init_conv(x)
intermediates = []
for layer in self.conv_layers:
x = layer(x)
intermediates.append(x)
out = self.final_conv(x)
if not return_intermediates:
return out
return out, intermediates
# autoregressive squeeze excitation
# https://arxiv.org/abs/1709.01507
class SqueezeExcite(nn.Module):
def __init__(self, dim, reduction_factor = 4, dim_minimum = 8):
super().__init__()
dim_inner = max(dim_minimum, dim // reduction_factor)
self.net = nn.Sequential(
nn.Conv1d(dim, dim_inner, 1),
nn.SiLU(),
nn.Conv1d(dim_inner, dim, 1),
nn.Sigmoid()
)
def forward(self, x):
seq, device = x.shape[-2], x.device
# cumulative mean - since it is autoregressive
cum_sum = x.cumsum(dim = -2)
denom = torch.arange(1, seq + 1, device = device).float()
cum_mean = cum_sum / rearrange(denom, 'n -> n 1')
# glu gate
gate = self.net(cum_mean)
return x * gate
# complex stft discriminator
class ModReLU(nn.Module):
"""
https://arxiv.org/abs/1705.09792
https://github.com/pytorch/pytorch/issues/47052#issuecomment-718948801
"""
def __init__(self):
super().__init__()
self.b = nn.Parameter(torch.tensor(0.))
def forward(self, x):
return F.relu(torch.abs(x) + self.b) * torch.exp(1.j * torch.angle(x))
class ComplexConv2d(nn.Module):
def __init__(
self,
dim,
dim_out,
kernel_size,
stride = 1,
padding = 0
):
super().__init__()
conv = nn.Conv2d(dim, dim_out, kernel_size, dtype = torch.complex64)
self.weight = nn.Parameter(torch.view_as_real(conv.weight))
self.bias = nn.Parameter(torch.view_as_real(conv.bias))
self.stride = stride
self.padding = padding
def forward(self, x):
weight, bias = map(torch.view_as_complex, (self.weight, self.bias))
x = x.to(weight.dtype)
return F.conv2d(x, weight, bias, stride = self.stride, padding = self.padding)
def ComplexSTFTResidualUnit(chan_in, chan_out, strides):
kernel_sizes = tuple(map(lambda t: t + 2, strides))
paddings = tuple(map(lambda t: t // 2, kernel_sizes))
return nn.Sequential(
Residual(Sequential(
ComplexConv2d(chan_in, chan_in, 3, padding = 1),
ModReLU(),
ComplexConv2d(chan_in, chan_in, 3, padding = 1)
)),
ComplexConv2d(chan_in, chan_out, kernel_sizes, stride = strides, padding = paddings)
)
class ComplexSTFTDiscriminator(nn.Module):
def __init__(
self,
*,
channels = 32,
strides = ((1, 2), (2, 2), (1, 2), (2, 2), (1, 2), (2, 2)),
chan_mults = (1, 2, 4, 4, 8, 8),
input_channels = 1,
n_fft = 1024,
hop_length = 256,
win_length = 1024,
stft_normalized = False,
logits_abs = True
):
super().__init__()
self.init_conv = ComplexConv2d(input_channels, channels, 7, padding = 3)
layer_channels = tuple(map(lambda mult: mult * channels, chan_mults))
layer_channels = (channels, *layer_channels)
layer_channels_pairs = tuple(zip(layer_channels[:-1], layer_channels[1:]))
curr_channels = channels
self.layers = nn.ModuleList([])
for layer_stride, (chan_in, chan_out) in zip(strides, layer_channels_pairs):
self.layers.append(ComplexSTFTResidualUnit(chan_in, chan_out, layer_stride))
self.final_conv = ComplexConv2d(layer_channels[-1], 1, (16, 1)) # todo: remove hardcoded 16
# stft settings
self.stft_normalized = stft_normalized
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
# how to output the logits into real space
self.logits_abs = logits_abs
def forward(self, x, return_intermediates = False):
x = rearrange(x, 'b 1 n -> b n')
'''
reference: the relevant content of the paper (https://arxiv.org/pdf/2107.03312.pdf) is as follows:
The STFT-based discriminator is illustrated in Figure 4
and operates on a single scale, computing the STFT with a
window length of W = 1024 samples and a hop length of
H = 256 samples
'''
x = torch.stft(
x,
self.n_fft,
hop_length = self.hop_length,
win_length = self.win_length,
normalized = self.stft_normalized,
return_complex = True
)
x = rearrange(x, 'b ... -> b 1 ...')
intermediates = []
x = self.init_conv(x)
intermediates.append(x)
for layer in self.layers:
x = layer(x)
intermediates.append(x)
complex_logits = self.final_conv(x)
if self.logits_abs:
complex_logits = complex_logits.abs()
else:
complex_logits = torch.view_as_real(complex_logits)
if not return_intermediates:
return complex_logits
return complex_logits, intermediates
# sound stream
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class CausalConv1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, pad_mode = 'reflect', **kwargs):
super().__init__()
dilation = kwargs.get('dilation', 1)
stride = kwargs.get('stride', 1)
self.pad_mode = pad_mode
self.causal_padding = dilation * (kernel_size - 1) + (1 - stride)
self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, **kwargs)
def forward(self, x):
x = F.pad(x, (self.causal_padding, 0), mode = self.pad_mode)
return self.conv(x)
class CausalConvTranspose1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, stride, **kwargs):
super().__init__()
self.upsample_factor = stride
self.padding = kernel_size - 1
self.conv = nn.ConvTranspose1d(chan_in, chan_out, kernel_size, stride, **kwargs)
def forward(self, x):
n = x.shape[-1]
out = self.conv(x)
out = out[..., :(n * self.upsample_factor)]
return out
def ResidualUnit(chan_in, chan_out, dilation, kernel_size = 7, squeeze_excite = False, pad_mode = 'reflect'):
return Residual(Sequential(
CausalConv1d(chan_in, chan_out, kernel_size, dilation = dilation, pad_mode = pad_mode),
nn.ELU(),
CausalConv1d(chan_out, chan_out, 1, pad_mode = pad_mode),
nn.ELU(),
SqueezeExcite(chan_out) if squeeze_excite else None
))
def EncoderBlock(chan_in, chan_out, stride, cycle_dilations = (1, 3, 9), squeeze_excite = False, pad_mode = 'reflect'):
it = cycle(cycle_dilations)
residual_unit = partial(ResidualUnit, squeeze_excite = squeeze_excite, pad_mode = pad_mode)
return nn.Sequential(
residual_unit(chan_in, chan_in, next(it)),
residual_unit(chan_in, chan_in, next(it)),
residual_unit(chan_in, chan_in, next(it)),
CausalConv1d(chan_in, chan_out, 2 * stride, stride = stride)
)
def DecoderBlock(chan_in, chan_out, stride, cycle_dilations = (1, 3, 9), squeeze_excite = False, pad_mode = 'reflect'):
even_stride = (stride % 2 == 0)
padding = (stride + (0 if even_stride else 1)) // 2
output_padding = 0 if even_stride else 1
residual_unit = partial(ResidualUnit, squeeze_excite = squeeze_excite, pad_mode = pad_mode)
it = cycle(cycle_dilations)
return nn.Sequential(
CausalConvTranspose1d(chan_in, chan_out, 2 * stride, stride = stride),
residual_unit(chan_out, chan_out, next(it)),
residual_unit(chan_out, chan_out, next(it)),
residual_unit(chan_out, chan_out, next(it)),
)
class LocalTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
window_size,
dynamic_pos_bias = False,
**kwargs
):
super().__init__()
self.window_size = window_size
self.layers = nn.ModuleList([])
self.pos_bias = None
if dynamic_pos_bias:
self.pos_bias = DynamicPositionBias(dim = dim // 2, heads = heads)
for _ in range(depth):
self.layers.append(nn.ModuleList([
LocalMHA(dim = dim, heads = heads, qk_rmsnorm = True, window_size = window_size, use_rotary_pos_emb = not dynamic_pos_bias, use_xpos = True, **kwargs),
FeedForward(dim = dim)
]))
def forward(self, x):
w = self.window_size
attn_bias = self.pos_bias(w, w * 2) if exists(self.pos_bias) else None
for attn, ff in self.layers:
x = attn(x, attn_bias = attn_bias) + x
x = ff(x) + x
return x
class FiLM(nn.Module):
def __init__(self, dim, dim_cond):
super().__init__()
self.to_cond = nn.Linear(dim_cond, dim * 2)
def forward(self, x, cond):
gamma, beta = self.to_cond(cond).chunk(2, dim = -1)
return x * gamma + beta
class SoundStream(nn.Module):
def __init__(
self,
*,
channels = 32,
strides = (2, 4, 5, 8),
channel_mults = (2, 4, 8, 16),
codebook_dim = 512,
codebook_size = 1024,
rq_num_quantizers = 8,
rq_commitment_weight = 1.,
rq_ema_decay = 0.95,
rq_quantize_dropout_multiple_of = 1,
rq_groups = 1,
rq_stochastic_sample_codes = False,
rq_kwargs: dict = {},
input_channels = 1,
discr_multi_scales = (1, 0.5, 0.25),
stft_normalized = False,
enc_cycle_dilations = (1, 3, 9),
dec_cycle_dilations = (1, 3, 9),
multi_spectral_window_powers_of_two = tuple(range(6, 12)),
multi_spectral_n_ffts = 512,
multi_spectral_n_mels = 64,
recon_loss_weight = 1.,
multi_spectral_recon_loss_weight = 1e-5,
adversarial_loss_weight = 1.,
feature_loss_weight = 100,
quantize_dropout_cutoff_index = 1,
target_sample_hz = 16000,
use_local_attn = True,
attn_window_size = 128,
attn_dim_head = 64,
attn_heads = 8,
attn_depth = 1,
attn_xpos_scale_base = None,
attn_dynamic_pos_bias = False,
squeeze_excite = False,
complex_stft_discr_logits_abs = True,
pad_mode = 'reflect',
stft_discriminator: Optional[nn.Module] = None # can pass in own stft discriminator
):
super().__init__()
# for autosaving the config
_locals = locals()
_locals.pop('self', None)
_locals.pop('__class__', None)
self._configs = pickle.dumps(_locals)
# rest of the class
self.target_sample_hz = target_sample_hz # for resampling on the fly
self.single_channel = input_channels == 1
self.strides = strides
layer_channels = tuple(map(lambda t: t * channels, channel_mults))
layer_channels = (channels, *layer_channels)
chan_in_out_pairs = tuple(zip(layer_channels[:-1], layer_channels[1:]))
encoder_blocks = []
for ((chan_in, chan_out), layer_stride) in zip(chan_in_out_pairs, strides):
encoder_blocks.append(EncoderBlock(chan_in, chan_out, layer_stride, enc_cycle_dilations, squeeze_excite, pad_mode))
self.encoder = nn.Sequential(
CausalConv1d(input_channels, channels, 7, pad_mode = pad_mode),
*encoder_blocks,
CausalConv1d(layer_channels[-1], codebook_dim, 3, pad_mode = pad_mode)
)
attn_kwargs = dict(
dim = codebook_dim,
dim_head = attn_dim_head,
heads = attn_heads,
depth = attn_depth,
window_size = attn_window_size,
xpos_scale_base = attn_xpos_scale_base,
dynamic_pos_bias = attn_dynamic_pos_bias,
prenorm = True,
causal = True
)
self.encoder_attn = LocalTransformer(**attn_kwargs) if use_local_attn else None
self.encoder_film = FiLM(codebook_dim, dim_cond = 2)
self.num_quantizers = rq_num_quantizers
self.codebook_dim = codebook_dim
self.codebook_size = codebook_size
self.rq_groups = rq_groups
self.rq = GroupedResidualVQ(
dim = codebook_dim,
num_quantizers = rq_num_quantizers,
codebook_size = codebook_size,
groups = rq_groups,
decay = rq_ema_decay,
commitment_weight = rq_commitment_weight,
quantize_dropout_multiple_of = rq_quantize_dropout_multiple_of,
kmeans_init = True,
threshold_ema_dead_code = 2,
quantize_dropout = True,
quantize_dropout_cutoff_index = quantize_dropout_cutoff_index,
stochastic_sample_codes = rq_stochastic_sample_codes,
**rq_kwargs
)
self.decoder_film = FiLM(codebook_dim, dim_cond = 2)
self.decoder_attn = LocalTransformer(**attn_kwargs) if use_local_attn else None
decoder_blocks = []
for ((chan_in, chan_out), layer_stride) in zip(reversed(chan_in_out_pairs), reversed(strides)):
decoder_blocks.append(DecoderBlock(chan_out, chan_in, layer_stride, dec_cycle_dilations, squeeze_excite, pad_mode))
self.decoder = nn.Sequential(
CausalConv1d(codebook_dim, layer_channels[-1], 7, pad_mode = pad_mode),
*decoder_blocks,
CausalConv1d(channels, input_channels, 7, pad_mode = pad_mode)
)
# discriminators
self.discr_multi_scales = discr_multi_scales
self.discriminators = nn.ModuleList([MultiScaleDiscriminator() for _ in range(len(discr_multi_scales))])
discr_rel_factors = [int(s1 / s2) for s1, s2 in zip(discr_multi_scales[:-1], discr_multi_scales[1:])]
self.downsamples = nn.ModuleList([nn.Identity()] + [nn.AvgPool1d(2 * factor, stride = factor, padding = factor) for factor in discr_rel_factors])
self.stft_discriminator = stft_discriminator
if not exists(self.stft_discriminator):
self.stft_discriminator = ComplexSTFTDiscriminator(
stft_normalized = stft_normalized,
logits_abs = complex_stft_discr_logits_abs # whether to output as abs() or use view_as_real
)
# multi spectral reconstruction
self.mel_spec_transforms = nn.ModuleList([])
self.mel_spec_recon_alphas = []
num_transforms = len(multi_spectral_window_powers_of_two)
multi_spectral_n_ffts = cast_tuple(multi_spectral_n_ffts, num_transforms)
multi_spectral_n_mels = cast_tuple(multi_spectral_n_mels, num_transforms)
for powers, n_fft, n_mels in zip_longest(multi_spectral_window_powers_of_two, multi_spectral_n_ffts, multi_spectral_n_mels):
win_length = 2 ** powers
alpha = (win_length / 2) ** 0.5
calculated_n_fft = default(max(n_fft, win_length), win_length) # @AndreyBocharnikov said this is usually win length, but overridable
# if any audio experts have an opinion about these settings, please submit a PR
melspec_transform = T.MelSpectrogram(
sample_rate = target_sample_hz,
n_fft = calculated_n_fft,
win_length = win_length,
hop_length = win_length // 4,
n_mels = n_mels,
normalized = stft_normalized
)
self.mel_spec_transforms.append(melspec_transform)
self.mel_spec_recon_alphas.append(alpha)
# loss weights
self.recon_loss_weight = recon_loss_weight
self.multi_spectral_recon_loss_weight = multi_spectral_recon_loss_weight
self.adversarial_loss_weight = adversarial_loss_weight
self.feature_loss_weight = feature_loss_weight
self.register_buffer('zero', torch.tensor([0.]), persistent = False)
@property
def device(self):
return next(self.parameters()).device
@property
def configs(self):
return pickle.loads(self._configs)
def decode_from_codebook_indices(self, quantized_indices):
quantized_indices = rearrange(quantized_indices, 'b n (g q) -> g b n q', g = self.rq_groups)
codes = self.rq.get_codes_from_indices(quantized_indices)
x = reduce(codes, 'g q b n d -> b n (g d)', 'sum')
return self.decode(x)
def decode(self, x, quantize = False):
if quantize:
x, *_ = self.rq(x)
x = self.decoder_attn(x)
x = rearrange(x, 'b n c -> b c n')
return self.decoder(x)
def save(self, path):
path = Path(path)
pkg = dict(
model = self.state_dict(),
config = self._configs,
version = __version__
)
torch.save(pkg, str(path))
@classmethod
def init_and_load_from(cls, path, strict = True):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
assert 'config' in pkg, 'model configs were not found in this saved checkpoint'
config = pickle.loads(pkg['config'])
soundstream = cls(**config)
soundstream.load(path, strict = strict)
return soundstream
def load(self, path, strict = True):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
# check version
if 'version' in pkg and version.parse(pkg['version']) < parsed_version:
print(f'soundstream model being loaded was trained on an older version of audiolm-pytorch ({pkg["version"]})')
has_ema = 'ema_model' in pkg
model_pkg = pkg['ema_model'] if has_ema else pkg['model']
if has_ema:
model_pkg = filter_by_keys(lambda k: k.startswith('ema_model.'), model_pkg)
model_pkg = map_keys(lambda k: k[len('ema_model.'):], model_pkg)
self.load_state_dict(model_pkg, strict = strict)
def load_from_trainer_saved_obj(self, path):
path = Path(path)
assert path.exists()
obj = torch.load(str(path))
self.load_state_dict(obj['model'])
def non_discr_parameters(self):
return [
*self.encoder.parameters(),
*self.decoder.parameters(),
*(self.encoder_attn.parameters() if exists(self.encoder_attn) else []),
*(self.decoder_attn.parameters() if exists(self.decoder_attn) else []),
*self.encoder_film.parameters(),
*self.decoder_film.parameters()
]
@property
def seq_len_multiple_of(self):
return functools.reduce(lambda x, y: x * y, self.strides)
@property
def downsample_factor(self):
return self.seq_len_multiple_of
def process_input(
self,
x,
input_sample_hz = None,
curtail_from_left = False
):
x, ps = pack([x], '* n')
if exists(input_sample_hz):
x = resample(x, input_sample_hz, self.target_sample_hz)
x = curtail_to_multiple(x, self.seq_len_multiple_of, from_left = curtail_from_left)
if x.ndim == 2:
x = rearrange(x, 'b n -> b 1 n')
return x, ps
def forward(
self,
x,
target = None,
is_denoising = None, # if you want to learn film conditioners that teach the soundstream to denoise - target would need to be passed in above
return_encoded = False,
return_discr_loss = False,
return_discr_losses_separately = False,
return_loss_breakdown = False,
return_recons_only = False,
input_sample_hz = None,
apply_grad_penalty = False,
curtail_from_left = False
):
assert not (exists(is_denoising) and not exists(target))
process_input = partial(self.process_input, input_sample_hz = input_sample_hz, curtail_from_left = curtail_from_left)
x, ps = process_input(x)
if exists(target):
target, _ = process_input(target)
orig_x = x.clone()
x = self.encoder(x)
x = rearrange(x, 'b c n -> b n c')
if exists(self.encoder_attn):
x = self.encoder_attn(x)
if exists(is_denoising):
denoise_input = torch.tensor([is_denoising, not is_denoising], dtype = x.dtype, device = self.device) # [1, 0] for denoise, [0, 1] for not denoising
x = self.encoder_film(x, denoise_input)
x, indices, commit_loss = self.rq(x)
if return_encoded:
indices = rearrange(indices, 'g b n q -> b n (g q)')
return x, indices, commit_loss
if exists(is_denoising):
x = self.decoder_film(x, denoise_input)
if exists(self.decoder_attn):
x = self.decoder_attn(x)
x = rearrange(x, 'b n c -> b c n')
recon_x = self.decoder(x)
if return_recons_only:
recon_x, = unpack(recon_x, ps, '* c n')
return recon_x
# multi-scale discriminator loss
if return_discr_loss:
real, fake = orig_x, recon_x.detach()
stft_discr_loss = None
stft_grad_penalty = None
discr_losses = []
discr_grad_penalties = []
if self.single_channel:
real, fake = orig_x.clone(), recon_x.detach()
stft_real_logits, stft_fake_logits = map(self.stft_discriminator, (real.requires_grad_(), fake))
stft_discr_loss = hinge_discr_loss(stft_fake_logits, stft_real_logits)
if apply_grad_penalty:
stft_grad_penalty = gradient_penalty(real, stft_discr_loss)
scaled_real, scaled_fake = real, fake
for discr, downsample in zip(self.discriminators, self.downsamples):
scaled_real, scaled_fake = map(downsample, (scaled_real, scaled_fake))
real_logits, fake_logits = map(discr, (scaled_real.requires_grad_(), scaled_fake))
one_discr_loss = hinge_discr_loss(fake_logits, real_logits)
discr_losses.append(one_discr_loss)
if apply_grad_penalty:
discr_grad_penalties.append(gradient_penalty(scaled_real, one_discr_loss))
if not return_discr_losses_separately:
all_discr_losses = torch.stack(discr_losses).mean()
if exists(stft_discr_loss):
all_discr_losses = all_discr_losses + stft_discr_loss
if exists(stft_grad_penalty):
all_discr_losses = all_discr_losses + stft_grad_penalty
return all_discr_losses
# return the discriminator losses separately, as List[Tuple[str, Tensor]]
discr_losses_pkg = []
discr_losses_pkg.extend([(f'scale:{scale}', multi_scale_loss) for scale, multi_scale_loss in zip(self.discr_multi_scales, discr_losses)])
discr_losses_pkg.extend([(f'scale_grad_penalty:{scale}', discr_grad_penalty) for scale, discr_grad_penalty in zip(self.discr_multi_scales, discr_grad_penalties)])
if exists(stft_discr_loss):
discr_losses_pkg.append(('stft', stft_discr_loss))
if exists(stft_grad_penalty):
discr_losses_pkg.append(('stft_grad_penalty', stft_grad_penalty))
return discr_losses_pkg
# recon loss
target = default(target, orig_x) # target can also be passed in, in the case of denoising
recon_loss = F.mse_loss(target, recon_x)
# multispectral recon loss - eq (4) and (5) in https://arxiv.org/abs/2107.03312
multi_spectral_recon_loss = self.zero
if self.multi_spectral_recon_loss_weight > 0:
for mel_transform, alpha in zip(self.mel_spec_transforms, self.mel_spec_recon_alphas):
orig_mel, recon_mel = map(mel_transform, (orig_x, recon_x))
log_orig_mel, log_recon_mel = map(log, (orig_mel, recon_mel))
l1_mel_loss = (orig_mel - recon_mel).abs().sum(dim = -2).mean()
l2_log_mel_loss = alpha * vector_norm(log_orig_mel - log_recon_mel, dim = -2).mean()
multi_spectral_recon_loss = multi_spectral_recon_loss + l1_mel_loss + l2_log_mel_loss
# adversarial loss
adversarial_losses = []
discr_intermediates = []
# adversarial loss for multi-scale discriminators
real, fake = orig_x, recon_x
# features from stft
(stft_real_logits, stft_real_intermediates), (stft_fake_logits, stft_fake_intermediates) = map(partial(self.stft_discriminator, return_intermediates=True), (real, fake))
discr_intermediates.append((stft_real_intermediates, stft_fake_intermediates))
scaled_real, scaled_fake = real, fake
for discr, downsample in zip(self.discriminators, self.downsamples):
scaled_real, scaled_fake = map(downsample, (scaled_real, scaled_fake))
(real_logits, real_intermediates), (fake_logits, fake_intermediates) = map(partial(discr, return_intermediates = True), (scaled_real, scaled_fake))
discr_intermediates.append((real_intermediates, fake_intermediates))
one_adversarial_loss = hinge_gen_loss(fake_logits)
adversarial_losses.append(one_adversarial_loss)
feature_losses = []
for real_intermediates, fake_intermediates in discr_intermediates:
losses = [F.l1_loss(real_intermediate, fake_intermediate) for real_intermediate, fake_intermediate in zip(real_intermediates, fake_intermediates)]
feature_losses.extend(losses)
feature_loss = torch.stack(feature_losses).mean()
# adversarial loss for stft discriminator
adversarial_losses.append(hinge_gen_loss(stft_fake_logits))
adversarial_loss = torch.stack(adversarial_losses).mean()
# sum commitment loss
all_commitment_loss = commit_loss.sum()
total_loss = recon_loss * self.recon_loss_weight + multi_spectral_recon_loss * self.multi_spectral_recon_loss_weight + adversarial_loss * self.adversarial_loss_weight + feature_loss * self.feature_loss_weight + all_commitment_loss
if return_loss_breakdown:
return total_loss, (recon_loss, multi_spectral_recon_loss, adversarial_loss, feature_loss, all_commitment_loss)
return total_loss
# some default soundstreams
def AudioLMSoundStream(
strides = (2, 4, 5, 8),
target_sample_hz = 16000,
rq_num_quantizers = 12,
**kwargs
):
return SoundStream(
strides = strides,
target_sample_hz = target_sample_hz,
rq_num_quantizers = rq_num_quantizers,
**kwargs
)
def MusicLMSoundStream(
strides = (3, 4, 5, 8),
target_sample_hz = 24000,
rq_num_quantizers = 12,
**kwargs
):
return SoundStream(
strides = strides,
target_sample_hz = target_sample_hz,
rq_num_quantizers = rq_num_quantizers,
**kwargs
)
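# usage sketch: an illustrative encode / decode round trip with the 16 kHz preset
# (shapes and the single training step below are indicative only)
if __name__ == '__main__':
    soundstream = AudioLMSoundStream()
    audio = torch.randn(2, 16000)                  # ~1 second per sample at 16 kHz
    total_loss = soundstream(audio)                # generator-side loss (recon + adversarial + ...)
    total_loss.backward()
    soundstream.eval()
    _, indices, _ = soundstream(audio, return_encoded = True)
    recon = soundstream.decode_from_codebook_indices(indices)   # (2, 1, n) reconstructed waveform
    print(recon.shape)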
|
audiolm-pytorch-main
|
audiolm_pytorch/soundstream.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('Config', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
causal = self.causal
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
if causal:
causal_mask = torch.ones((q_len, k_len), device = q.device, dtype = torch.bool).triu(k_len - q_len + 1)
mask = mask & ~causal_mask
causal = False
config = self.cuda_config if is_cuda else self.cpu_config
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out
def forward(self, q, k, v, mask = None, attn_bias = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.flash:
assert not exists(attn_bias), 'attention bias not supported for flash attention'
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k) * scale
# attention bias
if exists(attn_bias):
sim = sim + attn_bias
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = sim.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
return out
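# usage sketch: queries are multi-headed while keys / values are single-headed,
# matching the einsum patterns above (shapes below are illustrative)
if __name__ == '__main__':
    attend = Attend(causal = True, flash = False)
    q = torch.randn(2, 8, 1024, 64)   # (batch, heads, seq, dim_head)
    k = torch.randn(2, 1024, 64)      # (batch, seq, dim_head), shared across heads
    v = torch.randn(2, 1024, 64)
    out = attend(q, k, v)             # (2, 8, 1024, 64)
    print(out.shape)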
|
audiolm-pytorch-main
|
audiolm_pytorch/attend.py
|
from torch import nn
# functions
def round_down_nearest_multiple(num, divisor):
return num // divisor * divisor
def curtail_to_multiple(t, mult, from_left = False):
data_len = t.shape[-1]
rounded_seq_len = round_down_nearest_multiple(data_len, mult)
seq_slice = slice(None, rounded_seq_len) if not from_left else slice(-rounded_seq_len, None)
return t[..., seq_slice]
# base class
class AudioConditionerBase(nn.Module):
pass
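# usage sketch: trimming a waveform so its length divides evenly by a codec's
# downsample factor (320 here is just an example value)
if __name__ == '__main__':
    import torch
    wav = torch.randn(1, 16385)
    print(curtail_to_multiple(wav, 320).shape)                    # torch.Size([1, 16320])
    print(curtail_to_multiple(wav, 320, from_left = True).shape)  # keeps the last 16320 samples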
|
audiolm-pytorch-main
|
audiolm_pytorch/utils.py
|
from pathlib import Path
import torch
from torch import nn
from einops import rearrange
import fairseq
from torchaudio.functional import resample
from audiolm_pytorch.utils import curtail_to_multiple
import logging
logging.root.setLevel(logging.ERROR)
def exists(val):
return val is not None
class FairseqVQWav2Vec(nn.Module):
"""
checkpoint path can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/wav2vec/README.md#vq-wav2vec
specifically download the kmeans model for now
$ wget https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt
"""
def __init__(
self,
checkpoint_path,
target_sample_hz = 24000,
seq_len_multiple_of = None
):
super().__init__()
self.target_sample_hz = target_sample_hz
self.seq_len_multiple_of = seq_len_multiple_of
path = Path(checkpoint_path)
assert path.exists(), f'path {checkpoint_path} does not exist'
checkpoint = torch.load(checkpoint_path)
load_model_input = {checkpoint_path: checkpoint}
model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
self.model = model[0]
self.model.eval()
assert hasattr(self.model, 'vector_quantizer') and hasattr(self.model.vector_quantizer, 'embedding'), 'the vq wav2vec model does not seem to be valid'
@property
def groups(self):
return self.model.vector_quantizer.groups
@property
def downsample_factor(self):
# todo: double check architecture
return 80
@property
def codebook_size(self):
return self.model.vector_quantizer.embedding.shape[0]
@torch.inference_mode()
def forward(
self,
wav_input,
flatten = True,
input_sample_hz = None
):
if exists(input_sample_hz):
wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
if exists(self.seq_len_multiple_of):
wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
embed = self.model.feature_extractor(wav_input)
_, codebook_indices = self.model.vector_quantizer.forward_idx(embed)
if not flatten:
return codebook_indices
return rearrange(codebook_indices, 'b ... -> b (...)')
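# usage sketch: the checkpoint path is a placeholder for the vq-wav2vec kmeans
# model referenced in the class docstring above
if __name__ == '__main__':
    wav2vec = FairseqVQWav2Vec(checkpoint_path = './vq-wav2vec_kmeans.pt')   # placeholder path
    wav = torch.randn(1, 24000)      # roughly one second at the 24 kHz target rate
    ids = wav2vec(wav)               # flattened codebook indices across groups
    print(ids.shape, wav2vec.codebook_size, wav2vec.groups)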
|
audiolm-pytorch-main
|
audiolm_pytorch/vq_wav2vec.py
|
from lion_pytorch import Lion
from torch.optim import AdamW, Adam
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
use_lion = False,
**kwargs
):
has_wd = wd > 0
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and has_wd:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if use_lion:
return Lion(params, lr = lr, betas = betas, weight_decay = wd)
if not has_wd:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
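# usage sketch: parameters with fewer than two dimensions (biases, norm gains) are
# excluded from weight decay; the tiny model below is purely illustrative
if __name__ == '__main__':
    from torch import nn
    model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))
    opt = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-2)        # AdamW with grouped decay
    lion_opt = get_optimizer(model.parameters(), lr = 1e-4, use_lion = True)
    print(type(opt).__name__, type(lion_opt).__name__)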
|
audiolm-pytorch-main
|
audiolm_pytorch/optimizer.py
|
import math
from functools import partial, wraps
from beartype.typing import Optional, Union, List
from beartype import beartype
import torch
from torch import nn, einsum, Tensor
from torch.autograd import grad as torch_grad
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import torchaudio
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from audiolm_pytorch.vq_wav2vec import FairseqVQWav2Vec
from audiolm_pytorch.hubert_kmeans import HubertWithKmeans
from audiolm_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
from torchaudio.functional import resample
from audiolm_pytorch.soundstream import SoundStream
from audiolm_pytorch.encodec import EncodecWrapper
from audiolm_pytorch.utils import AudioConditionerBase
from audiolm_pytorch.attend import Attend
from tqdm import tqdm
from pathlib import Path
from audiolm_pytorch.version import __version__
from packaging import version
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def maybe(fn):
if not exists(fn):
return always(None)
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
def ceil_div(numer, denom):
return (numer + denom - 1) // denom
def remainder_needed_until_multiple(n, mult):
return (ceil_div(n, mult) * mult) - n
def round_down_nearest_multiple(val, mult):
return (val // mult) * mult
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# tensor helpers
def generate_mask_with_prob(shape, mask_prob, device):
seq = shape[-1]
rand = torch.randn(shape, device = device)
rand[:, 0] = -torch.finfo(rand.dtype).max
num_mask = min(int(seq * mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros(shape, device = device).scatter(1, indices, 1.).bool()
return mask
# attention related utils
def grad_shrink(t, alpha = 0.1):
return t * alpha + t.detach() * (1 - alpha)
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t + eps)
def l2norm(t):
return F.normalize(t, dim = -1)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def mask_out_after_eos_id(t, eos_id, mask_value = -1, keep_eos = True):
eos_mask = (t == eos_id).float()
if keep_eos:
eos_mask = F.pad(eos_mask, (1, -1))
after_eos_mask = eos_mask.cumsum(dim = -1) > 0
return t.masked_fill(after_eos_mask, mask_value)
def all_rows_have_eos_id(t, eos_id):
eos_mask = (t == eos_id)
return torch.any(eos_mask, dim = -1).all()
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# removing unique consecutives in the semantic token ids
# important detail noted by @eonglints
def append_eos_id(ids, eos_id):
b, device = ids.shape[0], ids.device
eos_ids = torch.ones(1, device = device).long() * eos_id
eos_ids = repeat(eos_ids, '1 -> b 1', b = b)
ids = torch.cat((ids, eos_ids), dim = -1)
return ids
def batch_unique_consecutive(t, pad_value = 0.):
unique_arr = [torch.unique_consecutive(el) for el in t.unbind(dim = 0)]
return pad_sequence(unique_arr, batch_first = True, padding_value = pad_value)
# function for getting embeds from nn.Embedding but with padding as some designated value (-1) outside the range of the embed table
@beartype
def get_embeds(
embeddings: nn.Embedding,
codes: torch.Tensor,
pad_id = -1,
return_mask = False,
mask_pad_pos_to = 0
):
pad_mask = codes == pad_id
codes_without_pad = codes.masked_fill(pad_mask, 0) # just retrieve first code as dummy
embeds = embeddings(codes_without_pad)
if exists(mask_pad_pos_to):
embeds = embeds.masked_fill(rearrange(pad_mask, '... -> ... 1'), mask_pad_pos_to)
if return_mask:
return embeds, ~pad_mask
return embeds
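# illustrative example: for codes = [[5, 7, -1]] and pad_id = -1, the pad position is
# looked up with a dummy index of 0, its embedding is zeroed out (mask_pad_pos_to = 0),
# and with return_mask = True the returned mask is [[True, True, False]]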
# bias-less layernorm, being used in more recent T5s, PaLM, also in @borisdayma 's experiments shared with me
# greater stability
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# relative positional bias
class RelativePositionBias(nn.Module):
""" from https://arxiv.org/abs/2111.09883 """
def __init__(
self,
*,
dim,
heads,
layers = 3
):
super().__init__()
self.net = nn.ModuleList([])
self.net.append(nn.Sequential(nn.Linear(1, dim), nn.SiLU()))
for _ in range(layers - 1):
self.net.append(nn.Sequential(nn.Linear(dim, dim), nn.SiLU()))
self.net.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, n):
device = self.device
pos = torch.arange(n, device = device)
rel_pos = (rearrange(pos, 'i -> i 1') - rearrange(pos, 'j -> 1 j'))
rel_pos += (n - 1)
x = torch.arange(-n + 1, n, device = device).float()
x = rearrange(x, '... -> ... 1')
for layer in self.net:
x = layer(x)
x = x[rel_pos]
return rearrange(x, 'i j h -> h i j')
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4, dropout = 0.1):
inner_dim = int(dim * 2 * mult / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
LayerNorm(inner_dim),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
causal = False,
dim_head = 64,
dim_context = None,
heads = 8,
norm_context = False,
num_null_kv = 0,
dropout = 0.1,
scale = 8,
flash = False
):
super().__init__()
self.heads = heads
self.causal = causal
inner_dim = dim_head * heads
dim_context = default(dim_context, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity()
self.attn_dropout = nn.Dropout(dropout)
self.num_null_kv = num_null_kv
self.null_kv = nn.Parameter(torch.randn(2, num_null_kv, dim_head)) if num_null_kv > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False)
self.attend = Attend(
flash = flash,
dropout = dropout,
causal = causal
)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.Dropout(dropout)
)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
prefix_context = None,
prefix_context_mask = None
):
b, n, _, device = *x.shape, x.device
if exists(context):
context = self.context_norm(context)
kv_input = default(context, x)
# take care of prefix-based self attention conditioning
# make sure to either concatenate to the self attention mask or lengthen it accordingly
if exists(prefix_context):
kv_input = torch.cat((prefix_context, kv_input), dim = -2)
prefix_seq_len = prefix_context.shape[-2]
if not exists(mask):
mask = torch.ones((b, n), device = device, dtype = torch.bool)
if exists(prefix_context_mask):
mask = torch.cat((prefix_context_mask, mask), dim = -1)
else:
mask = F.pad(mask, (prefix_seq_len, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (prefix_seq_len, 0), value = 0.)
# prenorm
x = self.norm(x)
# project for queries, keys, values
q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1)
# null key / values
if self.num_null_kv > 0:
null_k, null_v = repeat(self.null_kv, 'kv n d -> kv b n d', b = b).unbind(dim = 0)
k = torch.cat((null_k, k), dim = -2)
v = torch.cat((null_v, v), dim = -2)
# split for multi-headed attention
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# handle mask and null key / value
if exists(mask):
mask = F.pad(mask, (self.num_null_kv, 0), value = True)
# attention
out = self.attend(q, k, v, attn_bias = attn_bias, mask = mask)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# transformer
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
dim_context = None,
cross_attend = False,
attn_dropout = 0.,
ff_dropout = 0.,
grad_shrink_alpha = 0.1,
cond_as_self_attn_prefix = False,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
assert not (cross_attend and cond_as_self_attn_prefix)
self.dim_context = default(dim_context, dim)
self.cond_as_self_attn_prefix = cond_as_self_attn_prefix
self.grad_shrink = partial(grad_shrink, alpha = grad_shrink_alpha)
self.layers = nn.ModuleList([])
self.rel_pos_bias = RelativePositionBias(dim = dim // 2, heads = heads) if rel_pos_bias else None
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dropout = attn_dropout, flash = flash_attn, causal = True, **kwargs),
Attention(dim = dim, heads = heads, dropout = attn_dropout, dim_context = dim_context, flash = flash_attn, num_null_kv = 1, norm_context = True, **kwargs) if cross_attend else None,
FeedForward(dim = dim, dropout = ff_dropout)
]))
self.norm = LayerNorm(dim)
def forward(
self,
x,
self_attn_mask = None,
context = None,
context_mask = None,
attn_bias = None
):
assert not (self.cond_as_self_attn_prefix and not exists(context))
assert not (exists(context) and context.shape[-1] != self.dim_context), f'you had specified a conditioning dimension of {self.dim_context}, yet what was received by the transformer has dimension of {context.shape[-1]}'
n, device = x.shape[1], x.device
x = self.grad_shrink(x) # from cogview paper, adopted by GLM 130B LLM, decreases likelihood of attention net instability
if exists(attn_bias):
rel_pos_bias = attn_bias
else:
rel_pos_bias = maybe(self.rel_pos_bias)(n)
self_attn_kwargs = dict()
if self.cond_as_self_attn_prefix:
self_attn_kwargs = dict(
prefix_context = context,
prefix_context_mask = context_mask
)
for attn, cross_attn, ff in self.layers:
x = attn(x, attn_bias = rel_pos_bias, mask = self_attn_mask, **self_attn_kwargs) + x
if exists(cross_attn):
assert exists(context)
x = cross_attn(x, context = context, mask = context_mask) + x
x = ff(x) + x
return self.norm(x)
# the three hierarchical transformers
class SemanticTransformer(nn.Module):
@beartype
def __init__(
self,
*,
dim,
depth,
num_semantic_tokens,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
cond_dim = None,
has_condition = False,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
self.num_semantic_tokens = num_semantic_tokens
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.start_token = nn.Parameter(torch.randn(dim))
self.semantic_embedding = nn.Embedding(num_semantic_tokens + 1, dim)
self.eos_id = num_semantic_tokens
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
grad_shrink_alpha = grad_shrink_alpha,
rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn,
**kwargs
)
self.to_logits = nn.Linear(dim, num_semantic_tokens + 1)
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
@beartype
def forward(
self,
*,
ids = None,
return_loss = False,
text: Optional[List[str]] = None,
text_embeds = None,
self_attn_mask = None,
cond_drop_prob = None,
unique_consecutive = None
):
device = self.device
b = ids.shape[0]
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
text_mask = None
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
if exists(text_embeds):
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
if return_loss:
labels, ids = ids.clone(), ids[:, :-1]
tokens = get_embeds(self.semantic_embedding, ids)
start_tokens = repeat(self.start_token, 'd -> b 1 d', b = ids.shape[0])
tokens = torch.cat((start_tokens, tokens), dim = 1)
if exists(self_attn_mask):
self_attn_mask = F.pad(self_attn_mask, (1, 0), value = True)
tokens = self.transformer(tokens, context = text_embeds, self_attn_mask = self_attn_mask, context_mask = text_mask)
return self.to_logits(tokens)
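# illustrative example: over the 500 HuBERT kmeans clusters one might construct
#   SemanticTransformer(dim = 1024, depth = 6, num_semantic_tokens = 500)
# and call it with ids of shape (batch, n); with return_loss = True the ids are shifted
# right behind a learned start token, so each position's logits predict the next
# semantic token (the cross entropy itself is computed by the training wrapper)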
class CoarseTransformer(nn.Module):
@beartype
def __init__(
self,
*,
codebook_size,
num_coarse_quantizers,
dim,
depth,
num_semantic_tokens,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
has_condition = False,
cond_dim = None,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
project_semantic_logits = True,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
self.num_semantic_tokens = num_semantic_tokens
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.semantic_start_token = nn.Parameter(torch.randn(dim))
self.coarse_start_token = nn.Parameter(torch.randn(dim))
self.semantic_eos_id = num_semantic_tokens
self.semantic_embedding = nn.Embedding(num_semantic_tokens + 1, dim)
self.coarse_eos_id = codebook_size
codebook_size_with_eos = codebook_size + 1
self.coarse_embedding = nn.Embedding(num_coarse_quantizers * codebook_size_with_eos, dim)
self.coarse_quantize_embedding = nn.Embedding(num_coarse_quantizers, dim)
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.cross_attn_bias = nn.Parameter(torch.zeros(heads, 1, 1)) if rel_pos_bias else None
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
grad_shrink_alpha = grad_shrink_alpha,
rel_pos_bias = rel_pos_bias,
flash_attn = flash_attn,
**kwargs
)
self.codebook_size = codebook_size
self.num_coarse_quantizers = num_coarse_quantizers
self.to_semantic_logits = nn.Linear(dim, num_semantic_tokens + 1) if project_semantic_logits else None
self.coarse_logit_weights = nn.Parameter(torch.randn(num_coarse_quantizers, codebook_size_with_eos, dim))
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
semantic_logits, coarse_logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return semantic_logits, coarse_logits
null_semantic_logits, null_coarse_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_semantic_logits = None
if exists(null_semantic_logits):
scaled_semantic_logits = null_semantic_logits + (semantic_logits - null_semantic_logits) * cond_scale
scaled_coarse_logits = null_coarse_logits + (coarse_logits - null_coarse_logits) * cond_scale
return scaled_semantic_logits, scaled_coarse_logits
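# classifier-free guidance, as combined above: guided = null + (cond - null) * cond_scale.
# toy worked example (made-up numbers): with a conditional logit of 2.0, an unconditional
# (null) logit of 1.0 and cond_scale = 3, the guided logit is 1.0 + (2.0 - 1.0) * 3 = 4.0,
# i.e. the direction suggested by the conditioning is amplified relative to the null baseline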
@beartype
def forward(
self,
*,
semantic_token_ids,
coarse_token_ids,
self_attn_mask = None,
text: Optional[List[str]] = None,
text_embeds = None,
cond_drop_prob = None,
return_only_coarse_logits = False
):
b, device = semantic_token_ids.shape[0], semantic_token_ids.device
arange = partial(torch.arange, device = device)
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = None
if exists(text_embeds):
text_mask = torch.any(text_embeds != 0, dim = -1)
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
coarse_token_ids, semantic_token_ids = map(lambda t: rearrange(t, 'b ... -> b (...)'), (coarse_token_ids, semantic_token_ids))
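# offset the token ids of each coarse quantizer into its own region of the shared coarse
# embedding table: token id t from quantizer q is looked up at index t + q * codebook_size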
offsets = self.codebook_size * arange(self.num_coarse_quantizers)
offsets = repeat(offsets, 'q -> 1 (n q)', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
offsets = offsets[:, :coarse_token_ids.shape[-1]]
coarse_token_ids = coarse_token_ids + offsets
semantic_tokens = get_embeds(self.semantic_embedding, semantic_token_ids)
coarse_tokens = self.coarse_embedding(coarse_token_ids)
coarse_quantize_tokens = repeat(self.coarse_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
coarse_quantize_tokens = coarse_quantize_tokens[:coarse_token_ids.shape[-1], ...]
coarse_tokens = coarse_tokens + coarse_quantize_tokens
semantic_seq_len = semantic_tokens.shape[1]
semantic_start_tokens = repeat(self.semantic_start_token, 'd -> b 1 d', b = b)
coarse_start_tokens = repeat(self.coarse_start_token, 'd -> b 1 d', b = b)
tokens = torch.cat((
semantic_start_tokens,
semantic_tokens,
coarse_start_tokens,
coarse_tokens
), dim = 1)
# engineer the attention bias so that cross attention is not dominated by relative positions
seq_len = tokens.shape[-2]
attn_bias = None
if exists(self.transformer.rel_pos_bias):
attn_bias = self.transformer.rel_pos_bias(seq_len)
is_semantic = arange(seq_len) < (semantic_seq_len + 1) # semantic seq len + start token
is_cross_attn = rearrange(is_semantic, 'i -> i 1') ^ rearrange(is_semantic, 'j -> 1 j')
attn_bias = torch.where(
is_cross_attn,
self.cross_attn_bias,
attn_bias
)
# attend
tokens = self.transformer(
tokens,
context = text_embeds,
attn_bias = attn_bias,
self_attn_mask = self_attn_mask,
context_mask = text_mask
)
pred_semantic_tokens, pred_coarse_tokens = tokens[:, :semantic_seq_len], tokens[:, (semantic_seq_len + 1):]
# semantic logits
semantic_logits = self.to_semantic_logits(pred_semantic_tokens) if not return_only_coarse_logits and exists(self.to_semantic_logits) else None
# get coarse logits
n = pred_coarse_tokens.shape[1]
nq = round_down_nearest_multiple(n, self.num_coarse_quantizers)
pred_coarse_tokens_groupable, pred_coarse_tokens_remainder = pred_coarse_tokens[:, :nq], pred_coarse_tokens[:, nq:]
pred_coarse_tokens_groupable = rearrange(pred_coarse_tokens_groupable, 'b (n q) d -> b n q d', q = self.num_coarse_quantizers)
coarse_logits_groupable = einsum('q c d, b n q d -> b n q c', self.coarse_logit_weights, pred_coarse_tokens_groupable)
coarse_logits_groupable = rearrange(coarse_logits_groupable, 'b n q c -> b (n q) c')
remainder_num_quantizers = pred_coarse_tokens_remainder.shape[1]
if remainder_num_quantizers > 0:
coarse_logits_remainder = einsum('q c d, b q d -> b q c', self.coarse_logit_weights[:remainder_num_quantizers], pred_coarse_tokens_remainder)
coarse_logits = torch.cat((coarse_logits_groupable, coarse_logits_remainder), dim = 1)
else:
coarse_logits = coarse_logits_groupable
return semantic_logits, coarse_logits
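# A minimal usage sketch for CoarseTransformer (hypothetical sizes, unconditioned, so no text is
# passed; note that constructing the model may fetch the default T5 config for its text dimension):
def _example_coarse_transformer():
    import torch
    model = CoarseTransformer(
        codebook_size = 1024,
        num_coarse_quantizers = 3,
        num_semantic_tokens = 500,
        dim = 512,
        depth = 6
    )
    semantic_token_ids = torch.randint(0, 500, (1, 100))
    coarse_token_ids = torch.randint(0, 1024, (1, 30))  # flattened (time * quantizers)
    semantic_logits, coarse_logits = model(
        semantic_token_ids = semantic_token_ids,
        coarse_token_ids = coarse_token_ids
    )
    return semantic_logits.shape, coarse_logits.shape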
class FineTransformer(nn.Module):
def __init__(
self,
*,
num_coarse_quantizers,
num_fine_quantizers,
codebook_size,
dim,
depth,
heads = 8,
attn_dropout = 0.,
ff_dropout = 0.,
t5_name = DEFAULT_T5_NAME,
has_condition = False,
cond_dim = None,
audio_text_condition = False,
cond_as_self_attn_prefix = False,
cond_drop_prob = 0.5,
grad_shrink_alpha = 0.1,
project_coarse_logits = True,
pad_id = -1,
rel_pos_bias = True,
flash_attn = False,
**kwargs
):
super().__init__()
rel_pos_bias = rel_pos_bias and not flash_attn
if audio_text_condition:
has_condition = True
cond_dim = default(cond_dim, dim)
self.has_condition = has_condition
self.embed_text = partial(t5_encode_text, name = t5_name)
self.cond_drop_prob = cond_drop_prob
self.num_coarse_quantizers = num_coarse_quantizers
self.coarse_start_token = nn.Parameter(torch.randn(dim))
self.fine_start_token = nn.Parameter(torch.randn(dim))
self.coarse_embedding = nn.Embedding(num_coarse_quantizers * codebook_size, dim)
self.fine_embedding = nn.Embedding(num_fine_quantizers * codebook_size, dim)
self.coarse_quantize_embedding = nn.Embedding(num_coarse_quantizers, dim)
self.fine_quantize_embedding = nn.Embedding(num_fine_quantizers, dim)
self.pad_id = pad_id
self.eos_id = codebook_size
text_dim = default(cond_dim, get_encoded_dim(t5_name))
self.proj_text_embed = nn.Linear(text_dim, dim, bias = False) if text_dim != dim else nn.Identity()
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
cross_attend = has_condition and not cond_as_self_attn_prefix,
cond_as_self_attn_prefix = cond_as_self_attn_prefix,
rel_pos_bias = False,
grad_shrink_alpha = grad_shrink_alpha,
flash_attn = flash_attn,
**kwargs
)
# a specialized attention bias so that corresponding time steps in the fine and coarse sequences attend to each other better

self.null_pos_bias = nn.Parameter(torch.randn(heads, 1, 1)) if rel_pos_bias else None
pos_bias_mlp_dim = dim // 2
self.pos_bias_mlp = nn.Sequential(
nn.Linear(2, pos_bias_mlp_dim),
nn.SiLU(),
nn.Linear(pos_bias_mlp_dim, pos_bias_mlp_dim),
nn.SiLU(),
nn.Linear(pos_bias_mlp_dim, heads)
) if rel_pos_bias else None
self.codebook_size = codebook_size
self.num_coarse_quantizers = num_coarse_quantizers
self.num_fine_quantizers = num_fine_quantizers
self.coarse_logit_weights = nn.Parameter(torch.randn(num_coarse_quantizers, codebook_size, dim)) if project_coarse_logits else None
self.fine_logit_weights = nn.Parameter(torch.randn(num_fine_quantizers, codebook_size, dim))
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
device = self.device
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = device)
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
self.load_state_dict(pkg['model'])
return pkg
def forward_with_cond_scale(
self,
*args,
cond_scale = 3,
**kwargs
):
coarse_logits, fine_logits = self.forward(*args, cond_drop_prob = 0., **kwargs)
if cond_scale == 1 or not self.has_condition:
return coarse_logits, fine_logits
null_coarse_logits, null_fine_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_coarse_logits = None
if exists(null_coarse_logits):
scaled_coarse_logits = null_coarse_logits + (coarse_logits - null_coarse_logits) * cond_scale
scaled_fine_logits = null_fine_logits + (fine_logits - null_fine_logits) * cond_scale
return scaled_coarse_logits, scaled_fine_logits
def forward(
self,
coarse_token_ids,
fine_token_ids,
text: Optional[List[str]] = None,
text_embeds = None,
cond_drop_prob = None,
self_attn_mask = None,
return_only_fine_logits = False
):
b, device = coarse_token_ids.shape[0], coarse_token_ids.device
# handle text conditioning
has_text = exists(text) or exists(text_embeds)
assert not (self.has_condition ^ has_text)
text_mask = None
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.embed_text(text, output_device = device)
text_mask = torch.any(text_embeds != 0, dim = -1)
if exists(text_embeds):
text_embeds = self.proj_text_embed(text_embeds)
cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob)
if exists(text_mask) and cond_drop_prob > 0:
keep_mask = prob_mask_like((b,), 1 - cond_drop_prob, device = device)
text_mask = rearrange(keep_mask, 'b -> b 1') & text_mask
coarse_token_ids, fine_token_ids = map(lambda t: rearrange(t, 'b ... -> b (...)'), (coarse_token_ids, fine_token_ids))
# do not attend to any of the coarse padding tokens or coarse end token either
coarse_self_attn_mask = (coarse_token_ids != self.pad_id) & (coarse_token_ids != self.eos_id)
coarse_token_ids = coarse_token_ids.masked_fill(~coarse_self_attn_mask, 0)
fine_token_seq_len = fine_token_ids.shape[-1]
coarse_self_attn_mask = F.pad(coarse_self_attn_mask, (1, fine_token_seq_len + 1), value = True)
if exists(self_attn_mask):
self_attn_mask &= coarse_self_attn_mask
else:
self_attn_mask = coarse_self_attn_mask
# prepare coarse and fine token embeddings
b, n = coarse_token_ids.shape
coarse_length = coarse_token_ids.shape[-1]
coarse_offsets = torch.arange(self.num_coarse_quantizers, device = device)
coarse_seq_length = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers)
coarse_offsets = repeat(coarse_offsets, 'q -> (n q)', n = coarse_seq_length)
coarse_offsets = coarse_offsets[:coarse_length]
coarse_token_ids = coarse_token_ids + rearrange(coarse_offsets, '... -> 1 ...') * self.codebook_size
fine_length = fine_token_ids.shape[-1]
fine_offsets = torch.arange(self.num_fine_quantizers, device = device)
fine_seq_length = ceil_div(fine_token_ids.shape[-1], self.num_fine_quantizers)
fine_offsets = repeat(fine_offsets, 'q -> (n q)', n = fine_seq_length)
fine_offsets = fine_offsets[:fine_length]
fine_token_ids = fine_token_ids + rearrange(fine_offsets, '... -> 1 ...') * self.codebook_size
coarse_tokens = self.coarse_embedding(coarse_token_ids)
fine_tokens = self.fine_embedding(fine_token_ids)
coarse_quantize_tokens = repeat(self.coarse_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(coarse_token_ids.shape[-1], self.num_coarse_quantizers))
coarse_quantize_tokens = coarse_quantize_tokens[:coarse_token_ids.shape[-1], ...]
coarse_tokens = coarse_tokens + coarse_quantize_tokens
fine_quantize_tokens = repeat(self.fine_quantize_embedding.weight, 'q d -> (n q) d', n = ceil_div(fine_token_ids.shape[-1], self.num_fine_quantizers))
fine_quantize_tokens = fine_quantize_tokens[:fine_token_ids.shape[-1], ...]
fine_tokens = fine_tokens + fine_quantize_tokens
coarse_start_tokens = repeat(self.coarse_start_token, 'd -> b 1 d', b = b)
fine_start_tokens = repeat(self.fine_start_token, 'd -> b 1 d', b = b)
tokens = torch.cat((
coarse_start_tokens,
coarse_tokens,
fine_start_tokens,
fine_tokens
), dim = 1)
# an engineered attention bias so coarse and fine sequences attend to each other better
attn_bias = None
if exists(self.pos_bias_mlp):
max_seq_len = max(coarse_seq_length, fine_seq_length)
coarse_pos = torch.arange(coarse_seq_length, device = device)
fine_pos = torch.arange(fine_seq_length, device = device)
coarse_pos = repeat(coarse_pos, 'n -> (n q)', q = self.num_coarse_quantizers)[:coarse_length]
fine_pos = repeat(fine_pos, 'n -> (n q)', q = self.num_fine_quantizers)[:fine_length]
coarse_pos = F.pad(coarse_pos, (1, 0), value = -1)
fine_pos = F.pad(fine_pos, (1, 0), value = -1)
seq_positions = torch.cat((coarse_pos, fine_pos), dim = -1)
coarse_offsets = F.pad(coarse_offsets, (1, 0), value = 0)
fine_offsets = fine_offsets + self.num_coarse_quantizers
fine_offsets = F.pad(fine_offsets, (1, 0), value = 0)
seq_offsets = torch.cat((coarse_offsets, fine_offsets), dim = -1)
pos_mlp_input = torch.stack((seq_positions.clamp(min = 0), seq_offsets), dim = -1)
num_offsets = self.num_fine_quantizers + self.num_coarse_quantizers
# relative positions are always (2 * N - 1), where N is the length of the dimension
rel_seq_len, rel_offsets = map(lambda n: 2 * n - 1, (max_seq_len, num_offsets))
# get all relative distances
rel_dist = (rearrange(pos_mlp_input, 'i c -> i 1 c') - rearrange(pos_mlp_input, 'j c -> 1 j c'))
# get all possible relative distances for the attention bias to be computed from the mlp
# which would be - (2 * N - 1) * (2 * Q - 1) - where N = sequence length and Q = total quantizers
rel_seq_len_range = repeat(torch.arange(rel_seq_len, device = device), 'n -> (n q)', q = rel_offsets)
rel_offset_range = repeat(torch.arange(rel_offsets, device = device), 'q -> (n q)', n = rel_seq_len)
mlp_inputs = torch.stack((rel_seq_len_range, rel_offset_range), dim = -1)
# implicitly parameterized relative distances, by sequence and quantizer positions
attn_bias = self.pos_bias_mlp(mlp_inputs.float())
# translate coordinates of (rel_seq_pos, rel_quantizer_offset) -> positive index to select from attn bias
rel_dist_seq_pos, rel_dist_seq_offset = rel_dist.unbind(dim = -1)
rel_dist_seq_pos += max_seq_len - 1
rel_dist_seq_offset += num_offsets - 1
rel_dist_indices = rel_dist_seq_pos * rel_offsets + rel_dist_seq_offset
# select the relative positional attention bias outputted by the MLP
# savings go from (N * Q) ^ 2 -> ~ (4 * N * Q)
attn_bias = attn_bias[rel_dist_indices]
attn_bias = rearrange(attn_bias, '... h -> h ...')
# need to make sure start token has a custom positional bias
is_start_token_seq = seq_positions == -1
start_token_mask = rearrange(is_start_token_seq, 'i -> i 1') | rearrange(is_start_token_seq, 'j -> 1 j')
attn_bias = torch.where(
start_token_mask,
self.null_pos_bias,
attn_bias,
)
# attention
tokens = self.transformer(
tokens,
context = text_embeds,
self_attn_mask = self_attn_mask,
context_mask = text_mask,
attn_bias = attn_bias
)
pred_coarse_tokens, pred_fine_tokens = tokens[:, :n], tokens[:, (n + 1):]
# get coarse logits
pred_coarse_seq_len = pred_coarse_tokens.shape[1]
padding = remainder_needed_until_multiple(pred_coarse_seq_len, self.num_coarse_quantizers)
if padding != 0:
pred_coarse_tokens = F.pad(pred_coarse_tokens, (0, 0, 0, padding), value = 0.)
pred_coarse_tokens = rearrange(pred_coarse_tokens, 'b (n q) d -> b n q d', q = self.num_coarse_quantizers)
coarse_logits = None
if not return_only_fine_logits and exists(self.coarse_logit_weights):
coarse_logits = einsum('q c d, b n q d -> b n q c', self.coarse_logit_weights, pred_coarse_tokens)
coarse_logits = rearrange(coarse_logits, 'b n q c -> b (n q) c')
coarse_logits = coarse_logits[:, :pred_coarse_seq_len]
# get fine logits
pred_fine_seq_len = pred_fine_tokens.shape[1]
nq = round_down_nearest_multiple(pred_fine_seq_len, self.num_fine_quantizers)
pred_fine_tokens_groupable, pred_fine_tokens_remainder = pred_fine_tokens[:, :nq], pred_fine_tokens[:, nq:]
pred_fine_tokens_groupable = rearrange(pred_fine_tokens_groupable, 'b (n q) d -> b n q d', q = self.num_fine_quantizers)
fine_logits_groupable = einsum('q c d, b n q d -> b n q c', self.fine_logit_weights, pred_fine_tokens_groupable)
fine_logits_groupable = rearrange(fine_logits_groupable, 'b n q c -> b (n q) c')
remainder_num_quantizers = pred_fine_tokens_remainder.shape[1]
if remainder_num_quantizers > 0:
fine_logits_remainder = einsum('q c d, b q d -> b q c', self.fine_logit_weights[:remainder_num_quantizers], pred_fine_tokens_remainder)
fine_logits = torch.cat((fine_logits_groupable, fine_logits_remainder), dim = 1)
else:
fine_logits = fine_logits_groupable
return coarse_logits, fine_logits
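# Standalone sketch (illustrative, plain integers) of the index translation used above for the
# MLP positional bias: all (2N - 1) x (2Q - 1) possible relative (time, quantizer) distances are
# scored once by the MLP, and each pair of positions then selects its row via a flattened index.
def _example_rel_dist_index(max_seq_len = 4, num_offsets = 3):
    rel_offsets = 2 * num_offsets - 1
    # an example relative distance: 2 time steps back, 1 quantizer forward (made-up values)
    rel_time, rel_quantizer = -2, 1
    # shift both coordinates into the non-negative range, then flatten row-major, exactly as in
    # FineTransformer.forward above
    index = (rel_time + max_seq_len - 1) * rel_offsets + (rel_quantizer + num_offsets - 1)
    return index  # 8 for these toy values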
# training wrappers
class SemanticTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: SemanticTransformer,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
pad_id = -1,
unique_consecutive = True,
mask_prob = 0.15
):
super().__init__()
self.wav2vec = wav2vec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
assert not exists(self.wav2vec) or self.wav2vec.codebook_size == transformer.num_semantic_tokens, f'num_semantic_tokens on SemanticTransformer must be set to {self.wav2vec.codebook_size}'
self.unique_consecutive = unique_consecutive
self.pad_id = pad_id
self.eos_id = transformer.eos_id
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
def embed_text(self, text):
return self.transformer.embed_text(text, output_device = self.device)
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
max_length,
text: Optional[List[str]] = None,
text_embeds = None,
prime_wave = None,
prime_wave_input_sample_hz = None,
prime_ids = None,
batch_size = 1,
cond_scale = 3,
filter_thres = 0.9,
temperature = 1.,
include_eos_in_output = True, # if doing hierarchical sampling, eos must be kept for an easy time
**kwargs
):
device = self.device
# derive wav2vec ids from the input wave
if exists(prime_wave):
assert not exists(prime_ids)
assert exists(self.wav2vec)
ids = self.wav2vec(
prime_wave,
flatten = False,
input_sample_hz = prime_wave_input_sample_hz
)
elif exists(prime_ids):
ids = prime_ids
else:
ids = torch.empty((batch_size, 0), dtype = torch.long, device = device)
if self.unique_consecutive:
ids = batch_unique_consecutive(ids, pad_value = self.pad_id)
# derive joint audio-text embeddings if needed
if exists(self.audio_conditioner) and exists(prime_wave):
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = prime_wave, namespace = 'semantic')
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
# start length and get running id output
batch = ids.shape[0]
start_length = ids.shape[-1]
sample_semantic_ids = ids.clone()
last_logit_indices = (ids != self.pad_id).sum(dim = -1).long()
# sample from transformer
for ind in tqdm(range(start_length, max_length), desc = 'generating semantic'):
logits = self.transformer.forward_with_cond_scale(
ids = sample_semantic_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
**kwargs
)
last_logit_indices_expanded = repeat(last_logit_indices, 'b -> b 1 c', b = batch, c = logits.shape[-1])
last_logits = logits.gather(1, last_logit_indices_expanded)
last_logits = rearrange(last_logits, 'b 1 c -> b c')
filtered_logits = top_k(last_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sample_semantic_ids = torch.cat((sample_semantic_ids, sampled), dim = -1)
if all_rows_have_eos_id(sample_semantic_ids, self.eos_id):
break
last_logit_indices += 1
sample_semantic_ids = mask_out_after_eos_id(sample_semantic_ids, self.eos_id, keep_eos = False)
return sample_semantic_ids
def forward(
self,
*,
semantic_token_ids = None,
raw_wave = None,
text = None,
text_embeds = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) or exists(semantic_token_ids), 'either raw waveform (raw_wave) is given or semantic token ids are given (semantic_token_ids)'
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'semantic')
if not exists(semantic_token_ids):
assert exists(self.wav2vec), 'VQWav2Vec must be provided if given raw wave for training'
semantic_token_ids = self.wav2vec(raw_wave, flatten = False)
semantic_token_ids = rearrange(semantic_token_ids, 'b ... -> b (...)')
if self.training:
semantic_token_ids = append_eos_id(semantic_token_ids, self.transformer.eos_id)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value = self.pad_id)
input_ids = semantic_token_ids
if return_loss:
input_ids = semantic_token_ids[:, :-1]
self_attn_mask = None
if self.mask_prob > 0. and self.training:
self_attn_mask = generate_mask_with_prob(input_ids.shape, self.mask_prob, input_ids.device)
logits = self.transformer(
ids = input_ids,
text = text,
text_embeds = text_embeds,
self_attn_mask = self_attn_mask,
**kwargs
)
if not return_loss:
return logits
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
semantic_token_ids,
ignore_index = self.pad_id
)
return loss
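# Illustrative training-step sketch for SemanticTransformerWrapper (hypothetical shapes; assumes
# a wrapper and optimizer built elsewhere, and precomputed semantic token ids so no wav2vec
# model is needed):
def _example_semantic_wrapper_step(wrapper, optimizer, num_semantic_tokens = 500):
    import torch
    wrapper.train()
    semantic_token_ids = torch.randint(0, num_semantic_tokens, (4, 256), device = wrapper.device)
    loss = wrapper(semantic_token_ids = semantic_token_ids, return_loss = True)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()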
class CoarseTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: CoarseTransformer,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
pad_id = -1,
unique_consecutive = True,
semantic_cross_entropy_loss_weight = 1.,
mask_prob = 0.15
):
super().__init__()
self.codec = codec
self.wav2vec = wav2vec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
self.unique_consecutive = unique_consecutive
self.pad_id = pad_id
self.semantic_cross_entropy_loss_weight = semantic_cross_entropy_loss_weight
self.num_coarse_quantizers = transformer.num_coarse_quantizers * (codec.rq_groups if exists(codec) else 1)
self.semantic_eos_id = transformer.semantic_eos_id
self.coarse_eos_id = transformer.coarse_eos_id
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
semantic_token_ids,
prime_wave: Optional[Tensor] = None,
prime_wave_input_sample_hz = None,
prime_coarse_token_ids: Optional[Tensor] = None,
text: Optional[List[str]] = None,
text_embeds = None,
max_time_steps = 512,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
reconstruct_wave = False,
**kwargs
):
batch, device = semantic_token_ids.shape[0], self.device
semantic_token_ids = semantic_token_ids.to(device)
# initialize coarse token ids
# if a prime audio wave was supplied, then start off with appropriate acoustic tokens
assert not (exists(prime_wave) and exists(prime_coarse_token_ids)), 'you can either pass in the prime as a raw wave (codec required) or as preprocessed acoustic token ids'
if exists(prime_coarse_token_ids):
coarse_token_ids = prime_coarse_token_ids
elif exists(prime_wave):
assert exists(self.codec)
with torch.inference_mode():
self.codec.eval()
_, indices, _ = self.codec(
prime_wave,
return_encoded = True,
input_sample_hz = prime_wave_input_sample_hz
)
coarse_token_ids = indices[..., :self.num_coarse_quantizers]
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
else:
coarse_token_ids = torch.empty((batch, 0), device = device, dtype = torch.long)
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value=self.pad_id)
# initialize
init_coarse_time_step = 0
sampled_coarse_token_ids = coarse_token_ids.clone()
for time_step in tqdm(range(init_coarse_time_step, max_time_steps), desc = 'generating coarse'):
for ind in range(self.num_coarse_quantizers):
just_finished_quantizer_step = (ind == 0 and time_step > 0)
_, coarse_logits = self.transformer.forward_with_cond_scale(
coarse_token_ids = sampled_coarse_token_ids,
semantic_token_ids = semantic_token_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
return_only_coarse_logits = True,
**kwargs
)
last_coarse_logits = coarse_logits[:, -1]
if not just_finished_quantizer_step:
last_coarse_logits[:, -1] = float('-inf') # prevent eos from being sampled in the middle of a time step
filtered_logits = top_k(last_coarse_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sampled_coarse_token_ids = torch.cat((sampled_coarse_token_ids, sampled), dim = -1)
sampled_coarse_token_ids = mask_out_after_eos_id(sampled_coarse_token_ids, self.coarse_eos_id, keep_eos = False)
sampled_coarse_token_ids = rearrange(sampled_coarse_token_ids, 'b (n q) -> b n q', q = self.num_coarse_quantizers)
if not reconstruct_wave:
return sampled_coarse_token_ids
assert exists(self.codec)
wav = self.codec.decode_from_codebook_indices(sampled_coarse_token_ids)
return rearrange(wav, 'b 1 n -> b n')
def forward(
self,
*,
semantic_token_ids = None,
raw_wave = None,
raw_wave_for_codec = None,
text = None,
text_embeds = None,
coarse_token_ids = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) or exists(semantic_token_ids), 'either raw waveform (raw_wave) is given or semantic token ids are given (semantic_token_ids)'
raw_wave_for_codec = default(raw_wave_for_codec, raw_wave)
assert exists(raw_wave_for_codec) or exists(coarse_token_ids), 'either raw waveform (raw_wave_for_codec) is given, or coarse token ids (coarse_token_ids)'
assert not all(map(exists, (raw_wave, raw_wave_for_codec, semantic_token_ids, coarse_token_ids)))
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'coarse') # technically audio embeds, but shared text-audio joint embedding space for mulan
if not exists(semantic_token_ids):
assert exists(self.wav2vec), 'VQWav2Vec must be provided if given raw wave for training'
semantic_token_ids = self.wav2vec(raw_wave, flatten = False)
if not exists(coarse_token_ids):
assert exists(self.codec), 'Codec must be provided if given raw wave for training'
with torch.inference_mode():
self.codec.eval()
_, indices, _ = self.codec(raw_wave_for_codec, return_encoded = True)
batch, num_timesteps = raw_wave_for_codec.shape
num_frames = int(num_timesteps / self.codec.seq_len_multiple_of)
assert indices.shape[0] == batch and indices.shape[1] == num_frames, \
f'Expected indices to have shape (batch, num_frames, num_coarse_quantizers + num_fine_quantizers), but got {indices.shape}'
coarse_token_ids = indices[..., :self.num_coarse_quantizers]
semantic_token_ids = rearrange(semantic_token_ids, 'b ... -> b (...)')
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
if self.training:
semantic_token_ids = append_eos_id(semantic_token_ids, self.transformer.semantic_eos_id)
coarse_token_ids = append_eos_id(coarse_token_ids, self.transformer.coarse_eos_id)
if self.unique_consecutive:
semantic_token_ids = batch_unique_consecutive(semantic_token_ids, pad_value = self.pad_id)
if return_loss:
semantic_labels, coarse_labels = semantic_token_ids, coarse_token_ids.clone()
coarse_token_ids = coarse_token_ids[:, :-1]
# self attention mask would omit any padding and eos tokens in the semantic prime
self_attn_mask = (semantic_token_ids != self.pad_id) & (semantic_token_ids != self.semantic_eos_id)
semantic_token_ids = semantic_token_ids.masked_fill(~self_attn_mask, 0)
coarse_token_len = coarse_token_ids.shape[-1]
self_attn_mask = F.pad(self_attn_mask, (1, coarse_token_len + 1), value = True) # attend to semantic bos and all coarse tokens
# forgetful causal mask - structured dropout
if self.mask_prob > 0 and self.training:
self_attn_mask &= generate_mask_with_prob(self_attn_mask.shape, self.mask_prob, device = self_attn_mask.device)
semantic_logits, coarse_logits = self.transformer(
semantic_token_ids = semantic_token_ids,
coarse_token_ids = coarse_token_ids,
self_attn_mask = self_attn_mask,
text = text,
text_embeds = text_embeds,
**kwargs
)
# whether to early return the logits
if not return_loss:
return semantic_logits, coarse_logits
coarse_logits, semantic_logits = map(lambda t: maybe(rearrange)(t, 'b n c -> b c n'), (coarse_logits, semantic_logits))
if self.unique_consecutive:
num_coarse_logits, _num_semantic_logits = coarse_labels.numel(), (semantic_labels != self.pad_id).sum()
else:
num_coarse_logits, _num_semantic_logits = coarse_logits.shape[-1], semantic_logits.shape[-1]
semantic_loss = 0.
num_semantic_logits = 0
if self.semantic_cross_entropy_loss_weight > 0 and exists(semantic_logits):
num_semantic_logits = _num_semantic_logits
semantic_loss = F.cross_entropy(
semantic_logits,
semantic_labels,
ignore_index = self.pad_id
)
coarse_loss = F.cross_entropy(
coarse_logits,
coarse_labels,
ignore_index = self.pad_id
)
return (
semantic_loss * num_semantic_logits * self.semantic_cross_entropy_loss_weight +
coarse_loss * num_coarse_logits
) / (num_semantic_logits + num_coarse_logits)
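# Sketch (hypothetical shapes) of computing the coarse stage loss directly from precomputed
# semantic and coarse token ids, bypassing wav2vec and the codec; assumes the wrapper was built
# around a CoarseTransformer with 3 coarse quantizers and matching vocabulary sizes:
def _example_coarse_wrapper_loss(coarse_wrapper, num_semantic_tokens = 500, codebook_size = 1024):
    import torch
    device = coarse_wrapper.device
    semantic_token_ids = torch.randint(0, num_semantic_tokens, (2, 100), device = device)
    coarse_token_ids = torch.randint(0, codebook_size, (2, 50, 3), device = device)  # (batch, time, quantizers)
    loss = coarse_wrapper(
        semantic_token_ids = semantic_token_ids,
        coarse_token_ids = coarse_token_ids,
        return_loss = True
    )
    return loss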
class FineTransformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
transformer: FineTransformer,
codec: Optional[Union[SoundStream, EncodecWrapper]] = None,
audio_conditioner: Optional[AudioConditionerBase] = None,
coarse_cross_entropy_loss_weight = 1.,
pad_id = -1,
mask_prob = 0.15
):
super().__init__()
self.codec = codec
self.transformer = transformer
self.to(transformer.device)
self.audio_conditioner = audio_conditioner
assert not (exists(audio_conditioner) and not transformer.has_condition), 'if conditioning on audio embeddings from mulan, transformer has_condition must be set to True'
self.num_fine_quantizers = transformer.num_fine_quantizers * (codec.rq_groups if exists(codec) else 1)
self.num_coarse_quantizers = transformer.num_coarse_quantizers * (codec.rq_groups if exists(codec) else 1)
if exists(codec):
assert (self.num_fine_quantizers + self.num_coarse_quantizers) == (codec.num_quantizers * codec.rq_groups), 'number of fine and coarse quantizers on fine transformer must add up to total number of quantizers on codec'
self.eos_id = transformer.eos_id
assert self.num_coarse_quantizers > 0
self.pad_id = pad_id
self.coarse_cross_entropy_loss_weight = coarse_cross_entropy_loss_weight
self.mask_prob = mask_prob
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
@beartype
def generate(
self,
*,
coarse_token_ids,
prime_wave: Optional[Tensor] = None,
prime_wave_input_sample_hz = None,
prime_fine_token_ids: Optional[Tensor] = None,
text: Optional[List[str]] = None,
text_embeds = None,
cond_scale = 3.,
filter_thres = 0.9,
temperature = 1.,
reconstruct_wave = False,
mask_out_generated_fine_tokens = False,
**kwargs
):
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
batch, device = coarse_token_ids.shape[0], self.device
coarse_token_ids = coarse_token_ids.to(device)
# derive text embeddings if needed
has_text = exists(text) or exists(text_embeds)
assert not (self.transformer.has_condition ^ has_text)
if not exists(text_embeds) and exists(text):
with torch.inference_mode():
text_embeds = self.transformer.embed_text(text, output_device = device)
# initialize fine token ids
# if a prime wave was supplied, start off with fine acoustic tokens
assert not (exists(prime_wave) and exists(prime_fine_token_ids)), 'you can either pass in the prime as a raw wave (codec required) or as preprocessed acoustic token ids'
if exists(prime_fine_token_ids):
fine_token_ids = prime_fine_token_ids
elif exists(prime_wave):
assert exists(self.codec)
with torch.inference_mode():
self.codec.eval()
_, token_ids, _ = self.codec(
prime_wave,
return_encoded = True,
input_sample_hz = prime_wave_input_sample_hz
)
fine_token_ids = token_ids[..., self.num_coarse_quantizers:]
fine_token_ids = rearrange(fine_token_ids, 'b ... -> b (...)')
else:
fine_token_ids = torch.empty((batch, 0), device = device, dtype = torch.long)
# calculate number of sampling steps
init_fine_time_step = fine_token_ids.shape[-1] // self.num_fine_quantizers
max_time_steps = coarse_token_ids.shape[1] // self.num_coarse_quantizers
sampled_fine_token_ids = fine_token_ids.clone()
for time_step in tqdm(range(init_fine_time_step, max_time_steps), desc = 'generating fine'):
for ind in range(self.num_fine_quantizers):
just_finished_quantizer_step = (ind == 0 and time_step > 0)
_, fine_logits = self.transformer.forward_with_cond_scale(
coarse_token_ids = coarse_token_ids,
fine_token_ids = sampled_fine_token_ids,
text_embeds = text_embeds,
cond_scale = cond_scale,
return_only_fine_logits = True,
**kwargs
)
last_fine_logits = fine_logits[:, -1]
if not just_finished_quantizer_step:
last_fine_logits[:, -1] = float('-inf') # prevent eos from being sampled in the middle of a time step
filtered_logits = top_k(last_fine_logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sampled = rearrange(sampled, 'b -> b 1')
sampled_fine_token_ids = torch.cat((sampled_fine_token_ids, sampled), dim = -1)
sampled_fine_token_ids = mask_out_after_eos_id(sampled_fine_token_ids, self.eos_id, keep_eos = False)
# reshape coarse and fine tokens for quantization dimension
sampled_fine_token_ids = rearrange(sampled_fine_token_ids, 'b (n q) -> b n q', q = self.num_fine_quantizers)
coarse_token_ids = rearrange(coarse_token_ids, 'b (n q) -> b n q', q = self.num_coarse_quantizers)
# whether to mask out fine token positions where the coarse token ids are all padding (variable lengthed training)
if mask_out_generated_fine_tokens:
pos_is_all_padding = (coarse_token_ids == self.pad_id).all(dim = -1, keepdim = True)
sampled_fine_token_ids = sampled_fine_token_ids.masked_fill(pos_is_all_padding, self.pad_id)
# if not reconstructing wave, return just the fine token ids
if not reconstruct_wave:
return sampled_fine_token_ids
# reconstruct the wave using codec, concatting the fine and coarse token ids together first across quantization dimension
assert exists(self.codec)
coarse_and_fine_ids = torch.cat((coarse_token_ids, sampled_fine_token_ids), dim = -1)
wav = self.codec.decode_from_codebook_indices(coarse_and_fine_ids)
return rearrange(wav, 'b 1 n -> b n')
def forward(
self,
*,
raw_wave = None,
text = None,
text_embeds = None,
token_ids = None,
coarse_token_ids = None,
fine_token_ids = None,
return_loss = False,
**kwargs
):
assert exists(raw_wave) ^ (exists(token_ids) ^ (exists(coarse_token_ids) and exists(fine_token_ids))), 'either raw waveform (raw_wave) is given, or all token ids (token_ids), or coarse and fine token ids (coarse_token_ids, fine_token_ids)'
if exists(self.audio_conditioner):
assert exists(raw_wave)
assert not exists(text) and not exists(text_embeds)
text_embeds = self.audio_conditioner(wavs = raw_wave, namespace = 'fine') # technically audio embeds, but shared text-audio joint embedding space for mulan
if exists(raw_wave):
assert exists(self.codec), 'Codec must be provided if given raw wave for training'
with torch.inference_mode():
self.codec.eval()
_, token_ids, _ = self.codec(raw_wave, return_encoded = True)
batch, num_timesteps = raw_wave.shape
num_frames = int(num_timesteps / self.codec.seq_len_multiple_of)
assert token_ids.shape == torch.Size((batch, num_frames, self.num_coarse_quantizers + self.num_fine_quantizers)), \
f'Expected token ids to have shape (batch, num_frames, num_coarse_quantizers + num_fine_quantizers), but got {token_ids.shape}'
if exists(token_ids):
coarse_token_ids, fine_token_ids = token_ids[..., :self.num_coarse_quantizers], token_ids[..., self.num_coarse_quantizers:]
coarse_token_ids = rearrange(coarse_token_ids, 'b ... -> b (...)')
fine_token_ids = rearrange(fine_token_ids, 'b ... -> b (...)')
# if training, determine labels, should remove one from fine token ids
if return_loss:
coarse_labels = coarse_token_ids
fine_labels = fine_token_ids
fine_token_ids = fine_token_ids[:, :-1]
# forgetful causal mask - structured dropout
self_attn_mask = None
if self.mask_prob > 0 and self.training:
mask_shape = (
coarse_token_ids.shape[0],
coarse_token_ids.shape[-1] + fine_token_ids.shape[-1] + 2
)
self_attn_mask = generate_mask_with_prob(mask_shape, self.mask_prob, device = self.device)
coarse_logits, fine_logits = self.transformer(
coarse_token_ids = coarse_token_ids,
fine_token_ids = fine_token_ids,
self_attn_mask = self_attn_mask,
text = text,
text_embeds = text_embeds,
**kwargs
)
# early return the logits
if not return_loss:
return coarse_logits, fine_logits
coarse_logits, fine_logits = map(lambda t: maybe(rearrange)(t, 'b n c -> b c n'), (coarse_logits, fine_logits))
num_fine_logits = fine_logits.shape[-1]
num_coarse_logits = 0
coarse_loss = 0.
if self.coarse_cross_entropy_loss_weight > 0 and exists(coarse_logits):
num_coarse_logits = coarse_logits.shape[-1]
coarse_loss = F.cross_entropy(
coarse_logits,
coarse_labels,
ignore_index = self.pad_id
)
fine_loss = F.cross_entropy(
fine_logits,
fine_labels,
ignore_index = self.pad_id
)
return (
coarse_loss * num_coarse_logits * self.coarse_cross_entropy_loss_weight +
fine_loss * num_fine_logits
) / (num_coarse_logits + num_fine_logits)
# audio LM
class AudioLM(nn.Module):
@beartype
def __init__(
self,
*,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
codec: Union[SoundStream, EncodecWrapper],
semantic_transformer: SemanticTransformer,
coarse_transformer: CoarseTransformer,
fine_transformer: FineTransformer,
audio_conditioner: Optional[AudioConditionerBase] = None,
unique_consecutive = True
):
super().__init__()
self.audio_conditioner = audio_conditioner
assert semantic_transformer.num_semantic_tokens == coarse_transformer.num_semantic_tokens
assert coarse_transformer.codebook_size == fine_transformer.codebook_size
assert coarse_transformer.num_coarse_quantizers == fine_transformer.num_coarse_quantizers
assert (fine_transformer.num_coarse_quantizers + fine_transformer.num_fine_quantizers) == codec.num_quantizers
self.semantic_has_condition = semantic_transformer.has_condition
self.coarse_has_condition = coarse_transformer.has_condition
self.fine_has_condition = fine_transformer.has_condition
self.needs_text = any([self.semantic_has_condition, self.coarse_has_condition, self.fine_has_condition])
self.semantic = SemanticTransformerWrapper(
wav2vec = wav2vec,
transformer = semantic_transformer,
audio_conditioner = audio_conditioner,
unique_consecutive = unique_consecutive
)
self.coarse = CoarseTransformerWrapper(
wav2vec = wav2vec,
codec = codec,
transformer = coarse_transformer,
audio_conditioner = audio_conditioner,
unique_consecutive = unique_consecutive
)
self.fine = FineTransformerWrapper(
codec = codec,
transformer = fine_transformer,
audio_conditioner = audio_conditioner
)
@property
def device(self):
return next(self.parameters()).device
@eval_decorator
@torch.inference_mode()
def forward(
self,
*,
batch_size = 1,
text: Optional[List[str]] = None,
text_embeds: Optional[Tensor] = None,
prime_wave = None,
prime_wave_input_sample_hz = None,
prime_wave_path = None,
max_length = 2048,
return_coarse_generated_wave = False,
mask_out_generated_fine_tokens = False
):
assert not (self.needs_text and (not exists(text) and not exists(text_embeds))), 'text needs to be passed in if one of the transformers requires conditioning'
if self.needs_text:
if exists(text):
text_embeds = self.semantic.embed_text(text)
assert not (exists(prime_wave) and exists(prime_wave_path)), 'prompt audio must be given as either `prime_wave: Tensor` or `prime_wave_path: str`'
if exists(prime_wave):
assert exists(prime_wave_input_sample_hz), 'the input sample frequency for the prompt audio must be given as `prime_wave_input_sample_hz: int`'
prime_wave = prime_wave.to(self.device)
elif exists(prime_wave_path):
prime_wave_path = Path(prime_wave_path)
assert prime_wave_path.exists(), f'file does not exist at {str(prime_wave_path)}'
prime_wave, prime_wave_input_sample_hz = torchaudio.load(str(prime_wave_path))
prime_wave = prime_wave.to(self.device)
semantic_token_ids = self.semantic.generate(
text_embeds = text_embeds if self.semantic_has_condition else None,
batch_size = batch_size,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
max_length = max_length
)
coarse_token_ids_or_recon_wave = self.coarse.generate(
text_embeds = text_embeds if self.coarse_has_condition else None,
semantic_token_ids = semantic_token_ids,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
reconstruct_wave = return_coarse_generated_wave
)
if return_coarse_generated_wave:
return coarse_token_ids_or_recon_wave
generated_wave = self.fine.generate(
text_embeds = text_embeds if self.fine_has_condition else None,
coarse_token_ids = coarse_token_ids_or_recon_wave,
prime_wave = prime_wave,
prime_wave_input_sample_hz = prime_wave_input_sample_hz,
reconstruct_wave = True,
mask_out_generated_fine_tokens = mask_out_generated_fine_tokens
)
return generated_wave
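# End-to-end generation sketch (hypothetical; assumes all sub-models have already been
# constructed and trained elsewhere - this is only meant to show how the pieces are wired):
def _example_audiolm_generate(wav2vec, codec, semantic_transformer, coarse_transformer, fine_transformer):
    audiolm = AudioLM(
        wav2vec = wav2vec,
        codec = codec,
        semantic_transformer = semantic_transformer,
        coarse_transformer = coarse_transformer,
        fine_transformer = fine_transformer
    )
    generated_wave = audiolm(batch_size = 1)  # unconditional generation from scratch
    return generated_wave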
|
audiolm-pytorch-main
|
audiolm_pytorch/audiolm_pytorch.py
|
import re
from math import sqrt
import copy
from random import choice
from pathlib import Path
from shutil import rmtree
from collections import Counter
from beartype.typing import Union, List, Optional, Tuple
from typing_extensions import Annotated
from beartype import beartype
from beartype.door import is_bearable
from beartype.vale import Is
import torch
import torchaudio
from torch import nn
from torch.utils.data import Dataset, DataLoader, random_split
from einops import rearrange
from audiolm_pytorch.optimizer import get_optimizer
from ema_pytorch import EMA
from audiolm_pytorch.soundstream import SoundStream
from audiolm_pytorch.encodec import EncodecWrapper
from audiolm_pytorch.audiolm_pytorch import (
SemanticTransformer,
SemanticTransformerWrapper,
CoarseTransformer,
CoarseTransformerWrapper,
FineTransformer,
FineTransformerWrapper,
FairseqVQWav2Vec,
HubertWithKmeans
)
from audiolm_pytorch.data import SoundDataset, get_dataloader
from audiolm_pytorch.utils import AudioConditionerBase
from audiolm_pytorch.version import __version__
from packaging import version
from accelerate import (Accelerator, DistributedType)
from accelerate.utils import DistributedDataParallelKwargs
# constants
DEFAULT_SAMPLE_RATE = 16000
# make sure only one trainer is instantiated
ONE_TRAINER_INSTANTIATED = False
def check_one_trainer():
global ONE_TRAINER_INSTANTIATED
assert not ONE_TRAINER_INSTANTIATED, 'only one Trainer can be instantiated at a time for training'
ONE_TRAINER_INSTANTIATED = True
# for automatically routing data emitted from a dataset to keywords of the transformer wrappers
DATASET_FIELD_TYPE_CONFIG = dict(
raw_wave = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim in {2, 3}]
],
text = List[str],
text_embeds = Annotated[
torch.Tensor,
Is[lambda t: t.dtype == torch.float and t.ndim == 3]
],
)
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
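# e.g. accum_log({}, {'loss': 0.5}) -> {'loss': 0.5}; calling it again on the same dict with
# another {'loss': 0.5} accumulates to {'loss': 1.0}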
# auto data to module keyword argument routing functions
def has_duplicates(tup):
counts = dict(Counter(tup))
return any(filter(lambda count: count > 1, counts.values()))
def determine_types(data, config):
output = []
for el in data:
for name, data_type in config.items():
if is_bearable(el, data_type):
output.append(name)
break
else:
raise TypeError(f'unable to determine type of {data}')
return tuple(output)
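# A small illustration (hypothetical tensors) of how determine_types maps a dataset tuple onto
# the keyword names declared in DATASET_FIELD_TYPE_CONFIG:
def _example_determine_types():
    import torch
    data = (torch.randn(2, 16000), ['hello world', 'a second caption'])
    # the float tensor matches the raw_wave spec and the list of strings matches text,
    # so this returns ('raw_wave', 'text')
    return determine_types(data, DATASET_FIELD_TYPE_CONFIG)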
def checkpoint_num_steps(checkpoint_path):
"""Returns the number of steps trained from a checkpoint based on the filename.
Filename format assumed to be something like "/path/to/semantic.transformer.20000.pt" which is
for 20k train steps. Returns 20000 in that case.
"""
results = re.findall(r'\d+', str(checkpoint_path))
if len(results) == 0:
return 0
return int(results[-1])
# main trainer class
class SoundStreamTrainer(nn.Module):
@beartype
def __init__(
self,
soundstream: SoundStream,
*,
num_train_steps: int,
batch_size: int,
data_max_length: int = None,
data_max_length_seconds: Union[int, float] = None,
folder: str = None,
train_dataloader: DataLoader = None,
val_dataloader: DataLoader = None,
lr: float = 2e-4,
grad_accum_every: int = 4,
wd: float = 0.,
max_grad_norm: float = 0.5,
discr_max_grad_norm: float = None,
save_results_every: int = 100,
save_model_every: int = 1000,
log_losses_every: int = 1,
results_folder: str = './results',
valid_frac: float = 0.05,
random_split_seed: int = 42,
use_ema: bool = True,
ema_beta: float = 0.995,
ema_update_after_step: int = 500,
ema_update_every: int = 10,
apply_grad_penalty_every: int = 4,
dl_num_workers: int = 0,
accelerator: Accelerator = None,
accelerate_kwargs: dict = dict(),
dataloader_drop_last = True,
split_batches = False,
use_lion: bool = False,
force_clear_prev_results: bool = None # set to True | False to skip the prompt
):
"""
Initialize with a SoundStream instance and either a folder containing audio data or
train/val DataLoader instances.
"""
super().__init__()
check_one_trainer()
if accelerator:
self.accelerator = accelerator
assert len(accelerate_kwargs) == 0
else:
kwargs = DistributedDataParallelKwargs(find_unused_parameters = True)
self.accelerator = Accelerator(
kwargs_handlers = [kwargs],
split_batches = split_batches,
**accelerate_kwargs
)
self.soundstream = soundstream
self.use_ema = use_ema
if self.use_ema:
self.ema_soundstream = EMA(soundstream, beta = ema_beta, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
hyperparameters = {
"num_train_steps": num_train_steps,
"batch_size": batch_size,
"gradient_accum_every": grad_accum_every,
"learning_rate": lr,
"target_sample_hz": soundstream.target_sample_hz,
}
# optimizers
self.optim = get_optimizer(soundstream.non_discr_parameters(), lr = lr, wd = wd)
for discr_optimizer_key, discr in self.multiscale_discriminator_iter():
one_multiscale_discr_optimizer = get_optimizer(discr.parameters(), lr = lr, wd = wd)
setattr(self, discr_optimizer_key, one_multiscale_discr_optimizer)
self.discr_optim = get_optimizer(soundstream.stft_discriminator.parameters(), lr = lr, wd = wd, use_lion = use_lion)
# max grad norm
self.max_grad_norm = max_grad_norm
self.discr_max_grad_norm = discr_max_grad_norm
if folder is None:
assert train_dataloader is not None
assert val_dataloader is not None
self.dl = train_dataloader
self.valid_dl = val_dataloader
else:
assert train_dataloader is None
assert val_dataloader is None
# create dataset
if exists(data_max_length_seconds):
assert not exists(data_max_length)
data_max_length = int(data_max_length_seconds * soundstream.target_sample_hz)
else:
assert exists(data_max_length)
hyperparameters['data_max_length'] = data_max_length
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = soundstream.target_sample_hz,
seq_len_multiple_of = soundstream.seq_len_multiple_of
)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have a sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, num_workers = dl_num_workers, shuffle = True, drop_last = dataloader_drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, num_workers = dl_num_workers, shuffle = True, drop_last = dataloader_drop_last)
# prepare with accelerator
(
self.soundstream,
self.optim,
self.discr_optim,
self.dl
) = self.accelerator.prepare(
self.soundstream,
self.optim,
self.discr_optim,
self.dl
)
# prepare the multiscale discriminators with accelerator
for name, _ in self.multiscale_discriminator_iter():
optimizer = getattr(self, name)
optimizer = self.accelerator.prepare(optimizer)
setattr(self, name, optimizer)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.log_losses_every = log_losses_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
# Initialize experiment trackers if an external Accelerator is not passed in
if not accelerator:
self.accelerator.init_trackers("soundstream", config=hyperparameters)
assert self.accelerator.distributed_type != DistributedType.FSDP, 'FSDP not supported for soundstream trainer due to complex-valued stft discriminator'
def set_model_as_ema_model_(self):
""" this will force the main 'online' model to have same parameters as the exponentially moving averaged model """
assert self.use_ema
self.ema_soundstream.ema_model.load_state_dict(self.soundstream.state_dict())
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.soundstream),
optim = self.optim.state_dict(),
config = self.unwrapped_soundstream._configs,
discr_optim = self.discr_optim.state_dict(),
version = __version__
)
if self.use_ema:
pkg['ema_model'] = self.ema_soundstream.state_dict()
for key, _ in self.multiscale_discriminator_iter():
discr_optim = getattr(self, key)
pkg[key] = discr_optim.state_dict()
torch.save(pkg, path)
@property
def unwrapped_soundstream(self):
return self.accelerator.unwrap_model(self.soundstream)
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
# if loading from old version, make a hacky guess
if len(pkg.keys()) > 20:
self.unwrapped_soundstream.load_state_dict(pkg)
if self.use_ema:
self.ema_soundstream.ema_model.load_state_dict(pkg)
return
# check version
if 'version' in pkg and version.parse(pkg['version']) < version.parse(__version__):
print(f'model was trained on older version {pkg["version"]} of audiolm-pytorch')
# otherwise load things normally
self.unwrapped_soundstream.load_state_dict(pkg['model'])
if self.use_ema:
assert 'ema_model' in pkg
self.ema_soundstream.load_state_dict(pkg['ema_model'])
self.optim.load_state_dict(pkg['optim'])
self.discr_optim.load_state_dict(pkg['discr_optim'])
for key, _ in self.multiscale_discriminator_iter():
discr_optim = getattr(self, key)
discr_optim.load_state_dict(pkg[key])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def multiscale_discriminator_iter(self):
for ind, discr in enumerate(self.unwrapped_soundstream.discriminators):
yield f'multiscale_discr_optimizer_{ind}', discr
def multiscale_discriminator_optim_iter(self):
for name, _ in self.multiscale_discriminator_iter():
yield name, getattr(self, name)
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
apply_grad_penalty = self.apply_grad_penalty_every > 0 and not (steps % self.apply_grad_penalty_every)
log_losses = self.log_losses_every > 0 and not (steps % self.log_losses_every)
self.soundstream.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
wave, = next(self.dl_iter)
wave = wave.to(device)
loss, (recon_loss, multi_spectral_recon_loss, adversarial_loss, feature_loss, all_commitment_loss) = self.soundstream(wave, return_loss_breakdown = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, dict(
loss = loss.item() / self.grad_accum_every,
recon_loss = recon_loss.item() / self.grad_accum_every,
))
if log_losses:
accum_log(logs, dict(
multi_spectral_recon_loss = multi_spectral_recon_loss.item() / self.grad_accum_every,
adversarial_loss = adversarial_loss.item() / self.grad_accum_every,
feature_loss = feature_loss.item() / self.grad_accum_every,
all_commitment_loss = all_commitment_loss.item() / self.grad_accum_every,
))
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.soundstream.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# update discriminator
self.discr_optim.zero_grad()
for name, multiscale_discr_optim in self.multiscale_discriminator_optim_iter():
multiscale_discr_optim.zero_grad()
for _ in range(self.grad_accum_every):
wave, = next(self.dl_iter)
wave = wave.to(device)
discr_losses = self.soundstream(
wave,
apply_grad_penalty = apply_grad_penalty,
return_discr_loss = True,
return_discr_losses_separately = True
)
for name, discr_loss in discr_losses:
self.accelerator.backward(discr_loss / self.grad_accum_every, retain_graph = True)
accum_log(logs, {name: discr_loss.item() / self.grad_accum_every})
if exists(self.discr_max_grad_norm):
self.accelerator.clip_grad_norm_(self.soundstream.stft_discriminator.parameters(), self.discr_max_grad_norm)
# gradient step for all discriminators
self.discr_optim.step()
for name, multiscale_discr_optim in self.multiscale_discriminator_optim_iter():
multiscale_discr_optim.step()
# build pretty printed losses
losses_str = f"{steps}: soundstream total loss: {logs['loss']:.3f}, soundstream recon loss: {logs['recon_loss']:.3f}"
if log_losses:
self.accelerator.log({
"total_loss": logs['loss'],
"recon_loss": logs['recon_loss'],
"multi_spectral_recon_loss": logs['multi_spectral_recon_loss'],
"adversarial_loss": logs['adversarial_loss'],
"feature_loss": logs['feature_loss'],
"all_commitment_loss": logs['all_commitment_loss'],
"stft_discr_loss": logs['stft']
}, step=steps)
for key, loss in logs.items():
if not key.startswith('scale:'):
continue
_, scale_factor = key.split(':')
losses_str += f" | discr (scale {scale_factor}) loss: {loss:.3f}"
if log_losses:
self.accelerator.log({f"discr_loss (scale {scale_factor})": loss}, step=steps)
# log
self.print(losses_str)
# update exponential moving averaged generator
self.accelerator.wait_for_everyone()
if self.is_main and self.use_ema:
self.ema_soundstream.update()
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
models = [(self.unwrapped_soundstream, str(steps))]
if self.use_ema:
models.append((self.ema_soundstream.ema_model if self.use_ema else self.unwrapped_soundstream, f'{steps}.ema'))
wave, = next(self.valid_dl_iter)
wave = wave.to(device)
for model, label in models:
model.eval()
with torch.inference_mode():
recons = model(wave, return_recons_only = True)
for ind, recon in enumerate(recons.unbind(dim = 0)):
filename = str(self.results_folder / f'sample_{label}.flac')
torchaudio.save(filename, recon.cpu().detach(), self.unwrapped_soundstream.target_sample_hz)
self.print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'soundstream.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
# semantic transformer trainer
class SemanticTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
transformer: SemanticTransformer,
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
data_max_length = None,
data_max_length_seconds = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.wav2vec = wav2vec
self.transformer = transformer
self.audio_conditioner = audio_conditioner
self.train_wrapper = SemanticTransformerWrapper(
wav2vec = wav2vec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in, if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = data_max_length_seconds * wav2vec.target_sample_hz
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = wav2vec.target_sample_hz,
seq_len_multiple_of = wav2vec.seq_len_multiple_of
)
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.train_wrapper,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.train_wrapper,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?')):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("semantic", config=hps)
self.average_valid_loss_over_grad_accum_every = average_valid_loss_over_grad_accum_every
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.ds_fields, data))
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.train_wrapper(**data_kwargs, return_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for _ in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.valid_dl_iter))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(**data_kwargs, return_loss = True)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'semantic.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
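# illustrative usage sketch (not part of the original module): wiring a wav2vec feature
# extractor and a SemanticTransformer into this trainer, following the package's usual
# constructors. the checkpoint paths, audio folder and hyperparameters below are all
# placeholders / assumptions, not prescribed values.
def _example_semantic_trainer():
    wav2vec = HubertWithKmeans(
        checkpoint_path = './hubert_base_ls960.pt',          # placeholder path
        kmeans_path = './hubert_base_ls960_L9_km500.bin'     # placeholder path
    )
    transformer = SemanticTransformer(
        num_semantic_tokens = wav2vec.codebook_size,
        dim = 1024,
        depth = 6
    )
    trainer = SemanticTransformerTrainer(
        wav2vec = wav2vec,
        transformer = transformer,
        folder = './path/to/audio',                          # placeholder folder of audio files
        batch_size = 4,
        grad_accum_every = 8,
        data_max_length_seconds = 10,                        # converted to samples via wav2vec.target_sample_hz
        num_train_steps = 1_000_000
    )
    trainer.train()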
# coarse transformer trainer
class CoarseTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
transformer: CoarseTransformer,
codec: Union[SoundStream, EncodecWrapper],
wav2vec: Optional[Union[FairseqVQWav2Vec, HubertWithKmeans]],
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
ds_fields: Tuple[str, ...] = ('raw_wave', 'raw_wave_for_codec', 'text'),
data_max_length = None,
data_max_length_seconds = None,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.transformer = transformer
self.codec = codec
self.wav2vec = wav2vec
self.audio_conditioner = audio_conditioner
self.train_wrapper = CoarseTransformerWrapper(
codec = codec,
wav2vec = wav2vec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in, if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = tuple(data_max_length_seconds * hz for hz in (wav2vec.target_sample_hz, codec.target_sample_hz))
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = (
wav2vec.target_sample_hz,
codec.target_sample_hz
), # need 2 waves resampled differently here
seq_len_multiple_of = codec.seq_len_multiple_of
)
self.ds_fields = ds_fields
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.transformer,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.transformer,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if self.is_main and force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?')):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("coarse", config=hps)
self.train_wrapper.to(self.device)
self.average_valid_loss_over_grad_accum_every = average_valid_loss_over_grad_accum_every
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = dict(zip(self.ds_fields, next(self.dl_iter)))
loss = self.train_wrapper(
**data_kwargs,
return_loss = True
)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for i in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = dict(zip(self.ds_fields, next(self.valid_dl_iter)))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(
**data_kwargs,
return_loss = True
)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'coarse.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
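# illustrative usage sketch (not part of the original module): the coarse trainer needs both
# the wav2vec model and the codec, so the underlying SoundDataset returns two differently
# resampled waves per item (see the target_sample_hz tuple in __init__ above). paths and
# hyperparameters are placeholders / assumptions.
def _example_coarse_trainer():
    wav2vec = HubertWithKmeans(
        checkpoint_path = './hubert_base_ls960.pt',          # placeholder path
        kmeans_path = './hubert_base_ls960_L9_km500.bin'     # placeholder path
    )
    codec = EncodecWrapper()                                 # or a trained SoundStream
    transformer = CoarseTransformer(
        num_semantic_tokens = wav2vec.codebook_size,
        codebook_size = 1024,
        num_coarse_quantizers = 3,
        dim = 512,
        depth = 6
    )
    trainer = CoarseTransformerTrainer(
        transformer = transformer,
        codec = codec,
        wav2vec = wav2vec,
        folder = './path/to/audio',                          # placeholder folder of audio files
        batch_size = 1,
        num_train_steps = 1_000_000
    )
    trainer.train()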
# fine transformer trainer
class FineTransformerTrainer(nn.Module):
@beartype
def __init__(
self,
transformer: FineTransformer,
codec: Union[SoundStream, EncodecWrapper],
*,
num_train_steps,
batch_size,
audio_conditioner: Optional[AudioConditionerBase] = None,
dataset: Optional[Dataset] = None,
data_max_length = None,
data_max_length_seconds = None,
dataset_normalize = False,
folder = None,
lr = 3e-4,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None,
average_valid_loss_over_grad_accum_every: bool = True, # if False, valid loss on a single batch
):
super().__init__()
check_one_trainer()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.transformer = transformer
self.codec = codec
self.audio_conditioner = audio_conditioner
self.train_wrapper = FineTransformerWrapper(
codec = codec,
transformer = transformer,
audio_conditioner = audio_conditioner
)
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
# optimizers
self.optim = get_optimizer(transformer.parameters(), lr = lr, wd = wd)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
if not exists(self.ds):
assert exists(folder), 'folder must be passed in, if not passing in a custom dataset for text conditioned audio synthesis training'
assert not (exists(data_max_length) and exists(data_max_length_seconds))
if exists(data_max_length_seconds):
data_max_length = data_max_length_seconds * codec.target_sample_hz
self.ds = SoundDataset(
folder,
max_length = data_max_length,
target_sample_hz = codec.target_sample_hz,
seq_len_multiple_of = codec.seq_len_multiple_of
)
self.ds_fields = None
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.transformer,
self.optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.transformer,
self.optim,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
if force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?')):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
hps = {"num_train_steps": num_train_steps, "data_max_length": data_max_length, "learning_rate": lr}
self.accelerator.init_trackers("fine", config=hps)
self.train_wrapper.to(self.device)
self.average_valid_loss_over_grad_accum_every = average_valid_loss_over_grad_accum_every
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.transformer),
optim = self.optim.state_dict(),
version = __version__
)
torch.save(pkg, path)
def load(self, path):
transformer = self.accelerator.unwrap_model(self.transformer)
pkg = transformer.load(path)
# trainer-specific things
self.optim.load_state_dict(pkg['optim'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.train_wrapper.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def data_tuple_to_kwargs(self, data):
if not exists(self.ds_fields):
self.ds_fields = determine_types(data, DATASET_FIELD_TYPE_CONFIG)
assert not has_duplicates(self.ds_fields), 'dataset fields must not have duplicate field names'
return dict(zip(self.ds_fields, data))
def train_step(self):
device = self.device
steps = int(self.steps.item())
self.transformer.train()
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.dl_iter))
loss = self.train_wrapper(**data_kwargs, return_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.transformer.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
valid_loss = 0
for i in range(self.average_valid_loss_over_grad_accum_every):
data_kwargs = self.data_tuple_to_kwargs(next(self.valid_dl_iter))
with torch.inference_mode():
self.train_wrapper.eval()
valid_loss += self.train_wrapper(**data_kwargs, return_loss = True)
valid_loss = valid_loss.clone() # avoid inference mode to non-inference mode error
valid_loss /= self.average_valid_loss_over_grad_accum_every
self.print(f'{steps}: valid loss {valid_loss}')
self.accelerator.log({"valid_loss": valid_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'fine.transformer.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
|
audiolm-pytorch-main
|
audiolm_pytorch/trainer.py
|
from functools import reduce
from einops import rearrange, pack, unpack
import torch
from torch import nn
from torchaudio.functional import resample
from vector_quantize_pytorch import ResidualVQ
from encodec import EncodecModel
from encodec.utils import _linear_overlap_add
# helper functions
def exists(val):
return val is not None
# hacky way to get num quantizers
def get_num_quantizers(model: EncodecModel, audio_length = 512):
out = model.encode(torch.randn(1, 1, audio_length))
return out[0][0].shape[1]
class EncodecWrapper(nn.Module):
"""
Support pretrained 24kHz Encodec by Meta AI, if you want to skip training SoundStream.
TODO:
- see if we need to keep the scaled version and somehow persist the scale factors for when we need to decode? Right
now I'm just setting self.model.normalize = False to sidestep all of that
- see if we can use the 48kHz model, which is specifically for music. Right now we're using the 24kHz model because
that's what was used in MusicLM and avoids any resampling issues.
-
"""
def __init__(
self,
target_sample_hz = 24000,
strides = (2, 4, 5, 8),
num_quantizers = 8,
bandwidth = 6.0
):
super().__init__()
# Instantiate a pretrained EnCodec model
self.model = EncodecModel.encodec_model_24khz()
self.model.normalize = False # this means we don't need to scale codes e.g. when running model.encode(wav)
        # The number of codebooks used will be determined by the bandwidth selected.
        # E.g. for a bandwidth of 6kbps, `n_q = 8` codebooks are used.
        # Supported bandwidths are 1.5 kbps (n_q = 2), 3 kbps (n_q = 4), 6 kbps (n_q = 8), 12 kbps (n_q = 16) and 24 kbps (n_q = 32).
# For the 48 kHz model, only 3, 6, 12, and 24 kbps are supported. The number
# of codebooks for each is half that of the 24 kHz model as the frame rate is twice as much.
# bandwidth affects num quantizers used: https://github.com/facebookresearch/encodec/pull/41
self.model.set_target_bandwidth(bandwidth)
num_quantizers = get_num_quantizers(self.model)
# Fields that SoundStream has that get used externally. We replicate them here.
self.target_sample_hz = target_sample_hz
assert self.target_sample_hz == 24000, "haven't done anything with non-24kHz yet"
self.codebook_dim = 128
self.rq_groups = 1
self.num_quantizers = num_quantizers
self.strides = strides # used in seq_len_multiple_of
# cross entropy loss to indices passed in on l2 distance logits introduced in vector-quantize-pytorch 1.2.2
self.rq = ResidualVQ(
dim = 128,
codebook_size = 1024,
num_quantizers = num_quantizers
)
# copy codebook over to ResidualVQ for cross entropy loss logic from naturalspeech2
# luckily, it seems Meta AI basically used my ResidualVQ code verbatim. makes porting it over easy
for encodec_rq_layer, rq_layer in zip(self.model.quantizer.vq.layers, self.rq.layers):
encodec_codebook = dict(encodec_rq_layer._codebook.named_buffers()).get('embed')
vq_codebook = dict(rq_layer._codebook.named_buffers()).get('embed')
encodec_codebook = rearrange(encodec_codebook, '... -> 1 ...')
vq_codebook.copy_(encodec_codebook)
@property
def seq_len_multiple_of(self):
return reduce(lambda x, y: x * y, self.strides)
@property
def downsample_factor(self):
return self.seq_len_multiple_of
def forward(
self,
x,
input_sample_hz = None,
return_encoded = False,
**kwargs
):
x, ps = pack([x], '* n')
if exists(input_sample_hz):
x = resample(x, input_sample_hz, self.target_sample_hz)
# kwargs for stuff like return_encoded=True, which SoundStream uses but Encodec doesn't
assert not self.model.training, "Encodec is pretrained and should never be called outside eval mode."
# Unlike in the Encodec sample code in its README, x has already been resampled so we don't need to call
# convert_audio and unsqueeze. The convert_audio function also doesn't play nicely with batches.
# b = batch, t = timesteps, 1 channel for the 24kHz model, 2 channels for the 48kHz model
wav = rearrange(x, f'b t -> b {self.model.channels} t')
# Extract discrete codes from EnCodec
with torch.inference_mode():
encoded_frames = self.model.encode(wav)
# encoded_frames is a list of (frame, scale) tuples. Scale is a scalar but we don't use it. Frame is a tensor
# of shape [batch, num_quantizers, num_samples_per_frame]. We want to concatenate the frames to get all the
# timesteps concatenated.
codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1) # [batch, num_quantizers, timesteps]
# transformer code that uses codec expects codes to be [batch, timesteps, num_quantizers]
codes = rearrange(codes, 'b q n -> b n q') # result: [batch, timesteps, num_quantizers]
        # the original soundstream returns (x, indices, commit_loss), but we only use indices in eval mode, so just keep that
# allow for returning of sum of quantized embeddings
emb = None
if return_encoded:
emb = self.get_emb_from_indices(codes)
emb, = unpack(emb, ps, '* n c')
codes, = unpack(codes, ps, '* n q')
return emb, codes, None
def decode_from_codebook_indices(self, quantized_indices):
# Input: batch x num tokens x num quantizers
# Output: batch x 1 x num samples
assert self.model.sample_rate == 24000,\
"if changing to 48kHz, that model segments its audio into lengths of 1.0 second with 1% overlap, whereas " \
"the 24kHz doesn't segment at all. this means the frame decode logic might change; this is a reminder to " \
"double check that."
# Since 24kHz pretrained doesn't do any segmenting, we have all the frames already (1 frame = 1 token in quantized_indices)
# The following code is hacked in from self.model.decode() (Encodec version 0.1.1) where we skip the part about
# scaling.
# Shape: 1 x (num_frames * stride product). 1 because we have 1 frame (because no segmenting)
frames = self._decode_frame(quantized_indices)
result = _linear_overlap_add(frames, self.model.segment_stride or 1)
# TODO: I'm not overly pleased with this because when this function gets called, we just rearrange the result
# back to b n anyways, but we'll keep this as a temporary hack just to make things work for now
return rearrange(result, 'b n -> b 1 n')
def get_emb_from_indices(self, indices):
codes = rearrange(indices, 'b t q -> q b t')
emb = self.model.quantizer.decode(codes)
return rearrange(emb, 'b c n -> b n c')
def decode(self, emb):
emb = rearrange(emb, 'b n c -> b c n')
return self.model.decoder(emb)
def _decode_frame(self, quantized_indices):
# The following code is hacked in from self.model._decode_frame() (Encodec version 0.1.1) where we assume we've
# already unwrapped the EncodedFrame
# Input: batch x num tokens x num quantizers
# Output: batch x new_num_samples, where new_num_samples is num_frames * stride product (may be slightly
# larger than original num samples as a result, because the last frame might not be "fully filled" with samples
# if num_samples doesn't divide perfectly).
# num_frames == the number of acoustic tokens you have, one token per frame
codes = rearrange(quantized_indices, 'b t q -> q b t')
emb = self.model.quantizer.decode(codes)
# emb shape: batch x self.model.quantizer.dimension x T. Note self.model.quantizer.dimension is the embedding dimension
return self.model.decoder(emb)
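# illustrative round-trip sketch (not part of the original module): encode a wave into
# codebook indices and decode them back to audio. shapes follow the comments in forward
# and decode_from_codebook_indices above; constructing EncodecWrapper loads the pretrained
# 24kHz Encodec weights.
def _example_encodec_roundtrip():
    codec = EncodecWrapper()
    codec.eval()                                          # the wrapper asserts the model is not in training mode
    wave = torch.randn(1, 24000)                          # one second of audio at 24kHz, batch of 1
    emb, codes, _ = codec(wave, return_encoded = True)    # codes: (batch, timesteps, num_quantizers)
    recon = codec.decode_from_codebook_indices(codes)     # (batch, 1, num_samples)
    return recon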
|
audiolm-pytorch-main
|
audiolm_pytorch/encodec.py
|
from pathlib import Path
from functools import partial, wraps
from beartype import beartype
from beartype.typing import Tuple, Union, Optional
from beartype.door import is_bearable
import torchaudio
from torchaudio.functional import resample
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from audiolm_pytorch.utils import curtail_to_multiple
from einops import rearrange, reduce
# helper functions
def exists(val):
return val is not None
def cast_tuple(val, length = 1):
return val if isinstance(val, tuple) else ((val,) * length)
def is_unique(arr):
return len(set(arr)) == len(arr)
# dataset functions
class SoundDataset(Dataset):
@beartype
def __init__(
self,
folder,
target_sample_hz: Union[int, Tuple[int, ...]], # target sample hz must be specified, or a tuple of them if one wants to return multiple resampled
exts = ['flac', 'wav', 'mp3', 'webm'],
max_length: Optional[int] = None, # max length would apply to the highest target_sample_hz, if there are multiple
seq_len_multiple_of: Optional[Union[int, Tuple[Optional[int], ...]]] = None
):
super().__init__()
path = Path(folder)
assert path.exists(), 'folder does not exist'
files = [file for ext in exts for file in path.glob(f'**/*.{ext}')]
assert len(files) > 0, 'no sound files found'
self.files = files
self.max_length = max_length
self.target_sample_hz = cast_tuple(target_sample_hz)
num_outputs = len(self.target_sample_hz)
# strategy, if there are multiple target sample hz, would be to resample to the highest one first
# apply the max lengths, and then resample to all the others
self.max_target_sample_hz = max(self.target_sample_hz)
self.seq_len_multiple_of = cast_tuple(seq_len_multiple_of, num_outputs)
assert len(self.target_sample_hz) == len(self.seq_len_multiple_of)
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
file = self.files[idx]
data, sample_hz = torchaudio.load(file)
        assert data.numel() > 0, f'one of your audio files ({file}) is empty. please remove it from your folder'
if data.shape[0] > 1:
# the audio has more than 1 channel, convert to mono
data = reduce(data, 'c ... -> 1 ...', 'mean')
# first resample data to the max target freq
data = resample(data, sample_hz, self.max_target_sample_hz)
sample_hz = self.max_target_sample_hz
# then curtail or pad the audio depending on the max length
max_length = self.max_length
audio_length = data.size(1)
if exists(max_length):
if audio_length > max_length:
max_start = audio_length - max_length
start = torch.randint(0, max_start, (1, ))
data = data[:, start:start + max_length]
else:
data = F.pad(data, (0, max_length - audio_length), 'constant')
data = rearrange(data, '1 ... -> ...')
# resample if target_sample_hz is not None in the tuple
num_outputs = len(self.target_sample_hz)
data = cast_tuple(data, num_outputs)
data_tuple = tuple(resample(d, sample_hz, target_sample_hz) for d, target_sample_hz in zip(data, self.target_sample_hz))
output = []
# process each of the data resample at different frequencies individually for curtailing to multiple
for data, seq_len_multiple_of in zip(data_tuple, self.seq_len_multiple_of):
if exists(seq_len_multiple_of):
data = curtail_to_multiple(data, seq_len_multiple_of)
output.append(data.float())
# cast from list to tuple
output = tuple(output)
# return only one audio, if only one target resample freq
if num_outputs == 1:
return output[0]
return output
# dataloader functions
def collate_one_or_multiple_tensors(fn):
@wraps(fn)
def inner(data):
is_one_data = not isinstance(data[0], tuple)
if is_one_data:
data = fn(data)
return (data,)
outputs = []
for datum in zip(*data):
if is_bearable(datum, Tuple[str, ...]):
output = list(datum)
else:
output = fn(datum)
outputs.append(output)
return tuple(outputs)
return inner
@collate_one_or_multiple_tensors
def curtail_to_shortest_collate(data):
    min_len = min(datum.shape[0] for datum in data) # generator form also handles a batch of one
data = [datum[:min_len] for datum in data]
return torch.stack(data)
@collate_one_or_multiple_tensors
def pad_to_longest_fn(data):
return pad_sequence(data, batch_first = True)
def get_dataloader(ds, pad_to_longest = True, **kwargs):
collate_fn = pad_to_longest_fn if pad_to_longest else curtail_to_shortest_collate
return DataLoader(ds, collate_fn = collate_fn, **kwargs)
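# illustrative usage sketch (not part of the original module): a dataset that returns the same
# clip resampled at two rates (e.g. a wav2vec model at 16kHz and a codec at 24kHz), batched
# with padding to the longest item. the folder path, rates and multiples are placeholders.
def _example_sound_dataloader():
    ds = SoundDataset(
        './path/to/audio',                                 # placeholder folder of flac / wav / mp3 / webm files
        target_sample_hz = (16000, 24000),                 # two resampled waves are returned per item
        max_length = 24000 * 10,                           # applied at the highest sample rate (10 seconds here)
        seq_len_multiple_of = (320, 320)                   # placeholder downsample factors
    )
    dl = get_dataloader(ds, batch_size = 4, shuffle = True)
    wave_16k, wave_24k = next(iter(dl))                    # each: (batch, padded_length) at its own sample rate
    return wave_16k, wave_24k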
|
audiolm-pytorch-main
|
audiolm_pytorch/data.py
|
from setuptools import setup, find_packages
exec(open('imagen_pytorch/version.py').read())
setup(
name = 'imagen-pytorch',
packages = find_packages(exclude=[]),
include_package_data = True,
entry_points={
'console_scripts': [
'imagen_pytorch = imagen_pytorch.cli:main',
'imagen = imagen_pytorch.cli:imagen'
],
},
version = __version__,
license='MIT',
description = 'Imagen - unprecedented photorealism × deep level of language understanding',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/imagen-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'text-to-image',
'denoising-diffusion'
],
install_requires=[
'accelerate',
'beartype',
'click',
'datasets',
'einops>=0.6.1',
'ema-pytorch>=0.0.3',
'fsspec',
'kornia',
'lion-pytorch',
'numpy',
'packaging',
'pillow',
'pydantic>=2',
'pytorch-lightning',
'pytorch-warmup',
'sentencepiece',
'torch>=1.6',
'torchvision',
'transformers',
'triton',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
imagen-pytorch-main
|
setup.py
|
import math
import copy
import operator
import functools
from typing import List
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from einops_exts.torch import EinopsToAndFrom
from imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def first(arr, d = None):
if len(arr) == 0:
return d
return arr[0]
def divisible_by(numer, denom):
return (numer % denom) == 0
def maybe(fn):
@wraps(fn)
def inner(x):
if not exists(x):
return x
return fn(x)
return inner
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = None):
if isinstance(val, list):
val = tuple(val)
output = val if isinstance(val, tuple) else ((val,) * default(length, 1))
if exists(length):
assert len(output) == length
return output
def cast_uint8_images_to_float(images):
if not images.dtype == torch.uint8:
return images
return images / 255
def module_device(module):
return next(module.parameters()).device
def zero_init_(m):
nn.init.zeros_(m.weight)
if exists(m.bias):
nn.init.zeros_(m.bias)
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def pad_tuple_to_length(t, length, fillvalue = None):
remain_length = length - len(t)
if remain_length <= 0:
return t
return (*t, *((fillvalue,) * remain_length))
# helper classes
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, *args, **kwargs):
return x
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def log(t, eps: float = 1e-12):
return torch.log(t.clamp(min = eps))
def l2norm(t):
return F.normalize(t, dim = -1)
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def masked_mean(t, *, dim, mask = None):
if not exists(mask):
return t.mean(dim = dim)
denom = mask.sum(dim = dim, keepdim = True)
mask = rearrange(mask, 'b n -> b n 1')
masked_t = t.masked_fill(~mask, 0.)
return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)
def resize_video_to(
video,
target_image_size,
target_frames = None,
clamp_range = None,
mode = 'nearest'
):
orig_video_size = video.shape[-1]
frames = video.shape[2]
target_frames = default(target_frames, frames)
target_shape = (target_frames, target_image_size, target_image_size)
if tuple(video.shape[-3:]) == target_shape:
return video
out = F.interpolate(video, target_shape, mode = mode)
if exists(clamp_range):
out = out.clamp(*clamp_range)
return out
def scale_video_time(
video,
downsample_scale = 1,
mode = 'nearest'
):
if downsample_scale == 1:
return video
image_size, frames = video.shape[-1], video.shape[-3]
    assert divisible_by(frames, downsample_scale), f'trying to temporally downsample a conditioning video of {frames} frames by a factor of {downsample_scale}, but it is not neatly divisible'
target_frames = frames // downsample_scale
resized_video = resize_video_to(
video,
image_size,
target_frames = target_frames,
mode = mode
)
return resized_video
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
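# illustrative sketch (not part of the original module): prob_mask_like yields a boolean
# keep-mask used to randomly drop conditioning per batch element during training for
# classifier free guidance. here the dropped text embeddings are simply zeroed out; the
# actual unet swaps in learned null embeddings instead.
def _example_cond_dropout(text_embeds, cond_drop_prob = 0.1):
    b, device = text_embeds.shape[0], text_embeds.device
    keep_mask = prob_mask_like((b,), 1. - cond_drop_prob, device = device)  # (b,) bool
    return torch.where(
        rearrange(keep_mask, 'b -> b 1 1'),
        text_embeds,
        torch.zeros_like(text_embeds)
    )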
# norms and residuals
class LayerNorm(nn.Module):
def __init__(self, dim, stable = False):
super().__init__()
self.stable = stable
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
if self.stable:
x = x / x.amax(dim = -1, keepdim = True).detach()
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = -1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class ChanLayerNorm(nn.Module):
def __init__(self, dim, stable = False):
super().__init__()
self.stable = stable
self.g = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
def forward(self, x):
if self.stable:
x = x / x.amax(dim = 1, keepdim = True).detach()
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class Always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class Parallel(nn.Module):
def __init__(self, *fns):
super().__init__()
self.fns = nn.ModuleList(fns)
def forward(self, x):
outputs = [fn(x) for fn in self.fns]
return sum(outputs)
# rearranging
class RearrangeTimeCentric(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
x = rearrange(x, 'b c f ... -> b ... f c')
x, ps = pack([x], '* f c')
x = self.fn(x)
x, = unpack(x, ps, '* f c')
x = rearrange(x, 'b ... f c -> b c f ...')
return x
# attention pooling
class PerceiverAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.LayerNorm(dim)
)
def forward(self, x, latents, mask = None):
x = self.norm(x)
latents = self.norm_latents(latents)
b, h = x.shape[0], self.heads
q = self.to_q(latents)
        # unlike the original Perceiver, the keys / values here are derived from both the input sequence and the latents (concatenated before attention)
kv_input = torch.cat((x, latents), dim = -2)
k, v = self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities and masking
sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale
if exists(mask):
max_neg_value = -torch.finfo(sim.dtype).max
mask = F.pad(mask, (0, latents.shape[-2]), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1)
out = einsum('... i j, ... j d -> ... i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
num_latents = 64,
num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence
max_seq_len = 512,
ff_mult = 4
):
super().__init__()
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.to_latents_from_mean_pooled_seq = None
if num_latents_mean_pooled > 0:
self.to_latents_from_mean_pooled_seq = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, dim * num_latents_mean_pooled),
Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, mask = None):
n, device = x.shape[1], x.device
pos_emb = self.pos_emb(torch.arange(n, device = device))
x_with_pos = x + pos_emb
latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])
if exists(self.to_latents_from_mean_pooled_seq):
meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))
meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)
latents = torch.cat((meanpooled_latents, latents), dim = -2)
for attn, ff in self.layers:
latents = attn(x_with_pos, latents, mask = mask) + latents
latents = ff(latents) + latents
return latents
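# illustrative sketch (not part of the original module): the resampler pools a variable length
# sequence of text token embeddings down to a fixed set of latents, plus a few extra latents
# derived from the mean pooled sequence. dimensions are placeholders. (if run, call this only
# after the module has fully loaded, since FeedForward is defined further below.)
def _example_perceiver_resampler():
    resampler = PerceiverResampler(dim = 512, depth = 2, num_latents = 64, num_latents_mean_pooled = 4)
    text_tokens = torch.randn(2, 77, 512)                  # (batch, seq, dim)
    latents = resampler(text_tokens)                       # (batch, 64 + 4, dim)
    return latents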
# main contribution from make-a-video - pseudo conv3d
# axial space-time convolutions, but made causal to keep in line with the design decisions of imagen-video paper
class Conv3d(nn.Module):
def __init__(
self,
dim,
dim_out = None,
kernel_size = 3,
*,
temporal_kernel_size = None,
**kwargs
):
super().__init__()
dim_out = default(dim_out, dim)
temporal_kernel_size = default(temporal_kernel_size, kernel_size)
self.spatial_conv = nn.Conv2d(dim, dim_out, kernel_size = kernel_size, padding = kernel_size // 2)
self.temporal_conv = nn.Conv1d(dim_out, dim_out, kernel_size = temporal_kernel_size) if kernel_size > 1 else None
self.kernel_size = kernel_size
if exists(self.temporal_conv):
nn.init.dirac_(self.temporal_conv.weight.data) # initialized to be identity
nn.init.zeros_(self.temporal_conv.bias.data)
def forward(
self,
x,
ignore_time = False
):
b, c, *_, h, w = x.shape
is_video = x.ndim == 5
ignore_time &= is_video
if is_video:
x = rearrange(x, 'b c f h w -> (b f) c h w')
x = self.spatial_conv(x)
if is_video:
x = rearrange(x, '(b f) c h w -> b c f h w', b = b)
if ignore_time or not exists(self.temporal_conv):
return x
x = rearrange(x, 'b c f h w -> (b h w) c f')
# causal temporal convolution - time is causal in imagen-video
if self.kernel_size > 1:
x = F.pad(x, (self.kernel_size - 1, 0))
x = self.temporal_conv(x)
x = rearrange(x, '(b h w) c f -> b c f h w', h = h, w = w)
return x
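# illustrative sketch (not part of the original module): the pseudo conv3d factorizes a 3d
# convolution into a spatial 2d conv applied per frame followed by a causal 1d conv across
# frames; passing ignore_time = True runs only the spatial part.
def _example_pseudo_conv3d():
    conv = Conv3d(dim = 8, dim_out = 16, kernel_size = 3)
    video_feats = torch.randn(1, 8, 4, 32, 32)             # (batch, channels, frames, height, width)
    out = conv(video_feats)                                 # spatial conv per frame, then causal temporal conv
    out_spatial_only = conv(video_feats, ignore_time = True)
    return out.shape, out_spatial_only.shape                # both (1, 16, 4, 32, 32)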
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
causal = False,
context_dim = None,
rel_pos_bias = False,
rel_pos_bias_mlp_depth = 2,
init_zero = False,
scale = 8
):
super().__init__()
self.scale = scale
self.causal = causal
self.rel_pos_bias = DynamicPositionBias(dim = dim, heads = heads, depth = rel_pos_bias_mlp_depth) if rel_pos_bias else None
self.heads = heads
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.null_attn_bias = nn.Parameter(torch.randn(heads))
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
if init_zero:
nn.init.zeros_(self.to_out[-1].g)
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None
):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# add text conditioning, if present
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
k = torch.cat((ck, k), dim = -2)
v = torch.cat((cv, v), dim = -2)
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# calculate query / key similarities
sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale
# relative positional encoding (T5 style)
if not exists(attn_bias) and exists(self.rel_pos_bias):
attn_bias = self.rel_pos_bias(n, device = device, dtype = q.dtype)
if exists(attn_bias):
null_attn_bias = repeat(self.null_attn_bias, 'h -> h n 1', n = n)
attn_bias = torch.cat((null_attn_bias, attn_bias), dim = -1)
sim = sim + attn_bias
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, max_neg_value)
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1)
# aggregate values
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# pseudo conv2d that uses conv3d but with kernel size of 1 across frames dimension
def Conv2d(dim_in, dim_out, kernel, stride = 1, padding = 0, **kwargs):
kernel = cast_tuple(kernel, 2)
stride = cast_tuple(stride, 2)
padding = cast_tuple(padding, 2)
if len(kernel) == 2:
kernel = (1, *kernel)
if len(stride) == 2:
stride = (1, *stride)
if len(padding) == 2:
padding = (0, *padding)
return nn.Conv3d(dim_in, dim_out, kernel, stride = stride, padding = padding, **kwargs)
class Pad(nn.Module):
def __init__(self, padding, value = 0.):
super().__init__()
self.padding = padding
self.value = value
def forward(self, x):
return F.pad(x, self.padding, value = self.value)
# decoder
def Upsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
Conv2d(dim, dim_out, 3, padding = 1)
)
class PixelShuffleUpsample(nn.Module):
def __init__(self, dim, dim_out = None):
super().__init__()
dim_out = default(dim_out, dim)
conv = Conv2d(dim, dim_out * 4, 1)
self.net = nn.Sequential(
conv,
nn.SiLU()
)
self.pixel_shuffle = nn.PixelShuffle(2)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, f, h, w = conv.weight.shape
conv_weight = torch.empty(o // 4, i, f, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
out = self.net(x)
frames = x.shape[2]
out = rearrange(out, 'b c f h w -> (b f) c h w')
out = self.pixel_shuffle(out)
return rearrange(out, '(b f) c h w -> b c f h w', f = frames)
def Downsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c f (h p1) (w p2) -> b (c p1 p2) f h w', p1 = 2, p2 = 2),
Conv2d(dim * 4, dim_out, 1)
)
# temporal up and downsamples
class TemporalPixelShuffleUpsample(nn.Module):
def __init__(self, dim, dim_out = None, stride = 2):
super().__init__()
self.stride = stride
dim_out = default(dim_out, dim)
conv = nn.Conv1d(dim, dim_out * stride, 1)
self.net = nn.Sequential(
conv,
nn.SiLU()
)
self.pixel_shuffle = Rearrange('b (c r) n -> b c (n r)', r = stride)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, f = conv.weight.shape
conv_weight = torch.empty(o // self.stride, i, f)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o r) ...', r = self.stride)
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
b, c, f, h, w = x.shape
x = rearrange(x, 'b c f h w -> (b h w) c f')
out = self.net(x)
out = self.pixel_shuffle(out)
return rearrange(out, '(b h w) c f -> b c f h w', h = h, w = w)
def TemporalDownsample(dim, dim_out = None, stride = 2):
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c (f p) h w -> b (c p) f h w', p = stride),
Conv2d(dim * stride, dim_out, 1)
)
# positional embedding
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim = -1)
class LearnedSinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
norm = True
):
super().__init__()
self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()
self.activation = nn.SiLU()
self.project = Conv3d(dim, dim_out, 3, padding = 1)
def forward(
self,
x,
scale_shift = None,
ignore_time = False
):
x = self.groupnorm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.activation(x)
return self.project(x, ignore_time = ignore_time)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
cond_dim = None,
time_cond_dim = None,
groups = 8,
linear_attn = False,
use_gca = False,
squeeze_excite = False,
**attn_kwargs
):
super().__init__()
self.time_mlp = None
if exists(time_cond_dim):
self.time_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_cond_dim, dim_out * 2)
)
self.cross_attn = None
if exists(cond_dim):
attn_klass = CrossAttention if not linear_attn else LinearCrossAttention
self.cross_attn = attn_klass(
dim = dim_out,
context_dim = cond_dim,
**attn_kwargs
)
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)
self.res_conv = Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()
def forward(
self,
x,
time_emb = None,
cond = None,
ignore_time = False
):
scale_shift = None
if exists(self.time_mlp) and exists(time_emb):
time_emb = self.time_mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x, ignore_time = ignore_time)
if exists(self.cross_attn):
assert exists(cond)
h = rearrange(h, 'b c ... -> b ... c')
h, ps = pack([h], 'b * c')
h = self.cross_attn(h, context = cond) + h
h, = unpack(h, ps, 'b * c')
h = rearrange(h, 'b ... c -> b c ...')
h = self.block2(h, scale_shift = scale_shift, ignore_time = ignore_time)
h = h * self.gca(h)
return h + self.res_conv(x)
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
norm_context = False,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.norm_context = LayerNorm(context_dim) if norm_context else Identity()
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b h 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class LinearCrossAttention(CrossAttention):
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> (b h) 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# masking
max_neg_value = -torch.finfo(x.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b n -> b n 1')
k = k.masked_fill(~mask, max_neg_value)
v = v.masked_fill(~mask, 0.)
# linear attention
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)
return self.to_out(out)
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
heads = 8,
dropout = 0.05,
context_dim = None,
**kwargs
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = ChanLayerNorm(dim)
self.nonlin = nn.SiLU()
self.to_q = nn.Sequential(
nn.Dropout(dropout),
Conv2d(dim, inner_dim, 1, bias = False),
Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_k = nn.Sequential(
nn.Dropout(dropout),
Conv2d(dim, inner_dim, 1, bias = False),
Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_v = nn.Sequential(
nn.Dropout(dropout),
Conv2d(dim, inner_dim, 1, bias = False),
Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None
self.to_out = nn.Sequential(
Conv2d(inner_dim, dim, 1, bias = False),
ChanLayerNorm(dim)
)
def forward(self, fmap, context = None):
h, x, y = self.heads, *fmap.shape[-2:]
fmap = self.norm(fmap)
q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
ck, cv = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (ck, cv))
k = torch.cat((k, ck), dim = -2)
v = torch.cat((v, cv), dim = -2)
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
out = self.nonlin(out)
return self.to_out(out)
class GlobalContext(nn.Module):
""" basically a superior form of squeeze-excitation that is attention-esque """
def __init__(
self,
*,
dim_in,
dim_out
):
super().__init__()
self.to_k = Conv2d(dim_in, 1, 1)
hidden_dim = max(3, dim_out // 2)
self.net = nn.Sequential(
Conv2d(dim_in, hidden_dim, 1),
nn.SiLU(),
Conv2d(hidden_dim, dim_out, 1),
nn.Sigmoid()
)
def forward(self, x):
context = self.to_k(x)
x, context = map(lambda t: rearrange(t, 'b n ... -> b n (...)'), (x, context))
out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)
out = rearrange(out, '... -> ... 1 1')
return self.net(out)
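# GlobalContext above is an attention-flavored squeeze-excitation: a 1x1 conv produces one
# attention logit per position, a softmax over all positions pools the feature map into a
# single context vector, and a small bottleneck MLP turns that vector into a per-channel gate.
# A minimal sketch of just the pooling step, with hypothetical shapes (illustrative only,
# independent of the Conv2d wrapper used elsewhere in this file):
def _attention_pool_sketch():
    import torch
    from torch import einsum
    from einops import rearrange
    fmap = torch.randn(2, 32, 4, 16, 16)    # (batch, channels, frames, height, width)
    logits = torch.randn(2, 1, 4, 16, 16)   # stand-in for the output of self.to_k(fmap)
    fmap, logits = map(lambda t: rearrange(t, 'b c ... -> b c (...)'), (fmap, logits))
    pooled = einsum('b i n, b c n -> b c i', logits.softmax(dim = -1), fmap)
    return pooled.shape  # torch.Size([2, 32, 1]) - one context vector per sample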
def FeedForward(dim, mult = 2):
hidden_dim = int(dim * mult)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, hidden_dim, bias = False),
nn.GELU(),
LayerNorm(hidden_dim),
nn.Linear(hidden_dim, dim, bias = False)
)
class TimeTokenShift(nn.Module):
def forward(self, x):
if x.ndim != 5:
return x
x, x_shift = x.chunk(2, dim = 1)
x_shift = F.pad(x_shift, (0, 0, 0, 0, 1, -1), value = 0.)
return torch.cat((x, x_shift), dim = 1)
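# TimeTokenShift above is the token shift trick applied along the frame axis: half of the
# channels are delayed by one frame (zero padded at the first frame), so every position also
# sees features from the previous time step. A minimal sketch of the effect, assuming a small
# hypothetical video tensor (illustrative only):
def _time_token_shift_sketch():
    import torch
    shift = TimeTokenShift()
    x = torch.randn(1, 4, 3, 2, 2)  # (batch, channels, frames, height, width)
    out = shift(x)
    assert out.shape == x.shape
    # the first half of the channels is untouched, the second half is delayed by one frame
    assert torch.equal(out[:, 2:, 1:], x[:, 2:, :-1])
    assert torch.equal(out[:, 2:, 0], torch.zeros_like(out[:, 2:, 0]))
    return out.shape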
def ChanFeedForward(dim, mult = 2, time_token_shift = True): # in the paper, it seems the feedforwards for the self-attention layers use twice the channel width
hidden_dim = int(dim * mult)
return Sequential(
ChanLayerNorm(dim),
Conv2d(dim, hidden_dim, 1, bias = False),
nn.GELU(),
TimeTokenShift() if time_token_shift else None,
ChanLayerNorm(hidden_dim),
Conv2d(hidden_dim, dim, 1, bias = False)
)
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
ff_time_token_shift = True,
context_dim = None
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
ChanFeedForward(dim = dim, mult = ff_mult, time_token_shift = ff_time_token_shift)
]))
def forward(self, x, context = None):
for attn, ff in self.layers:
x = rearrange(x, 'b c ... -> b ... c')
x, ps = pack([x], 'b * c')
x = attn(x, context = context) + x
x, = unpack(x, ps, 'b * c')
x = rearrange(x, 'b ... c -> b c ...')
x = ff(x) + x
return x
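# TransformerBlock above flattens whatever spatial (and temporal) dimensions the feature map
# has into a single token axis with einops pack, runs full attention over that sequence, then
# unpacks back to the original shape. A minimal sketch of the pack / unpack round trip, with
# hypothetical shapes (illustrative only):
def _pack_unpack_sketch():
    import torch
    from einops import pack, unpack, rearrange
    x = torch.randn(2, 32, 4, 8, 8)      # (batch, channels, frames, height, width)
    x = rearrange(x, 'b c ... -> b ... c')
    x, ps = pack([x], 'b * c')           # -> (2, 4 * 8 * 8, 32) token sequence
    x, = unpack(x, ps, 'b * c')          # restore (2, 4, 8, 8, 32)
    x = rearrange(x, 'b ... c -> b c ...')
    return x.shape                       # torch.Size([2, 32, 4, 8, 8])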
class LinearAttentionTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
ff_time_token_shift = True,
context_dim = None,
**kwargs
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
ChanFeedForward(dim = dim, mult = ff_mult, time_token_shift = ff_time_token_shift)
]))
def forward(self, x, context = None):
for attn, ff in self.layers:
x = attn(x, context = context) + x
x = ff(x) + x
return x
class CrossEmbedLayer(nn.Module):
def __init__(
self,
dim_in,
kernel_sizes,
dim_out = None,
stride = 2
):
super().__init__()
assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
dim_out = default(dim_out, dim_in)
kernel_sizes = sorted(kernel_sizes)
num_scales = len(kernel_sizes)
# calculate the dimension at each scale
dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
self.convs = nn.ModuleList([])
for kernel, dim_scale in zip(kernel_sizes, dim_scales):
self.convs.append(Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))
def forward(self, x):
fmaps = tuple(map(lambda conv: conv(x), self.convs))
return torch.cat(fmaps, dim = 1)
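# CrossEmbedLayer above splits its output channels across the kernel sizes: every successive
# scale receives half the channels of the previous one and the last scale absorbs the
# remainder, so the concatenated feature maps always sum to dim_out. A worked example of that
# split with hypothetical numbers (illustrative only):
def _cross_embed_dims_sketch(dim_out = 128, num_scales = 3):
    dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
    dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
    # e.g. dim_out = 128 with kernel_sizes = (3, 7, 15) -> [64, 32, 32]
    assert sum(dim_scales) == dim_out
    return dim_scales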
class UpsampleCombiner(nn.Module):
def __init__(
self,
dim,
*,
enabled = False,
dim_ins = tuple(),
dim_outs = tuple()
):
super().__init__()
dim_outs = cast_tuple(dim_outs, len(dim_ins))
assert len(dim_ins) == len(dim_outs)
self.enabled = enabled
if not self.enabled:
self.dim_out = dim
return
self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])
self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)
def forward(self, x, fmaps = None):
target_size = x.shape[-1]
fmaps = default(fmaps, tuple())
if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:
return x
fmaps = [resize_video_to(fmap, target_size) for fmap in fmaps]
outs = [conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]
return torch.cat((x, *outs), dim = 1)
class DynamicPositionBias(nn.Module):
def __init__(
self,
dim,
*,
heads,
depth
):
super().__init__()
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
LayerNorm(dim),
nn.SiLU()
))
for _ in range(max(depth - 1, 0)):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
LayerNorm(dim),
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
def forward(self, n, device, dtype):
i = torch.arange(n, device = device)
j = torch.arange(n, device = device)
indices = rearrange(i, 'i -> i 1') - rearrange(j, 'j -> 1 j')
indices += (n - 1)
pos = torch.arange(-n + 1, n, device = device, dtype = dtype)
pos = rearrange(pos, '... -> ... 1')
for layer in self.mlp:
pos = layer(pos)
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
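# DynamicPositionBias above learns a continuous relative position bias: every relative offset
# in [-(n - 1), n - 1] is passed through a small MLP, and the resulting per-head values are
# gathered into a (heads, n, n) bias that gets added to the attention logits. A minimal shape
# sketch with hypothetical sizes (illustrative only, not part of the original file):
def _dynamic_position_bias_sketch():
    import torch
    rel_pos_bias = DynamicPositionBias(dim = 16, heads = 8, depth = 2)
    bias = rel_pos_bias(n = 5, device = torch.device('cpu'), dtype = torch.float32)
    return bias.shape  # torch.Size([8, 5, 5]) - one bias matrix per attention head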
class Unet3D(nn.Module):
def __init__(
self,
*,
dim,
text_embed_dim = get_encoded_dim(DEFAULT_T5_NAME),
num_resnet_blocks = 1,
cond_dim = None,
num_image_tokens = 4,
num_time_tokens = 2,
learned_sinu_pos_emb_dim = 16,
out_dim = None,
dim_mults = (1, 2, 4, 8),
temporal_strides = 1,
cond_images_channels = 0,
channels = 3,
channels_out = None,
attn_dim_head = 64,
attn_heads = 8,
ff_mult = 2.,
ff_time_token_shift = True, # this would do a token shift along time axis, at the hidden layer within feedforwards - from successful use in RWKV (Peng et al), and other token shift video transformer works
lowres_cond = False, # for cascading diffusion - https://cascaded-diffusion.github.io/
layer_attns = False,
layer_attns_depth = 1,
layer_attns_add_text_cond = True, # whether to condition the self-attention blocks with the text embeddings, as described in Appendix D.3.1
attend_at_middle = True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)
time_rel_pos_bias_depth = 2,
time_causal_attn = True,
layer_cross_attns = True,
use_linear_attn = False,
use_linear_cross_attn = False,
cond_on_text = True,
max_text_len = 256,
init_dim = None,
resnet_groups = 8,
init_conv_kernel_size = 7, # kernel size of initial conv, if not using cross embed
init_cross_embed = True,
init_cross_embed_kernel_sizes = (3, 7, 15),
cross_embed_downsample = False,
cross_embed_downsample_kernel_sizes = (2, 4),
attn_pool_text = True,
attn_pool_num_latents = 32,
dropout = 0.,
memory_efficient = False,
init_conv_to_final_conv_residual = False,
use_global_context_attn = True,
scale_skip_connection = True,
final_resnet_block = True,
final_conv_kernel_size = 3,
self_cond = False,
combine_upsample_fmaps = False, # combine feature maps from all upsample blocks, used in unet squared successfully
pixel_shuffle_upsample = True, # may address checkerboard artifacts
resize_mode = 'nearest'
):
super().__init__()
# guide researchers
assert attn_heads > 1, 'you need to have more than 1 attention head, ideally at least 4 or 8'
if dim < 128:
print_once('The base dimension of your u-net should ideally be no smaller than 128, as recommended by a professional DDPM trainer https://nonint.com/2022/05/04/friends-dont-let-friends-train-small-diffusion-models/')
# save locals to take care of some hyperparameters for cascading DDPM
self._locals = locals()
self._locals.pop('self', None)
self._locals.pop('__class__', None)
self.self_cond = self_cond
# determine dimensions
self.channels = channels
self.channels_out = default(channels_out, channels)
# (1) in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis
# (2) in self conditioning, one appends the predicted x0 (x_start)
init_channels = channels * (1 + int(lowres_cond) + int(self_cond))
init_dim = default(init_dim, dim)
# optional image conditioning
self.has_cond_image = cond_images_channels > 0
self.cond_images_channels = cond_images_channels
init_channels += cond_images_channels
# initial convolution
self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1) if init_cross_embed else Conv2d(init_channels, init_dim, init_conv_kernel_size, padding = init_conv_kernel_size // 2)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
# time conditioning
cond_dim = default(cond_dim, dim)
time_cond_dim = dim * 4 * (2 if lowres_cond else 1)
# embedding time for log(snr) noise from continuous version
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim)
sinu_pos_emb_input_dim = learned_sinu_pos_emb_dim + 1
self.to_time_hiddens = nn.Sequential(
sinu_pos_emb,
nn.Linear(sinu_pos_emb_input_dim, time_cond_dim),
nn.SiLU()
)
self.to_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
# project to time tokens as well as time hiddens
self.to_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# low res aug noise conditioning
self.lowres_cond = lowres_cond
if lowres_cond:
self.to_lowres_time_hiddens = nn.Sequential(
LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim),
nn.Linear(learned_sinu_pos_emb_dim + 1, time_cond_dim),
nn.SiLU()
)
self.to_lowres_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
self.to_lowres_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# normalizations
self.norm_cond = nn.LayerNorm(cond_dim)
# text encoding conditioning (optional)
self.text_to_cond = None
if cond_on_text:
assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text is True'
self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)
# finer control over whether to condition on text encodings
self.cond_on_text = cond_on_text
# attention pooling
self.attn_pool = PerceiverResampler(dim = cond_dim, depth = 2, dim_head = attn_dim_head, heads = attn_heads, num_latents = attn_pool_num_latents) if attn_pool_text else None
# for classifier free guidance
self.max_text_len = max_text_len
self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))
self.null_text_hidden = nn.Parameter(torch.randn(1, time_cond_dim))
# for non-attention based text conditioning at all points in the network where time is also conditioned
self.to_text_non_attn_cond = None
if cond_on_text:
self.to_text_non_attn_cond = nn.Sequential(
nn.LayerNorm(cond_dim),
nn.Linear(cond_dim, time_cond_dim),
nn.SiLU(),
nn.Linear(time_cond_dim, time_cond_dim)
)
# attention related params
attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)
num_layers = len(in_out)
# temporal attention - attention across video frames
temporal_peg_padding = (0, 0, 0, 0, 2, 0) if time_causal_attn else (0, 0, 0, 0, 1, 1)
temporal_peg = lambda dim: Residual(nn.Sequential(Pad(temporal_peg_padding), nn.Conv3d(dim, dim, (3, 1, 1), groups = dim)))
temporal_attn = lambda dim: RearrangeTimeCentric(Residual(Attention(dim, **{**attn_kwargs, 'causal': time_causal_attn, 'init_zero': True, 'rel_pos_bias': True})))
# resnet block klass
num_resnet_blocks = cast_tuple(num_resnet_blocks, num_layers)
resnet_groups = cast_tuple(resnet_groups, num_layers)
resnet_klass = partial(ResnetBlock, **attn_kwargs)
layer_attns = cast_tuple(layer_attns, num_layers)
layer_attns_depth = cast_tuple(layer_attns_depth, num_layers)
layer_cross_attns = cast_tuple(layer_cross_attns, num_layers)
assert all([layers == num_layers for layers in list(map(len, (resnet_groups, layer_attns, layer_cross_attns)))])
# temporal downsample config
temporal_strides = cast_tuple(temporal_strides, num_layers)
self.total_temporal_divisor = functools.reduce(operator.mul, temporal_strides, 1)
# downsample klass
downsample_klass = Downsample
if cross_embed_downsample:
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)
# initial resnet block (for memory efficient unet)
self.init_resnet_block = resnet_klass(init_dim, init_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = use_global_context_attn) if memory_efficient else None
self.init_temporal_peg = temporal_peg(init_dim)
self.init_temporal_attn = temporal_attn(init_dim)
# scale for resnet skip connections
self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
layer_params = [num_resnet_blocks, resnet_groups, layer_attns, layer_attns_depth, layer_cross_attns, temporal_strides]
reversed_layer_params = list(map(reversed, layer_params))
# downsampling layers
skip_connect_dims = [] # keep track of skip connection dimensions
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, temporal_stride) in enumerate(zip(in_out, *layer_params)):
is_last = ind >= (num_resolutions - 1)
layer_use_linear_cross_attn = not layer_cross_attn and use_linear_cross_attn
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
transformer_block_klass = TransformerBlock if layer_attn else (LinearAttentionTransformerBlock if use_linear_attn else Identity)
current_dim = dim_in
# whether to pre-downsample, from memory efficient unet
pre_downsample = None
if memory_efficient:
pre_downsample = downsample_klass(dim_in, dim_out)
current_dim = dim_out
skip_connect_dims.append(current_dim)
# whether to do post-downsample, for non-memory efficient unet
post_downsample = None
if not memory_efficient:
post_downsample = downsample_klass(current_dim, dim_out) if not is_last else Parallel(Conv2d(dim_in, dim_out, 3, padding = 1), Conv2d(dim_in, dim_out, 1))
self.downs.append(nn.ModuleList([
pre_downsample,
resnet_klass(current_dim, current_dim, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(current_dim, current_dim, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = current_dim, depth = layer_attn_depth, ff_mult = ff_mult, ff_time_token_shift = ff_time_token_shift, context_dim = cond_dim, **attn_kwargs),
temporal_peg(current_dim),
temporal_attn(current_dim),
TemporalDownsample(current_dim, stride = temporal_stride) if temporal_stride > 1 else None,
post_downsample
]))
# middle layers
mid_dim = dims[-1]
self.mid_block1 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
self.mid_attn = EinopsToAndFrom('b c f h w', 'b (f h w) c', Residual(Attention(mid_dim, **attn_kwargs))) if attend_at_middle else None
self.mid_temporal_peg = temporal_peg(mid_dim)
self.mid_temporal_attn = temporal_attn(mid_dim)
self.mid_block2 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
# upsample klass
upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample
# upsampling layers
upsample_fmap_dims = []
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, temporal_stride) in enumerate(zip(reversed(in_out), *reversed_layer_params)):
is_last = ind == (len(in_out) - 1)
layer_use_linear_cross_attn = not layer_cross_attn and use_linear_cross_attn
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
transformer_block_klass = TransformerBlock if layer_attn else (LinearAttentionTransformerBlock if use_linear_attn else Identity)
skip_connect_dim = skip_connect_dims.pop()
upsample_fmap_dims.append(dim_out)
self.ups.append(nn.ModuleList([
resnet_klass(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(dim_out + skip_connect_dim, dim_out, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = dim_out, depth = layer_attn_depth, ff_mult = ff_mult, ff_time_token_shift = ff_time_token_shift, context_dim = cond_dim, **attn_kwargs),
temporal_peg(dim_out),
temporal_attn(dim_out),
TemporalPixelShuffleUpsample(dim_out, stride = temporal_stride) if temporal_stride > 1 else None,
upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else Identity()
]))
# whether to combine feature maps from all upsample blocks before final resnet block out
self.upsample_combiner = UpsampleCombiner(
dim = dim,
enabled = combine_upsample_fmaps,
dim_ins = upsample_fmap_dims,
dim_outs = dim
)
# whether to do a final residual from initial conv to the final resnet block out
self.init_conv_to_final_conv_residual = init_conv_to_final_conv_residual
final_conv_dim = self.upsample_combiner.dim_out + (dim if init_conv_to_final_conv_residual else 0)
# final optional resnet block and convolution out
self.final_res_block = ResnetBlock(final_conv_dim, dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = True) if final_resnet_block else None
final_conv_dim_in = dim if final_resnet_block else final_conv_dim
final_conv_dim_in += (channels if lowres_cond else 0)
self.final_conv = Conv2d(final_conv_dim_in, self.channels_out, final_conv_kernel_size, padding = final_conv_kernel_size // 2)
zero_init_(self.final_conv)
# resize mode
self.resize_mode = resize_mode
# if the current settings for the unet are not correct
# for cascading DDPM, then reinit the unet with the right settings
def cast_model_parameters(
self,
*,
lowres_cond,
text_embed_dim,
channels,
channels_out,
cond_on_text
):
if lowres_cond == self.lowres_cond and \
channels == self.channels and \
cond_on_text == self.cond_on_text and \
text_embed_dim == self._locals['text_embed_dim'] and \
channels_out == self.channels_out:
return self
updated_kwargs = dict(
lowres_cond = lowres_cond,
text_embed_dim = text_embed_dim,
channels = channels,
channels_out = channels_out,
cond_on_text = cond_on_text
)
return self.__class__(**{**self._locals, **updated_kwargs})
# methods for returning the full unet config as well as its parameter state
def to_config_and_state_dict(self):
return self._locals, self.state_dict()
# class method for rehydrating the unet from its config and state dict
@classmethod
def from_config_and_state_dict(klass, config, state_dict):
unet = klass(**config)
unet.load_state_dict(state_dict)
return unet
# methods for persisting unet to disk
def persist_to_file(self, path):
path = Path(path)
path.parents[0].mkdir(exist_ok = True, parents = True)
config, state_dict = self.to_config_and_state_dict()
pkg = dict(config = config, state_dict = state_dict)
torch.save(pkg, str(path))
# class method for rehydrating the unet from file saved with `persist_to_file`
@classmethod
def hydrate_from_file(klass, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
assert 'config' in pkg and 'state_dict' in pkg
config, state_dict = pkg['config'], pkg['state_dict']
return Unet.from_config_and_state_dict(config, state_dict)
# forward with classifier free guidance
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
self,
x,
time,
*,
lowres_cond_img = None,
lowres_noise_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
self_cond = None,
cond_drop_prob = 0.,
ignore_time = False
):
assert x.ndim == 5, 'input to 3d unet must have 5 dimensions (batch, channels, time, height, width)'
batch_size, frames, device, dtype = x.shape[0], x.shape[2], x.device, x.dtype
assert ignore_time or divisible_by(frames, self.total_temporal_divisor), f'number of input frames {frames} must be divisible by {self.total_temporal_divisor}'
# add self conditioning if needed
if self.self_cond:
self_cond = default(self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x, self_cond), dim = 1)
# add low resolution conditioning, if present
assert not (self.lowres_cond and not exists(lowres_cond_img)), 'low resolution conditioning image must be present'
assert not (self.lowres_cond and not exists(lowres_noise_times)), 'low resolution conditioning noise time must be present'
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
if exists(cond_video_frames):
lowres_cond_img = torch.cat((cond_video_frames, lowres_cond_img), dim = 2)
cond_video_frames = torch.cat((cond_video_frames, cond_video_frames), dim = 1)
if exists(post_cond_video_frames):
lowres_cond_img = torch.cat((lowres_cond_img, post_cond_video_frames), dim = 2)
post_cond_video_frames = torch.cat((post_cond_video_frames, post_cond_video_frames), dim = 1)
# conditioning on video frames as a prompt
num_preceding_frames = 0
if exists(cond_video_frames):
cond_video_frames_len = cond_video_frames.shape[2]
assert divisible_by(cond_video_frames_len, self.total_temporal_divisor)
cond_video_frames = resize_video_to(cond_video_frames, x.shape[-1])
x = torch.cat((cond_video_frames, x), dim = 2)
num_preceding_frames = cond_video_frames_len
# conditioning on video frames as a prompt
num_succeeding_frames = 0
if exists(post_cond_video_frames):
cond_video_frames_len = post_cond_video_frames.shape[2]
assert divisible_by(cond_video_frames_len, self.total_temporal_divisor)
post_cond_video_frames = resize_video_to(post_cond_video_frames, x.shape[-1])
x = torch.cat((post_cond_video_frames, x), dim = 2)
num_succeeding_frames = cond_video_frames_len
# condition on input image
assert not (self.has_cond_image ^ exists(cond_images)), 'you either requested the unet to condition on an image but did not supply a conditioning image, or supplied one without requesting it'
if exists(cond_images):
assert cond_images.ndim == 4, 'conditioning images must have 4 dimensions only, if you want to condition on frames of video, use `cond_video_frames` instead'
assert cond_images.shape[1] == self.cond_images_channels, 'the number of channels on the conditioning image you are passing in does not match what you specified on initialization of the unet'
cond_images = repeat(cond_images, 'b c h w -> b c f h w', f = x.shape[2])
cond_images = resize_video_to(cond_images, x.shape[-1], mode = self.resize_mode)
x = torch.cat((cond_images, x), dim = 1)
# ignoring time in pseudo 3d resnet blocks
conv_kwargs = dict(
ignore_time = ignore_time
)
# initial convolution
x = self.init_conv(x)
if not ignore_time:
x = self.init_temporal_peg(x)
x = self.init_temporal_attn(x)
# init conv residual
if self.init_conv_to_final_conv_residual:
init_conv_residual = x.clone()
# time conditioning
time_hiddens = self.to_time_hiddens(time)
# derive time tokens
time_tokens = self.to_time_tokens(time_hiddens)
t = self.to_time_cond(time_hiddens)
# add lowres time conditioning to time hiddens
# and add lowres time tokens along sequence dimension for attention
if self.lowres_cond:
lowres_time_hiddens = self.to_lowres_time_hiddens(lowres_noise_times)
lowres_time_tokens = self.to_lowres_time_tokens(lowres_time_hiddens)
lowres_t = self.to_lowres_time_cond(lowres_time_hiddens)
t = t + lowres_t
time_tokens = torch.cat((time_tokens, lowres_time_tokens), dim = -2)
# text conditioning
text_tokens = None
if exists(text_embeds) and self.cond_on_text:
# conditional dropout
text_keep_mask = prob_mask_like((batch_size,), 1 - cond_drop_prob, device = device)
text_keep_mask_embed = rearrange(text_keep_mask, 'b -> b 1 1')
text_keep_mask_hidden = rearrange(text_keep_mask, 'b -> b 1')
# calculate text embeds
text_tokens = self.text_to_cond(text_embeds)
text_tokens = text_tokens[:, :self.max_text_len]
if exists(text_mask):
text_mask = text_mask[:, :self.max_text_len]
text_tokens_len = text_tokens.shape[1]
remainder = self.max_text_len - text_tokens_len
if remainder > 0:
text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
if exists(text_mask):
if remainder > 0:
text_mask = F.pad(text_mask, (0, remainder), value = False)
text_mask = rearrange(text_mask, 'b n -> b n 1')
text_keep_mask_embed = text_mask & text_keep_mask_embed
null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
text_tokens = torch.where(
text_keep_mask_embed,
text_tokens,
null_text_embed
)
if exists(self.attn_pool):
text_tokens = self.attn_pool(text_tokens)
# extra non-attention conditioning by projecting and then summing text embeddings to time
# termed as text hiddens
mean_pooled_text_tokens = text_tokens.mean(dim = -2)
text_hiddens = self.to_text_non_attn_cond(mean_pooled_text_tokens)
null_text_hidden = self.null_text_hidden.to(t.dtype)
text_hiddens = torch.where(
text_keep_mask_hidden,
text_hiddens,
null_text_hidden
)
t = t + text_hiddens
# main conditioning tokens (c)
c = time_tokens if not exists(text_tokens) else torch.cat((time_tokens, text_tokens), dim = -2)
# normalize conditioning tokens
c = self.norm_cond(c)
# initial resnet block (for memory efficient unet)
if exists(self.init_resnet_block):
x = self.init_resnet_block(x, t, **conv_kwargs)
# go through the layers of the unet, down and up
hiddens = []
for pre_downsample, init_block, resnet_blocks, attn_block, temporal_peg, temporal_attn, temporal_downsample, post_downsample in self.downs:
if exists(pre_downsample):
x = pre_downsample(x)
x = init_block(x, t, c, **conv_kwargs)
for resnet_block in resnet_blocks:
x = resnet_block(x, t, **conv_kwargs)
hiddens.append(x)
x = attn_block(x, c)
if not ignore_time:
x = temporal_peg(x)
x = temporal_attn(x)
hiddens.append(x)
if exists(temporal_downsample) and not ignore_time:
x = temporal_downsample(x)
if exists(post_downsample):
x = post_downsample(x)
x = self.mid_block1(x, t, c, **conv_kwargs)
if exists(self.mid_attn):
x = self.mid_attn(x)
if not ignore_time:
x = self.mid_temporal_peg(x)
x = self.mid_temporal_attn(x)
x = self.mid_block2(x, t, c, **conv_kwargs)
add_skip_connection = lambda x: torch.cat((x, hiddens.pop() * self.skip_connect_scale), dim = 1)
up_hiddens = []
for init_block, resnet_blocks, attn_block, temporal_peg, temporal_attn, temporal_upsample, upsample in self.ups:
if exists(temporal_upsample) and not ignore_time:
x = temporal_upsample(x)
x = add_skip_connection(x)
x = init_block(x, t, c, **conv_kwargs)
for resnet_block in resnet_blocks:
x = add_skip_connection(x)
x = resnet_block(x, t, **conv_kwargs)
x = attn_block(x, c)
if not ignore_time:
x = temporal_peg(x)
x = temporal_attn(x)
up_hiddens.append(x.contiguous())
x = upsample(x)
# whether to combine all feature maps from upsample blocks
x = self.upsample_combiner(x, up_hiddens)
# final top-most residual if needed
if self.init_conv_to_final_conv_residual:
x = torch.cat((x, init_conv_residual), dim = 1)
if exists(self.final_res_block):
x = self.final_res_block(x, t, **conv_kwargs)
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
out = self.final_conv(x)
if num_preceding_frames > 0:
out = out[:, :, num_preceding_frames:]
if num_succeeding_frames > 0:
out = out[:, :, :-num_succeeding_frames]
return out
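# forward_with_cond_scale above implements classifier free guidance at sampling time: the unet
# is run once with the text condition kept and once with it fully dropped, and the two
# predictions are blended as null + (cond - null) * cond_scale, so cond_scale = 1 recovers the
# conditional prediction and larger values push further along the text direction. A standalone
# numeric sketch of just that blend, using hypothetical stand-in tensors (illustrative only):
def _cond_scale_blend_sketch():
    import torch
    logits = torch.tensor([1.0, 2.0])       # stand-in for the conditional prediction
    null_logits = torch.tensor([0.5, 0.5])  # stand-in for the unconditional prediction
    cond_scale = 3.
    guided = null_logits + (logits - null_logits) * cond_scale
    return guided  # tensor([2.0000, 5.0000])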
|
imagen-pytorch-main
|
imagen_pytorch/imagen_video.py
|
from math import sqrt
from random import random
from functools import partial
from contextlib import contextmanager, nullcontext
from typing import List, Union
from collections import namedtuple
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel
import torchvision.transforms as T
import kornia.augmentation as K
from einops import rearrange, repeat, reduce
from imagen_pytorch.imagen_pytorch import (
GaussianDiffusionContinuousTimes,
Unet,
NullUnet,
first,
exists,
identity,
maybe,
default,
cast_tuple,
cast_uint8_images_to_float,
eval_decorator,
pad_tuple_to_length,
resize_image_to,
calc_all_frame_dims,
safe_get_tuple_index,
right_pad_dims_to,
module_device,
normalize_neg_one_to_one,
unnormalize_zero_to_one,
compact,
maybe_transform_dict_key
)
from imagen_pytorch.imagen_video import (
Unet3D,
resize_video_to,
scale_video_time
)
from imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
# constants
Hparams_fields = [
'num_sample_steps',
'sigma_min',
'sigma_max',
'sigma_data',
'rho',
'P_mean',
'P_std',
'S_churn',
'S_tmin',
'S_tmax',
'S_noise'
]
Hparams = namedtuple('Hparams', Hparams_fields)
# helper functions
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
# main class
class ElucidatedImagen(nn.Module):
def __init__(
self,
unets,
*,
image_sizes, # for cascading ddpm, image size at each stage
text_encoder_name = DEFAULT_T5_NAME,
text_embed_dim = None,
channels = 3,
cond_drop_prob = 0.1,
random_crop_sizes = None,
resize_mode = 'nearest',
temporal_downsample_factor = 1,
resize_cond_video_frames = True,
lowres_sample_noise_level = 0.2, # in the paper, they present a new trick where they noise the lowres conditioning image, and at sample time, fix it to a certain level (0.1 or 0.3) - the unets are also made to be conditioned on this noise level
per_sample_random_aug_noise_level = False, # when conditioning on the augmentation noise level, it is unclear whether each batch element should receive its own random aug noise value - turned off by default due to @marunine's findings
condition_on_text = True,
auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader
dynamic_thresholding = True,
dynamic_thresholding_percentile = 0.95, # exact value unclear from a perusal of the paper
only_train_unet_number = None,
lowres_noise_schedule = 'linear',
num_sample_steps = 32, # number of sampling steps
sigma_min = 0.002, # min noise level
sigma_max = 80, # max noise level
sigma_data = 0.5, # standard deviation of data distribution
rho = 7, # controls the sampling schedule
P_mean = -1.2, # mean of log-normal distribution from which noise is drawn for training
P_std = 1.2, # standard deviation of log-normal distribution from which noise is drawn for training
S_churn = 80, # parameters for stochastic sampling - depends on dataset, Table 5 in paper
S_tmin = 0.05,
S_tmax = 50,
S_noise = 1.003,
):
super().__init__()
self.only_train_unet_number = only_train_unet_number
# conditioning hparams
self.condition_on_text = condition_on_text
self.unconditional = not condition_on_text
# channels
self.channels = channels
# automatically take care of ensuring that first unet is unconditional
# while the rest of the unets are conditioned on the low resolution image produced by previous unet
unets = cast_tuple(unets)
num_unets = len(unets)
# randomly cropping for upsampler training
self.random_crop_sizes = cast_tuple(random_crop_sizes, num_unets)
assert not exists(first(self.random_crop_sizes)), 'you should not need to randomly crop images during training for the base unet, only for upsamplers - so pass in `random_crop_sizes = (None, 128, 256)` for example'
# lowres augmentation noise schedule
self.lowres_noise_schedule = GaussianDiffusionContinuousTimes(noise_schedule = lowres_noise_schedule)
# get text encoder
self.text_encoder_name = text_encoder_name
self.text_embed_dim = default(text_embed_dim, lambda: get_encoded_dim(text_encoder_name))
self.encode_text = partial(t5_encode_text, name = text_encoder_name)
# construct unets
self.unets = nn.ModuleList([])
self.unet_being_trained_index = -1 # keeps track of which unet is being trained at the moment
for ind, one_unet in enumerate(unets):
assert isinstance(one_unet, (Unet, Unet3D, NullUnet))
is_first = ind == 0
one_unet = one_unet.cast_model_parameters(
lowres_cond = not is_first,
cond_on_text = self.condition_on_text,
text_embed_dim = self.text_embed_dim if self.condition_on_text else None,
channels = self.channels,
channels_out = self.channels
)
self.unets.append(one_unet)
# determine whether we are training on images or video
is_video = any([isinstance(unet, Unet3D) for unet in self.unets])
self.is_video = is_video
self.right_pad_dims_to_datatype = partial(rearrange, pattern = ('b -> b 1 1 1' if not is_video else 'b -> b 1 1 1 1'))
self.resize_to = resize_video_to if is_video else resize_image_to
self.resize_to = partial(self.resize_to, mode = resize_mode)
# unet image sizes
self.image_sizes = cast_tuple(image_sizes)
assert num_unets == len(self.image_sizes), f'you did not supply the correct number of u-nets ({len(self.unets)}) for resolutions {self.image_sizes}'
self.sample_channels = cast_tuple(self.channels, num_unets)
# cascading ddpm related stuff
lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
assert lowres_conditions == (False, *((True,) * (num_unets - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'
self.lowres_sample_noise_level = lowres_sample_noise_level
self.per_sample_random_aug_noise_level = per_sample_random_aug_noise_level
# classifier free guidance
self.cond_drop_prob = cond_drop_prob
self.can_classifier_guidance = cond_drop_prob > 0.
# normalize and unnormalize image functions
self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
self.input_image_range = (0. if auto_normalize_img else -1., 1.)
# dynamic thresholding
self.dynamic_thresholding = cast_tuple(dynamic_thresholding, num_unets)
self.dynamic_thresholding_percentile = dynamic_thresholding_percentile
# temporal interpolations
temporal_downsample_factor = cast_tuple(temporal_downsample_factor, num_unets)
self.temporal_downsample_factor = temporal_downsample_factor
self.resize_cond_video_frames = resize_cond_video_frames
self.temporal_downsample_divisor = temporal_downsample_factor[0]
assert temporal_downsample_factor[-1] == 1, 'downsample factor of last stage must be 1'
assert tuple(sorted(temporal_downsample_factor, reverse = True)) == temporal_downsample_factor, 'temporal downsample factors must be in descending order'
# elucidating parameters
hparams = [
num_sample_steps,
sigma_min,
sigma_max,
sigma_data,
rho,
P_mean,
P_std,
S_churn,
S_tmin,
S_tmax,
S_noise,
]
hparams = [cast_tuple(hp, num_unets) for hp in hparams]
self.hparams = [Hparams(*unet_hp) for unet_hp in zip(*hparams)]
# one temp parameter for keeping track of device
self.register_buffer('_temp', torch.tensor([0.]), persistent = False)
# default to device of unets passed in
self.to(next(self.unets.parameters()).device)
def force_unconditional_(self):
self.condition_on_text = False
self.unconditional = True
for unet in self.unets:
unet.cond_on_text = False
@property
def device(self):
return self._temp.device
def get_unet(self, unet_number):
assert 0 < unet_number <= len(self.unets)
index = unet_number - 1
if isinstance(self.unets, nn.ModuleList):
unets_list = [unet for unet in self.unets]
delattr(self, 'unets')
self.unets = unets_list
if index != self.unet_being_trained_index:
for unet_index, unet in enumerate(self.unets):
unet.to(self.device if unet_index == index else 'cpu')
self.unet_being_trained_index = index
return self.unets[index]
def reset_unets_all_one_device(self, device = None):
device = default(device, self.device)
self.unets = nn.ModuleList([*self.unets])
self.unets.to(device)
self.unet_being_trained_index = -1
@contextmanager
def one_unet_in_gpu(self, unet_number = None, unet = None):
assert exists(unet_number) ^ exists(unet)
if exists(unet_number):
unet = self.unets[unet_number - 1]
cpu = torch.device('cpu')
devices = [module_device(unet) for unet in self.unets]
self.unets.to(cpu)
unet.to(self.device)
yield
for unet, device in zip(self.unets, devices):
unet.to(device)
# overriding state dict functions
def state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().load_state_dict(*args, **kwargs)
# dynamic thresholding
def threshold_x_start(self, x_start, dynamic_threshold = True):
if not dynamic_threshold:
return x_start.clamp(-1., 1.)
s = torch.quantile(
rearrange(x_start, 'b ... -> b (...)').abs(),
self.dynamic_thresholding_percentile,
dim = -1
)
s.clamp_(min = 1.)
s = right_pad_dims_to(x_start, s)
return x_start.clamp(-s, s) / s
# derived preconditioning params - Table 1
def c_skip(self, sigma_data, sigma):
return (sigma_data ** 2) / (sigma ** 2 + sigma_data ** 2)
def c_out(self, sigma_data, sigma):
return sigma * sigma_data * (sigma_data ** 2 + sigma ** 2) ** -0.5
def c_in(self, sigma_data, sigma):
return 1 * (sigma ** 2 + sigma_data ** 2) ** -0.5
def c_noise(self, sigma):
return log(sigma) * 0.25
# preconditioned network output
# equation (7) in the paper
def preconditioned_network_forward(
self,
unet_forward,
noised_images,
sigma,
*,
sigma_data,
clamp = False,
dynamic_threshold = True,
**kwargs
):
batch, device = noised_images.shape[0], noised_images.device
if isinstance(sigma, float):
sigma = torch.full((batch,), sigma, device = device)
padded_sigma = self.right_pad_dims_to_datatype(sigma)
net_out = unet_forward(
self.c_in(sigma_data, padded_sigma) * noised_images,
self.c_noise(sigma),
**kwargs
)
out = self.c_skip(sigma_data, padded_sigma) * noised_images + self.c_out(sigma_data, padded_sigma) * net_out
if not clamp:
return out
return self.threshold_x_start(out, dynamic_threshold)
# sampling
# sample schedule
# equation (5) in the paper
def sample_schedule(
self,
num_sample_steps,
rho,
sigma_min,
sigma_max
):
N = num_sample_steps
inv_rho = 1 / rho
steps = torch.arange(num_sample_steps, device = self.device, dtype = torch.float32)
sigmas = (sigma_max ** inv_rho + steps / (N - 1) * (sigma_min ** inv_rho - sigma_max ** inv_rho)) ** rho
sigmas = F.pad(sigmas, (0, 1), value = 0.) # last step is sigma value of 0.
return sigmas
@torch.no_grad()
def one_unet_sample(
self,
unet,
shape,
*,
unet_number,
clamp = True,
dynamic_threshold = True,
cond_scale = 1.,
use_tqdm = True,
inpaint_videos = None,
inpaint_images = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
sigma_min = None,
sigma_max = None,
**kwargs
):
# video
is_video = len(shape) == 5
frames = shape[-3] if is_video else None
resize_kwargs = dict(target_frames = frames) if exists(frames) else dict()
# get specific sampling hyperparameters for unet
hp = self.hparams[unet_number - 1]
sigma_min = default(sigma_min, hp.sigma_min)
sigma_max = default(sigma_max, hp.sigma_max)
# get the schedule, which is returned as (sigma, gamma) tuple, and pair up with the next sigma and gamma
sigmas = self.sample_schedule(hp.num_sample_steps, hp.rho, sigma_min, sigma_max)
gammas = torch.where(
(sigmas >= hp.S_tmin) & (sigmas <= hp.S_tmax),
min(hp.S_churn / hp.num_sample_steps, sqrt(2) - 1),
0.
)
sigmas_and_gammas = list(zip(sigmas[:-1], sigmas[1:], gammas[:-1]))
# images is noise at the beginning
init_sigma = sigmas[0]
images = init_sigma * torch.randn(shape, device = self.device)
# initializing with an image
if exists(init_images):
images += init_images
# keeping track of x0, for self conditioning if needed
x_start = None
# prepare inpainting images and mask
inpaint_images = default(inpaint_videos, inpaint_images)
has_inpainting = exists(inpaint_images) and exists(inpaint_masks)
resample_times = inpaint_resample_times if has_inpainting else 1
if has_inpainting:
inpaint_images = self.normalize_img(inpaint_images)
inpaint_images = self.resize_to(inpaint_images, shape[-1], **resize_kwargs)
inpaint_masks = self.resize_to(rearrange(inpaint_masks, 'b ... -> b 1 ...').float(), shape[-1], **resize_kwargs).bool()
# unet kwargs
unet_kwargs = dict(
sigma_data = hp.sigma_data,
clamp = clamp,
dynamic_threshold = dynamic_threshold,
cond_scale = cond_scale,
**kwargs
)
# gradually denoise
initial_step = default(skip_steps, 0)
sigmas_and_gammas = sigmas_and_gammas[initial_step:]
total_steps = len(sigmas_and_gammas)
for ind, (sigma, sigma_next, gamma) in tqdm(enumerate(sigmas_and_gammas), total = total_steps, desc = 'sampling time step', disable = not use_tqdm):
is_last_timestep = ind == (total_steps - 1)
sigma, sigma_next, gamma = map(lambda t: t.item(), (sigma, sigma_next, gamma))
for r in reversed(range(resample_times)):
is_last_resample_step = r == 0
eps = hp.S_noise * torch.randn(shape, device = self.device) # stochastic sampling
sigma_hat = sigma + gamma * sigma
added_noise = sqrt(sigma_hat ** 2 - sigma ** 2) * eps
images_hat = images + added_noise
self_cond = x_start if unet.self_cond else None
if has_inpainting:
images_hat = images_hat * ~inpaint_masks + (inpaint_images + added_noise) * inpaint_masks
model_output = self.preconditioned_network_forward(
unet.forward_with_cond_scale,
images_hat,
sigma_hat,
self_cond = self_cond,
**unet_kwargs
)
denoised_over_sigma = (images_hat - model_output) / sigma_hat
images_next = images_hat + (sigma_next - sigma_hat) * denoised_over_sigma
# second order correction, if not the last timestep
has_second_order_correction = sigma_next != 0
if has_second_order_correction:
self_cond = model_output if unet.self_cond else None
model_output_next = self.preconditioned_network_forward(
unet.forward_with_cond_scale,
images_next,
sigma_next,
self_cond = self_cond,
**unet_kwargs
)
denoised_prime_over_sigma = (images_next - model_output_next) / sigma_next
images_next = images_hat + 0.5 * (sigma_next - sigma_hat) * (denoised_over_sigma + denoised_prime_over_sigma)
images = images_next
if has_inpainting and not (is_last_resample_step or is_last_timestep):
# renoise in repaint and then resample
repaint_noise = torch.randn(shape, device = self.device)
images = images + (sigma - sigma_next) * repaint_noise
x_start = model_output if not has_second_order_correction else model_output_next # save model output for self conditioning
images = images.clamp(-1., 1.)
if has_inpainting:
images = images * ~inpaint_masks + inpaint_images * inpaint_masks
return self.unnormalize_img(images)
@torch.no_grad()
@eval_decorator
def sample(
self,
texts: List[str] = None,
text_masks = None,
text_embeds = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
inpaint_videos = None,
inpaint_images = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
sigma_min = None,
sigma_max = None,
video_frames = None,
batch_size = 1,
cond_scale = 1.,
lowres_sample_noise_level = None,
start_at_unet_number = 1,
start_image_or_video = None,
stop_at_unet_number = None,
return_all_unet_outputs = False,
return_pil_images = False,
use_tqdm = True,
use_one_unet_in_gpu = True,
device = None,
):
device = default(device, self.device)
self.reset_unets_all_one_device(device = device)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(device), (text_embeds, text_masks))
if not self.unconditional:
assert exists(text_embeds), 'text must be passed in if the network was trained with text conditioning - set `condition_on_text = False` at training time if you want to sample without text'
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
batch_size = text_embeds.shape[0]
# inpainting
inpaint_images = default(inpaint_videos, inpaint_images)
if exists(inpaint_images):
if self.unconditional:
if batch_size == 1: # assume researcher wants to broadcast along inpainted images
batch_size = inpaint_images.shape[0]
assert inpaint_images.shape[0] == batch_size, 'number of inpainting images must be equal to the batch size specified on sample - `sample(batch_size=<int>)`'
assert not (self.condition_on_text and inpaint_images.shape[0] != text_embeds.shape[0]), 'number of inpainting images must be equal to the number of text to be conditioned on'
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into imagen if it was set to condition on text'
assert not (not self.condition_on_text and exists(text_embeds)), 'imagen was specified not to be conditioned on text, yet text embeddings were passed in'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
assert not (exists(inpaint_images) ^ exists(inpaint_masks)), 'inpaint images and masks must be both passed in to do inpainting'
outputs = []
is_cuda = next(self.parameters()).is_cuda
device = next(self.parameters()).device
lowres_sample_noise_level = default(lowres_sample_noise_level, self.lowres_sample_noise_level)
num_unets = len(self.unets)
cond_scale = cast_tuple(cond_scale, num_unets)
# handle video and frame dimension
if self.is_video and exists(inpaint_images):
video_frames = inpaint_images.shape[2]
if inpaint_masks.ndim == 3:
inpaint_masks = repeat(inpaint_masks, 'b h w -> b f h w', f = video_frames)
assert inpaint_masks.shape[1] == video_frames
assert not (self.is_video and not exists(video_frames)), 'video_frames must be passed in on sample time if training on video'
# determine the frame dimensions, if needed
all_frame_dims = calc_all_frame_dims(self.temporal_downsample_factor, video_frames)
# initializing with an image or video
init_images = cast_tuple(init_images, num_unets)
init_images = [maybe(self.normalize_img)(init_image) for init_image in init_images]
skip_steps = cast_tuple(skip_steps, num_unets)
sigma_min = cast_tuple(sigma_min, num_unets)
sigma_max = cast_tuple(sigma_max, num_unets)
# handle starting at a unet greater than 1, e.g. when only training the upsamplers
if start_at_unet_number > 1:
assert start_at_unet_number <= num_unets, 'must start at a unet number that does not exceed the total number of unets'
assert not exists(stop_at_unet_number) or start_at_unet_number <= stop_at_unet_number
assert exists(start_image_or_video), 'starting image or video must be supplied if only doing upscaling'
prev_image_size = self.image_sizes[start_at_unet_number - 2]
img = self.resize_to(start_image_or_video, prev_image_size)
# go through each unet in cascade
for unet_number, unet, channel, image_size, frame_dims, unet_hparam, dynamic_threshold, unet_cond_scale, unet_init_images, unet_skip_steps, unet_sigma_min, unet_sigma_max in tqdm(zip(range(1, num_unets + 1), self.unets, self.sample_channels, self.image_sizes, all_frame_dims, self.hparams, self.dynamic_thresholding, cond_scale, init_images, skip_steps, sigma_min, sigma_max), disable = not use_tqdm):
if unet_number < start_at_unet_number:
continue
assert not isinstance(unet, NullUnet), 'cannot sample from null unet'
context = self.one_unet_in_gpu(unet = unet) if is_cuda and use_one_unet_in_gpu else nullcontext()
with context:
lowres_cond_img = lowres_noise_times = None
shape = (batch_size, channel, *frame_dims, image_size, image_size)
resize_kwargs = dict()
video_kwargs = dict()
if self.is_video:
resize_kwargs = dict(target_frames = frame_dims[0])
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames
)
video_kwargs = compact(video_kwargs)
# handle video conditioning frames
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_number - 1]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'cond_video_frames', temporal_downsample_fn)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# low resolution conditioning
if unet.lowres_cond:
lowres_noise_times = self.lowres_noise_schedule.get_times(batch_size, lowres_sample_noise_level, device = device)
lowres_cond_img = self.resize_to(img, image_size, **resize_kwargs)
lowres_cond_img = self.normalize_img(lowres_cond_img)
lowres_cond_img, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_noise_times, noise = torch.randn_like(lowres_cond_img))
if exists(unet_init_images):
unet_init_images = self.resize_to(unet_init_images, image_size, **resize_kwargs)
shape = (batch_size, self.channels, *frame_dims, image_size, image_size)
img = self.one_unet_sample(
unet,
shape,
unet_number = unet_number,
text_embeds = text_embeds,
text_mask = text_masks,
cond_images = cond_images,
inpaint_images = inpaint_images,
inpaint_masks = inpaint_masks,
inpaint_resample_times = inpaint_resample_times,
init_images = unet_init_images,
skip_steps = unet_skip_steps,
sigma_min = unet_sigma_min,
sigma_max = unet_sigma_max,
cond_scale = unet_cond_scale,
lowres_cond_img = lowres_cond_img,
lowres_noise_times = lowres_noise_times,
dynamic_threshold = dynamic_threshold,
use_tqdm = use_tqdm,
**video_kwargs
)
outputs.append(img)
if exists(stop_at_unet_number) and stop_at_unet_number == unet_number:
break
output_index = -1 if not return_all_unet_outputs else slice(None) # either return last unet output or all unet outputs
if not return_pil_images:
return outputs[output_index]
if not return_all_unet_outputs:
outputs = outputs[-1:]
assert not self.is_video, 'automatically converting video tensor to video file for saving is not built yet'
pil_images = list(map(lambda img: list(map(T.ToPILImage(), img.unbind(dim = 0))), outputs))
return pil_images[output_index] # now you have a bunch of pillow images you can just .save(/where/ever/you/want.png)
# training
def loss_weight(self, sigma_data, sigma):
return (sigma ** 2 + sigma_data ** 2) * (sigma * sigma_data) ** -2
def noise_distribution(self, P_mean, P_std, batch_size):
return (P_mean + P_std * torch.randn((batch_size,), device = self.device)).exp()
def forward(
self,
images, # rename to images or video
unet: Union[Unet, Unet3D, NullUnet, DistributedDataParallel] = None,
texts: List[str] = None,
text_embeds = None,
text_masks = None,
unet_number = None,
cond_images = None,
**kwargs
):
if self.is_video and images.ndim == 4:
images = rearrange(images, 'b c h w -> b c 1 h w')
kwargs.update(ignore_time = True)
assert images.shape[-1] == images.shape[-2], f'the images you pass in must be square, but received dimensions of {images.shape[-2]}, {images.shape[-1]}'
assert not (len(self.unets) > 1 and not exists(unet_number)), f'you must specify which unet you want trained, from a range of 1 to {len(self.unets)}, if you are training cascading DDPM (multiple unets)'
unet_number = default(unet_number, 1)
assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train on unet #{self.only_train_unet_number}'
images = cast_uint8_images_to_float(images)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
assert images.dtype == torch.float, f'images tensor needs to be floats but {images.dtype} dtype found instead'
unet_index = unet_number - 1
unet = default(unet, lambda: self.get_unet(unet_number))
assert not isinstance(unet, NullUnet), 'null unet cannot and should not be trained'
target_image_size = self.image_sizes[unet_index]
random_crop_size = self.random_crop_sizes[unet_index]
prev_image_size = self.image_sizes[unet_index - 1] if unet_index > 0 else None
hp = self.hparams[unet_index]
batch_size, c, *_, h, w, device, is_video = *images.shape, images.device, (images.ndim == 5)
frames = images.shape[2] if is_video else None
all_frame_dims = tuple(safe_get_tuple_index(el, 0) for el in calc_all_frame_dims(self.temporal_downsample_factor, frames))
ignore_time = kwargs.get('ignore_time', False)
target_frame_size = all_frame_dims[unet_index] if is_video and not ignore_time else None
prev_frame_size = all_frame_dims[unet_index - 1] if is_video and not ignore_time and unet_index > 0 else None
frames_to_resize_kwargs = lambda frames: dict(target_frames = frames) if exists(frames) else dict()
assert images.shape[1] == self.channels
assert h >= target_image_size and w >= target_image_size
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
assert len(texts) == len(images), 'number of text captions does not match up with the number of images given'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(images.device), (text_embeds, text_masks))
if not self.unconditional:
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into the decoder if it was set to condition on text'
assert not (not self.condition_on_text and exists(text_embeds)), 'decoder was specified not to be conditioned on text, yet text embeddings were passed in'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
# handle video conditioning frames
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_index]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
kwargs = maybe_transform_dict_key(kwargs, 'cond_video_frames', temporal_downsample_fn)
kwargs = maybe_transform_dict_key(kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# low resolution conditioning
lowres_cond_img = lowres_aug_times = None
if exists(prev_image_size):
lowres_cond_img = self.resize_to(images, prev_image_size, **frames_to_resize_kwargs(prev_frame_size), clamp_range = self.input_image_range)
lowres_cond_img = self.resize_to(lowres_cond_img, target_image_size, **frames_to_resize_kwargs(target_frame_size), clamp_range = self.input_image_range)
if self.per_sample_random_aug_noise_level:
lowres_aug_times = self.lowres_noise_schedule.sample_random_times(batch_size, device = device)
else:
lowres_aug_time = self.lowres_noise_schedule.sample_random_times(1, device = device)
lowres_aug_times = repeat(lowres_aug_time, '1 -> b', b = batch_size)
images = self.resize_to(images, target_image_size, **frames_to_resize_kwargs(target_frame_size))
# normalize to [-1, 1]
images = self.normalize_img(images)
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
# random cropping during training
# for upsamplers
if exists(random_crop_size):
aug = K.RandomCrop((random_crop_size, random_crop_size), p = 1.)
if is_video:
images, lowres_cond_img = map(lambda t: rearrange(t, 'b c f h w -> (b f) c h w'), (images, lowres_cond_img))
# make sure low res conditioner and image both get augmented the same way
# detailed https://kornia.readthedocs.io/en/latest/augmentation.module.html?highlight=randomcrop#kornia.augmentation.RandomCrop
images = aug(images)
lowres_cond_img = aug(lowres_cond_img, params = aug._params)
if is_video:
images, lowres_cond_img = map(lambda t: rearrange(t, '(b f) c h w -> b c f h w', f = frames), (images, lowres_cond_img))
# noise the lowres conditioning image
# at sample time, they then fix the noise level to 0.1 - 0.3
lowres_cond_img_noisy = None
if exists(lowres_cond_img):
lowres_cond_img_noisy, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_aug_times, noise = torch.randn_like(lowres_cond_img))
# get the sigmas
sigmas = self.noise_distribution(hp.P_mean, hp.P_std, batch_size)
padded_sigmas = self.right_pad_dims_to_datatype(sigmas)
# noise
noise = torch.randn_like(images)
noised_images = images + padded_sigmas * noise # alphas are 1. in the paper
# unet kwargs
unet_kwargs = dict(
sigma_data = hp.sigma_data,
text_embeds = text_embeds,
text_mask = text_masks,
cond_images = cond_images,
lowres_noise_times = self.lowres_noise_schedule.get_condition(lowres_aug_times),
lowres_cond_img = lowres_cond_img_noisy,
cond_drop_prob = self.cond_drop_prob,
**kwargs
)
# self conditioning - https://arxiv.org/abs/2208.04202 - training will be 25% slower
# Because 'unet' can be an instance of DistributedDataParallel coming from the
# ImagenTrainer.unet_being_trained when invoking ImagenTrainer.forward(), we need to
# access the member 'module' of the wrapped unet instance.
self_cond = unet.module.self_cond if isinstance(unet, DistributedDataParallel) else unet.self_cond
if self_cond and random() < 0.5:
with torch.no_grad():
pred_x0 = self.preconditioned_network_forward(
unet.forward,
noised_images,
sigmas,
**unet_kwargs
).detach()
unet_kwargs = {**unet_kwargs, 'self_cond': pred_x0}
# get prediction
denoised_images = self.preconditioned_network_forward(
unet.forward,
noised_images,
sigmas,
**unet_kwargs
)
# losses
losses = F.mse_loss(denoised_images, images, reduction = 'none')
losses = reduce(losses, 'b ... -> b', 'mean')
# loss weighting
losses = losses * self.loss_weight(hp.sigma_data, sigmas)
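        # note: self.loss_weight presumably implements the EDM weighting lambda(sigma) = (sigma^2 + sigma_data^2) / (sigma * sigma_data)^2,
        # which keeps the effective loss magnitude comparable across noise levels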
# return average loss
return losses.mean()
|
imagen-pytorch-main
|
imagen_pytorch/elucidated_imagen.py
|
import json
from pydantic import BaseModel, validator
from typing import List, Iterable, Optional, Union, Tuple, Dict, Any
from enum import Enum
from imagen_pytorch.imagen_pytorch import Imagen, Unet, Unet3D, NullUnet
from imagen_pytorch.trainer import ImagenTrainer
from imagen_pytorch.elucidated_imagen import ElucidatedImagen
from imagen_pytorch.t5 import DEFAULT_T5_NAME, get_encoded_dim
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def ListOrTuple(inner_type):
return Union[List[inner_type], Tuple[inner_type]]
def SingleOrList(inner_type):
return Union[inner_type, ListOrTuple(inner_type)]
# noise schedule
class NoiseSchedule(Enum):
cosine = 'cosine'
linear = 'linear'
class AllowExtraBaseModel(BaseModel):
class Config:
extra = "allow"
use_enum_values = True
# imagen pydantic classes
class NullUnetConfig(BaseModel):
is_null: bool
def create(self):
return NullUnet()
class UnetConfig(AllowExtraBaseModel):
dim: int
dim_mults: ListOrTuple(int)
text_embed_dim: int = get_encoded_dim(DEFAULT_T5_NAME)
cond_dim: int = None
channels: int = 3
attn_dim_head: int = 32
attn_heads: int = 16
def create(self):
return Unet(**self.dict())
class Unet3DConfig(AllowExtraBaseModel):
dim: int
dim_mults: ListOrTuple(int)
text_embed_dim: int = get_encoded_dim(DEFAULT_T5_NAME)
cond_dim: int = None
channels: int = 3
attn_dim_head: int = 32
attn_heads: int = 16
def create(self):
return Unet3D(**self.dict())
class ImagenConfig(AllowExtraBaseModel):
unets: ListOrTuple(Union[UnetConfig, Unet3DConfig, NullUnetConfig])
image_sizes: ListOrTuple(int)
video: bool = False
timesteps: SingleOrList(int) = 1000
noise_schedules: SingleOrList(NoiseSchedule) = 'cosine'
text_encoder_name: str = DEFAULT_T5_NAME
channels: int = 3
loss_type: str = 'l2'
cond_drop_prob: float = 0.5
@validator('image_sizes')
def check_image_sizes(cls, image_sizes, values):
unets = values.get('unets')
if len(image_sizes) != len(unets):
raise ValueError(f'image sizes length {len(image_sizes)} must be equivalent to the number of unets {len(unets)}')
return image_sizes
def create(self):
decoder_kwargs = self.dict()
unets_kwargs = decoder_kwargs.pop('unets')
is_video = decoder_kwargs.pop('video', False)
unets = []
for unet, unet_kwargs in zip(self.unets, unets_kwargs):
if isinstance(unet, NullUnetConfig):
unet_klass = NullUnet
elif is_video:
unet_klass = Unet3D
else:
unet_klass = Unet
unets.append(unet_klass(**unet_kwargs))
imagen = Imagen(unets, **decoder_kwargs)
imagen._config = self.dict().copy()
return imagen
class ElucidatedImagenConfig(AllowExtraBaseModel):
unets: ListOrTuple(Union[UnetConfig, Unet3DConfig, NullUnetConfig])
image_sizes: ListOrTuple(int)
video: bool = False
text_encoder_name: str = DEFAULT_T5_NAME
channels: int = 3
cond_drop_prob: float = 0.5
num_sample_steps: SingleOrList(int) = 32
sigma_min: SingleOrList(float) = 0.002
sigma_max: SingleOrList(int) = 80
sigma_data: SingleOrList(float) = 0.5
rho: SingleOrList(int) = 7
P_mean: SingleOrList(float) = -1.2
P_std: SingleOrList(float) = 1.2
S_churn: SingleOrList(int) = 80
S_tmin: SingleOrList(float) = 0.05
S_tmax: SingleOrList(int) = 50
S_noise: SingleOrList(float) = 1.003
@validator('image_sizes')
def check_image_sizes(cls, image_sizes, values):
unets = values.get('unets')
if len(image_sizes) != len(unets):
raise ValueError(f'image sizes length {len(image_sizes)} must be equivalent to the number of unets {len(unets)}')
return image_sizes
def create(self):
decoder_kwargs = self.dict()
unets_kwargs = decoder_kwargs.pop('unets')
is_video = decoder_kwargs.pop('video', False)
unet_klass = Unet3D if is_video else Unet
unets = []
for unet, unet_kwargs in zip(self.unets, unets_kwargs):
if isinstance(unet, NullUnetConfig):
unet_klass = NullUnet
elif is_video:
unet_klass = Unet3D
else:
unet_klass = Unet
unets.append(unet_klass(**unet_kwargs))
imagen = ElucidatedImagen(unets, **decoder_kwargs)
imagen._config = self.dict().copy()
return imagen
class ImagenTrainerConfig(AllowExtraBaseModel):
imagen: dict
elucidated: bool = False
video: bool = False
use_ema: bool = True
lr: SingleOrList(float) = 1e-4
eps: SingleOrList(float) = 1e-8
beta1: float = 0.9
beta2: float = 0.99
max_grad_norm: Optional[float] = None
group_wd_params: bool = True
warmup_steps: SingleOrList(Optional[int]) = None
cosine_decay_max_steps: SingleOrList(Optional[int]) = None
def create(self):
trainer_kwargs = self.dict()
imagen_config = trainer_kwargs.pop('imagen')
elucidated = trainer_kwargs.pop('elucidated')
imagen_config_klass = ElucidatedImagenConfig if elucidated else ImagenConfig
        video = trainer_kwargs.pop('video')
        imagen = imagen_config_klass(**{**imagen_config, 'video': video}).create()
return ImagenTrainer(imagen, **trainer_kwargs)
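# usage sketch (illustrative, not part of the original file) - building a trainer from plain dicts;
# the unet dims and image sizes below are placeholders, not recommended settings:
#
#     trainer = ImagenTrainerConfig(
#         imagen = dict(
#             unets = [dict(dim = 32, dim_mults = (1, 2, 4))],
#             image_sizes = [64]
#         ),
#         elucidated = False,
#         use_ema = False
#     ).create()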
|
imagen-pytorch-main
|
imagen_pytorch/configs.py
|
__version__ = '1.25.6'
|
imagen-pytorch-main
|
imagen_pytorch/version.py
|
import torch
import transformers
from typing import List
from transformers import T5Tokenizer, T5EncoderModel, T5Config
from einops import rearrange
transformers.logging.set_verbosity_error()
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name, model_max_length=MAX_LENGTH)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
# avoids loading the model if we only want to get the dim
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config=config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
assert False
return config.d_model
# encoding text
def t5_tokenize(
texts: List[str],
name = DEFAULT_T5_NAME
):
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = "pt",
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
return input_ids, attn_mask
def t5_encode_tokenized_text(
token_ids,
attn_mask = None,
pad_id = None,
name = DEFAULT_T5_NAME
):
assert exists(attn_mask) or exists(pad_id)
t5, _ = get_model_and_tokenizer(name)
attn_mask = default(attn_mask, lambda: (token_ids != pad_id).long())
t5.eval()
with torch.no_grad():
output = t5(input_ids = token_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask.bool()
    encoded_text = encoded_text.masked_fill(~rearrange(attn_mask, '... -> ... 1'), 0.) # force embeddings at padding positions to 0.
return encoded_text
def t5_encode_text(
texts: List[str],
name = DEFAULT_T5_NAME,
return_attn_mask = False
):
token_ids, attn_mask = t5_tokenize(texts, name = name)
encoded_text = t5_encode_tokenized_text(token_ids, attn_mask = attn_mask, name = name)
if return_attn_mask:
attn_mask = attn_mask.bool()
return encoded_text, attn_mask
return encoded_text
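# usage sketch (illustrative, not part of the original file): encoding a small batch of prompts.
# running this downloads the default T5 encoder from the HuggingFace hub.
if __name__ == '__main__':
    embeds, mask = t5_encode_text(['a photo of a dog', 'a photo of a cat'], return_attn_mask = True)
    # embeds is (2, seq_len, 768) for google/t5-v1_1-base, mask is (2, seq_len)
    print(embeds.shape, mask.shape)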
|
imagen-pytorch-main
|
imagen_pytorch/t5.py
|
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from imagen_pytorch.imagen_pytorch import Imagen, Unet
from imagen_pytorch.imagen_pytorch import NullUnet
from imagen_pytorch.imagen_pytorch import BaseUnet64, SRUnet256, SRUnet1024
from imagen_pytorch.trainer import ImagenTrainer
from imagen_pytorch.version import __version__
# imagen using the elucidated ddpm from Tero Karras' new paper
from imagen_pytorch.elucidated_imagen import ElucidatedImagen
# config driven creation of imagen instances
from imagen_pytorch.configs import UnetConfig, ImagenConfig, ElucidatedImagenConfig, ImagenTrainerConfig
# utils
from imagen_pytorch.utils import load_imagen_from_checkpoint
# video
from imagen_pytorch.imagen_video import Unet3D
|
imagen-pytorch-main
|
imagen_pytorch/__init__.py
|
import click
import torch
from pathlib import Path
import pkgutil
from imagen_pytorch import load_imagen_from_checkpoint
from imagen_pytorch.version import __version__
from imagen_pytorch.data import Collator
from imagen_pytorch.utils import safeget
from imagen_pytorch import ImagenTrainer, ElucidatedImagenConfig, ImagenConfig
from datasets import load_dataset
import json
def exists(val):
return val is not None
def simple_slugify(text, max_length = 255):
return text.replace('-', '_').replace(',', '').replace(' ', '_').replace('|', '--').strip('-_')[:max_length]
def main():
pass
@click.group()
def imagen():
pass
@imagen.command(help = 'Sample from the Imagen model checkpoint')
@click.option('--model', default = './imagen.pt', help = 'path to trained Imagen model')
@click.option('--cond_scale', default = 5, help = 'conditioning scale (classifier free guidance) in decoder')
@click.option('--load_ema', default = True, help = 'load EMA version of unets if available')
@click.argument('text')
def sample(
model,
cond_scale,
load_ema,
text
):
model_path = Path(model)
full_model_path = str(model_path.resolve())
assert model_path.exists(), f'model not found at {full_model_path}'
loaded = torch.load(str(model_path))
# get version
version = safeget(loaded, 'version')
print(f'loading Imagen from {full_model_path}, saved at version {version} - current package version is {__version__}')
# get imagen parameters and type
imagen = load_imagen_from_checkpoint(str(model_path), load_ema_if_available = load_ema)
imagen.cuda()
# generate image
pil_image = imagen.sample(text, cond_scale = cond_scale, return_pil_images = True)
image_path = f'./{simple_slugify(text)}.png'
pil_image[0].save(image_path)
print(f'image saved to {str(image_path)}')
return
@imagen.command(help = 'Generate a config for the Imagen model')
@click.option('--path', default = './imagen_config.json', help = 'Path to the Imagen model config')
def config(
path
):
data = pkgutil.get_data(__name__, 'default_config.json').decode("utf-8")
with open(path, 'w') as f:
f.write(data)
@imagen.command(help = 'Train the Imagen model')
@click.option('--config', default = './imagen_config.json', help = 'Path to the Imagen model config')
@click.option('--unet', default = 1, help = 'Unet to train', type = click.IntRange(1, 3, False, True, True))
@click.option('--epoches', default = 1000, help = 'Number of epochs to train for')
@click.option('--text', required = False, help = 'Text to sample with between epochs', type=str)
@click.option('--valid', is_flag = False, flag_value=50, default = 0, help = 'Run validation every N epochs (0 to disable)', show_default = True)
def train(
config,
unet,
epoches,
text,
valid
):
# check config path
config_path = Path(config)
full_config_path = str(config_path.resolve())
assert config_path.exists(), f'config not found at {full_config_path}'
with open(config_path, 'r') as f:
config_data = json.loads(f.read())
assert 'checkpoint_path' in config_data, 'checkpoint path not found in config'
model_path = Path(config_data['checkpoint_path'])
full_model_path = str(model_path.resolve())
# setup imagen config
imagen_config_klass = ElucidatedImagenConfig if config_data['type'] == 'elucidated' else ImagenConfig
imagen = imagen_config_klass(**config_data['imagen']).create()
trainer = ImagenTrainer(
imagen = imagen,
**config_data['trainer']
)
# load pt
if model_path.exists():
loaded = torch.load(str(model_path))
version = safeget(loaded, 'version')
print(f'loading Imagen from {full_model_path}, saved at version {version} - current package version is {__version__}')
trainer.load(model_path)
if torch.cuda.is_available():
trainer = trainer.cuda()
size = config_data['imagen']['image_sizes'][unet-1]
max_batch_size = config_data['max_batch_size'] if 'max_batch_size' in config_data else 1
channels = 'RGB'
if 'channels' in config_data['imagen']:
        assert config_data['imagen']['channels'] > 0 and config_data['imagen']['channels'] < 5, 'Imagen only supports 1 to 4 channels (L, LA, RGB, RGBA)'
if config_data['imagen']['channels'] == 4:
channels = 'RGBA' # Color with alpha
elif config_data['imagen']['channels'] == 2:
            channels = 'LA' # Luminance (Greyscale) with alpha
elif config_data['imagen']['channels'] == 1:
channels = 'L' # Luminance (Greyscale)
assert 'batch_size' in config_data['dataset'], 'A batch_size is required in the config file'
# load and add train dataset and valid dataset
ds = load_dataset(config_data['dataset_name'])
trainer.add_train_dataset(
ds = ds['train'],
collate_fn = Collator(
image_size = size,
image_label = config_data['image_label'],
text_label = config_data['text_label'],
url_label = config_data['url_label'],
name = imagen.text_encoder_name,
channels = channels
),
**config_data['dataset']
)
if not trainer.split_valid_from_train and valid != 0:
assert 'valid' in ds, 'There is no validation split in the dataset'
trainer.add_valid_dataset(
ds = ds['valid'],
collate_fn = Collator(
image_size = size,
image_label = config_data['image_label'],
text_label= config_data['text_label'],
url_label = config_data['url_label'],
name = imagen.text_encoder_name,
channels = channels
),
**config_data['dataset']
)
for i in range(epoches):
loss = trainer.train_step(unet_number = unet, max_batch_size = max_batch_size)
print(f'loss: {loss}')
if valid != 0 and not (i % valid) and i > 0:
valid_loss = trainer.valid_step(unet_number = unet, max_batch_size = max_batch_size)
print(f'valid loss: {valid_loss}')
if not (i % 100) and i > 0 and trainer.is_main and text is not None:
images = trainer.sample(texts = [text], batch_size = 1, return_pil_images = True, stop_at_unet_number = unet)
images[0].save(f'./sample-{i // 100}.png')
trainer.save(model_path)
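# example shape of the JSON config consumed by `imagen train`, inferred from the keys read above;
# all values are illustrative placeholders (the packaged default_config.json may differ):
#
# {
#     "type": "elucidated",
#     "checkpoint_path": "./imagen.pt",
#     "imagen": { "unets": [ ... ], "image_sizes": [ 64 ] },
#     "trainer": { "lr": 1e-4 },
#     "dataset_name": "<huggingface dataset name>",
#     "dataset": { "batch_size": 16 },
#     "image_label": "image",
#     "text_label": "caption",
#     "url_label": null,
#     "max_batch_size": 4
# }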
|
imagen-pytorch-main
|
imagen_pytorch/cli.py
|
import torch
from torch import nn
from functools import reduce
from pathlib import Path
from imagen_pytorch.configs import ImagenConfig, ElucidatedImagenConfig
from ema_pytorch import EMA
def exists(val):
return val is not None
def safeget(dictionary, keys, default = None):
return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split('.'), dictionary)
def load_imagen_from_checkpoint(
checkpoint_path,
load_weights = True,
load_ema_if_available = False
):
model_path = Path(checkpoint_path)
full_model_path = str(model_path.resolve())
assert model_path.exists(), f'checkpoint not found at {full_model_path}'
loaded = torch.load(str(model_path), map_location='cpu')
imagen_params = safeget(loaded, 'imagen_params')
imagen_type = safeget(loaded, 'imagen_type')
if imagen_type == 'original':
imagen_klass = ImagenConfig
elif imagen_type == 'elucidated':
imagen_klass = ElucidatedImagenConfig
else:
raise ValueError(f'unknown imagen type {imagen_type} - you need to instantiate your Imagen with configurations, using classes ImagenConfig or ElucidatedImagenConfig')
assert exists(imagen_params) and exists(imagen_type), 'imagen type and configuration not saved in this checkpoint'
imagen = imagen_klass(**imagen_params).create()
if not load_weights:
return imagen
has_ema = 'ema' in loaded
should_load_ema = has_ema and load_ema_if_available
imagen.load_state_dict(loaded['model'])
if not should_load_ema:
print('loading non-EMA version of unets')
return imagen
ema_unets = nn.ModuleList([])
for unet in imagen.unets:
ema_unets.append(EMA(unet))
ema_unets.load_state_dict(loaded['ema'])
for unet, ema_unet in zip(imagen.unets, ema_unets):
unet.load_state_dict(ema_unet.ema_model.state_dict())
print('loaded EMA version of unets')
return imagen
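# usage sketch (illustrative, not part of the original file), assuming a trained checkpoint exists
# at the placeholder path below:
#
#     imagen = load_imagen_from_checkpoint('./path/to/checkpoint.pt', load_ema_if_available = True)
#     images = imagen.sample(texts = ['a placeholder prompt'], cond_scale = 3., return_pil_images = True)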
|
imagen-pytorch-main
|
imagen_pytorch/utils.py
|
import os
import time
import copy
from pathlib import Path
from math import ceil
from contextlib import contextmanager, nullcontext
from functools import partial, wraps
from collections.abc import Iterable
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import random_split, DataLoader
from torch.optim import Adam
from lion_pytorch import Lion
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR
from torch.cuda.amp import autocast, GradScaler
import pytorch_warmup as warmup
from imagen_pytorch.imagen_pytorch import Imagen, NullUnet
from imagen_pytorch.elucidated_imagen import ElucidatedImagen
from imagen_pytorch.data import cycle
from imagen_pytorch.version import __version__
from packaging import version
import numpy as np
from ema_pytorch import EMA
from accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs
from fsspec.core import url_to_fs
from fsspec.implementations.local import LocalFileSystem
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = 1):
if isinstance(val, list):
val = tuple(val)
return val if isinstance(val, tuple) else ((val,) * length)
def find_first(fn, arr):
for ind, el in enumerate(arr):
if fn(el):
return ind
return -1
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
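# e.g. num_to_groups(10, 4) -> [4, 4, 2]; used below to split a batch into sub-batches of at most `divisor` items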
# url to fs, bucket, path - for checkpointing to cloud
def url_to_bucket(url):
if '://' not in url:
return url
    prefix, suffix = url.split('://')
if prefix in {'gs', 's3'}:
return suffix.split('/')[0]
else:
raise ValueError(f'storage type prefix "{prefix}" is not supported yet')
# decorators
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def cast_torch_tensor(fn, cast_fp16 = False):
@wraps(fn)
def inner(model, *args, **kwargs):
device = kwargs.pop('_device', model.device)
cast_device = kwargs.pop('_cast_device', True)
should_cast_fp16 = cast_fp16 and model.cast_half_at_training
kwargs_keys = kwargs.keys()
all_args = (*args, *kwargs.values())
split_kwargs_index = len(all_args) - len(kwargs_keys)
all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))
if cast_device:
all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))
if should_cast_fp16:
all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))
args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]
kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))
out = fn(model, *args, **kwargs)
return out
return inner
# gradient accumulation functions
def split_iterable(it, split_size):
accum = []
for ind in range(ceil(len(it) / split_size)):
start_index = ind * split_size
accum.append(it[start_index: (start_index + split_size)])
return accum
def split(t, split_size = None):
if not exists(split_size):
return t
if isinstance(t, torch.Tensor):
return t.split(split_size, dim = 0)
if isinstance(t, Iterable):
return split_iterable(t, split_size)
    raise TypeError(f'cannot split objects of type {type(t)}')
def find_first(cond, arr):
for el in arr:
if cond(el):
return el
return None
def split_args_and_kwargs(*args, split_size = None, **kwargs):
all_args = (*args, *kwargs.values())
len_all_args = len(all_args)
first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)
assert exists(first_tensor)
batch_size = len(first_tensor)
split_size = default(split_size, batch_size)
num_chunks = ceil(batch_size / split_size)
dict_len = len(kwargs)
dict_keys = kwargs.keys()
split_kwargs_index = len_all_args - dict_len
split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]
chunk_sizes = num_to_groups(batch_size, split_size)
for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):
chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]
chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))
chunk_size_frac = chunk_size / batch_size
yield chunk_size_frac, (chunked_args, chunked_kwargs)
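# usage sketch (illustrative, not part of the original file): chunking positional and keyword
# tensor arguments for gradient accumulation; the tensors below are placeholders.
if __name__ == '__main__':
    images = torch.randn(10, 3, 8, 8)
    embeds = torch.randn(10, 4, 16)
    for frac, (chunk_args, chunk_kwargs) in split_args_and_kwargs(images, text_embeds = embeds, split_size = 4):
        # fracs are 0.4, 0.4, 0.2 - each chunk's share of the full batch, used to weight its loss
        print(frac, chunk_args[0].shape, chunk_kwargs['text_embeds'].shape)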
# imagen trainer
def imagen_sample_in_chunks(fn):
@wraps(fn)
def inner(self, *args, max_batch_size = None, **kwargs):
if not exists(max_batch_size):
return fn(self, *args, **kwargs)
if self.imagen.unconditional:
batch_size = kwargs.get('batch_size')
batch_sizes = num_to_groups(batch_size, max_batch_size)
outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]
else:
outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]
if isinstance(outputs[0], torch.Tensor):
return torch.cat(outputs, dim = 0)
return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))
return inner
def restore_parts(state_dict_target, state_dict_from):
for name, param in state_dict_from.items():
if name not in state_dict_target:
continue
if param.size() == state_dict_target[name].size():
state_dict_target[name].copy_(param)
else:
print(f"layer {name}({param.size()} different than target: {state_dict_target[name].size()}")
return state_dict_target
class ImagenTrainer(nn.Module):
locked = False
def __init__(
self,
imagen = None,
imagen_checkpoint_path = None,
use_ema = True,
lr = 1e-4,
eps = 1e-8,
beta1 = 0.9,
beta2 = 0.99,
max_grad_norm = None,
group_wd_params = True,
warmup_steps = None,
cosine_decay_max_steps = None,
only_train_unet_number = None,
fp16 = False,
precision = None,
split_batches = True,
dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),
verbose = True,
split_valid_fraction = 0.025,
split_valid_from_train = False,
split_random_seed = 42,
checkpoint_path = None,
checkpoint_every = None,
checkpoint_fs = None,
fs_kwargs: dict = None,
max_checkpoints_keep = 20,
use_lion = False,
**kwargs
):
super().__init__()
assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'
assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'
# determine filesystem, using fsspec, for saving to local filesystem or cloud
self.fs = checkpoint_fs
if not exists(self.fs):
fs_kwargs = default(fs_kwargs, {})
self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)
assert isinstance(imagen, (Imagen, ElucidatedImagen))
ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)
# elucidated or not
self.is_elucidated = isinstance(imagen, ElucidatedImagen)
# create accelerator instance
accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)
assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision ("fp16", "bf16") to Accelerator'
accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')
self.accelerator = Accelerator(**{
'split_batches': split_batches,
'mixed_precision': accelerator_mixed_precision,
'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]
, **accelerate_kwargs})
ImagenTrainer.locked = self.is_distributed
# cast data to fp16 at training time if needed
self.cast_half_at_training = accelerator_mixed_precision == 'fp16'
# grad scaler must be managed outside of accelerator
grad_scaler_enabled = fp16
# imagen, unets and ema unets
self.imagen = imagen
self.num_unets = len(self.imagen.unets)
self.use_ema = use_ema and self.is_main
self.ema_unets = nn.ModuleList([])
# keep track of what unet is being trained on
# only going to allow 1 unet training at a time
self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on
# data related functions
self.train_dl_iter = None
self.train_dl = None
self.valid_dl_iter = None
self.valid_dl = None
self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names
# auto splitting validation from training, if dataset is passed in
self.split_valid_from_train = split_valid_from_train
assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'
self.split_valid_fraction = split_valid_fraction
self.split_random_seed = split_random_seed
# be able to finely customize learning rate, weight decay
# per unet
lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))
for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):
if use_lion:
optimizer = Lion(
unet.parameters(),
lr = unet_lr,
betas = (beta1, beta2)
)
else:
optimizer = Adam(
unet.parameters(),
lr = unet_lr,
eps = unet_eps,
betas = (beta1, beta2),
**kwargs
)
if self.use_ema:
self.ema_unets.append(EMA(unet, **ema_kwargs))
scaler = GradScaler(enabled = grad_scaler_enabled)
scheduler = warmup_scheduler = None
if exists(unet_cosine_decay_max_steps):
scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)
if exists(unet_warmup_steps):
warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)
if not exists(scheduler):
scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)
# set on object
setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers
setattr(self, f'scaler{ind}', scaler)
setattr(self, f'scheduler{ind}', scheduler)
setattr(self, f'warmup{ind}', warmup_scheduler)
# gradient clipping if needed
self.max_grad_norm = max_grad_norm
# step tracker and misc
self.register_buffer('steps', torch.tensor([0] * self.num_unets))
self.verbose = verbose
# automatic set devices based on what accelerator decided
self.imagen.to(self.device)
self.to(self.device)
# checkpointing
assert not (exists(checkpoint_path) ^ exists(checkpoint_every))
self.checkpoint_path = checkpoint_path
self.checkpoint_every = checkpoint_every
self.max_checkpoints_keep = max_checkpoints_keep
self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main
if exists(checkpoint_path) and self.can_checkpoint:
bucket = url_to_bucket(checkpoint_path)
if not self.fs.exists(bucket):
self.fs.mkdir(bucket)
self.load_from_checkpoint_folder()
# only allowing training for unet
self.only_train_unet_number = only_train_unet_number
self.prepared = False
def prepare(self):
        assert not self.prepared, 'The trainer is already prepared'
self.validate_and_set_unet_being_trained(self.only_train_unet_number)
self.prepared = True
# computed values
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
@property
def unwrapped_unet(self):
return self.accelerator.unwrap_model(self.unet_being_trained)
# optimizer helper functions
def get_lr(self, unet_number):
self.validate_unet_number(unet_number)
unet_index = unet_number - 1
optim = getattr(self, f'optim{unet_index}')
return optim.param_groups[0]['lr']
# function for allowing only one unet from being trained at a time
def validate_and_set_unet_being_trained(self, unet_number = None):
if exists(unet_number):
self.validate_unet_number(unet_number)
            assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you can only train one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'
self.only_train_unet_number = unet_number
self.imagen.only_train_unet_number = unet_number
if not exists(unet_number):
return
self.wrap_unet(unet_number)
def wrap_unet(self, unet_number):
if hasattr(self, 'one_unet_wrapped'):
return
unet = self.imagen.get_unet(unet_number)
unet_index = unet_number - 1
optimizer = getattr(self, f'optim{unet_index}')
scheduler = getattr(self, f'scheduler{unet_index}')
if self.train_dl:
self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)
else:
self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)
if exists(scheduler):
scheduler = self.accelerator.prepare(scheduler)
setattr(self, f'optim{unet_index}', optimizer)
setattr(self, f'scheduler{unet_index}', scheduler)
self.one_unet_wrapped = True
# hacking accelerator due to not having separate gradscaler per optimizer
def set_accelerator_scaler(self, unet_number):
unet_number = self.validate_unet_number(unet_number)
scaler = getattr(self, f'scaler{unet_number - 1}')
self.accelerator.scaler = scaler
for optimizer in self.accelerator._optimizers:
optimizer.scaler = scaler
# helper print
def print(self, msg):
if not self.is_main:
return
if not self.verbose:
return
return self.accelerator.print(msg)
# validating the unet number
def validate_unet_number(self, unet_number = None):
if self.num_unets == 1:
unet_number = default(unet_number, 1)
assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'
return unet_number
# number of training steps taken
def num_steps_taken(self, unet_number = None):
if self.num_unets == 1:
unet_number = default(unet_number, 1)
return self.steps[unet_number - 1].item()
def print_untrained_unets(self):
print_final_error = False
for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.imagen.unets)):
if steps > 0 or isinstance(unet, NullUnet):
continue
self.print(f'unet {ind + 1} has not been trained')
print_final_error = True
if print_final_error:
self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')
# data related functions
def add_train_dataloader(self, dl = None):
if not exists(dl):
return
assert not exists(self.train_dl), 'training dataloader was already added'
        assert not self.prepared, 'You need to add the dataloader before preparation'
self.train_dl = dl
def add_valid_dataloader(self, dl):
if not exists(dl):
return
assert not exists(self.valid_dl), 'validation dataloader was already added'
        assert not self.prepared, 'You need to add the dataloader before preparation'
self.valid_dl = dl
def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):
if not exists(ds):
return
assert not exists(self.train_dl), 'training dataloader was already added'
valid_ds = None
if self.split_valid_from_train:
train_size = int((1 - self.split_valid_fraction) * len(ds))
valid_size = len(ds) - train_size
ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))
            self.print(f'training with dataset of {len(ds)} samples and validating with randomly split {len(valid_ds)} samples')
dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)
self.add_train_dataloader(dl)
if not self.split_valid_from_train:
return
self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)
def add_valid_dataset(self, ds, *, batch_size, **dl_kwargs):
if not exists(ds):
return
assert not exists(self.valid_dl), 'validation dataloader was already added'
dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)
self.add_valid_dataloader(dl)
def create_train_iter(self):
assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'
if exists(self.train_dl_iter):
return
self.train_dl_iter = cycle(self.train_dl)
def create_valid_iter(self):
assert exists(self.valid_dl), 'validation dataloader has not been registered with the trainer yet'
if exists(self.valid_dl_iter):
return
self.valid_dl_iter = cycle(self.valid_dl)
def train_step(self, *, unet_number = None, **kwargs):
if not self.prepared:
self.prepare()
self.create_train_iter()
kwargs = {'unet_number': unet_number, **kwargs}
loss = self.step_with_dl_iter(self.train_dl_iter, **kwargs)
self.update(unet_number = unet_number)
return loss
@torch.no_grad()
@eval_decorator
def valid_step(self, **kwargs):
if not self.prepared:
self.prepare()
self.create_valid_iter()
context = self.use_ema_unets if kwargs.pop('use_ema_unets', False) else nullcontext
with context():
loss = self.step_with_dl_iter(self.valid_dl_iter, **kwargs)
return loss
def step_with_dl_iter(self, dl_iter, **kwargs):
dl_tuple_output = cast_tuple(next(dl_iter))
model_input = dict(list(zip(self.dl_tuple_output_keywords_names, dl_tuple_output)))
loss = self.forward(**{**kwargs, **model_input})
return loss
# checkpointing functions
@property
def all_checkpoints_sorted(self):
glob_pattern = os.path.join(self.checkpoint_path, '*.pt')
checkpoints = self.fs.glob(glob_pattern)
sorted_checkpoints = sorted(checkpoints, key = lambda x: int(str(x).split('.')[-2]), reverse = True)
return sorted_checkpoints
def load_from_checkpoint_folder(self, last_total_steps = -1):
if last_total_steps != -1:
filepath = os.path.join(self.checkpoint_path, f'checkpoint.{last_total_steps}.pt')
self.load(filepath)
return
sorted_checkpoints = self.all_checkpoints_sorted
if len(sorted_checkpoints) == 0:
self.print(f'no checkpoints found to load from at {self.checkpoint_path}')
return
last_checkpoint = sorted_checkpoints[0]
self.load(last_checkpoint)
def save_to_checkpoint_folder(self):
self.accelerator.wait_for_everyone()
if not self.can_checkpoint:
return
total_steps = int(self.steps.sum().item())
filepath = os.path.join(self.checkpoint_path, f'checkpoint.{total_steps}.pt')
self.save(filepath)
if self.max_checkpoints_keep <= 0:
return
sorted_checkpoints = self.all_checkpoints_sorted
checkpoints_to_discard = sorted_checkpoints[self.max_checkpoints_keep:]
for checkpoint in checkpoints_to_discard:
self.fs.rm(checkpoint)
# saving and loading functions
def save(
self,
path,
overwrite = True,
without_optim_and_sched = False,
**kwargs
):
self.accelerator.wait_for_everyone()
if not self.can_checkpoint:
return
fs = self.fs
assert not (fs.exists(path) and not overwrite)
self.reset_ema_unets_all_one_device()
save_obj = dict(
model = self.imagen.state_dict(),
version = __version__,
steps = self.steps.cpu(),
**kwargs
)
save_optim_and_sched_iter = range(0, self.num_unets) if not without_optim_and_sched else tuple()
for ind in save_optim_and_sched_iter:
scaler_key = f'scaler{ind}'
optimizer_key = f'optim{ind}'
scheduler_key = f'scheduler{ind}'
warmup_scheduler_key = f'warmup{ind}'
scaler = getattr(self, scaler_key)
optimizer = getattr(self, optimizer_key)
scheduler = getattr(self, scheduler_key)
warmup_scheduler = getattr(self, warmup_scheduler_key)
if exists(scheduler):
save_obj = {**save_obj, scheduler_key: scheduler.state_dict()}
if exists(warmup_scheduler):
save_obj = {**save_obj, warmup_scheduler_key: warmup_scheduler.state_dict()}
save_obj = {**save_obj, scaler_key: scaler.state_dict(), optimizer_key: optimizer.state_dict()}
if self.use_ema:
save_obj = {**save_obj, 'ema': self.ema_unets.state_dict()}
# determine if imagen config is available
if hasattr(self.imagen, '_config'):
            self.print(f'this checkpoint is commandable from the CLI - "imagen sample --model {str(path)} \"<prompt>\""')
save_obj = {
**save_obj,
'imagen_type': 'elucidated' if self.is_elucidated else 'original',
'imagen_params': self.imagen._config
}
#save to path
with fs.open(path, 'wb') as f:
torch.save(save_obj, f)
self.print(f'checkpoint saved to {path}')
def load(self, path, only_model = False, strict = True, noop_if_not_exist = False):
fs = self.fs
if noop_if_not_exist and not fs.exists(path):
self.print(f'trainer checkpoint not found at {str(path)}')
return
assert fs.exists(path), f'{path} does not exist'
self.reset_ema_unets_all_one_device()
# to avoid extra GPU memory usage in main process when using Accelerate
with fs.open(path) as f:
loaded_obj = torch.load(f, map_location='cpu')
if version.parse(__version__) != version.parse(loaded_obj['version']):
self.print(f'loading saved imagen at version {loaded_obj["version"]}, but current package version is {__version__}')
try:
self.imagen.load_state_dict(loaded_obj['model'], strict = strict)
except RuntimeError:
print("Failed loading state dict. Trying partial load")
self.imagen.load_state_dict(restore_parts(self.imagen.state_dict(),
loaded_obj['model']))
if only_model:
return loaded_obj
self.steps.copy_(loaded_obj['steps'])
for ind in range(0, self.num_unets):
scaler_key = f'scaler{ind}'
optimizer_key = f'optim{ind}'
scheduler_key = f'scheduler{ind}'
warmup_scheduler_key = f'warmup{ind}'
scaler = getattr(self, scaler_key)
optimizer = getattr(self, optimizer_key)
scheduler = getattr(self, scheduler_key)
warmup_scheduler = getattr(self, warmup_scheduler_key)
if exists(scheduler) and scheduler_key in loaded_obj:
scheduler.load_state_dict(loaded_obj[scheduler_key])
if exists(warmup_scheduler) and warmup_scheduler_key in loaded_obj:
warmup_scheduler.load_state_dict(loaded_obj[warmup_scheduler_key])
if exists(optimizer):
try:
optimizer.load_state_dict(loaded_obj[optimizer_key])
scaler.load_state_dict(loaded_obj[scaler_key])
                except Exception:
self.print('could not load optimizer and scaler, possibly because you have turned on mixed precision training since the last run. resuming with new optimizer and scalers')
if self.use_ema:
assert 'ema' in loaded_obj
try:
self.ema_unets.load_state_dict(loaded_obj['ema'], strict = strict)
except RuntimeError:
print("Failed loading state dict. Trying partial load")
self.ema_unets.load_state_dict(restore_parts(self.ema_unets.state_dict(),
loaded_obj['ema']))
self.print(f'checkpoint loaded from {path}')
return loaded_obj
# managing ema unets and their devices
@property
def unets(self):
return nn.ModuleList([ema.ema_model for ema in self.ema_unets])
def get_ema_unet(self, unet_number = None):
if not self.use_ema:
return
unet_number = self.validate_unet_number(unet_number)
index = unet_number - 1
if isinstance(self.unets, nn.ModuleList):
unets_list = [unet for unet in self.ema_unets]
delattr(self, 'ema_unets')
self.ema_unets = unets_list
if index != self.ema_unet_being_trained_index:
for unet_index, unet in enumerate(self.ema_unets):
unet.to(self.device if unet_index == index else 'cpu')
self.ema_unet_being_trained_index = index
return self.ema_unets[index]
def reset_ema_unets_all_one_device(self, device = None):
if not self.use_ema:
return
device = default(device, self.device)
self.ema_unets = nn.ModuleList([*self.ema_unets])
self.ema_unets.to(device)
self.ema_unet_being_trained_index = -1
@torch.no_grad()
@contextmanager
def use_ema_unets(self):
if not self.use_ema:
output = yield
return output
self.reset_ema_unets_all_one_device()
self.imagen.reset_unets_all_one_device()
self.unets.eval()
trainable_unets = self.imagen.unets
self.imagen.unets = self.unets # swap in exponential moving averaged unets for sampling
output = yield
self.imagen.unets = trainable_unets # restore original training unets
# cast the ema_model unets back to original device
for ema in self.ema_unets:
ema.restore_ema_model_device()
return output
def print_unet_devices(self):
self.print('unet devices:')
for i, unet in enumerate(self.imagen.unets):
device = next(unet.parameters()).device
self.print(f'\tunet {i}: {device}')
if not self.use_ema:
return
self.print('\nema unet devices:')
for i, ema_unet in enumerate(self.ema_unets):
device = next(ema_unet.parameters()).device
self.print(f'\tema unet {i}: {device}')
# overriding state dict functions
def state_dict(self, *args, **kwargs):
self.reset_ema_unets_all_one_device()
return super().state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.reset_ema_unets_all_one_device()
return super().load_state_dict(*args, **kwargs)
# encoding text functions
def encode_text(self, text, **kwargs):
return self.imagen.encode_text(text, **kwargs)
# forwarding functions and gradient step updates
def update(self, unet_number = None):
unet_number = self.validate_unet_number(unet_number)
self.validate_and_set_unet_being_trained(unet_number)
self.set_accelerator_scaler(unet_number)
index = unet_number - 1
unet = self.unet_being_trained
optimizer = getattr(self, f'optim{index}')
scaler = getattr(self, f'scaler{index}')
scheduler = getattr(self, f'scheduler{index}')
warmup_scheduler = getattr(self, f'warmup{index}')
# set the grad scaler on the accelerator, since we are managing one per u-net
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(unet.parameters(), self.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if self.use_ema:
ema_unet = self.get_ema_unet(unet_number)
ema_unet.update()
# scheduler, if needed
maybe_warmup_context = nullcontext() if not exists(warmup_scheduler) else warmup_scheduler.dampening()
with maybe_warmup_context:
if exists(scheduler) and not self.accelerator.optimizer_step_was_skipped: # recommended in the docs
scheduler.step()
self.steps += F.one_hot(torch.tensor(unet_number - 1, device = self.steps.device), num_classes = len(self.steps))
if not exists(self.checkpoint_path):
return
total_steps = int(self.steps.sum().item())
if total_steps % self.checkpoint_every:
return
self.save_to_checkpoint_folder()
@torch.no_grad()
@cast_torch_tensor
@imagen_sample_in_chunks
def sample(self, *args, **kwargs):
context = nullcontext if kwargs.pop('use_non_ema', False) else self.use_ema_unets
self.print_untrained_unets()
if not self.is_main:
kwargs['use_tqdm'] = False
with context():
output = self.imagen.sample(*args, device = self.device, **kwargs)
return output
@partial(cast_torch_tensor, cast_fp16 = True)
def forward(
self,
*args,
unet_number = None,
max_batch_size = None,
**kwargs
):
unet_number = self.validate_unet_number(unet_number)
self.validate_and_set_unet_being_trained(unet_number)
self.set_accelerator_scaler(unet_number)
assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train unet #{self.only_train_unet_number}'
total_loss = 0.
for chunk_size_frac, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs):
with self.accelerator.autocast():
loss = self.imagen(*chunked_args, unet = self.unet_being_trained, unet_number = unet_number, **chunked_kwargs)
loss = loss * chunk_size_frac
total_loss += loss.item()
if self.training:
self.accelerator.backward(loss)
return total_loss
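# usage sketch (illustrative, not part of the original file), assuming `imagen` and `dataset`
# were constructed elsewhere; batch sizes and step counts are placeholders:
#
#     trainer = ImagenTrainer(imagen, lr = 1e-4)
#     trainer.add_train_dataset(dataset, batch_size = 16)
#     for step in range(100000):
#         loss = trainer.train_step(unet_number = 1, max_batch_size = 4)
#         if not (step % 1000):
#             trainer.save('./checkpoint.pt')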
|
imagen-pytorch-main
|
imagen_pytorch/trainer.py
|
import math
import copy
from random import random
from beartype.typing import List, Union
from beartype import beartype
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from torch import nn, einsum
from torch.cuda.amp import autocast
from torch.special import expm1
import torchvision.transforms as T
import kornia.augmentation as K
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
from imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def divisible_by(numer, denom):
return (numer % denom) == 0
def first(arr, d = None):
if len(arr) == 0:
return d
return arr[0]
def maybe(fn):
@wraps(fn)
def inner(x):
if not exists(x):
return x
return fn(x)
return inner
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def cast_tuple(val, length = None):
if isinstance(val, list):
val = tuple(val)
output = val if isinstance(val, tuple) else ((val,) * default(length, 1))
if exists(length):
assert len(output) == length
return output
def compact(input_dict):
return {key: value for key, value in input_dict.items() if exists(value)}
def maybe_transform_dict_key(input_dict, key, fn):
if key not in input_dict:
return input_dict
copied_dict = input_dict.copy()
copied_dict[key] = fn(copied_dict[key])
return copied_dict
def cast_uint8_images_to_float(images):
if not images.dtype == torch.uint8:
return images
return images / 255
def module_device(module):
return next(module.parameters()).device
def zero_init_(m):
nn.init.zeros_(m.weight)
if exists(m.bias):
nn.init.zeros_(m.bias)
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def pad_tuple_to_length(t, length, fillvalue = None):
remain_length = length - len(t)
if remain_length <= 0:
return t
return (*t, *((fillvalue,) * remain_length))
# helper classes
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, *args, **kwargs):
return x
# tensor helpers
def log(t, eps: float = 1e-12):
return torch.log(t.clamp(min = eps))
def l2norm(t):
return F.normalize(t, dim = -1)
def right_pad_dims_to(x, t):
padding_dims = x.ndim - t.ndim
if padding_dims <= 0:
return t
return t.view(*t.shape, *((1,) * padding_dims))
def masked_mean(t, *, dim, mask = None):
if not exists(mask):
return t.mean(dim = dim)
denom = mask.sum(dim = dim, keepdim = True)
mask = rearrange(mask, 'b n -> b n 1')
masked_t = t.masked_fill(~mask, 0.)
return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)
def resize_image_to(
image,
target_image_size,
clamp_range = None,
mode = 'nearest'
):
orig_image_size = image.shape[-1]
if orig_image_size == target_image_size:
return image
out = F.interpolate(image, target_image_size, mode = mode)
if exists(clamp_range):
out = out.clamp(*clamp_range)
return out
def calc_all_frame_dims(
downsample_factors: List[int],
frames
):
if not exists(frames):
return (tuple(),) * len(downsample_factors)
all_frame_dims = []
for divisor in downsample_factors:
assert divisible_by(frames, divisor)
all_frame_dims.append((frames // divisor,))
return all_frame_dims
def safe_get_tuple_index(tup, index, default = None):
if len(tup) <= index:
return default
return tup[index]
# image normalization functions
# ddpms expect images to be in the range of -1 to 1
def normalize_neg_one_to_one(img):
return img * 2 - 1
def unnormalize_zero_to_one(normed_img):
return (normed_img + 1) * 0.5
# classifier free guidance functions
def prob_mask_like(shape, prob, device):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
# gaussian diffusion with continuous time helper functions and classes
# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py
@torch.jit.script
def beta_linear_log_snr(t):
return -torch.log(expm1(1e-4 + 10 * (t ** 2)))
@torch.jit.script
def alpha_cosine_log_snr(t, s: float = 0.008):
return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version
def log_snr_to_alpha_sigma(log_snr):
return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))
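# note: since sigmoid(x) + sigmoid(-x) = 1, the (alpha, sigma) returned above satisfy
# alpha^2 + sigma^2 = 1, i.e. a variance preserving parameterization of the forward process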
class GaussianDiffusionContinuousTimes(nn.Module):
def __init__(self, *, noise_schedule, timesteps = 1000):
super().__init__()
if noise_schedule == "linear":
self.log_snr = beta_linear_log_snr
elif noise_schedule == "cosine":
self.log_snr = alpha_cosine_log_snr
else:
raise ValueError(f'invalid noise schedule {noise_schedule}')
self.num_timesteps = timesteps
def get_times(self, batch_size, noise_level, *, device):
return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)
def sample_random_times(self, batch_size, *, device):
return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)
def get_condition(self, times):
return maybe(self.log_snr)(times)
def get_sampling_timesteps(self, batch, *, device):
times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)
times = repeat(times, 't -> b t', b = batch)
times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
times = times.unbind(dim = -1)
return times
def q_posterior(self, x_start, x_t, t, *, t_next = None):
t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))
""" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material """
log_snr = self.log_snr(t)
log_snr_next = self.log_snr(t_next)
log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
# c - as defined near eq 33
c = -expm1(log_snr - log_snr_next)
posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)
# following (eq. 33)
posterior_variance = (sigma_next ** 2) * c
posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def q_sample(self, x_start, t, noise = None):
dtype = x_start.dtype
if isinstance(t, float):
batch = x_start.shape[0]
t = torch.full((batch,), t, device = x_start.device, dtype = dtype)
noise = default(noise, lambda: torch.randn_like(x_start))
log_snr = self.log_snr(t).type(dtype)
log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)
return alpha * x_start + sigma * noise, log_snr, alpha, sigma
def q_sample_from_to(self, x_from, from_t, to_t, noise = None):
shape, device, dtype = x_from.shape, x_from.device, x_from.dtype
batch = shape[0]
if isinstance(from_t, float):
from_t = torch.full((batch,), from_t, device = device, dtype = dtype)
if isinstance(to_t, float):
to_t = torch.full((batch,), to_t, device = device, dtype = dtype)
noise = default(noise, lambda: torch.randn_like(x_from))
log_snr = self.log_snr(from_t)
log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)
log_snr_to = self.log_snr(to_t)
log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)
alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)
return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha
def predict_start_from_v(self, x_t, t, v):
log_snr = self.log_snr(t)
log_snr = right_pad_dims_to(x_t, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
return alpha * x_t - sigma * v
def predict_start_from_noise(self, x_t, t, noise):
log_snr = self.log_snr(t)
log_snr = right_pad_dims_to(x_t, log_snr)
alpha, sigma = log_snr_to_alpha_sigma(log_snr)
return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)
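# usage sketch (illustrative, not part of the original file): noising a placeholder batch of
# images at random continuous times with the cosine schedule.
if __name__ == '__main__':
    schedule = GaussianDiffusionContinuousTimes(noise_schedule = 'cosine', timesteps = 1000)
    x_start = torch.randn(4, 3, 64, 64)
    times = schedule.sample_random_times(4, device = x_start.device)
    x_noised, log_snr, alpha, sigma = schedule.q_sample(x_start, times)
    print(x_noised.shape, log_snr.shape)  # (4, 3, 64, 64) and (4,)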
# norms and residuals
class LayerNorm(nn.Module):
def __init__(self, feats, stable = False, dim = -1):
super().__init__()
self.stable = stable
self.dim = dim
self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))
def forward(self, x):
dtype, dim = x.dtype, self.dim
if self.stable:
x = x / x.amax(dim = dim, keepdim = True).detach()
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = dim, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = dim, keepdim = True)
return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)
ChanLayerNorm = partial(LayerNorm, dim = -3)
class Always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class Parallel(nn.Module):
def __init__(self, *fns):
super().__init__()
self.fns = nn.ModuleList(fns)
def forward(self, x):
outputs = [fn(x) for fn in self.fns]
return sum(outputs)
# attention pooling
class PerceiverAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.norm = nn.LayerNorm(dim)
self.norm_latents = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
nn.LayerNorm(dim)
)
def forward(self, x, latents, mask = None):
x = self.norm(x)
latents = self.norm_latents(latents)
b, h = x.shape[0], self.heads
q = self.to_q(latents)
# the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to
kv_input = torch.cat((x, latents), dim = -2)
k, v = self.to_kv(kv_input).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities and masking
sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale
if exists(mask):
max_neg_value = -torch.finfo(sim.dtype).max
mask = F.pad(mask, (0, latents.shape[-2]), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.to(sim.dtype)
out = einsum('... i j, ... j d -> ... i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
class PerceiverResampler(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
num_latents = 64,
num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence
max_seq_len = 512,
ff_mult = 4
):
super().__init__()
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.latents = nn.Parameter(torch.randn(num_latents, dim))
self.to_latents_from_mean_pooled_seq = None
if num_latents_mean_pooled > 0:
self.to_latents_from_mean_pooled_seq = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, dim * num_latents_mean_pooled),
Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)
)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, mask = None):
n, device = x.shape[1], x.device
pos_emb = self.pos_emb(torch.arange(n, device = device))
x_with_pos = x + pos_emb
latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])
if exists(self.to_latents_from_mean_pooled_seq):
meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))
meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)
latents = torch.cat((meanpooled_latents, latents), dim = -2)
for attn, ff in self.layers:
latents = attn(x_with_pos, latents, mask = mask) + latents
latents = ff(latents) + latents
return latents
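# usage sketch (illustrative, not part of the original file) - pooling a variable length sequence
# of text token embeddings into a fixed set of latents (shapes below are placeholders):
#
#     resampler = PerceiverResampler(dim = 512, depth = 2)
#     tokens = torch.randn(2, 256, 512)   # (batch, seq, dim)
#     latents = resampler(tokens)         # (batch, num_latents + num_latents_mean_pooled, dim)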
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8,
context_dim = None,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context = None, mask = None, attn_bias = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# add text conditioning, if present
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
k = torch.cat((ck, k), dim = -2)
v = torch.cat((cv, v), dim = -2)
# qk rmsnorm
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# calculate query / key similarities
sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale
# relative positional encoding (T5 style)
if exists(attn_bias):
sim = sim + attn_bias
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.to(sim.dtype)
# aggregate values
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# decoder
def Upsample(dim, dim_out = None):
dim_out = default(dim_out, dim)
return nn.Sequential(
nn.Upsample(scale_factor = 2, mode = 'nearest'),
nn.Conv2d(dim, dim_out, 3, padding = 1)
)
class PixelShuffleUpsample(nn.Module):
"""
code shared by @MalumaDev at DALLE2-pytorch for addressing checkerboard artifacts
https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf
"""
def __init__(self, dim, dim_out = None):
super().__init__()
dim_out = default(dim_out, dim)
conv = nn.Conv2d(dim, dim_out * 4, 1)
self.net = nn.Sequential(
conv,
nn.SiLU(),
nn.PixelShuffle(2)
)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, h, w = conv.weight.shape
conv_weight = torch.empty(o // 4, i, h, w)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
return self.net(x)
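# note on the init above: the kaiming-initialized weight is repeated 4x along the output channels
# before the pixel shuffle, so all four pixels of each 2x2 output block start from identical filters -
# this is the trick from the paper linked in the docstring for avoiding checkerboard artifacts at init.
# shape sketch (hypothetical values): (b, dim, h, w) -> 1x1 conv -> (b, dim_out * 4, h, w) -> pixel shuffle -> (b, dim_out, 2h, 2w)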
def Downsample(dim, dim_out = None):
# https://arxiv.org/abs/2208.03641 shows this to be the best way to downsample
# named SP-conv in the paper, but basically a pixel unshuffle
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),
nn.Conv2d(dim * 4, dim_out, 1)
)
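# shape sketch (hypothetical values): the rearrange is a space-to-depth (pixel unshuffle) step,
# (b, c, h, w) -> (b, c * 4, h / 2, w / 2), followed by a 1x1 conv to dim_out - no information is
# discarded before the learned projection, unlike a strided conv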
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim = -1)
class LearnedSinusoidalPosEmb(nn.Module):
""" following @crowsonkb 's lead with learned sinusoidal pos emb """
""" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 """
def __init__(self, dim):
super().__init__()
assert (dim % 2) == 0
half_dim = dim // 2
self.weights = nn.Parameter(torch.randn(half_dim))
def forward(self, x):
x = rearrange(x, 'b -> b 1')
freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)
fouriered = torch.cat((x, fouriered), dim = -1)
return fouriered
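# worked example (hypothetical values): with dim = 16, the module keeps 8 learned frequencies and
# maps a batch of times (b,) to features of size dim + 1 - the raw time plus sin / cos of each frequency
#
#   pos_emb = LearnedSinusoidalPosEmb(16)
#   t = torch.rand(4)
#   feats = pos_emb(t)    # (4, 17), matching the `learned_sinu_pos_emb_dim + 1` input size used in the unet below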
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
norm = True
):
super().__init__()
self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()
self.activation = nn.SiLU()
self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)
def forward(self, x, scale_shift = None):
x = self.groupnorm(x)
if exists(scale_shift):
scale, shift = scale_shift
x = x * (scale + 1) + shift
x = self.activation(x)
return self.project(x)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
cond_dim = None,
time_cond_dim = None,
groups = 8,
linear_attn = False,
use_gca = False,
squeeze_excite = False,
**attn_kwargs
):
super().__init__()
self.time_mlp = None
if exists(time_cond_dim):
self.time_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(time_cond_dim, dim_out * 2)
)
self.cross_attn = None
if exists(cond_dim):
attn_klass = CrossAttention if not linear_attn else LinearCrossAttention
self.cross_attn = attn_klass(
dim = dim_out,
context_dim = cond_dim,
**attn_kwargs
)
self.block1 = Block(dim, dim_out, groups = groups)
self.block2 = Block(dim_out, dim_out, groups = groups)
self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()
def forward(self, x, time_emb = None, cond = None):
scale_shift = None
if exists(self.time_mlp) and exists(time_emb):
time_emb = self.time_mlp(time_emb)
time_emb = rearrange(time_emb, 'b c -> b c 1 1')
scale_shift = time_emb.chunk(2, dim = 1)
h = self.block1(x)
if exists(self.cross_attn):
assert exists(cond)
h = rearrange(h, 'b c h w -> b h w c')
h, ps = pack([h], 'b * c')
h = self.cross_attn(h, context = cond) + h
h, = unpack(h, ps, 'b * c')
h = rearrange(h, 'b h w c -> b c h w')
h = self.block2(h, scale_shift = scale_shift)
h = h * self.gca(h)
return h + self.res_conv(x)
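# conditioning sketch (hypothetical values): the time embedding is projected to 2 * dim_out and split
# into a (scale, shift) pair that modulates the groupnormed activations in block2,
# i.e. x <- x * (scale + 1) + shift, a FiLM-style conditioning
#
#   block = ResnetBlock(64, 128, time_cond_dim = 256)
#   x = torch.randn(2, 64, 32, 32)
#   t = torch.randn(2, 256)
#   out = block(x, time_emb = t)    # (2, 128, 32, 32)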
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim = None,
dim_head = 64,
heads = 8,
norm_context = False,
scale = 8
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.norm_context = LayerNorm(context_dim) if norm_context else Identity()
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim, bias = False),
LayerNorm(dim)
)
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> b h 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# cosine sim attention
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# similarities
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
# masking
max_neg_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, max_neg_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.to(sim.dtype)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class LinearCrossAttention(CrossAttention):
def forward(self, x, context, mask = None):
b, n, device = *x.shape[:2], x.device
x = self.norm(x)
context = self.norm_context(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = self.heads), (q, k, v))
# add null key / value for classifier free guidance in prior net
nk, nv = map(lambda t: repeat(t, 'd -> (b h) 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
# masking
max_neg_value = -torch.finfo(x.dtype).max
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
mask = repeat(mask, 'b n -> (b h) n 1', h = self.heads) # repeat over heads so the mask broadcasts against the (b h) n d keys / values
k = k.masked_fill(~mask, max_neg_value)
v = v.masked_fill(~mask, 0.)
# linear attention
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)
return self.to_out(out)
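# note: compared to CrossAttention above, the linear variant never materializes the full n x m
# attention matrix - softmaxed keys are first aggregated with the values into a small d x e context
# matrix, so memory scales linearly with sequence length rather than quadratically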
class LinearAttention(nn.Module):
def __init__(
self,
dim,
dim_head = 32,
heads = 8,
dropout = 0.05,
context_dim = None,
**kwargs
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = ChanLayerNorm(dim)
self.nonlin = nn.SiLU()
self.to_q = nn.Sequential(
nn.Dropout(dropout),
nn.Conv2d(dim, inner_dim, 1, bias = False),
nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_k = nn.Sequential(
nn.Dropout(dropout),
nn.Conv2d(dim, inner_dim, 1, bias = False),
nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_v = nn.Sequential(
nn.Dropout(dropout),
nn.Conv2d(dim, inner_dim, 1, bias = False),
nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)
)
self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None
self.to_out = nn.Sequential(
nn.Conv2d(inner_dim, dim, 1, bias = False),
ChanLayerNorm(dim)
)
def forward(self, fmap, context = None):
h, x, y = self.heads, *fmap.shape[-2:]
fmap = self.norm(fmap)
q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
if exists(context):
assert exists(self.to_context)
ck, cv = self.to_context(context).chunk(2, dim = -1)
ck, cv = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (ck, cv))
k = torch.cat((k, ck), dim = -2)
v = torch.cat((v, cv), dim = -2)
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
out = self.nonlin(out)
return self.to_out(out)
class GlobalContext(nn.Module):
""" basically a superior form of squeeze-excitation that is attention-esque """
def __init__(
self,
*,
dim_in,
dim_out
):
super().__init__()
self.to_k = nn.Conv2d(dim_in, 1, 1)
hidden_dim = max(3, dim_out // 2)
self.net = nn.Sequential(
nn.Conv2d(dim_in, hidden_dim, 1),
nn.SiLU(),
nn.Conv2d(hidden_dim, dim_out, 1),
nn.Sigmoid()
)
def forward(self, x):
context = self.to_k(x)
x, context = map(lambda t: rearrange(t, 'b n ... -> b n (...)'), (x, context))
out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)
out = rearrange(out, '... -> ... 1')
return self.net(out)
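# note: GlobalContext is a squeeze-excite style gate - a single-channel softmax over all spatial
# positions pools the feature map into one vector per channel, which is then projected to a sigmoid
# gate of shape (b, dim_out, 1, 1) and multiplied back onto the resnet block output (see ResnetBlock.forward)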
def FeedForward(dim, mult = 2):
hidden_dim = int(dim * mult)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, hidden_dim, bias = False),
nn.GELU(),
LayerNorm(hidden_dim),
nn.Linear(hidden_dim, dim, bias = False)
)
def ChanFeedForward(dim, mult = 2): # in the paper, the feedforwards accompanying the self attention layers appear to use twice the channel width
hidden_dim = int(dim * mult)
return nn.Sequential(
ChanLayerNorm(dim),
nn.Conv2d(dim, hidden_dim, 1, bias = False),
nn.GELU(),
ChanLayerNorm(hidden_dim),
nn.Conv2d(hidden_dim, dim, 1, bias = False)
)
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
context_dim = None
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
FeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, context = None):
x = rearrange(x, 'b c h w -> b h w c')
x, ps = pack([x], 'b * c')
for attn, ff in self.layers:
x = attn(x, context = context) + x
x = ff(x) + x
x, = unpack(x, ps, 'b * c')
x = rearrange(x, 'b h w c -> b c h w')
return x
class LinearAttentionTransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth = 1,
heads = 8,
dim_head = 32,
ff_mult = 2,
context_dim = None,
**kwargs
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),
ChanFeedForward(dim = dim, mult = ff_mult)
]))
def forward(self, x, context = None):
for attn, ff in self.layers:
x = attn(x, context = context) + x
x = ff(x) + x
return x
class CrossEmbedLayer(nn.Module):
def __init__(
self,
dim_in,
kernel_sizes,
dim_out = None,
stride = 2
):
super().__init__()
assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
dim_out = default(dim_out, dim_in)
kernel_sizes = sorted(kernel_sizes)
num_scales = len(kernel_sizes)
# calculate the dimension at each scale
dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]
dim_scales = [*dim_scales, dim_out - sum(dim_scales)]
self.convs = nn.ModuleList([])
for kernel, dim_scale in zip(kernel_sizes, dim_scales):
self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))
def forward(self, x):
fmaps = tuple(map(lambda conv: conv(x), self.convs))
return torch.cat(fmaps, dim = 1)
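# worked example (hypothetical values): with dim_in = 3, dim_out = 128 and kernel_sizes = (3, 7, 15),
# the output channels are split across scales as [64, 32, 32] - smaller kernels get more channels -
# and the outputs of the three convs are concatenated back to 128 channels
#
#   layer = CrossEmbedLayer(3, dim_out = 128, kernel_sizes = (3, 7, 15), stride = 1)
#   out = layer(torch.randn(1, 3, 64, 64))    # (1, 128, 64, 64)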
class UpsampleCombiner(nn.Module):
def __init__(
self,
dim,
*,
enabled = False,
dim_ins = tuple(),
dim_outs = tuple()
):
super().__init__()
dim_outs = cast_tuple(dim_outs, len(dim_ins))
assert len(dim_ins) == len(dim_outs)
self.enabled = enabled
if not self.enabled:
self.dim_out = dim
return
self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])
self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)
def forward(self, x, fmaps = None):
target_size = x.shape[-1]
fmaps = default(fmaps, tuple())
if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:
return x
fmaps = [resize_image_to(fmap, target_size) for fmap in fmaps]
outs = [conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]
return torch.cat((x, *outs), dim = 1)
class Unet(nn.Module):
def __init__(
self,
*,
dim,
text_embed_dim = get_encoded_dim(DEFAULT_T5_NAME),
num_resnet_blocks = 1,
cond_dim = None,
num_image_tokens = 4,
num_time_tokens = 2,
learned_sinu_pos_emb_dim = 16,
out_dim = None,
dim_mults=(1, 2, 4, 8),
cond_images_channels = 0,
channels = 3,
channels_out = None,
attn_dim_head = 64,
attn_heads = 8,
ff_mult = 2.,
lowres_cond = False, # for cascading diffusion - https://cascaded-diffusion.github.io/
layer_attns = True,
layer_attns_depth = 1,
layer_mid_attns_depth = 1,
layer_attns_add_text_cond = True, # whether to condition the self-attention blocks with the text embeddings, as described in Appendix D.3.1
attend_at_middle = True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)
layer_cross_attns = True,
use_linear_attn = False,
use_linear_cross_attn = False,
cond_on_text = True,
max_text_len = 256,
init_dim = None,
resnet_groups = 8,
init_conv_kernel_size = 7, # kernel size of initial conv, if not using cross embed
init_cross_embed = True,
init_cross_embed_kernel_sizes = (3, 7, 15),
cross_embed_downsample = False,
cross_embed_downsample_kernel_sizes = (2, 4),
attn_pool_text = True,
attn_pool_num_latents = 32,
dropout = 0.,
memory_efficient = False,
init_conv_to_final_conv_residual = False,
use_global_context_attn = True,
scale_skip_connection = True,
final_resnet_block = True,
final_conv_kernel_size = 3,
self_cond = False,
resize_mode = 'nearest',
combine_upsample_fmaps = False, # combine feature maps from all upsample blocks, used in unet squared successfully
pixel_shuffle_upsample = True, # may address checkerboard artifacts
):
super().__init__()
# guide researchers
assert attn_heads > 1, 'you need to have more than 1 attention head, ideally at least 4 or 8'
if dim < 128:
print_once('The base dimension of your u-net should ideally be no smaller than 128, as recommended by a professional DDPM trainer https://nonint.com/2022/05/04/friends-dont-let-friends-train-small-diffusion-models/')
# save locals to take care of some hyperparameters for cascading DDPM
self._locals = locals()
self._locals.pop('self', None)
self._locals.pop('__class__', None)
# determine dimensions
self.channels = channels
self.channels_out = default(channels_out, channels)
# (1) in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis
# (2) in self conditioning, one appends the predicted x0 (x_start)
init_channels = channels * (1 + int(lowres_cond) + int(self_cond))
init_dim = default(init_dim, dim)
self.self_cond = self_cond
# optional image conditioning
self.has_cond_image = cond_images_channels > 0
self.cond_images_channels = cond_images_channels
init_channels += cond_images_channels
# initial convolution
self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1) if init_cross_embed else nn.Conv2d(init_channels, init_dim, init_conv_kernel_size, padding = init_conv_kernel_size // 2)
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
# time conditioning
cond_dim = default(cond_dim, dim)
time_cond_dim = dim * 4 * (2 if lowres_cond else 1)
# embedding time for log(snr) noise from continuous version
sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim)
sinu_pos_emb_input_dim = learned_sinu_pos_emb_dim + 1
self.to_time_hiddens = nn.Sequential(
sinu_pos_emb,
nn.Linear(sinu_pos_emb_input_dim, time_cond_dim),
nn.SiLU()
)
self.to_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
# project to time tokens as well as time hiddens
self.to_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# low res aug noise conditioning
self.lowres_cond = lowres_cond
if lowres_cond:
self.to_lowres_time_hiddens = nn.Sequential(
LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim),
nn.Linear(learned_sinu_pos_emb_dim + 1, time_cond_dim),
nn.SiLU()
)
self.to_lowres_time_cond = nn.Sequential(
nn.Linear(time_cond_dim, time_cond_dim)
)
self.to_lowres_time_tokens = nn.Sequential(
nn.Linear(time_cond_dim, cond_dim * num_time_tokens),
Rearrange('b (r d) -> b r d', r = num_time_tokens)
)
# normalizations
self.norm_cond = nn.LayerNorm(cond_dim)
# text encoding conditioning (optional)
self.text_to_cond = None
if cond_on_text:
assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text is True'
self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)
# finer control over whether to condition on text encodings
self.cond_on_text = cond_on_text
# attention pooling
self.attn_pool = PerceiverResampler(dim = cond_dim, depth = 2, dim_head = attn_dim_head, heads = attn_heads, num_latents = attn_pool_num_latents) if attn_pool_text else None
# for classifier free guidance
self.max_text_len = max_text_len
self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))
self.null_text_hidden = nn.Parameter(torch.randn(1, time_cond_dim))
# for non-attention based text conditioning at all points in the network where time is also conditioned
self.to_text_non_attn_cond = None
if cond_on_text:
self.to_text_non_attn_cond = nn.Sequential(
nn.LayerNorm(cond_dim),
nn.Linear(cond_dim, time_cond_dim),
nn.SiLU(),
nn.Linear(time_cond_dim, time_cond_dim)
)
# attention related params
attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)
num_layers = len(in_out)
# resnet block klass
num_resnet_blocks = cast_tuple(num_resnet_blocks, num_layers)
resnet_groups = cast_tuple(resnet_groups, num_layers)
resnet_klass = partial(ResnetBlock, **attn_kwargs)
layer_attns = cast_tuple(layer_attns, num_layers)
layer_attns_depth = cast_tuple(layer_attns_depth, num_layers)
layer_cross_attns = cast_tuple(layer_cross_attns, num_layers)
use_linear_attn = cast_tuple(use_linear_attn, num_layers)
use_linear_cross_attn = cast_tuple(use_linear_cross_attn, num_layers)
assert all([layers == num_layers for layers in list(map(len, (resnet_groups, layer_attns, layer_cross_attns)))])
# downsample klass
downsample_klass = Downsample
if cross_embed_downsample:
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)
# initial resnet block (for memory efficient unet)
self.init_resnet_block = resnet_klass(init_dim, init_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = use_global_context_attn) if memory_efficient else None
# scale for resnet skip connections
self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
layer_params = [num_resnet_blocks, resnet_groups, layer_attns, layer_attns_depth, layer_cross_attns, use_linear_attn, use_linear_cross_attn]
reversed_layer_params = list(map(reversed, layer_params))
# downsampling layers
skip_connect_dims = [] # keep track of skip connection dimensions
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(in_out, *layer_params)):
is_last = ind >= (num_resolutions - 1)
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
if layer_attn:
transformer_block_klass = TransformerBlock
elif layer_use_linear_attn:
transformer_block_klass = LinearAttentionTransformerBlock
else:
transformer_block_klass = Identity
current_dim = dim_in
# whether to pre-downsample, from memory efficient unet
pre_downsample = None
if memory_efficient:
pre_downsample = downsample_klass(dim_in, dim_out)
current_dim = dim_out
skip_connect_dims.append(current_dim)
# whether to do post-downsample, for non-memory efficient unet
post_downsample = None
if not memory_efficient:
post_downsample = downsample_klass(current_dim, dim_out) if not is_last else Parallel(nn.Conv2d(dim_in, dim_out, 3, padding = 1), nn.Conv2d(dim_in, dim_out, 1))
self.downs.append(nn.ModuleList([
pre_downsample,
resnet_klass(current_dim, current_dim, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(current_dim, current_dim, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = current_dim, depth = layer_attn_depth, ff_mult = ff_mult, context_dim = cond_dim, **attn_kwargs),
post_downsample
]))
# middle layers
mid_dim = dims[-1]
self.mid_block1 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
self.mid_attn = TransformerBlock(mid_dim, depth = layer_mid_attns_depth, **attn_kwargs) if attend_at_middle else None
self.mid_block2 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])
# upsample klass
upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample
# upsampling layers
upsample_fmap_dims = []
for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(reversed(in_out), *reversed_layer_params)):
is_last = ind == (len(in_out) - 1)
layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None
if layer_attn:
transformer_block_klass = TransformerBlock
elif layer_use_linear_attn:
transformer_block_klass = LinearAttentionTransformerBlock
else:
transformer_block_klass = Identity
skip_connect_dim = skip_connect_dims.pop()
upsample_fmap_dims.append(dim_out)
self.ups.append(nn.ModuleList([
resnet_klass(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),
nn.ModuleList([ResnetBlock(dim_out + skip_connect_dim, dim_out, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),
transformer_block_klass(dim = dim_out, depth = layer_attn_depth, ff_mult = ff_mult, context_dim = cond_dim, **attn_kwargs),
upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else Identity()
]))
# whether to combine feature maps from all upsample blocks before final resnet block out
self.upsample_combiner = UpsampleCombiner(
dim = dim,
enabled = combine_upsample_fmaps,
dim_ins = upsample_fmap_dims,
dim_outs = dim
)
# whether to do a final residual from initial conv to the final resnet block out
self.init_conv_to_final_conv_residual = init_conv_to_final_conv_residual
final_conv_dim = self.upsample_combiner.dim_out + (dim if init_conv_to_final_conv_residual else 0)
# final optional resnet block and convolution out
self.final_res_block = ResnetBlock(final_conv_dim, dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = True) if final_resnet_block else None
final_conv_dim_in = dim if final_resnet_block else final_conv_dim
final_conv_dim_in += (channels if lowres_cond else 0)
self.final_conv = nn.Conv2d(final_conv_dim_in, self.channels_out, final_conv_kernel_size, padding = final_conv_kernel_size // 2)
zero_init_(self.final_conv)
# resize mode
self.resize_mode = resize_mode
# if the current settings for the unet are not correct
# for cascading DDPM, then reinit the unet with the right settings
def cast_model_parameters(
self,
*,
lowres_cond,
text_embed_dim,
channels,
channels_out,
cond_on_text
):
if lowres_cond == self.lowres_cond and \
channels == self.channels and \
cond_on_text == self.cond_on_text and \
text_embed_dim == self._locals['text_embed_dim'] and \
channels_out == self.channels_out:
return self
updated_kwargs = dict(
lowres_cond = lowres_cond,
text_embed_dim = text_embed_dim,
channels = channels,
channels_out = channels_out,
cond_on_text = cond_on_text
)
return self.__class__(**{**self._locals, **updated_kwargs})
# methods for returning the full unet config as well as its parameter state
def to_config_and_state_dict(self):
return self._locals, self.state_dict()
# class method for rehydrating the unet from its config and state dict
@classmethod
def from_config_and_state_dict(klass, config, state_dict):
unet = klass(**config)
unet.load_state_dict(state_dict)
return unet
# methods for persisting unet to disk
def persist_to_file(self, path):
path = Path(path)
path.parents[0].mkdir(exist_ok = True, parents = True)
config, state_dict = self.to_config_and_state_dict()
pkg = dict(config = config, state_dict = state_dict)
torch.save(pkg, str(path))
# class method for rehydrating the unet from file saved with `persist_to_file`
@classmethod
def hydrate_from_file(klass, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
assert 'config' in pkg and 'state_dict' in pkg
config, state_dict = pkg['config'], pkg['state_dict']
return Unet.from_config_and_state_dict(config, state_dict)
# forward with classifier free guidance
def forward_with_cond_scale(
self,
*args,
cond_scale = 1.,
**kwargs
):
logits = self.forward(*args, **kwargs)
if cond_scale == 1:
return logits
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
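# classifier free guidance note: with cond_scale > 1 the output is extrapolated away from the
# unconditional prediction, i.e. null + (cond - null) * cond_scale. for example (hypothetical
# numbers), cond_scale = 3. turns a conditional / unconditional pair of (1.0, 0.4) into
# 0.4 + (1.0 - 0.4) * 3. = 2.2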
def forward(
self,
x,
time,
*,
lowres_cond_img = None,
lowres_noise_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
self_cond = None,
cond_drop_prob = 0.
):
batch_size, device = x.shape[0], x.device
# condition on self
if self.self_cond:
self_cond = default(self_cond, lambda: torch.zeros_like(x))
x = torch.cat((x, self_cond), dim = 1)
# add low resolution conditioning, if present
assert not (self.lowres_cond and not exists(lowres_cond_img)), 'low resolution conditioning image must be present'
assert not (self.lowres_cond and not exists(lowres_noise_times)), 'low resolution conditioning noise time must be present'
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
# condition on input image
assert not (self.has_cond_image ^ exists(cond_images)), 'you either requested the unet to condition on an image but did not supply a conditioning image, or vice versa'
if exists(cond_images):
assert cond_images.shape[1] == self.cond_images_channels, 'the number of channels on the conditioning image you are passing in does not match what you specified on initialization of the unet'
cond_images = resize_image_to(cond_images, x.shape[-1], mode = self.resize_mode)
x = torch.cat((cond_images, x), dim = 1)
# initial convolution
x = self.init_conv(x)
# init conv residual
if self.init_conv_to_final_conv_residual:
init_conv_residual = x.clone()
# time conditioning
time_hiddens = self.to_time_hiddens(time)
# derive time tokens
time_tokens = self.to_time_tokens(time_hiddens)
t = self.to_time_cond(time_hiddens)
# add lowres time conditioning to time hiddens
# and add lowres time tokens along sequence dimension for attention
if self.lowres_cond:
lowres_time_hiddens = self.to_lowres_time_hiddens(lowres_noise_times)
lowres_time_tokens = self.to_lowres_time_tokens(lowres_time_hiddens)
lowres_t = self.to_lowres_time_cond(lowres_time_hiddens)
t = t + lowres_t
time_tokens = torch.cat((time_tokens, lowres_time_tokens), dim = -2)
# text conditioning
text_tokens = None
if exists(text_embeds) and self.cond_on_text:
# conditional dropout
text_keep_mask = prob_mask_like((batch_size,), 1 - cond_drop_prob, device = device)
text_keep_mask_embed = rearrange(text_keep_mask, 'b -> b 1 1')
text_keep_mask_hidden = rearrange(text_keep_mask, 'b -> b 1')
# calculate text embeds
text_tokens = self.text_to_cond(text_embeds)
text_tokens = text_tokens[:, :self.max_text_len]
if exists(text_mask):
text_mask = text_mask[:, :self.max_text_len]
text_tokens_len = text_tokens.shape[1]
remainder = self.max_text_len - text_tokens_len
if remainder > 0:
text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
if exists(text_mask):
if remainder > 0:
text_mask = F.pad(text_mask, (0, remainder), value = False)
text_mask = rearrange(text_mask, 'b n -> b n 1')
text_keep_mask_embed = text_mask & text_keep_mask_embed
null_text_embed = self.null_text_embed.to(text_tokens.dtype) # explicit cast needed, as pytorch AMP does not handle the dtype here
text_tokens = torch.where(
text_keep_mask_embed,
text_tokens,
null_text_embed
)
if exists(self.attn_pool):
text_tokens = self.attn_pool(text_tokens)
# extra non-attention conditioning by projecting and then summing text embeddings to time
# termed as text hiddens
mean_pooled_text_tokens = text_tokens.mean(dim = -2)
text_hiddens = self.to_text_non_attn_cond(mean_pooled_text_tokens)
null_text_hidden = self.null_text_hidden.to(t.dtype)
text_hiddens = torch.where(
text_keep_mask_hidden,
text_hiddens,
null_text_hidden
)
t = t + text_hiddens
# main conditioning tokens (c)
c = time_tokens if not exists(text_tokens) else torch.cat((time_tokens, text_tokens), dim = -2)
# normalize conditioning tokens
c = self.norm_cond(c)
# initial resnet block (for memory efficient unet)
if exists(self.init_resnet_block):
x = self.init_resnet_block(x, t)
# go through the layers of the unet, down and up
hiddens = []
for pre_downsample, init_block, resnet_blocks, attn_block, post_downsample in self.downs:
if exists(pre_downsample):
x = pre_downsample(x)
x = init_block(x, t, c)
for resnet_block in resnet_blocks:
x = resnet_block(x, t)
hiddens.append(x)
x = attn_block(x, c)
hiddens.append(x)
if exists(post_downsample):
x = post_downsample(x)
x = self.mid_block1(x, t, c)
if exists(self.mid_attn):
x = self.mid_attn(x)
x = self.mid_block2(x, t, c)
add_skip_connection = lambda x: torch.cat((x, hiddens.pop() * self.skip_connect_scale), dim = 1)
up_hiddens = []
for init_block, resnet_blocks, attn_block, upsample in self.ups:
x = add_skip_connection(x)
x = init_block(x, t, c)
for resnet_block in resnet_blocks:
x = add_skip_connection(x)
x = resnet_block(x, t)
x = attn_block(x, c)
up_hiddens.append(x.contiguous())
x = upsample(x)
# whether to combine all feature maps from upsample blocks
x = self.upsample_combiner(x, up_hiddens)
# final top-most residual if needed
if self.init_conv_to_final_conv_residual:
x = torch.cat((x, init_conv_residual), dim = 1)
if exists(self.final_res_block):
x = self.final_res_block(x, t)
if exists(lowres_cond_img):
x = torch.cat((x, lowres_cond_img), dim = 1)
return self.final_conv(x)
# null unet
class NullUnet(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.lowres_cond = False
self.dummy_parameter = nn.Parameter(torch.tensor([0.]))
def cast_model_parameters(self, *args, **kwargs):
return self
def forward(self, x, *args, **kwargs):
return x
# predefined unets, with configs lining up with hyperparameters in appendix of paper
class BaseUnet64(Unet):
def __init__(self, *args, **kwargs):
default_kwargs = dict(
dim = 512,
dim_mults = (1, 2, 3, 4),
num_resnet_blocks = 3,
layer_attns = (False, True, True, True),
layer_cross_attns = (False, True, True, True),
attn_heads = 8,
ff_mult = 2.,
memory_efficient = False
)
super().__init__(*args, **{**default_kwargs, **kwargs})
class SRUnet256(Unet):
def __init__(self, *args, **kwargs):
default_kwargs = dict(
dim = 128,
dim_mults = (1, 2, 4, 8),
num_resnet_blocks = (2, 4, 8, 8),
layer_attns = (False, False, False, True),
layer_cross_attns = (False, False, False, True),
attn_heads = 8,
ff_mult = 2.,
memory_efficient = True
)
super().__init__(*args, **{**default_kwargs, **kwargs})
class SRUnet1024(Unet):
def __init__(self, *args, **kwargs):
default_kwargs = dict(
dim = 128,
dim_mults = (1, 2, 4, 8),
num_resnet_blocks = (2, 4, 8, 8),
layer_attns = False,
layer_cross_attns = (False, False, False, True),
attn_heads = 8,
ff_mult = 2.,
memory_efficient = True
)
super().__init__(*args, **{**default_kwargs, **kwargs})
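# usage sketch (hypothetical, not part of the original source): the predefined unets mirror the
# hyperparameters of the base 64px and super-resolution stages from the paper's appendix, and can
# be handed straight to the cascading Imagen class defined below
#
#   unet1 = BaseUnet64(dim = 128)    # smaller base dim than the paper's 512, for illustration only
#   unet2 = SRUnet256()
#   imagen = Imagen((unet1, unet2), image_sizes = (64, 256), timesteps = 1000)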
# main imagen ddpm class, which is a cascading DDPM from Ho et al.
class Imagen(nn.Module):
def __init__(
self,
unets,
*,
image_sizes, # for cascading ddpm, image size at each stage
text_encoder_name = DEFAULT_T5_NAME,
text_embed_dim = None,
channels = 3,
timesteps = 1000,
cond_drop_prob = 0.1,
loss_type = 'l2',
noise_schedules = 'cosine',
pred_objectives = 'noise',
random_crop_sizes = None,
lowres_noise_schedule = 'linear',
lowres_sample_noise_level = 0.2, # in the paper, they present a new trick where they noise the lowres conditioning image, and at sample time, fix it to a certain level (0.1 or 0.3) - the unets are also made to be conditioned on this noise level
per_sample_random_aug_noise_level = False, # unclear whether, when conditioning on the augmentation noise level, each batch element should receive its own random aug noise value - turned off by default due to @marunine's finding
condition_on_text = True,
auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader
dynamic_thresholding = True,
dynamic_thresholding_percentile = 0.95, # the paper does not state the exact value clearly; 0.95 is a best guess from reading it
only_train_unet_number = None,
temporal_downsample_factor = 1,
resize_cond_video_frames = True,
resize_mode = 'nearest',
min_snr_loss_weight = True, # https://arxiv.org/abs/2303.09556
min_snr_gamma = 5
):
super().__init__()
# loss
if loss_type == 'l1':
loss_fn = F.l1_loss
elif loss_type == 'l2':
loss_fn = F.mse_loss
elif loss_type == 'huber':
loss_fn = F.smooth_l1_loss
else:
raise NotImplementedError()
self.loss_type = loss_type
self.loss_fn = loss_fn
# conditioning hparams
self.condition_on_text = condition_on_text
self.unconditional = not condition_on_text
# channels
self.channels = channels
# automatically take care of ensuring that the first unet is not conditioned on a low resolution image,
# while the rest of the unets are conditioned on the low resolution image produced by the previous unet
unets = cast_tuple(unets)
num_unets = len(unets)
# determine noise schedules per unet
timesteps = cast_tuple(timesteps, num_unets)
# make sure noise schedule defaults to 'cosine', 'cosine', and then 'linear' for the rest of the super-resolution unets
noise_schedules = cast_tuple(noise_schedules)
noise_schedules = pad_tuple_to_length(noise_schedules, 2, 'cosine')
noise_schedules = pad_tuple_to_length(noise_schedules, num_unets, 'linear')
# construct noise schedulers
noise_scheduler_klass = GaussianDiffusionContinuousTimes
self.noise_schedulers = nn.ModuleList([])
for timestep, noise_schedule in zip(timesteps, noise_schedules):
noise_scheduler = noise_scheduler_klass(noise_schedule = noise_schedule, timesteps = timestep)
self.noise_schedulers.append(noise_scheduler)
# randomly cropping for upsampler training
self.random_crop_sizes = cast_tuple(random_crop_sizes, num_unets)
assert not exists(first(self.random_crop_sizes)), 'you should not need to randomly crop images during training for the base unet, only for the upsamplers - so pass in `random_crop_sizes = (None, 128, 256)` as an example'
# lowres augmentation noise schedule
self.lowres_noise_schedule = GaussianDiffusionContinuousTimes(noise_schedule = lowres_noise_schedule)
# ddpm objectives - predicting noise by default
self.pred_objectives = cast_tuple(pred_objectives, num_unets)
# get text encoder
self.text_encoder_name = text_encoder_name
self.text_embed_dim = default(text_embed_dim, lambda: get_encoded_dim(text_encoder_name))
self.encode_text = partial(t5_encode_text, name = text_encoder_name)
# construct unets
self.unets = nn.ModuleList([])
self.unet_being_trained_index = -1 # keeps track of which unet is being trained at the moment
self.only_train_unet_number = only_train_unet_number
for ind, one_unet in enumerate(unets):
assert isinstance(one_unet, (Unet, Unet3D, NullUnet))
is_first = ind == 0
one_unet = one_unet.cast_model_parameters(
lowres_cond = not is_first,
cond_on_text = self.condition_on_text,
text_embed_dim = self.text_embed_dim if self.condition_on_text else None,
channels = self.channels,
channels_out = self.channels
)
self.unets.append(one_unet)
# unet image sizes
image_sizes = cast_tuple(image_sizes)
self.image_sizes = image_sizes
assert num_unets == len(image_sizes), f'you did not supply the correct number of u-nets ({len(unets)}) for resolutions {image_sizes}'
self.sample_channels = cast_tuple(self.channels, num_unets)
# determine whether we are training on images or video
is_video = any([isinstance(unet, Unet3D) for unet in self.unets])
self.is_video = is_video
self.right_pad_dims_to_datatype = partial(rearrange, pattern = ('b -> b 1 1 1' if not is_video else 'b -> b 1 1 1 1'))
self.resize_to = resize_video_to if is_video else resize_image_to
self.resize_to = partial(self.resize_to, mode = resize_mode)
# temporal interpolation
temporal_downsample_factor = cast_tuple(temporal_downsample_factor, num_unets)
self.temporal_downsample_factor = temporal_downsample_factor
self.resize_cond_video_frames = resize_cond_video_frames
self.temporal_downsample_divisor = temporal_downsample_factor[0]
assert temporal_downsample_factor[-1] == 1, 'downsample factor of last stage must be 1'
assert tuple(sorted(temporal_downsample_factor, reverse = True)) == temporal_downsample_factor, 'temporal downsample factors must be in descending order'
# cascading ddpm related stuff
lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
assert lowres_conditions == (False, *((True,) * (num_unets - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'
self.lowres_sample_noise_level = lowres_sample_noise_level
self.per_sample_random_aug_noise_level = per_sample_random_aug_noise_level
# classifier free guidance
self.cond_drop_prob = cond_drop_prob
self.can_classifier_guidance = cond_drop_prob > 0.
# normalize and unnormalize image functions
self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
self.input_image_range = (0. if auto_normalize_img else -1., 1.)
# dynamic thresholding
self.dynamic_thresholding = cast_tuple(dynamic_thresholding, num_unets)
self.dynamic_thresholding_percentile = dynamic_thresholding_percentile
# min snr loss weight
min_snr_loss_weight = cast_tuple(min_snr_loss_weight, num_unets)
min_snr_gamma = cast_tuple(min_snr_gamma, num_unets)
assert len(min_snr_loss_weight) == len(min_snr_gamma) == num_unets
self.min_snr_gamma = tuple((gamma if use_min_snr else None) for use_min_snr, gamma in zip(min_snr_loss_weight, min_snr_gamma))
# one temp buffer for keeping track of device
self.register_buffer('_temp', torch.tensor([0.]), persistent = False)
# default to device of unets passed in
self.to(next(self.unets.parameters()).device)
def force_unconditional_(self):
self.condition_on_text = False
self.unconditional = True
for unet in self.unets:
unet.cond_on_text = False
@property
def device(self):
return self._temp.device
def get_unet(self, unet_number):
assert 0 < unet_number <= len(self.unets)
index = unet_number - 1
if isinstance(self.unets, nn.ModuleList):
unets_list = [unet for unet in self.unets]
delattr(self, 'unets')
self.unets = unets_list
if index != self.unet_being_trained_index:
for unet_index, unet in enumerate(self.unets):
unet.to(self.device if unet_index == index else 'cpu')
self.unet_being_trained_index = index
return self.unets[index]
def reset_unets_all_one_device(self, device = None):
device = default(device, self.device)
self.unets = nn.ModuleList([*self.unets])
self.unets.to(device)
self.unet_being_trained_index = -1
@contextmanager
def one_unet_in_gpu(self, unet_number = None, unet = None):
assert exists(unet_number) ^ exists(unet)
if exists(unet_number):
unet = self.unets[unet_number - 1]
cpu = torch.device('cpu')
devices = [module_device(unet) for unet in self.unets]
self.unets.to(cpu)
unet.to(self.device)
yield
for unet, device in zip(self.unets, devices):
unet.to(device)
# overriding state dict functions
def state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.reset_unets_all_one_device()
return super().load_state_dict(*args, **kwargs)
# gaussian diffusion methods
def p_mean_variance(
self,
unet,
x,
t,
*,
noise_scheduler,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
lowres_cond_img = None,
self_cond = None,
lowres_noise_times = None,
cond_scale = 1.,
model_output = None,
t_next = None,
pred_objective = 'noise',
dynamic_threshold = True
):
assert not (cond_scale != 1. and not self.can_classifier_guidance), 'imagen was not trained with conditional dropout, and thus one cannot use classifier free guidance (cond_scale anything other than 1)'
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
pred = default(model_output, lambda: unet.forward_with_cond_scale(
x,
noise_scheduler.get_condition(t),
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
cond_scale = cond_scale,
lowres_cond_img = lowres_cond_img,
self_cond = self_cond,
lowres_noise_times = self.lowres_noise_schedule.get_condition(lowres_noise_times),
**video_kwargs
))
if pred_objective == 'noise':
x_start = noise_scheduler.predict_start_from_noise(x, t = t, noise = pred)
elif pred_objective == 'x_start':
x_start = pred
elif pred_objective == 'v':
x_start = noise_scheduler.predict_start_from_v(x, t = t, v = pred)
else:
raise ValueError(f'unknown objective {pred_objective}')
if dynamic_threshold:
# following pseudocode in appendix
# s is the dynamic threshold, determined by percentile of absolute values of reconstructed sample per batch element
s = torch.quantile(
rearrange(x_start, 'b ... -> b (...)').abs(),
self.dynamic_thresholding_percentile,
dim = -1
)
s.clamp_(min = 1.)
s = right_pad_dims_to(x_start, s)
x_start = x_start.clamp(-s, s) / s
else:
x_start.clamp_(-1., 1.)
mean_and_variance = noise_scheduler.q_posterior(x_start = x_start, x_t = x, t = t, t_next = t_next)
return mean_and_variance, x_start
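# dynamic thresholding worked example (hypothetical numbers): if the 95th percentile of |x_start|
# for a batch element is s = 2.0, the reconstruction is clamped to [-2, 2] and divided by 2,
# landing back in [-1, 1]; since s is never allowed below 1, elements already inside [-1, 1]
# are left untouched rather than being uniformly squashed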
@torch.no_grad()
def p_sample(
self,
unet,
x,
t,
*,
noise_scheduler,
t_next = None,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
cond_scale = 1.,
self_cond = None,
lowres_cond_img = None,
lowres_noise_times = None,
pred_objective = 'noise',
dynamic_threshold = True
):
b, *_, device = *x.shape, x.device
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
(model_mean, _, model_log_variance), x_start = self.p_mean_variance(
unet,
x = x,
t = t,
t_next = t_next,
noise_scheduler = noise_scheduler,
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
cond_scale = cond_scale,
lowres_cond_img = lowres_cond_img,
self_cond = self_cond,
lowres_noise_times = lowres_noise_times,
pred_objective = pred_objective,
dynamic_threshold = dynamic_threshold,
**video_kwargs
)
noise = torch.randn_like(x)
# no noise when t == 0
is_last_sampling_timestep = (t_next == 0) if isinstance(noise_scheduler, GaussianDiffusionContinuousTimes) else (t == 0)
nonzero_mask = (1 - is_last_sampling_timestep.float()).reshape(b, *((1,) * (len(x.shape) - 1)))
pred = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
return pred, x_start
@torch.no_grad()
def p_sample_loop(
self,
unet,
shape,
*,
noise_scheduler,
lowres_cond_img = None,
lowres_noise_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
inpaint_images = None,
inpaint_videos = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
cond_scale = 1,
pred_objective = 'noise',
dynamic_threshold = True,
use_tqdm = True
):
device = self.device
batch = shape[0]
img = torch.randn(shape, device = device)
# video
is_video = len(shape) == 5
frames = shape[-3] if is_video else None
resize_kwargs = dict(target_frames = frames) if exists(frames) else dict()
# for initialization with an image or video
if exists(init_images):
img += init_images
# keep track of x0, for self conditioning
x_start = None
# prepare inpainting
inpaint_images = default(inpaint_videos, inpaint_images)
has_inpainting = exists(inpaint_images) and exists(inpaint_masks)
resample_times = inpaint_resample_times if has_inpainting else 1
if has_inpainting:
inpaint_images = self.normalize_img(inpaint_images)
inpaint_images = self.resize_to(inpaint_images, shape[-1], **resize_kwargs)
inpaint_masks = self.resize_to(rearrange(inpaint_masks, 'b ... -> b 1 ...').float(), shape[-1], **resize_kwargs).bool()
# time
timesteps = noise_scheduler.get_sampling_timesteps(batch, device = device)
# whether to skip any steps
skip_steps = default(skip_steps, 0)
timesteps = timesteps[skip_steps:]
# video conditioning kwargs
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
for times, times_next in tqdm(timesteps, desc = 'sampling loop time step', total = len(timesteps), disable = not use_tqdm):
is_last_timestep = times_next == 0
for r in reversed(range(resample_times)):
is_last_resample_step = r == 0
if has_inpainting:
noised_inpaint_images, *_ = noise_scheduler.q_sample(inpaint_images, t = times)
img = img * ~inpaint_masks + noised_inpaint_images * inpaint_masks
self_cond = x_start if unet.self_cond else None
img, x_start = self.p_sample(
unet,
img,
times,
t_next = times_next,
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
cond_scale = cond_scale,
self_cond = self_cond,
lowres_cond_img = lowres_cond_img,
lowres_noise_times = lowres_noise_times,
noise_scheduler = noise_scheduler,
pred_objective = pred_objective,
dynamic_threshold = dynamic_threshold,
**video_kwargs
)
if has_inpainting and not (is_last_resample_step or torch.all(is_last_timestep)):
renoised_img = noise_scheduler.q_sample_from_to(img, times_next, times)
img = torch.where(
self.right_pad_dims_to_datatype(is_last_timestep),
img,
renoised_img
)
img.clamp_(-1., 1.)
# final inpainting
if has_inpainting:
img = img * ~inpaint_masks + inpaint_images * inpaint_masks
unnormalize_img = self.unnormalize_img(img)
return unnormalize_img
@torch.no_grad()
@eval_decorator
@beartype
def sample(
self,
texts: List[str] = None,
text_masks = None,
text_embeds = None,
video_frames = None,
cond_images = None,
cond_video_frames = None,
post_cond_video_frames = None,
inpaint_videos = None,
inpaint_images = None,
inpaint_masks = None,
inpaint_resample_times = 5,
init_images = None,
skip_steps = None,
batch_size = 1,
cond_scale = 1.,
lowres_sample_noise_level = None,
start_at_unet_number = 1,
start_image_or_video = None,
stop_at_unet_number = None,
return_all_unet_outputs = False,
return_pil_images = False,
device = None,
use_tqdm = True,
use_one_unet_in_gpu = True
):
device = default(device, self.device)
self.reset_unets_all_one_device(device = device)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(device), (text_embeds, text_masks))
if not self.unconditional:
assert exists(text_embeds), 'text must be passed in if the network was trained with text conditioning - otherwise `condition_on_text` must be set to `False` when training'
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
batch_size = text_embeds.shape[0]
# inpainting
inpaint_images = default(inpaint_videos, inpaint_images)
if exists(inpaint_images):
if self.unconditional:
if batch_size == 1: # assume researcher wants to broadcast along inpainted images
batch_size = inpaint_images.shape[0]
assert inpaint_images.shape[0] == batch_size, 'number of inpainting images must be equal to the batch size specified on sample, i.e. `sample(batch_size=<int>)`'
assert not (self.condition_on_text and inpaint_images.shape[0] != text_embeds.shape[0]), 'number of inpainting images must be equal to the number of text to be conditioned on'
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into imagen if specified'
assert not (not self.condition_on_text and exists(text_embeds)), 'imagen specified not to be conditioned on text, yet text embeddings were passed in'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
assert not (exists(inpaint_images) ^ exists(inpaint_masks)), 'inpaint images and masks must be both passed in to do inpainting'
outputs = []
is_cuda = next(self.parameters()).is_cuda
device = next(self.parameters()).device
lowres_sample_noise_level = default(lowres_sample_noise_level, self.lowres_sample_noise_level)
num_unets = len(self.unets)
# condition scaling
cond_scale = cast_tuple(cond_scale, num_unets)
# add frame dimension for video
if self.is_video and exists(inpaint_images):
video_frames = inpaint_images.shape[2]
if inpaint_masks.ndim == 3:
inpaint_masks = repeat(inpaint_masks, 'b h w -> b f h w', f = video_frames)
assert inpaint_masks.shape[1] == video_frames
assert not (self.is_video and not exists(video_frames)), 'video_frames must be passed in on sample time if training on video'
all_frame_dims = calc_all_frame_dims(self.temporal_downsample_factor, video_frames)
frames_to_resize_kwargs = lambda frames: dict(target_frames = frames) if exists(frames) else dict()
# for initial image and skipping steps
init_images = cast_tuple(init_images, num_unets)
init_images = [maybe(self.normalize_img)(init_image) for init_image in init_images]
skip_steps = cast_tuple(skip_steps, num_unets)
# handle starting at a unet greater than 1, for upscaler-only training
if start_at_unet_number > 1:
assert start_at_unet_number <= num_unets, 'start_at_unet_number must not exceed the total number of unets'
assert not exists(stop_at_unet_number) or start_at_unet_number <= stop_at_unet_number
assert exists(start_image_or_video), 'starting image or video must be supplied if only doing upscaling'
prev_image_size = self.image_sizes[start_at_unet_number - 2]
prev_frame_size = all_frame_dims[start_at_unet_number - 2][0] if self.is_video else None
img = self.resize_to(start_image_or_video, prev_image_size, **frames_to_resize_kwargs(prev_frame_size))
# go through each unet in cascade
for unet_number, unet, channel, image_size, frame_dims, noise_scheduler, pred_objective, dynamic_threshold, unet_cond_scale, unet_init_images, unet_skip_steps in tqdm(zip(range(1, num_unets + 1), self.unets, self.sample_channels, self.image_sizes, all_frame_dims, self.noise_schedulers, self.pred_objectives, self.dynamic_thresholding, cond_scale, init_images, skip_steps), disable = not use_tqdm):
if unet_number < start_at_unet_number:
continue
assert not isinstance(unet, NullUnet), 'one cannot sample from null / placeholder unets'
context = self.one_unet_in_gpu(unet = unet) if is_cuda and use_one_unet_in_gpu else nullcontext()
with context:
# video kwargs
video_kwargs = dict()
if self.is_video:
video_kwargs = dict(
cond_video_frames = cond_video_frames,
post_cond_video_frames = post_cond_video_frames,
)
video_kwargs = compact(video_kwargs)
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_number - 1]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'cond_video_frames', temporal_downsample_fn)
video_kwargs = maybe_transform_dict_key(video_kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# low resolution conditioning
lowres_cond_img = lowres_noise_times = None
shape = (batch_size, channel, *frame_dims, image_size, image_size)
resize_kwargs = dict(target_frames = frame_dims[0]) if self.is_video else dict()
if unet.lowres_cond:
lowres_noise_times = self.lowres_noise_schedule.get_times(batch_size, lowres_sample_noise_level, device = device)
lowres_cond_img = self.resize_to(img, image_size, **resize_kwargs)
lowres_cond_img = self.normalize_img(lowres_cond_img)
lowres_cond_img, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_noise_times, noise = torch.randn_like(lowres_cond_img))
# init images or video
if exists(unet_init_images):
unet_init_images = self.resize_to(unet_init_images, image_size, **resize_kwargs)
# shape of stage
shape = (batch_size, self.channels, *frame_dims, image_size, image_size)
img = self.p_sample_loop(
unet,
shape,
text_embeds = text_embeds,
text_mask = text_masks,
cond_images = cond_images,
inpaint_images = inpaint_images,
inpaint_masks = inpaint_masks,
inpaint_resample_times = inpaint_resample_times,
init_images = unet_init_images,
skip_steps = unet_skip_steps,
cond_scale = unet_cond_scale,
lowres_cond_img = lowres_cond_img,
lowres_noise_times = lowres_noise_times,
noise_scheduler = noise_scheduler,
pred_objective = pred_objective,
dynamic_threshold = dynamic_threshold,
use_tqdm = use_tqdm,
**video_kwargs
)
outputs.append(img)
if exists(stop_at_unet_number) and stop_at_unet_number == unet_number:
break
output_index = -1 if not return_all_unet_outputs else slice(None) # either return last unet output or all unet outputs
if not return_pil_images:
return outputs[output_index]
if not return_all_unet_outputs:
outputs = outputs[-1:]
assert not self.is_video, 'converting sampled video tensor to video file is not supported yet'
pil_images = list(map(lambda img: list(map(T.ToPILImage(), img.unbind(dim = 0))), outputs))
return pil_images[output_index] # now you have a bunch of pillow images you can just .save(/where/ever/you/want.png)
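# sampling usage sketch (hypothetical, not part of the original source):
#
#   images = imagen.sample(
#       texts = ['a photo of a corgi wearing sunglasses'],
#       cond_scale = 3.,
#       return_pil_images = True
#   )
#   images[0].save('./corgi.png')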
@beartype
def p_losses(
self,
unet: Union[Unet, Unet3D, NullUnet, DistributedDataParallel],
x_start,
times,
*,
noise_scheduler,
lowres_cond_img = None,
lowres_aug_times = None,
text_embeds = None,
text_mask = None,
cond_images = None,
noise = None,
times_next = None,
pred_objective = 'noise',
min_snr_gamma = None,
random_crop_size = None,
**kwargs
):
is_video = x_start.ndim == 5
noise = default(noise, lambda: torch.randn_like(x_start))
# normalize to [-1, 1]
x_start = self.normalize_img(x_start)
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
# random cropping during training
# for upsamplers
if exists(random_crop_size):
if is_video:
frames = x_start.shape[2]
x_start, lowres_cond_img, noise = map(lambda t: rearrange(t, 'b c f h w -> (b f) c h w'), (x_start, lowres_cond_img, noise))
aug = K.RandomCrop((random_crop_size, random_crop_size), p = 1.)
# make sure low res conditioner and image both get augmented the same way
# detailed https://kornia.readthedocs.io/en/latest/augmentation.module.html?highlight=randomcrop#kornia.augmentation.RandomCrop
x_start = aug(x_start)
lowres_cond_img = aug(lowres_cond_img, params = aug._params)
noise = aug(noise, params = aug._params)
if is_video:
x_start, lowres_cond_img, noise = map(lambda t: rearrange(t, '(b f) c h w -> b c f h w', f = frames), (x_start, lowres_cond_img, noise))
# get x_t
x_noisy, log_snr, alpha, sigma = noise_scheduler.q_sample(x_start = x_start, t = times, noise = noise)
# also noise the lowres conditioning image
# at sample time, they then fix the noise level of 0.1 - 0.3
lowres_cond_img_noisy = None
if exists(lowres_cond_img):
lowres_aug_times = default(lowres_aug_times, times)
lowres_cond_img_noisy, *_ = self.lowres_noise_schedule.q_sample(x_start = lowres_cond_img, t = lowres_aug_times, noise = torch.randn_like(lowres_cond_img))
# time condition
noise_cond = noise_scheduler.get_condition(times)
# unet kwargs
unet_kwargs = dict(
text_embeds = text_embeds,
text_mask = text_mask,
cond_images = cond_images,
lowres_noise_times = self.lowres_noise_schedule.get_condition(lowres_aug_times),
lowres_cond_img = lowres_cond_img_noisy,
cond_drop_prob = self.cond_drop_prob,
**kwargs
)
# self condition if needed
# Because 'unet' can be an instance of DistributedDataParallel coming from the
# ImagenTrainer.unet_being_trained when invoking ImagenTrainer.forward(), we need to
# access the member 'module' of the wrapped unet instance.
self_cond = unet.module.self_cond if isinstance(unet, DistributedDataParallel) else unet.self_cond
if self_cond and random() < 0.5:
with torch.no_grad():
pred = unet.forward(
x_noisy,
noise_cond,
**unet_kwargs
).detach()
x_start = noise_scheduler.predict_start_from_noise(x_noisy, t = times, noise = pred) if pred_objective == 'noise' else pred
unet_kwargs = {**unet_kwargs, 'self_cond': x_start}
# get prediction
pred = unet.forward(
x_noisy,
noise_cond,
**unet_kwargs
)
# prediction objective
if pred_objective == 'noise':
target = noise
elif pred_objective == 'x_start':
target = x_start
elif pred_objective == 'v':
# derivation detailed in Appendix D of Progressive Distillation paper
# https://arxiv.org/abs/2202.00512
# this makes distillation viable as well as solve an issue with color shifting in upresoluting unets, noted in imagen-video
target = alpha * noise - sigma * x_start
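# note (added for clarity): with the variance preserving schedule used here (alpha ** 2 + sigma ** 2 = 1)
# and x_noisy = alpha * x_start + sigma * noise, a v prediction can be inverted as
# x_start = alpha * x_noisy - sigma * v and noise = sigma * x_noisy + alpha * v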
else:
raise ValueError(f'unknown objective {pred_objective}')
# losses
losses = self.loss_fn(pred, target, reduction = 'none')
losses = reduce(losses, 'b ... -> b', 'mean')
# min snr loss reweighting
snr = log_snr.exp()
maybe_clipped_snr = snr.clone()
if exists(min_snr_gamma):
maybe_clipped_snr.clamp_(max = min_snr_gamma)
if pred_objective == 'noise':
loss_weight = maybe_clipped_snr / snr
elif pred_objective == 'x_start':
loss_weight = maybe_clipped_snr
elif pred_objective == 'v':
loss_weight = maybe_clipped_snr / (snr + 1)
losses = losses * loss_weight
return losses.mean()
@beartype
def forward(
self,
images, # images or video
unet: Union[Unet, Unet3D, NullUnet, DistributedDataParallel] = None,
texts: List[str] = None,
text_embeds = None,
text_masks = None,
unet_number = None,
cond_images = None,
**kwargs
):
if self.is_video and images.ndim == 4:
images = rearrange(images, 'b c h w -> b c 1 h w')
kwargs.update(ignore_time = True)
assert images.shape[-1] == images.shape[-2], f'the images you pass in must be square, but received dimensions of {images.shape[-2]}, {images.shape[-1]}'
assert not (len(self.unets) > 1 and not exists(unet_number)), f'you must specify which unet you want trained, from a range of 1 to {len(self.unets)}, if you are training cascading DDPM (multiple unets)'
unet_number = default(unet_number, 1)
assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train on unet #{self.only_train_unet_number}'
images = cast_uint8_images_to_float(images)
cond_images = maybe(cast_uint8_images_to_float)(cond_images)
assert images.dtype == torch.float or images.dtype == torch.half, f'images tensor needs to be floats but {images.dtype} dtype found instead'
unet_index = unet_number - 1
unet = default(unet, lambda: self.get_unet(unet_number))
assert not isinstance(unet, NullUnet), 'null unet cannot and should not be trained'
noise_scheduler = self.noise_schedulers[unet_index]
min_snr_gamma = self.min_snr_gamma[unet_index]
pred_objective = self.pred_objectives[unet_index]
target_image_size = self.image_sizes[unet_index]
random_crop_size = self.random_crop_sizes[unet_index]
prev_image_size = self.image_sizes[unet_index - 1] if unet_index > 0 else None
b, c, *_, h, w, device, is_video = *images.shape, images.device, images.ndim == 5
assert images.shape[1] == self.channels
assert h >= target_image_size and w >= target_image_size
frames = images.shape[2] if is_video else None
all_frame_dims = tuple(safe_get_tuple_index(el, 0) for el in calc_all_frame_dims(self.temporal_downsample_factor, frames))
ignore_time = kwargs.get('ignore_time', False)
target_frame_size = all_frame_dims[unet_index] if is_video and not ignore_time else None
prev_frame_size = all_frame_dims[unet_index - 1] if is_video and not ignore_time and unet_index > 0 else None
frames_to_resize_kwargs = lambda frames: dict(target_frames = frames) if exists(frames) else dict()
times = noise_scheduler.sample_random_times(b, device = device)
if exists(texts) and not exists(text_embeds) and not self.unconditional:
assert all([*map(len, texts)]), 'text cannot be empty'
assert len(texts) == len(images), 'number of text captions does not match up with the number of images given'
with autocast(enabled = False):
text_embeds, text_masks = self.encode_text(texts, return_attn_mask = True)
text_embeds, text_masks = map(lambda t: t.to(images.device), (text_embeds, text_masks))
if not self.unconditional:
text_masks = default(text_masks, lambda: torch.any(text_embeds != 0., dim = -1))
assert not (self.condition_on_text and not exists(text_embeds)), 'text or text encodings must be passed into decoder if specified'
assert not (not self.condition_on_text and exists(text_embeds)), 'decoder specified not to be conditioned on text, yet it is presented'
assert not (exists(text_embeds) and text_embeds.shape[-1] != self.text_embed_dim), f'invalid text embedding dimension being passed in (should be {self.text_embed_dim})'
# handle video frame conditioning
if self.is_video and self.resize_cond_video_frames:
downsample_scale = self.temporal_downsample_factor[unet_index]
temporal_downsample_fn = partial(scale_video_time, downsample_scale = downsample_scale)
kwargs = maybe_transform_dict_key(kwargs, 'cond_video_frames', temporal_downsample_fn)
kwargs = maybe_transform_dict_key(kwargs, 'post_cond_video_frames', temporal_downsample_fn)
# handle low resolution conditioning
lowres_cond_img = lowres_aug_times = None
if exists(prev_image_size):
lowres_cond_img = self.resize_to(images, prev_image_size, **frames_to_resize_kwargs(prev_frame_size), clamp_range = self.input_image_range)
lowres_cond_img = self.resize_to(lowres_cond_img, target_image_size, **frames_to_resize_kwargs(target_frame_size), clamp_range = self.input_image_range)
if self.per_sample_random_aug_noise_level:
lowres_aug_times = self.lowres_noise_schedule.sample_random_times(b, device = device)
else:
lowres_aug_time = self.lowres_noise_schedule.sample_random_times(1, device = device)
lowres_aug_times = repeat(lowres_aug_time, '1 -> b', b = b)
images = self.resize_to(images, target_image_size, **frames_to_resize_kwargs(target_frame_size))
return self.p_losses(unet, images, times, text_embeds = text_embeds, text_mask = text_masks, cond_images = cond_images, noise_scheduler = noise_scheduler, lowres_cond_img = lowres_cond_img, lowres_aug_times = lowres_aug_times, pred_objective = pred_objective, min_snr_gamma = min_snr_gamma, random_crop_size = random_crop_size, **kwargs)
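# usage sketch (illustrative addition, not part of the original file): a single training
# step for the first unet, assuming an `imagen` instance built elsewhere (e.g. via
# ImagenConfig(...).create() as in the tests) with text conditioning and text_embed_dim = 768
#   images = torch.randn(4, 3, 64, 64)                              # square (b, c, h, w) batch
#   text_embeds = torch.randn(4, 6, 768)                            # precomputed text embeddings
#   loss = imagen(images, text_embeds = text_embeds, unet_number = 1)
#   loss.backward()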
|
imagen-pytorch-main
|
imagen_pytorch/imagen_pytorch.py
|
from pathlib import Path
from functools import partial
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as T, utils
import torch.nn.functional as F
from imagen_pytorch import t5
from torch.nn.utils.rnn import pad_sequence
from PIL import Image
from datasets.utils.file_utils import get_datasets_user_agent
import io
import urllib
USER_AGENT = get_datasets_user_agent()
# helpers functions
def exists(val):
return val is not None
def cycle(dl):
while True:
for data in dl:
yield data
def convert_image_to(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# dataset, dataloader, collator
class Collator:
def __init__(self, image_size, url_label, text_label, image_label, name, channels):
self.url_label = url_label
self.text_label = text_label
self.image_label = image_label
self.download = url_label is not None
self.name = name
self.channels = channels
self.transform = T.Compose([
T.Resize(image_size),
T.CenterCrop(image_size),
T.ToTensor(),
])
def __call__(self, batch):
texts = []
images = []
for item in batch:
try:
if self.download:
image = self.fetch_single_image(item[self.url_label])
else:
image = item[self.image_label]
image = self.transform(image.convert(self.channels))
except Exception:
continue
text = t5.t5_encode_text([item[self.text_label]], name=self.name)
texts.append(torch.squeeze(text))
images.append(image)
if len(texts) == 0:
return None
texts = pad_sequence(texts, True)
newbatch = []
for i in range(len(texts)):
newbatch.append((images[i], texts[i]))
return torch.utils.data.dataloader.default_collate(newbatch)
def fetch_single_image(self, image_url, timeout=1):
try:
request = urllib.request.Request(
image_url,
data=None,
headers={"user-agent": USER_AGENT},
)
with urllib.request.urlopen(request, timeout=timeout) as req:
image = Image.open(io.BytesIO(req.read())).convert('RGB')
except Exception:
image = None
return image
class Dataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png', 'tiff'],
convert_image_to_type = None
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
convert_fn = partial(convert_image_to, convert_image_to_type) if exists(convert_image_to_type) else nn.Identity()
self.transform = T.Compose([
T.Lambda(convert_fn),
T.Resize(image_size),
T.RandomHorizontalFlip(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
def get_images_dataloader(
folder,
*,
batch_size,
image_size,
shuffle = True,
cycle_dl = False,
pin_memory = True
):
ds = Dataset(folder, image_size)
dl = DataLoader(ds, batch_size = batch_size, shuffle = shuffle, pin_memory = pin_memory)
if cycle_dl:
dl = cycle(dl)
return dl
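# usage sketch (illustrative addition, not part of the original file; the folder path is hypothetical)
#   dl = get_images_dataloader('/path/to/images', batch_size = 16, image_size = 64, cycle_dl = True)
#   images = next(dl)   # (16, 3, 64, 64) float tensor in [0, 1]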
|
imagen-pytorch-main
|
imagen_pytorch/data.py
|
from imagen_pytorch.test import test_trainer
|
imagen-pytorch-main
|
imagen_pytorch/test/__init__.py
|
from imagen_pytorch.trainer import ImagenTrainer
from imagen_pytorch.configs import ImagenConfig
from imagen_pytorch.t5 import t5_encode_text
from torch.utils.data import Dataset
import torch
def test_trainer_instantiation():
unet1 = dict(
dim = 8,
dim_mults = (1, 1, 1, 1),
num_resnet_blocks = 1,
layer_attns = False,
layer_cross_attns = False,
attn_heads = 2
)
imagen = ImagenConfig(
unets=(unet1,),
image_sizes=(64,),
).create()
trainer = ImagenTrainer(
imagen=imagen
)
def test_trainer_step():
class TestDataset(Dataset):
def __init__(self):
super().__init__()
def __len__(self):
return 16
def __getitem__(self, index):
return (torch.zeros(3, 64, 64), torch.zeros(6, 768))
unet1 = dict(
dim = 8,
dim_mults = (1, 1, 1, 1),
num_resnet_blocks = 1,
layer_attns = False,
layer_cross_attns = False,
attn_heads = 2
)
imagen = ImagenConfig(
unets=(unet1,),
image_sizes=(64,),
).create()
trainer = ImagenTrainer(
imagen=imagen
)
ds = TestDataset()
trainer.add_train_dataset(ds, batch_size=8)
trainer.train_step(1)
assert trainer.num_steps_taken(1) == 1
|
imagen-pytorch-main
|
imagen_pytorch/test/test_trainer.py
|
from setuptools import setup, find_packages
setup(
name = 'h-transformer-1d',
packages = find_packages(),
version = '0.1.8',
license='MIT',
description = 'H-Transformer 1D - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/h-transformer-1d',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'efficient attention'
],
install_requires=[
'einops>=0.3',
'rotary-embedding-torch',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
h-transformer-1d-main
|
setup.py
|
from h_transformer_1d import HTransformer1D
from h_transformer_1d.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = HTransformer1D(
num_tokens = 256,
dim = 512,
max_seq_len = SEQ_LEN,
depth = 8,
heads = 8,
causal = True,
reversible = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy() # np.fromstring is deprecated; copy so the array is writable for torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
|
h-transformer-1d-main
|
train.py
|
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi = x[:, :-1]
xo = x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
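# usage sketch (illustrative addition, not part of the original file): wrap a causal model,
# train with next-token cross entropy, then sample continuations from a prompt
#   model = AutoregressiveWrapper(HTransformer1D(num_tokens = 256, dim = 512, depth = 6, max_seq_len = 1024, causal = True))
#   seq = torch.randint(0, 256, (1, 1025))        # training sequences are max_seq_len + 1 tokens
#   loss = model(seq)
#   prompt = torch.randint(0, 256, (1, 256))
#   sampled = model.generate(prompt, 64)          # (1, 64) newly generated tokens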
|
h-transformer-1d-main
|
h_transformer_1d/autoregressive_wrapper.py
|
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
def layer_drop(layers, prob):
to_drop = torch.empty(len(layers)).uniform_(0, 1) < prob
blocks = [block for block, drop in zip(layers, to_drop) if not drop]
blocks = layers[:1] if len(blocks) == 0 else blocks
return blocks
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}, layer_dropout = 0.):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
self.layer_dropout = layer_dropout
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
for (f, g), (f_args, g_args) in layers_and_args:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}, layer_dropout = 0.):
super().__init__()
self.args_route = args_route
self.layer_dropout = layer_dropout
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
layers_and_args = list(zip(blocks, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
blocks, args = map(lambda ind: list(map(itemgetter(ind), layers_and_args)), (0, 1))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
|
h-transformer-1d-main
|
h_transformer_1d/reversible.py
|
from h_transformer_1d.h_transformer_1d import HTransformer1D
|
h-transformer-1d-main
|
h_transformer_1d/__init__.py
|
from math import log2, ceil
from functools import wraps
import torch
from torch import nn, einsum, diagonal
import torch.nn.functional as F
from h_transformer_1d.reversible import ReversibleSequence, SequentialSequence
from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
from einops import rearrange, reduce, repeat
# helpers
def exists(val):
return val is not None
def masked_aggregate(tensor, mask = None, dim = -1, average = True):
if not exists(mask):
fn = torch.sum if not average else torch.mean
return fn(tensor, dim = dim)
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor = tensor.masked_fill(~mask, 0.)
total_el = mask.sum(dim = dim)
agg = tensor.sum(dim = dim)
if average:
agg = agg / total_el.clamp(min = 1.)
agg.masked_fill_(total_el == 0, 0.)
return agg
def shift(t, amount, mask = None):
if amount == 0:
return t
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return F.pad(t, (0, 0, amount, -amount), value = 0.)
# helper classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(
self,
dim,
*,
mult = 4
):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
# token shifting
class PreShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# hierarchical attention helper functions
def cast_for_op(cast_type, fn):
@wraps(fn)
def inner(t, *args, **kwargs):
orig_type = t.dtype
t = t.type(cast_type)
out = fn(t, *args, **kwargs)
out = out.type(orig_type)
return out
return inner
def flip_every_two(t):
t = rearrange(t, 'b (n r) ... -> b n r ...', r = 2)
t = torch.flip(t, dims = (2,)) # so we pay attention to the off-diagonal blocks in the attention matrix
t = rearrange(t, 'b n r ... -> b (n r) ...')
return t
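# e.g. block indices (0, 1, 2, 3) become (1, 0, 3, 2), pairing each block with its neighbor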
# attention
class HAttention1D(nn.Module):
def __init__(
self,
dim,
*,
heads = 8,
dim_head = 64,
block_size = 16,
pos_emb = None,
eps = 1e-8,
**kwargs
):
super().__init__()
self.eps = eps
self.heads = heads
self.scale = dim_head ** -0.5
self.block_size = block_size
inner_dim = heads * dim_head
self.pos_emb = pos_emb
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x, mask = None):
b, n, h, device, bsz, eps = *x.shape[:2], self.heads, x.device, self.block_size, self.eps
# pad sequence length to power of 2
pad_to_len = 2 ** ceil(log2(n))
padding = pad_to_len - n
if padding != 0:
x = F.pad(x, (0, 0, 0, padding), value = 0.)
if exists(mask):
mask = F.pad(mask, (0, padding), value = False)
# derive queries, keys, values
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
# split out heads, and also divide sequence into blocks
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
if exists(mask):
mask = repeat(mask, 'b n -> (b h) n', h = h)
# scale
q = q * self.scale
# rotary pos emb
if exists(self.pos_emb):
freqs = self.pos_emb(torch.arange(pad_to_len, device = device), cache_key = pad_to_len)
freqs = rearrange(freqs, 'n d -> () n d')
q, k, v = map(lambda t: apply_rotary_emb(freqs, t), (q, k, v))
# calculate number of levels until 2 x 2
num_levels = int(log2(pad_to_len // bsz)) - 2
assert num_levels >= 0, 'number of levels must be non-negative'
# coarsening
qkvs = [(q, k, v, mask)]
for level in range(num_levels):
q, k, v = map(lambda t: rearrange(t, 'b (n r) d -> b n r d', r = 2), (q, k, v))
if exists(mask):
mask = repeat(mask, 'b (n r) -> b n r', r = 2)
# masked mean for queries and keys, but not values
q = masked_aggregate(q, mask, dim = 2)
k = masked_aggregate(k, mask, dim = 2)
v = masked_aggregate(v, mask, dim = 2, average = False)
if exists(mask):
mask = torch.any(mask, dim = 2)
coarsened_qkvs = (q, k, v, mask)
qkvs.append(coarsened_qkvs)
qkvs = [qkvs[0], *qkvs] # duplicate the finest resolution an extra time, for the base diagonal
# half-attention function
def calculate_Y_and_A(q, k, v, mask = None):
S = einsum('... i d, ... j d -> ... i j', q, k)
if exists(mask):
mask_value = -torch.finfo(S.dtype).max
S = S.masked_fill(~mask, mask_value)
S = S - torch.max(S, dim = -1, keepdim = True).values
A = S.exp()
y = einsum('... i j, ... j d -> ... i d', A, v)
A = A.sum(dim = -1)
y = rearrange(y, 'b ... n d -> b (... n) d')
A = rearrange(A, 'b ... i -> b (... i)')
return y, A
to_blocks = lambda t: rearrange(t, 'b (n z) ... -> b n z ...', z = bsz)
# calculate Ys, as in the paper
Ys = []
for ind, (q, k, v, mask) in enumerate(reversed(qkvs)):
is_last = ind == (len(qkvs) - 1)
q, k, v = map(to_blocks, (q, k, v))
# generate the mask for S
S_mask = None
if exists(mask):
mask = to_blocks(mask)
q_mask = mask
k_mask = cast_for_op(torch.int, flip_every_two)(mask) if not is_last else mask
S_mask = rearrange(q_mask, '... n -> ... n ()') * rearrange(k_mask, '... n -> ... () n')
# flip keys and values to capture the off-diagonals
if not is_last:
k, v = map(flip_every_two, (k, v))
Y_level = calculate_Y_and_A(q, k, v, mask = S_mask)
Ys.append(Y_level)
# interpolate
Y = 0
A = 0
for ind, (Y_level, A_level) in enumerate(Ys):
is_last = ind == (len(Ys) - 1)
if not is_last and torch.is_tensor(Y):
Y = repeat(Y, 'b n d -> b (n r) d', r = 2)
if not is_last and torch.is_tensor(A):
A = repeat(A, 'b n -> b (n r)', r = 2)
Y = Y_level + Y
A = A_level + A
out = Y / rearrange(A + eps, 'b n -> b n ()')
# merge heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
# combine out
return self.to_out(out[:, :n])
# causal attention
class CausalHAttention1D(nn.Module):
def __init__(
self,
dim,
*,
max_seq_len,
heads = 8,
dim_head = 64,
block_size = 16,
eps = 1e-8,
pos_emb = None
):
super().__init__()
self.eps = eps
self.heads = heads
self.scale = dim_head ** -0.5
self.block_size = block_size
inner_dim = heads * dim_head
self.pos_emb = pos_emb
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
# derive mask
num_levels = int(log2(max_seq_len // block_size)) - 1
root_seq = torch.arange(max_seq_len)
seqs = [root_seq]
seq = root_seq
for ind in range(num_levels):
seq = rearrange(seq, '(n r) -> n r', r = 2)
seq = seq.max(dim = -1).values
expanded_mask_seq = repeat(seq, 'n -> (n r)', r = (2 ** (ind + 1)))
seqs.append(expanded_mask_seq)
seq_keys = torch.stack(seqs, dim = 0)
mask = seq_keys > rearrange(root_seq, 'n -> () n')
self.register_buffer('mask', mask)
def forward(self, x, **kwargs):
b, n, h, device, bsz, eps = *x.shape[:2], self.heads, x.device, self.block_size, self.eps
# pad sequence length to power of 2
pad_to_len = 2 ** ceil(log2(n))
padding = pad_to_len - n
if padding != 0:
x = F.pad(x, (0, 0, 0, padding), value = 0.)
# derive queries, keys, values
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
# split out heads, and also divide sequence into blocks
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
# scale
q = q * self.scale
# rotary embedding
if exists(self.pos_emb):
freqs = self.pos_emb(torch.arange(pad_to_len, device = device), cache_key = pad_to_len)
freqs = rearrange(freqs, 'n d -> () n d')
q, k, v = map(lambda t: apply_rotary_emb(freqs, t), (q, k, v))
# calculate number of levels until 2 x 2
num_levels = int(log2(pad_to_len // bsz)) - 1
# coarsening
qkvs = [(q, k, v)]
for level in range(num_levels):
q, k, v = map(lambda t: rearrange(t, 'b (n r) d -> b n r d', r = 2), (q, k, v))
# masked mean for queries and keys, but not values
q = q.mean(dim = 2)
k = k.mean(dim = 2)
v = v.sum(dim = 2)
coarsened_qkvs = (q, k, v)
qkvs.append(coarsened_qkvs)
# half-attention function
def calculate_Y_and_A(q, k, v, mask_right_off_diagonals = False, causal_mask_diagonal = False):
if mask_right_off_diagonals:
q, k, v = map(lambda t: rearrange(t, 'b (n r) ... -> b n r ...', r = 2), (q, k, v))
q, k, v = map(lambda t: t[:, :, 1], (q, k, v))
S = einsum('... i d, ... j d -> ... i j', q, k)
if causal_mask_diagonal:
causal_mask = torch.ones(*S.shape[-2:], device = S.device).triu(1).bool()
mask_value = -torch.finfo(S.dtype).max
causal_mask = rearrange(causal_mask, 'i j -> () () i j')
S = S.masked_fill(causal_mask, mask_value)
S = S - torch.amax(S, dim = -1, keepdim = True)
A = S.exp()
y = einsum('... i j, ... j d -> ... i d', A, v)
A = A.sum(dim = -1)
if mask_right_off_diagonals:
y, A = map(lambda t: rearrange(t, 'b n ... -> b n () ...'), (y, A))
y = F.pad(y, (0, 0, 0, 0, 1, 0), value = 0.)
A = F.pad(A, (0, 0, 1, 0), value = 0.)
y = rearrange(y, 'b ... d -> b (...) d')
A = rearrange(A, 'b ... -> b (...)')
return y, A
to_blocks = lambda t: rearrange(t, 'b (n z) ... -> b n z ...', z = bsz)
# calculate Ys, as in the paper
Ys = []
for ind, (q, k, v) in enumerate(reversed(qkvs)):
is_last = ind == (len(qkvs) - 1)
q, k, v = map(to_blocks, (q, k, v))
# flip keys and values to capture the off-diagonals
if not is_last:
k, v = map(flip_every_two, (k, v))
Y_level = calculate_Y_and_A(q, k, v, mask_right_off_diagonals = not is_last, causal_mask_diagonal = is_last)
Ys.append(Y_level)
# interpolate
def safe_cat(acc, el, dim = 0):
if not exists(acc):
return el
return torch.cat((el, acc), dim = dim)
Y = None
A = None
for Y_level, A_level in Ys:
Y_level, A_level = map(lambda t: rearrange(t, '... -> () ...'), (Y_level, A_level))
if torch.is_tensor(Y):
Y = repeat(Y, '... n d -> ... (n r) d', r = 2)
if torch.is_tensor(A):
A = repeat(A, '... n -> ... (n r)', r = 2)
Y = safe_cat(Y, Y_level)
A = safe_cat(A, A_level)
# create causal mask for Y and A
causal_mask = self.mask[:(num_levels + 1), :pad_to_len]
# mask and sum
Y_causal_mask = rearrange(causal_mask, 'h n -> h () n ()')
A_causal_mask = rearrange(causal_mask, 'h n -> h () n')
Y = Y.masked_fill(Y_causal_mask, 0.)
A = A.masked_fill(A_causal_mask, 0.)
Y = Y.sum(dim = 0)
A = A.sum(dim = 0)
# normalize
out = Y / rearrange(A + eps, 'b n -> b n ()')
# merge heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
# combine out
return self.to_out(out[:, :n])
# main class
class HTransformer1D(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
max_seq_len,
causal = False,
heads = 8,
dim_head = 64,
ff_mult = 4,
block_size = 128, # this is the Nr in the paper - Nb = (max_seq_len / tokens_per_block)
pos_emb = None,
reversible = False,
shift_tokens = False
):
super().__init__()
assert (max_seq_len % block_size) == 0, 'maximum sequence length must be divisible by the block size'
num_blocks = max_seq_len // block_size
assert log2(max_seq_len // block_size).is_integer(), f'number of blocks {num_blocks} must be a power of 2'
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = RotaryEmbedding(dim = dim_head)
self.max_seq_len = max_seq_len
layers = nn.ModuleList([])
attn_class = CausalHAttention1D if causal else HAttention1D
attn_kwargs = dict(max_seq_len = max_seq_len) if causal else dict()
shift_token_ranges = (0, 1) if shift_tokens else (-1, 0, 1)
for ind in range(depth):
attn = attn_class(dim, dim_head = dim_head, heads = heads, block_size = block_size, pos_emb = self.pos_emb, **attn_kwargs)
ff = FeedForward(dim, mult = ff_mult)
if shift_tokens:
attn, ff = map(lambda t: PreShiftTokens(shift_token_ranges, t), (attn, ff))
attn, ff = map(lambda t: PreNorm(dim, t), (attn, ff))
layers.append(nn.ModuleList([attn, ff]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
attn_route_map = {'mask': route_attn}
self.layers = execute_type(layers, args_route = {**attn_route_map})
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, mask = None):
b, n, device = *x.shape, x.device
assert n <= self.max_seq_len, 'sequence length must be less than or equal to the maximum sequence length'
x = self.token_emb(x)
x = self.layers(x, mask = mask)
return self.to_logits(x)
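# usage sketch (illustrative addition, not part of the original file): the non-causal model
# can take a boolean padding mask alongside the token ids
#   model = HTransformer1D(num_tokens = 256, dim = 512, depth = 6, max_seq_len = 8192, block_size = 128)
#   tokens = torch.randint(0, 256, (1, 1000))
#   mask = torch.ones(1, 1000).bool()
#   logits = model(tokens, mask = mask)           # (1, 1000, 256)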
|
h-transformer-1d-main
|
h_transformer_1d/h_transformer_1d.py
|