Dataset columns:
    index      int64   (values 0 to 731k)
    package    string  (lengths 2 to 98)
    name       string  (lengths 1 to 76)
    docstring  string  (lengths 0 to 281k)
    code       string  (lengths 4 to 1.07M)
    signature  string  (lengths 2 to 42.8k)
722,360
paramz.core.observable
change_priority
null
def change_priority(self, observer, callble, priority):
    self.remove_observer(observer, callble)
    self.add_observer(observer, callble, priority)
(self, observer, callble, priority)
722,361
paramz.core.gradcheckable
checkgrad
Check the gradient of this parameter with respect to the highest parent's objective function. This is a three point estimate of the gradient, wiggling at the parameters with a stepsize step. The check passes if either the ratio or the difference between numerical and analytical gradient is smaller than tolerance. :param bool verbose: whether each parameter shall be checked individually. :param float step: the stepsize for the numerical three point gradient estimate. :param float tolerance: the tolerance for the gradient ratio or difference. :param float df_tolerance: the tolerance for df_tolerance .. note:: The *dF_ratio* indicates the limit of accuracy of numerical gradients. If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually not accurate enough for the tests (shown with blue).
def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
    """
    Check the gradient of this parameter with respect to the highest parent's
    objective function. This is a three point estimate of the gradient,
    wiggling at the parameters with a stepsize step. The check passes if
    either the ratio or the difference between numerical and analytical
    gradient is smaller than tolerance.

    :param bool verbose: whether each parameter shall be checked individually.
    :param float step: the stepsize for the numerical three point gradient estimate.
    :param float tolerance: the tolerance for the gradient ratio or difference.
    :param float df_tolerance: the tolerance for df_tolerance

    .. note::
        The *dF_ratio* indicates the limit of accuracy of numerical gradients.
        If it is too small, e.g., smaller than 1e-12, the numerical gradients
        are usually not accurate enough for the tests (shown with blue).
    """
    # Make sure we always call the gradcheck on the highest parent.
    # This ensures the assumption of the highest parent to hold the fixes.
    # In the checkgrad function we take advantage of that, so it needs
    # to be set in place here.
    if self.has_parent():
        return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, df_tolerance=df_tolerance)
    return self._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, df_tolerance=df_tolerance)
(self, verbose=0, step=1e-06, tolerance=0.001, df_tolerance=1e-12)
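A minimal usage sketch for checkgrad, assuming a built paramz model m (the model variable is illustrative, not part of the record above):

    assert m.checkgrad()       # silent overall pass/fail against numerical gradients
    m.checkgrad(verbose=True)  # per-parameter table, handy when a gradient is wrong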
722,362
paramz.core.constrainable
constrain
:param transform: the :py:class:`paramz.transformations.Transformation` to constrain this parameter to. :param warning: print a warning if re-constraining parameters. Constrain the parameter to the given :py:class:`paramz.transformations.Transformation`.
def constrain(self, transform, warning=True, trigger_parent=True):
    """
    :param transform: the :py:class:`paramz.transformations.Transformation`
        to constrain this parameter to.
    :param warning: print a warning if re-constraining parameters.

    Constrain the parameter to the given
    :py:class:`paramz.transformations.Transformation`.
    """
    if isinstance(transform, Transformation):
        self.param_array[...] = transform.initialize(self.param_array)
    elif transform == __fixed__:
        return self.fix(warning=warning, trigger_parent=trigger_parent)
    else:
        raise ValueError('Can only constrain with paramz.transformations.Transformation object')
    reconstrained = self.unconstrain()
    added = self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
    self.trigger_update(trigger_parent)
    return added
(self, transform, warning=True, trigger_parent=True)
722,363
paramz.core.constrainable
constrain_bounded
:param lower, upper: the limits to bound this parameter to :param warning: print a warning if re-constraining parameters. Constrain this parameter to lie within the given range.
def constrain_bounded(self, lower, upper, warning=True, trigger_parent=True):
    """
    :param lower, upper: the limits to bound this parameter to
    :param warning: print a warning if re-constraining parameters.

    Constrain this parameter to lie within the given range.
    """
    self.constrain(Logistic(lower, upper), warning=warning, trigger_parent=trigger_parent)
(self, lower, upper, warning=True, trigger_parent=True)
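The constrain family above composes as follows; a short sketch assuming a Param p linked into a model (names illustrative):

    p.constrain_positive()         # reparameterize through Logexp, keeps p > 0
    p.constrain_bounded(0., 1.)    # reparameterize through Logistic(0, 1)
    p.unconstrain()                # remove all transformations again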
722,364
paramz.core.constrainable
constrain_fixed
Constrain this parameter to be fixed to the current value it carries. This does not override the previous constraints, so unfixing will restore the constraint set before fixing. :param warning: print a warning for overwriting constraints.
def constrain_fixed(self, value=None, warning=True, trigger_parent=True):
    """
    Constrain this parameter to be fixed to the current value it carries.
    This does not override the previous constraints, so unfixing will
    restore the constraint set before fixing.

    :param warning: print a warning for overwriting constraints.
    """
    if value is not None:
        self[:] = value
    #index = self.unconstrain()
    index = self._add_to_index_operations(self.constraints, np.empty(0), __fixed__, warning)
    self._highest_parent_._set_fixed(self, index)
    self.notify_observers(self, None if trigger_parent else -np.inf)
    return index
(self, value=None, warning=True, trigger_parent=True)
722,365
paramz.core.constrainable
constrain_negative
:param warning: print a warning if re-constraining parameters. Constrain this parameter to the default negative constraint.
def constrain_negative(self, warning=True, trigger_parent=True):
    """
    :param warning: print a warning if re-constraining parameters.

    Constrain this parameter to the default negative constraint.
    """
    self.constrain(NegativeLogexp(), warning=warning, trigger_parent=trigger_parent)
(self, warning=True, trigger_parent=True)
722,366
paramz.core.constrainable
constrain_positive
:param warning: print a warning if re-constraining parameters. Constrain this parameter to the default positive constraint.
def constrain_positive(self, warning=True, trigger_parent=True):
    """
    :param warning: print a warning if re-constraining parameters.

    Constrain this parameter to the default positive constraint.
    """
    self.constrain(Logexp(), warning=warning, trigger_parent=trigger_parent)
(self, warning=True, trigger_parent=True)
722,367
paramz.parameterized
copy
null
def copy(self, memo=None):
    if memo is None:
        memo = {}
    memo[id(self.optimizer_array)] = None  # and param_array
    memo[id(self.param_array)] = None      # and param_array
    copy = super(Parameterized, self).copy(memo)
    copy._connect_parameters()
    copy._connect_fixes()
    copy._notify_parent_change()
    return copy
(self, memo=None)
722,368
paramz.core.parameter_core
disable_caching
null
def disable_caching(self):
    def visit(self):
        self.cache.disable_caching()
    self.traverse(visit)
(self)
722,369
paramz.core.parameter_core
enable_caching
null
def enable_caching(self):
    def visit(self):
        self.cache.enable_caching()
    self.traverse(visit)
(self)
722,371
paramz.parameterized
get_property_string
null
def get_property_string(self, propname):
    props = []
    for p in self.parameters:
        props.extend(p.get_property_string(propname))
    return props
(self, propname)
722,372
paramz.parameterized
grep_param_names
create a list of parameters, matching regular expression regexp
def grep_param_names(self, regexp):
    """
    create a list of parameters, matching regular expression regexp
    """
    if not isinstance(regexp, _pattern_type):
        regexp = compile(regexp)
    found_params = []
    def visit(innerself, regexp):
        if (innerself is not self) and regexp.match(innerself.hierarchy_name().partition('.')[2]):
            found_params.append(innerself)
    self.traverse(visit, regexp)
    return found_params
(self, regexp)
722,373
paramz.core.parentable
has_parent
Return whether this parentable object currently has a parent.
def has_parent(self):
    """
    Return whether this parentable object currently has a parent.
    """
    return self._parent_ is not None
(self)
722,374
paramz.core.nameable
hierarchy_name
return the name for this object with the parents names attached by dots. :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()` on the names, recursively
def hierarchy_name(self, adjust_for_printing=True):
    """
    return the name for this object with the parents names attached by dots.

    :param bool adjust_for_printing: whether to call
        :func:`~adjust_for_printing()` on the names, recursively
    """
    if adjust_for_printing:
        adjust = lambda x: adjust_name_for_printing(x)
    else:
        adjust = lambda x: x
    if self.has_parent():
        return self._parent_.hierarchy_name() + "." + adjust(self.name)
    return adjust(self.name)
(self, adjust_for_printing=True)
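hierarchy_name simply joins the names up the parent chain with dots; for illustration, assuming a model named 'model' that holds a parameter 'variance' (both names hypothetical):

    m.variance.hierarchy_name()    # -> 'model.variance'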
722,375
paramz.core.parameter_core
initialize_parameter
Call this function to initialize the model, if you built it without initialization. This HAS to be called manually before optimizing, or it will cause unexpected behaviour, if not errors!
def initialize_parameter(self):
    """
    Call this function to initialize the model, if you built it without
    initialization. This HAS to be called manually before optimizing,
    or it will cause unexpected behaviour, if not errors!
    """
    #logger.debug("connecting parameters")
    self._highest_parent_._notify_parent_change()
    self._highest_parent_._connect_parameters()
    #logger.debug("calling parameters changed")
    self._highest_parent_._connect_fixes()
    self.trigger_update()
(self)
722,376
paramz.parameterized
link_parameter
:param param: the parameter to add :type param: list of or one :py:class:`paramz.param.Param` :param [index]: index of where to put parameters Add the parameter to this param class; you can insert parameters at any given index using the :func:`list.insert` syntax.
def link_parameter(self, param, index=None):
    """
    :param param: the parameter to add
    :type param: list of or one :py:class:`paramz.param.Param`
    :param [index]: index of where to put parameters

    Add the parameter to this param class; you can insert parameters
    at any given index using the :func:`list.insert` syntax.
    """
    if param in self.parameters and index is not None:
        self.unlink_parameter(param)
        return self.link_parameter(param, index)
    # elif param.has_parent():
    #     raise HierarchyError("parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short()))
    elif param not in self.parameters:
        if param.has_parent():
            def visit(parent, self):
                if parent is self:
                    raise HierarchyError("You cannot add a parameter twice into the hierarchy")
            param.traverse_parents(visit, self)
            param._parent_.unlink_parameter(param)
        # make sure the size is set
        if index is None:
            start = sum(p.size for p in self.parameters)
            for name, iop in self._index_operations.items():
                iop.shift_right(start, param.size)
                iop.update(param._index_operations[name], self.size)
            param._parent_ = self
            param._parent_index_ = len(self.parameters)
            self.parameters.append(param)
        else:
            start = sum(p.size for p in self.parameters[:index])
            for name, iop in self._index_operations.items():
                iop.shift_right(start, param.size)
                iop.update(param._index_operations[name], start)
            param._parent_ = self
            param._parent_index_ = index if index >= 0 else len(self.parameters[:index])
            for p in self.parameters[index:]:
                p._parent_index_ += 1
            self.parameters.insert(index, param)

        param.add_observer(self, self._pass_through_notify_observers, -np.inf)

        parent = self
        while parent is not None:
            parent.size += param.size
            parent = parent._parent_
        self._notify_parent_change()

        if not self._in_init_ and self._highest_parent_._model_initialized_:
            #self._connect_parameters()
            #self._notify_parent_change()
            self._highest_parent_._connect_parameters()
            self._highest_parent_._notify_parent_change()
            self._highest_parent_._connect_fixes()
        return param
    else:
        raise HierarchyError("""Parameter exists already, try making a copy""")
(self, param, index=None)
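A sketch of link_parameter in use, building a small hierarchy (the object and parameter names are illustrative):

    import numpy as np
    from paramz import Parameterized, Param

    m = Parameterized('model')
    w = Param('weights', np.zeros(3))
    m.link_parameter(w)    # w now contributes 3 entries to m's parameter array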
722,377
paramz.parameterized
link_parameters
convenience method for adding several parameters without gradient specification
def link_parameters(self, *parameters):
    """
    convenience method for adding several parameters without gradient specification
    """
    [self.link_parameter(p) for p in parameters]
(self, *parameters)
722,378
paramz.core.observable
notify_observers
Notifies all observers. `which` is the element that kicked off this notification loop. The first argument will be self, the second `which`. .. note:: notifies only observers with priority p > min_priority! :param min_priority: only notify observers with priority > min_priority; if min_priority is None, notify all observers in order
def notify_observers(self, which=None, min_priority=None):
    """
    Notifies all observers. `which` is the element that kicked off this
    notification loop. The first argument will be self, the second `which`.

    .. note::
        notifies only observers with priority p > min_priority!

    :param min_priority: only notify observers with priority > min_priority;
        if min_priority is None, notify all observers in order
    """
    if self._update_on:
        if which is None:
            which = self
        if min_priority is None:
            [callble(self, which=which) for _, _, callble in self.observers]
        else:
            for p, _, callble in self.observers:
                if p <= min_priority:
                    break
                callble(self, which=which)
(self, which=None, min_priority=None)
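A sketch of the observer pattern implemented by notify_observers/remove_observer, assuming any paramz Observable obs (the Listener class is hypothetical):

    class Listener(object):
        def on_change(self, changed, which):
            # `changed` is the notifying object, `which` kicked off the loop
            print('changed:', which)

    l = Listener()
    obs.add_observer(l, l.on_change)   # register with default priority
    obs.notify_observers()             # calls l.on_change(obs, which=obs)
    obs.remove_observer(l)             # detach every callable added for l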
722,379
paramz.model
objective_function
The objective function for the given algorithm. This function is the true objective, which is to be minimized. Note that all parameters are already set and in place, so you just need to return the objective function here. For probabilistic models this is the negative log_likelihood (including the MAP prior), so we return it here. If your model is not probabilistic, just return your objective to minimize here!
def objective_function(self):
    """
    The objective function for the given algorithm.

    This function is the true objective, which is to be minimized.
    Note that all parameters are already set and in place, so you just
    need to return the objective function here.

    For probabilistic models this is the negative log_likelihood
    (including the MAP prior), so we return it here. If your model is
    not probabilistic, just return your objective to minimize here!
    """
    raise NotImplementedError("Implement the result of the objective function here")
(self)
722,380
paramz.model
objective_function_gradients
The gradients for the objective function for the given algorithm. The gradients are w.r.t. the *negative* objective function, as this framework works with *negative* log-likelihoods as a default. You can find the gradient for the parameters in self.gradient at all times. This is the place where gradients get stored for parameters. This function is the true objective, which is to be minimized. Note that all parameters are already set and in place, so you just need to return the gradient here. For probabilistic models this is the gradient of the negative log_likelihood (including the MAP prior), so we return it here. If your model is not probabilistic, just return your *negative* gradient here!
def objective_function_gradients(self):
    """
    The gradients for the objective function for the given algorithm.
    The gradients are w.r.t. the *negative* objective function, as this
    framework works with *negative* log-likelihoods as a default.

    You can find the gradient for the parameters in self.gradient at all
    times. This is the place where gradients get stored for parameters.

    This function is the true objective, which is to be minimized.
    Note that all parameters are already set and in place, so you just
    need to return the gradient here.

    For probabilistic models this is the gradient of the negative
    log_likelihood (including the MAP prior), so we return it here.
    If your model is not probabilistic, just return your *negative*
    gradient here!
    """
    return self.gradient
(self)
722,381
paramz.model
optimize
Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. kwargs are passed to the optimizer. They can be: :param max_iters: maximum number of function evaluations :type max_iters: int :param messages: True: display messages during optimisation; "ipython_notebook": display messages in the ipython notebook :type messages: bool or string :param optimizer: which optimizer to use (defaults to self.preferred_optimizer) :type optimizer: string Valid optimizers are: - 'scg': scaled conjugate gradient method, recommended for stability. See also GPy.inference.optimization.scg - 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc) - 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin) - 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b) - 'lbfgs': the bfgs method (see scipy.optimize.fmin_bfgs) - 'sgd': stochastic gradient descent. For experts only!
def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
    """
    Optimize the model using self.log_likelihood and
    self.log_likelihood_gradient, as well as self.priors.
    kwargs are passed to the optimizer. They can be:

    :param max_iters: maximum number of function evaluations
    :type max_iters: int
    :param messages: True: display messages during optimisation;
        "ipython_notebook": display messages in the ipython notebook
    :type messages: bool or string
    :param optimizer: which optimizer to use (defaults to self.preferred_optimizer)
    :type optimizer: string

    Valid optimizers are:
      - 'scg': scaled conjugate gradient method, recommended for stability.
        See also GPy.inference.optimization.scg
      - 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
      - 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin)
      - 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b)
      - 'lbfgs': the bfgs method (see scipy.optimize.fmin_bfgs)
      - 'sgd': stochastic gradient descent. For experts only!
    """
    if self.is_fixed or self.size == 0:
        print('nothing to optimize')
        return
    if not self.update_model():
        print("updates were off, setting updates on again")
        self.update_model(True)
    if start is None:
        start = self.optimizer_array
    if optimizer is None:
        optimizer = self.preferred_optimizer
    if isinstance(optimizer, optimization.Optimizer):
        opt = optimizer
        opt.model = self
    else:
        optimizer = optimization.get_optimizer(optimizer)
        opt = optimizer(max_iters=max_iters, **kwargs)
    with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook, clear_after_finish=clear_after_finish) as vo:
        opt.run(start, f_fp=self._objective_grads, f=self._objective, fp=self._grads)
    self.optimizer_array = opt.x_opt
    self.optimization_runs.append(opt)
    return opt
(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs)
722,382
paramz.model
optimize_restarts
Perform random restarts of the model, and set the model to the best seen solution. If the robust flag is set, exceptions raised during optimizations will be handled silently. If _all_ runs fail, the model is reset to the existing parameter values. \*\*kwargs are passed to the optimizer. :param num_restarts: number of restarts to use (default 10) :type num_restarts: int :param robust: whether to handle exceptions silently or not (default False) :type robust: bool :param parallel: whether to run each restart as a separate process. It relies on the multiprocessing module. :type parallel: bool :param num_processes: number of workers in the multiprocessing pool :type num_processes: int :param max_f_eval: maximum number of function evaluations :type max_f_eval: int :param max_iters: maximum number of iterations :type max_iters: int :param messages: whether to display messages during optimisation :type messages: bool .. note:: If num_processes is None, the number of workers in the multiprocessing pool is automatically set to the number of processors on the current machine.
def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
    """
    Perform random restarts of the model, and set the model to the best
    seen solution.

    If the robust flag is set, exceptions raised during optimizations will
    be handled silently. If _all_ runs fail, the model is reset to the
    existing parameter values.

    \*\*kwargs are passed to the optimizer.

    :param num_restarts: number of restarts to use (default 10)
    :type num_restarts: int
    :param robust: whether to handle exceptions silently or not (default False)
    :type robust: bool
    :param parallel: whether to run each restart as a separate process.
        It relies on the multiprocessing module.
    :type parallel: bool
    :param num_processes: number of workers in the multiprocessing pool
    :type num_processes: int
    :param max_f_eval: maximum number of function evaluations
    :type max_f_eval: int
    :param max_iters: maximum number of iterations
    :type max_iters: int
    :param messages: whether to display messages during optimisation
    :type messages: bool

    .. note::
        If num_processes is None, the number of workers in the
        multiprocessing pool is automatically set to the number of
        processors on the current machine.
    """
    initial_length = len(self.optimization_runs)
    initial_parameters = self.optimizer_array.copy()
    if parallel:  #pragma: no cover
        try:
            pool = mp.Pool(processes=num_processes)
            obs = [self.copy() for i in range(num_restarts)]
            [obs[i].randomize() for i in range(num_restarts - 1)]
            jobs = pool.map(opt_wrapper, [(o, kwargs) for o in obs])
            pool.close()
            pool.join()
        except KeyboardInterrupt:
            print("Ctrl+c received, terminating and joining pool.")
            pool.terminate()
            pool.join()
    for i in range(num_restarts):
        try:
            if not parallel:
                if i > 0:
                    self.randomize()
                self.optimize(**kwargs)
            else:  #pragma: no cover
                self.optimization_runs.append(jobs[i])
            if verbose:
                print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
        except Exception as e:
            if robust:
                print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
            else:
                raise e
    if len(self.optimization_runs) > initial_length:
        # This works, since failed jobs don't get added to the optimization_runs.
        i = np.argmin([o.f_opt for o in self.optimization_runs[initial_length:]])
        self.optimizer_array = self.optimization_runs[initial_length + i].x_opt
    else:
        self.optimizer_array = initial_parameters
    return self.optimization_runs
(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs)
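A typical call sequence for the two optimization entry points above, assuming a built model m (illustrative):

    m.optimize('lbfgsb', messages=True, max_iters=500)   # single run from the current state
    m.optimize_restarts(num_restarts=5, robust=True)     # keep the best of 5 random restarts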
722,383
paramz.core.parameter_core
parameter_names
Get the names of all parameters of this model or parameter. It starts from the parameterized object you are calling this method on. Note: This does not unravel multidimensional parameters, use parameter_names_flat to unravel parameters! :param bool add_self: whether to add the own name in front of names :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names :param bool recursive: whether to traverse through hierarchy and append leaf node names :param bool intermediate: whether to add intermediate names, that is parameterized objects
def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False):
    """
    Get the names of all parameters of this model or parameter. It starts
    from the parameterized object you are calling this method on.

    Note: This does not unravel multidimensional parameters,
    use parameter_names_flat to unravel parameters!

    :param bool add_self: whether to add the own name in front of names
    :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names
    :param bool recursive: whether to traverse through hierarchy and append leaf node names
    :param bool intermediate: whether to add intermediate names, that is parameterized objects
    """
    if adjust_for_printing:
        adjust = adjust_name_for_printing
    else:
        adjust = lambda x: x
    names = []
    if intermediate or (not recursive):
        names.extend([adjust(x.name) for x in self.parameters])
    if intermediate or recursive:
        names.extend([xi for x in self.parameters
                      for xi in x.parameter_names(add_self=True,
                                                  adjust_for_printing=adjust_for_printing,
                                                  recursive=True,
                                                  intermediate=False)])
    if add_self:
        names = map(lambda x: adjust(self.name) + "." + x, names)
    return names
(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False)
722,384
paramz.core.parameter_core
parameter_names_flat
Return the flattened parameter names for all subsequent parameters of this parameter. We do not include the name for self here! If you want the names for fixed parameters as well in this list, set include_fixed to True. :param bool include_fixed: whether to include fixed names here.
def parameter_names_flat(self, include_fixed=False):
    """
    Return the flattened parameter names for all subsequent parameters
    of this parameter. We do not include the name for self here!

    If you want the names for fixed parameters as well in this list,
    set include_fixed to True.

    :param bool include_fixed: whether to include fixed names here.
    """
    name_list = []
    for p in self.flattened_parameters:
        name = p.hierarchy_name()
        if p.size > 1:
            name_list.extend(["{}[{!s}]".format(name, i) for i in p._indices()])
        else:
            name_list.append(name)
    name_list = np.array(name_list)
    if not include_fixed and self._has_fixes():
        return name_list[self._fixes_]
    return name_list
(self, include_fixed=False)
722,385
paramz.core.parameter_core
parameters_changed
This method gets called when parameters have changed. Another way of listening to param changes is to add self as a listener to the param, such that updates get passed through. See :py:func:`paramz.core.observable.Observable.add_observer`
def parameters_changed(self):
    """
    This method gets called when parameters have changed.

    Another way of listening to param changes is to add self as a listener
    to the param, such that updates get passed through. See
    :py:func:`paramz.core.observable.Observable.add_observer`
    """
    pass
(self)
722,386
paramz.core.pickleable
pickle
:param f: either filename or open file object to write to. if it is an open buffer, you have to make sure to close it properly. :param protocol: pickling protocol to use, see the python pickle documentation for details.
def pickle(self, f, protocol=-1):
    """
    :param f: either filename or open file object to write to.
        if it is an open buffer, you have to make sure to close it properly.
    :param protocol: pickling protocol to use, see the python pickle
        documentation for details.
    """
    try:  # Py2
        import cPickle as pickle
        if isinstance(f, basestring):
            with open(f, 'wb') as f:
                pickle.dump(self, f, protocol)
        else:
            pickle.dump(self, f, protocol)
    except ImportError:  # python3
        import pickle
        if isinstance(f, str):
            with open(f, 'wb') as f:
                pickle.dump(self, f, protocol)
        else:
            pickle.dump(self, f, protocol)
(self, f, protocol=-1)
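A round-trip sketch for pickle: saving uses the method above, loading falls back to the standard library (the loading path is an assumption, not part of the record):

    m.pickle('model.pkl')    # filename variant opens and closes the file itself

    import pickle
    with open('model.pkl', 'rb') as fh:
        m2 = pickle.load(fh)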
722,387
paramz.core.parameter_core
randomize
Randomize the model. Make this draw from the rand_gen if one exists, else draw random normal(0,1) :param rand_gen: np random number generator which takes args and kwargs :param float loc: loc parameter for random number generator :param float scale: scale parameter for random number generator :param args, kwargs: will be passed through to random number generator
def randomize(self, rand_gen=None, *args, **kwargs):
    """
    Randomize the model. Make this draw from the rand_gen if one exists,
    else draw random normal(0,1)

    :param rand_gen: np random number generator which takes args and kwargs
    :param float loc: loc parameter for random number generator
    :param float scale: scale parameter for random number generator
    :param args, kwargs: will be passed through to random number generator
    """
    if rand_gen is None:
        rand_gen = np.random.normal
    # first take care of all parameters (from N(0,1))
    x = rand_gen(size=self._size_transformed(), *args, **kwargs)
    updates = self.update_model()
    self.update_model(False)  # Switch off the updates
    self.optimizer_array = x  # makes sure all of the tied parameters get the same init (since there's only one prior object...)
    # now draw from prior where possible
    x = self.param_array.copy()
    unfixlist = np.ones((self.size,), dtype=bool)
    unfixlist[self.constraints[__fixed__]] = False
    self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
    self.update_model(updates)
(self, rand_gen=None, *args, **kwargs)
722,388
paramz.core.indexable
remove_index_operation
null
def remove_index_operation(self, name):
    if name in self._index_operations:
        delitem(self._index_operations, name)
        #delattr(self, name)
    else:
        raise AttributeError("No index operation with the name {}".format(name))
(self, name)
722,389
paramz.core.observable
remove_observer
Either (if callble is None) remove all callables, which were added alongside observer, or remove callable `callble` which was added alongside the observer `observer`.
def remove_observer(self, observer, callble=None):
    """
    Either (if callble is None) remove all callables, which were added
    alongside observer, or remove callable `callble` which was added
    alongside the observer `observer`.
    """
    to_remove = []
    for poc in self.observers:
        _, obs, clble = poc
        if callble is not None:
            if (obs is observer) and (callble == clble):
                to_remove.append(poc)
        else:
            if obs is observer:
                to_remove.append(poc)
    for r in to_remove:
        self.observers.remove(*r)
(self, observer, callble=None)
722,390
paramz.core.parameter_core
save
Save all the model parameters into a file (HDF5 by default). This is not supported yet. We are working on having a consistent, human readable way of saving and loading GPy models. This only saves the parameter array to a hdf5 file. In order to load the model again, use the same script for building the model you used to build this model. Then load the param array from this hdf5 file and set the parameters of the created model: >>> m[:] = h5_file['param_array'] This is less than optimal, we are working on a better solution to that.
def save(self, filename, ftype='HDF5'):  # pragma: no coverage
    """
    Save all the model parameters into a file (HDF5 by default).

    This is not supported yet. We are working on having a consistent,
    human readable way of saving and loading GPy models. This only saves
    the parameter array to a hdf5 file. In order to load the model again,
    use the same script for building the model you used to build this
    model. Then load the param array from this hdf5 file and set the
    parameters of the created model:

    >>> m[:] = h5_file['param_array']

    This is less than optimal, we are working on a better solution to that.
    """
    from ..param import Param
    def gather_params(self, plist):
        if isinstance(self, Param):
            plist.append(self)
    plist = []
    self.traverse(gather_params, plist)
    names = self.parameter_names(adjust_for_printing=True)
    if ftype == 'HDF5':
        try:
            import h5py
            f = h5py.File(filename, 'w')
            for p, n in zip(plist, names):
                n = n.replace('.', '_')
                p = p.values
                d = f.create_dataset(n, p.shape, dtype=p.dtype)
                d[:] = p
            if hasattr(self, 'param_array'):
                d = f.create_dataset('param_array', self.param_array.shape, dtype=self.param_array.dtype)
                d[:] = self.param_array
            f.close()
        except:
            # raising a bare string is invalid Python; raise a proper exception instead
            raise IOError('Fails to write the parameters into a HDF5 file!')
(self, filename, ftype='HDF5')
722,391
paramz.core.observable
set_updates
null
def set_updates(self, on=True):
    self._update_on = on
(self, on=True)
722,392
paramz.core.updateable
toggle_update
null
def toggle_update(self):
    print("deprecated: toggle_update was renamed to update_toggle for easier access")
    self.update_toggle()
(self)
722,393
paramz.core.parameter_core
traverse
Traverse the hierarchy performing `visit(self, *args, **kwargs)` at every node passed by downwards. This function includes self! See *visitor pattern* in literature. This is implemented in pre-order fashion. Example:: # Collect all children: children = [] self.traverse(children.append) print(children)
def traverse(self, visit, *args, **kwargs):
    """
    Traverse the hierarchy performing `visit(self, *args, **kwargs)`
    at every node passed by downwards. This function includes self!

    See *visitor pattern* in literature. This is implemented in pre-order fashion.

    Example::

        # Collect all children:
        children = []
        self.traverse(children.append)
        print(children)
    """
    if not self.__visited:
        visit(self, *args, **kwargs)
        self.__visited = True
        self._traverse(visit, *args, **kwargs)
        self.__visited = False
(self, visit, *args, **kwargs)
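Complementing the docstring example, a sketch of traverse collecting names across the whole hierarchy of a model m (illustrative):

    names = []
    m.traverse(lambda node: names.append(node.name))  # pre-order, includes m itself
    print(names)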
722,394
paramz.core.parameter_core
traverse_parents
Traverse the hierarchy upwards, visiting all parents and their children except self. See "visitor pattern" in literature. This is implemented in pre-order fashion. Example: parents = [] self.traverse_parents(parents.append) print(parents)
def traverse_parents(self, visit, *args, **kwargs):
    """
    Traverse the hierarchy upwards, visiting all parents and their
    children except self. See "visitor pattern" in literature. This is
    implemented in pre-order fashion.

    Example:

        parents = []
        self.traverse_parents(parents.append)
        print(parents)
    """
    if self.has_parent():
        self.__visited = True
        self._parent_.traverse_parents(visit, *args, **kwargs)
        self._parent_.traverse(visit, *args, **kwargs)
        self.__visited = False
(self, visit, *args, **kwargs)
722,395
paramz.core.updateable
trigger_update
Update the model from the current state. Make sure that updates are on, otherwise this method will do nothing :param bool trigger_parent: Whether to trigger the parent, after self has updated
def trigger_update(self, trigger_parent=True):
    """
    Update the model from the current state. Make sure that updates are on,
    otherwise this method will do nothing.

    :param bool trigger_parent: Whether to trigger the parent, after self has updated
    """
    if not self.update_model() or (hasattr(self, "_in_init_") and self._in_init_):
        #print "Warning: updates are off, updating the model will do nothing"
        return
    self._trigger_params_changed(trigger_parent)
(self, trigger_parent=True)
722,396
paramz.core.constrainable
unconstrain
:param transforms: The transformations to unconstrain from. Remove all :py:class:`paramz.transformations.Transformation` transformations of this parameter object.
def unconstrain(self, *transforms):
    """
    :param transforms: The transformations to unconstrain from.

    Remove all :py:class:`paramz.transformations.Transformation`
    transformations of this parameter object.
    """
    return self._remove_from_index_operations(self.constraints, transforms)
(self, *transforms)
722,397
paramz.core.constrainable
unconstrain_bounded
:param lower, upper: the limits to unbound this parameter from Remove the (lower, upper) bound constraint from this parameter.
def unconstrain_bounded(self, lower, upper):
    """
    :param lower, upper: the limits to unbound this parameter from

    Remove the (lower, upper) bound constraint from this parameter.
    """
    self.unconstrain(Logistic(lower, upper))
(self, lower, upper)
722,398
paramz.core.constrainable
unconstrain_fixed
This parameter will no longer be fixed. If there was a constraint on this parameter when fixing it, it will be constrained with that previous constraint.
def unconstrain_fixed(self):
    """
    This parameter will no longer be fixed.

    If there was a constraint on this parameter when fixing it,
    it will be constrained with that previous constraint.
    """
    unconstrained = self.unconstrain(__fixed__)
    self._highest_parent_._set_unfixed(self, unconstrained)
    #if self._default_constraint_ is not None:
    #    return self.constrain(self._default_constraint_)
    return unconstrained
(self)
722,399
paramz.core.constrainable
unconstrain_negative
Remove negative constraint of this parameter.
def unconstrain_negative(self):
    """
    Remove negative constraint of this parameter.
    """
    self.unconstrain(NegativeLogexp())
(self)
722,400
paramz.core.constrainable
unconstrain_positive
Remove positive constraint of this parameter.
def unconstrain_positive(self):
    """
    Remove positive constraint of this parameter.
    """
    self.unconstrain(Logexp())
(self)
722,402
paramz.parameterized
unlink_parameter
:param param: param object to remove from being a parameter of this parameterized object.
def unlink_parameter(self, param):
    """
    :param param: param object to remove from being a parameter of this parameterized object.
    """
    if not param in self.parameters:
        try:
            raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
        except AttributeError:
            raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
    start = sum([p.size for p in self.parameters[:param._parent_index_]])
    self.size -= param.size
    del self.parameters[param._parent_index_]
    self._remove_parameter_name(param)
    param._disconnect_parent()
    param.remove_observer(self, self._pass_through_notify_observers)
    for name, iop in self._index_operations.items():
        iop.shift_left(start, param.size)
    self._connect_parameters()
    self._notify_parent_change()
    parent = self._parent_
    while parent is not None:
        parent.size -= param.size
        parent = parent._parent_
    self._highest_parent_._connect_parameters()
    self._highest_parent_._connect_fixes()
    self._highest_parent_._notify_parent_change()
(self, param)
722,403
paramz.core.updateable
update_model
Get or set, whether automatic updates are performed. When updates are off, the model might be in a non-working state. To make the model work turn updates on again. :param bool|None updates: bool: whether to do updates None: get the current update state
def update_model(self, updates=None):
    """
    Get or set, whether automatic updates are performed. When updates are
    off, the model might be in a non-working state. To make the model work
    turn updates on again.

    :param bool|None updates:
        bool: whether to do updates
        None: get the current update state
    """
    if updates is None:
        return self._update_on
    assert isinstance(updates, bool), "updates are either on (True) or off (False)"
    p = getattr(self, '_highest_parent_', None)
    def turn_updates(s):
        s._update_on = updates
    p.traverse(turn_updates)
    self.trigger_update()
(self, updates=None)
722,404
paramz.core.updateable
update_toggle
null
def update_toggle(self):
    self.update_model(not self.update_model())
(self)
722,405
paramz.core.observable_array
ObsAr
An ndarray which reports changes to its observers. .. warning:: ObsAr tries to not ever give back an observable array itself. Thus, if you want to preserve an ObsAr you need to work in memory. Let `a` be an ObsAr and you want to add a random number `r` to it. You need to make sure it stays an ObsAr by working in memory (see numpy for details): .. code-block:: python a[:] += r The observers can add themselves with a callable, which will be called every time this array changes. The callable takes exactly one argument, which is this array itself.
class ObsAr(np.ndarray, Pickleable, Observable):
    """
    An ndarray which reports changes to its observers.

    .. warning::

        ObsAr tries to not ever give back an observable array itself. Thus,
        if you want to preserve an ObsAr you need to work in memory. Let
        `a` be an ObsAr and you want to add a random number `r` to it. You
        need to make sure it stays an ObsAr by working in memory (see numpy
        for details):

        .. code-block:: python

            a[:] += r

    The observers can add themselves with a callable, which will be called
    every time this array changes. The callable takes exactly one argument,
    which is this array itself.
    """
    __array_priority__ = -1  # Never give back ObsAr

    def __new__(cls, input_array, *a, **kw):
        # always make a copy of input parameters, as we need it to be in C order:
        if not isinstance(input_array, ObsAr):
            try:
                # try to cast ints to floats
                obj = np.atleast_1d(np.require(input_array, dtype=np.float_, requirements=['W', 'C'])).view(cls)
            except ValueError:
                # do we have other dtypes in the array?
                obj = np.atleast_1d(np.require(input_array, requirements=['W', 'C'])).view(cls)
        else:
            obj = input_array
        super(ObsAr, obj).__init__(*a, **kw)
        return obj

    def __array_finalize__(self, obj):
        # see InfoArray.__array_finalize__ for comments
        if obj is None:
            return
        self.observers = getattr(obj, 'observers', None)
        self._update_on = getattr(obj, '_update_on', None)

    def __array_wrap__(self, out_arr, context=None):
        #np.ndarray.__array_wrap__(self, out_arr, context)
        #return out_arr
        return out_arr.view(np.ndarray)

    def _setup_observers(self):
        # do not setup anything, as observable arrays do not have default observers
        pass

    @property
    def values(self):
        """
        Return the ObsAr underlying array as a standard ndarray.
        """
        return self.view(np.ndarray)

    def copy(self):
        """
        Make a copy. This means, we delete all observers and return a copy
        of this array. It will still be an ObsAr!
        """
        from .lists_and_dicts import ObserverList
        memo = {}
        memo[id(self)] = self
        memo[id(self.observers)] = ObserverList()
        return self.__deepcopy__(memo)

    def __deepcopy__(self, memo):
        s = self.__new__(self.__class__, input_array=self.view(np.ndarray).copy())
        memo[id(self)] = s
        import copy
        Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
        return s

    def __reduce__(self):
        func, args, state = super(ObsAr, self).__reduce__()
        return func, args, (state, Pickleable.__getstate__(self))

    def __setstate__(self, state):
        np.ndarray.__setstate__(self, state[0])
        Pickleable.__setstate__(self, state[1])

    def __setitem__(self, s, val):
        super(ObsAr, self).__setitem__(s, val)
        self.notify_observers()

    def __getslice__(self, start, stop):  #pragma: no cover
        return self.__getitem__(slice(start, stop))

    def __setslice__(self, start, stop, val):  #pragma: no cover
        return self.__setitem__(slice(start, stop), val)

    # Each in-place operator delegates to np.ndarray and then notifies observers:
    def __ilshift__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__ilshift__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __irshift__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__irshift__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ixor__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__ixor__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ipow__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__ipow__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ifloordiv__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __isub__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__isub__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __ior__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__ior__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __itruediv__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__itruediv__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __idiv__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__idiv__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __iand__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__iand__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __imod__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__imod__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __iadd__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__iadd__(self, *args, **kwargs)
        self.notify_observers()
        return r

    def __imul__(self, *args, **kwargs):  #pragma: no cover
        r = np.ndarray.__imul__(self, *args, **kwargs)
        self.notify_observers()
        return r
(input_array, *a, **kw)
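A sketch of ObsAr's reporting behaviour: in-place assignment keeps the ObsAr and fires observers, while ordinary arithmetic intentionally returns a plain ndarray (observer and variable names illustrative):

    import numpy as np
    from paramz.core.observable_array import ObsAr

    a = ObsAr(np.arange(3.))
    owner = object()
    a.add_observer(owner, lambda arr, which: print('updated:', arr))
    a[:] += 1.0      # stays an ObsAr, notify_observers fires
    b = a + 1.0      # plain np.ndarray via __array_wrap__, no notification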
722,406
paramz.core.observable_array
__array_finalize__
null
def __array_finalize__(self, obj):
    # see InfoArray.__array_finalize__ for comments
    if obj is None:
        return
    self.observers = getattr(obj, 'observers', None)
    self._update_on = getattr(obj, '_update_on', None)
(self, obj)
722,407
paramz.core.observable_array
__array_wrap__
null
def __array_wrap__(self, out_arr, context=None):
    #np.ndarray.__array_wrap__(self, out_arr, context)
    #return out_arr
    return out_arr.view(np.ndarray)
(self, out_arr, context=None)
722,408
paramz.core.observable_array
__deepcopy__
null
def __deepcopy__(self, memo):
    s = self.__new__(self.__class__, input_array=self.view(np.ndarray).copy())
    memo[id(self)] = s
    import copy
    Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
    return s
(self, memo)
722,409
paramz.core.observable_array
__getslice__
null
def __getslice__(self, start, stop):  #pragma: no cover
    return self.__getitem__(slice(start, stop))
(self, start, stop)
722,411
paramz.core.observable_array
__iadd__
null
def __iadd__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__iadd__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,412
paramz.core.observable_array
__iand__
null
def __iand__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__iand__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,413
paramz.core.observable_array
__idiv__
null
def __idiv__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__idiv__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,414
paramz.core.observable_array
__ifloordiv__
null
def __ifloordiv__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,415
paramz.core.observable_array
__ilshift__
null
def __ilshift__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__ilshift__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,416
paramz.core.observable_array
__imod__
null
def __imod__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__imod__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,417
paramz.core.observable_array
__imul__
null
def __imul__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__imul__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,418
paramz.core.pickleable
__init__
null
def __init__(self, *a, **kw):
    super(Pickleable, self).__init__()
(self, *a, **kw)
722,419
paramz.core.observable_array
__ior__
null
def __ior__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__ior__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,420
paramz.core.observable_array
__ipow__
null
def __ipow__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__ipow__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,421
paramz.core.observable_array
__irshift__
null
def __irshift__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__irshift__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,422
paramz.core.observable_array
__isub__
null
def __isub__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__isub__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,423
paramz.core.observable_array
__itruediv__
null
def __itruediv__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__itruediv__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,424
paramz.core.observable_array
__ixor__
null
def __ixor__(self, *args, **kwargs):  #pragma: no cover
    r = np.ndarray.__ixor__(self, *args, **kwargs)
    self.notify_observers()
    return r
(self, *args, **kwargs)
722,425
paramz.core.observable_array
__new__
null
def __new__(cls, input_array, *a, **kw):
    # always make a copy of input parameters, as we need it to be in C order:
    if not isinstance(input_array, ObsAr):
        try:
            # try to cast ints to floats
            obj = np.atleast_1d(np.require(input_array, dtype=np.float_, requirements=['W', 'C'])).view(cls)
        except ValueError:
            # do we have other dtypes in the array?
            obj = np.atleast_1d(np.require(input_array, requirements=['W', 'C'])).view(cls)
    else:
        obj = input_array
    super(ObsAr, obj).__init__(*a, **kw)
    return obj
(cls, input_array, *a, **kw)
722,426
paramz.core.observable_array
__reduce__
null
def __reduce__(self):
    func, args, state = super(ObsAr, self).__reduce__()
    return func, args, (state, Pickleable.__getstate__(self))
(self)
722,427
paramz.core.observable_array
__setitem__
null
def __setitem__(self, s, val):
    super(ObsAr, self).__setitem__(s, val)
    self.notify_observers()
(self, s, val)
722,428
paramz.core.observable_array
__setslice__
null
def __setslice__(self, start, stop, val):  #pragma: no cover
    return self.__setitem__(slice(start, stop), val)
(self, start, stop, val)
722,429
paramz.core.observable_array
__setstate__
null
def __setstate__(self, state):
    np.ndarray.__setstate__(self, state[0])
    Pickleable.__setstate__(self, state[1])
(self, state)
722,430
paramz.core.observable_array
_setup_observers
null
def _setup_observers(self):
    # do not setup anything, as observable arrays do not have default observers
    pass
(self)
722,433
paramz.core.observable_array
copy
Make a copy. This means, we delete all observers and return a copy of this array. It will still be an ObsAr!
def copy(self):
    """
    Make a copy. This means, we delete all observers and return a copy
    of this array. It will still be an ObsAr!
    """
    from .lists_and_dicts import ObserverList
    memo = {}
    memo[id(self)] = self
    memo[id(self.observers)] = ObserverList()
    return self.__deepcopy__(memo)
(self)
722,438
paramz.param
Param
Parameter object for GPy models. :param str name: name of the parameter to be printed :param input_array: array which this parameter handles :type input_array: np.ndarray :param default_constraint: The default constraint for this parameter :type default_constraint: You can add/remove constraints by calling constrain on the parameter itself, e.g: - self[:,1].constrain_positive() - self[0].tie_to(other) - self.untie() - self[:3,:].unconstrain() - self[1].fix() Fixing parameters will fix them to the value they are right now. If you change the fixed value, it will be fixed to the new value! Important Notes: The array given into this, will be used as the Param object. That is, the memory of the numpy array given will be the memory of this object. If you want to make a new Param object you need to copy the input array! Multilevel indexing (e.g. self[:2][1:]) is not supported and might lead to unexpected behaviour. Try to index in one go, using boolean indexing or the numpy builtin np.index function. See :py:class:`GPy.core.parameterized.Parameterized` for more details on constraining etc.
class Param(Parameterizable, ObsAr):
    """
    Parameter object for GPy models.

    :param str name: name of the parameter to be printed
    :param input_array: array which this parameter handles
    :type input_array: np.ndarray
    :param default_constraint: The default constraint for this parameter
    :type default_constraint:

    You can add/remove constraints by calling constrain on the parameter itself, e.g:

        - self[:,1].constrain_positive()
        - self[0].tie_to(other)
        - self.untie()
        - self[:3,:].unconstrain()
        - self[1].fix()

    Fixing parameters will fix them to the value they are right now. If you
    change the fixed value, it will be fixed to the new value!

    Important Notes:

    The array given into this, will be used as the Param object. That is,
    the memory of the numpy array given will be the memory of this object.
    If you want to make a new Param object you need to copy the input array!

    Multilevel indexing (e.g. self[:2][1:]) is not supported and might lead
    to unexpected behaviour. Try to index in one go, using boolean indexing
    or the numpy builtin np.index function.

    See :py:class:`GPy.core.parameterized.Parameterized` for more details
    on constraining etc.
    """
    __array_priority__ = -1  # Never give back Param
    _fixes_ = None
    parameters = []

    def __new__(cls, name, input_array, default_constraint=None):
        obj = super(Param, cls).__new__(cls, input_array=input_array)
        obj._current_slice_ = (slice(obj.shape[0]),)
        obj._realshape_ = obj.shape
        obj._realsize_ = obj.size
        obj._realndim_ = obj.ndim
        obj._original_ = obj
        return obj

    def __init__(self, name, input_array, default_constraint=None, *a, **kw):
        self._in_init_ = True
        super(Param, self).__init__(name=name, default_constraint=default_constraint, *a, **kw)
        self._in_init_ = False

    def __array_finalize__(self, obj):
        # see InfoArray.__array_finalize__ for comments
        if obj is None:
            return
        super(Param, self).__array_finalize__(obj)
        self._parent_ = getattr(obj, '_parent_', None)
        self._parent_index_ = getattr(obj, '_parent_index_', None)
        self._default_constraint_ = getattr(obj, '_default_constraint_', None)
        self._current_slice_ = getattr(obj, '_current_slice_', None)
        self._realshape_ = getattr(obj, '_realshape_', None)
        self._realsize_ = getattr(obj, '_realsize_', None)
        self._realndim_ = getattr(obj, '_realndim_', None)
        self._original_ = getattr(obj, '_original_', None)
        self._name = getattr(obj, '_name', None)
        self._gradient_array_ = getattr(obj, '_gradient_array_', None)
        self._update_on = getattr(obj, '_update_on', None)
        try:
            self._index_operations = obj._index_operations
        except AttributeError:
            pass
        #self._index_operations = getattr(obj, '_index_operations', None)
        #self.constraints = getattr(obj, 'constraints', None)
        #self.priors = getattr(obj, 'priors', None)

    @property
    def param_array(self):
        """
        As we are a leaf, this just returns self
        """
        return self

    @property
    def values(self):
        """
        Return self as numpy array view
        """
        return self.view(np.ndarray)

    @property
    def gradient(self):
        """
        Return a view on the gradient, which is in the same shape as this
        parameter is. Note: this is not the real gradient array, it is
        just a view on it.

        To work on the real gradient array use: self.full_gradient
        """
        if getattr(self, '_gradient_array_', None) is None:
            self._gradient_array_ = np.empty(self._realshape_, dtype=np.float64)
        return self._gradient_array_  #[self._current_slice_]

    @gradient.setter
    def gradient(self, val):
        self.gradient[:] = val

    #===========================================================================
    # Array operations -> done
    #===========================================================================
    def __getitem__(self, s, *args, **kwargs):
        if not isinstance(s, tuple):
            s = (s,)
        #if not reduce(lambda a, b: a or np.any(b is Ellipsis), s, False) and len(s) <= self.ndim:
        #    s += (Ellipsis,)
        new_arr = super(Param, self).__getitem__(s, *args, **kwargs)
        try:
            new_arr._current_slice_ = s
            new_arr._gradient_array_ = self.gradient[s]
            new_arr._original_ = self._original_
        except AttributeError:
            pass  # returning 0d array or float, double etc
        return new_arr

    def _raveled_index(self, slice_index=None):
        # return an index array on the raveled array, which is formed by the
        # current_slice of this object
        extended_realshape = np.cumprod((1,) + self._realshape_[:0:-1])[::-1]
        ind = self._indices(slice_index)
        if ind.ndim < 2:
            ind = ind[:, None]
        return np.asarray(np.apply_along_axis(lambda x: np.sum(extended_realshape * x), 1, ind), dtype=int)

    def _raveled_index_for(self, obj):
        return self._raveled_index()

    #===========================================================================
    # Constrainable
    #===========================================================================
    def _ensure_fixes(self):
        if (not hasattr(self, "_fixes_")) or (self._fixes_ is None) or (self._fixes_.size != self._realsize_):
            self._fixes_ = np.ones(self._realsize_, dtype=bool)

    #===========================================================================
    # Convenience
    #===========================================================================
    @property
    def is_fixed(self):
        from paramz.transformations import __fixed__
        return self.constraints[__fixed__].size == self.size

    def _get_original(self, param):
        return self._original_

    #===========================================================================
    # Pickling and copying
    #===========================================================================
    def copy(self):
        return Parameterizable.copy(self, which=self)

    def __deepcopy__(self, memo):
        s = self.__new__(self.__class__, name=self.name, input_array=self.view(np.ndarray).copy())
        memo[id(self)] = s
        import copy
        Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
        return s

    def _setup_observers(self):
        """
        Setup the default observers

        1: pass through to parent, if present
        """
        if self.has_parent():
            self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)

    #===========================================================================
    # Printing -> done
    #===========================================================================
    @property
    def _description_str(self):
        if self.size <= 1:
            return [str(self.view(np.ndarray)[0])]
        else:
            return [str(self.shape)]

    def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, **kw):
        # this is just overwriting the parameterized calls to
        # parameter names, in order to maintain OOP
        if adjust_for_printing:
            return [adjust_name_for_printing(self.name)]
        return [self.name]

    @property
    def flattened_parameters(self):
        return [self]

    @property
    def num_params(self):
        return 0

    def get_property_string(self, propname):
        prop = self._index_operations[propname]
        return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", prop.items()))]

    def __repr__(self, *args, **kwargs):
        name = "\033[1m{x:s}\033[0;0m:\n".format(x=self.hierarchy_name())
        return name + super(Param, self).__repr__(*args, **kwargs)

    def _indices(self, slice_index=None):
        # get a int-array containing all indices in the first axis.
        if slice_index is None:
            slice_index = self._current_slice_
        #try:
        indices = np.indices(self._realshape_, dtype=int)
        indices = indices[(slice(None),) + slice_index]
        indices = np.rollaxis(indices, 0, indices.ndim).reshape(-1, self._realndim_)
        #print indices_
        #if not np.all(indices==indices__):
        #    import ipdb; ipdb.set_trace()
        #except:
        #    indices = np.indices(self._realshape_, dtype=int)
        #    indices = indices[(slice(None),)+slice_index]
        #    indices = np.rollaxis(indices, 0, indices.ndim)
        return indices

    def _max_len_names(self, gen, header):
        return reduce(lambda a, b: max(a, len(" ".join(map(str, b)))), gen, len(header))

    def _max_len_values(self):
        return reduce(lambda a, b: max(a, len("{x:=.{0}g}".format(__precision__, x=b))), self.flat, len(self.hierarchy_name()))

    def _max_len_index(self, ind):
        return reduce(lambda a, b: max(a, len(str(b))), ind, len(__index_name__))

    def _repr_html_(self, indices=None, iops=None, lx=None, li=None, lls=None):
        """Representation of the parameter in html for notebook display."""
        filter_ = self._current_slice_
        vals = self.flat
        if indices is None:
            indices = self._indices(filter_)
        if iops is None:
            ravi = self._raveled_index(filter_)
            iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
        if lls is None:
            lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
        header_format = """
<tr>
  <th><b>{i}</b></th>
  <th><b>{x}</b></th>
  <th><b>{iops}</b></th>
</tr>"""
        header = header_format.format(x=self.hierarchy_name(), i=__index_name__, iops="</b></th><th><b>".join(list(iops.keys())))  # nice header for printing
        to_print = ["""<style type="text/css">
.tg {padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""]
        to_print.append('<table class="tg">')
        to_print.append(header)
        format_spec = self._format_spec(indices, iops, lx, li, lls, False)
        format_spec[:2] = ["<tr><td class=tg-left>{i}</td>".format(i=format_spec[0]), "<td class=tg-right>{i}</td>".format(i=format_spec[1])]
        for i in range(2, len(format_spec)):
            format_spec[i] = '<td class=tg-left>{c}</td>'.format(c=format_spec[i])
        format_spec = "".join(format_spec) + '</tr>'
        for i in range(self.size):
            to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
        return '\n'.join(to_print)

    def _format_spec(self, indices, iops, lx=None, li=None, lls=None, VT100=True):
        if li is None:
            li = self._max_len_index(indices)
        if lx is None:
            lx = self._max_len_values()
        if lls is None:
            lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
        if VT100:
            format_spec = [" \033[1m{{index!s:<{0}}}\033[0;0m".format(li), "{{value!s:>{0}}}".format(lx)]
        else:
            format_spec = [" {{index!s:<{0}}}".format(li), "{{value!s:>{0}}}".format(lx)]
        for opname, l in zip(iops, lls):
            f = '{{{1}!s:^{0}}}'.format(l, opname)
            format_spec.append(f)
        return format_spec

    def __str__(self, indices=None, iops=None, lx=None, li=None, lls=None, only_name=False, VT100=True):
        filter_ = self._current_slice_
        vals = self.flat
        if indices is None:
            indices = self._indices(filter_)
        if iops is None:
            ravi = self._raveled_index(filter_)
            iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
        if lls is None:
            lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
        format_spec = ' | '.join(self._format_spec(indices, iops, lx, li, lls, VT100))
        to_print = []
        if not only_name:
            to_print.append(format_spec.format(index=__index_name__, value=self.hierarchy_name(), **dict((name, name) for name in iops)))
        else:
            to_print.append(format_spec.format(index='-' * li, value=self.hierarchy_name(), **dict((name, '-' * l) for name, l in zip(iops, lls))))
        for i in range(self.size):
            to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
        return '\n'.join(to_print)

    def build_pydot(self, G):  # pragma: no cover
        """
        Build a pydot representation of this model. This needs pydot installed.

        Example Usage:

            np.random.seed(1000)
            X = np.random.normal(0,1,(20,2))
            beta = np.random.uniform(0,1,(2,1))
            Y = X.dot(beta)
            m = RidgeRegression(X, Y)
            G = m.build_pydot()
            G.write_png('example_hierarchy_layout.png')

        The output looks like:

        .. image:: example_hierarchy_layout.png

        Rectangles are parameterized objects (nodes or leafs of hierarchy).
        Trapezoids are param objects, which represent the arrays for parameters.
        Black arrows show parameter hierarchical dependence. The arrow points
        from parents towards children.
        Orange arrows show the observer pattern. Self references (here) are
        the references to the call to parameters changed and references
        upwards are the references to tell the parents they need to update.
        """
        import pydot
        node = pydot.Node(id(self), shape='trapezium', label=self.name)  #, fontcolor='white', color='white')
        G.add_node(node)
        for _, o, _ in self.observers:
            label = o.name if hasattr(o, 'name') else str(o)
            observed_node = pydot.Node(id(o), label=label)
            if str(id(o)) not in G.obj_dict['nodes']:  # pragma: no cover
                G.add_node(observed_node)
            edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
            G.add_edge(edge)
        return node
(name, input_array, default_constraint=None)
722,439
paramz.param
__array_finalize__
null
def __array_finalize__(self, obj):
        # see InfoArray.__array_finalize__ for comments
        if obj is None:
            return
        super(Param, self).__array_finalize__(obj)
        self._parent_ = getattr(obj, '_parent_', None)
        self._parent_index_ = getattr(obj, '_parent_index_', None)
        self._default_constraint_ = getattr(obj, '_default_constraint_', None)
        self._current_slice_ = getattr(obj, '_current_slice_', None)
        self._realshape_ = getattr(obj, '_realshape_', None)
        self._realsize_ = getattr(obj, '_realsize_', None)
        self._realndim_ = getattr(obj, '_realndim_', None)
        self._original_ = getattr(obj, '_original_', None)
        self._name = getattr(obj, '_name', None)
        self._gradient_array_ = getattr(obj, '_gradient_array_', None)
        self._update_on = getattr(obj, '_update_on', None)
        try:
            self._index_operations = obj._index_operations
        except AttributeError:
            pass
(self, obj)
722,441
paramz.param
__deepcopy__
null
def __deepcopy__(self, memo):
        s = self.__new__(self.__class__, name=self.name, input_array=self.view(np.ndarray).copy())
        memo[id(self)] = s
        import copy
        Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
        return s
(self, memo)
722,442
paramz.param
__getitem__
null
def __getitem__(self, s, *args, **kwargs):
        if not isinstance(s, tuple):
            s = (s,)
        new_arr = super(Param, self).__getitem__(s, *args, **kwargs)
        try:
            new_arr._current_slice_ = s
            new_arr._gradient_array_ = self.gradient[s]
            new_arr._original_ = self._original_
        except AttributeError:
            pass  # returning 0d array or float, double etc
        return new_arr
(self, s, *args, **kwargs)
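A minimal usage sketch of the slicing semantics above; 'variance' is a made-up parameter name, and the behavior shown is an assumption based on the code: the slice is a view, so writes go through to the original array, and the view carries the matching slice of the gradient.

# Hedged sketch of Param slicing; 'variance' is a hypothetical name.
import numpy as np
from paramz import Param

p = Param('variance', np.arange(6, dtype=float))
v = p[2:4]               # Param.__getitem__ returns a view, not a copy
v[:] = -1.0              # writes through to p
print(np.asarray(p))     # [ 0.  1. -1. -1.  4.  5.]
print(v.gradient.shape)  # (2,): the same slice of the full gradient array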
722,452
paramz.param
__init__
null
def __init__(self, name, input_array, default_constraint=None, *a, **kw):
        self._in_init_ = True
        super(Param, self).__init__(name=name, default_constraint=default_constraint, *a, **kw)
        self._in_init_ = False
(self, name, input_array, default_constraint=None, *a, **kw)
722,459
paramz.param
__new__
null
def __new__(cls, name, input_array, default_constraint=None):
        obj = super(Param, cls).__new__(cls, input_array=input_array)
        obj._current_slice_ = (slice(obj.shape[0]),)
        obj._realshape_ = obj.shape
        obj._realsize_ = obj.size
        obj._realndim_ = obj.ndim
        obj._original_ = obj
        return obj
(cls, name, input_array, default_constraint=None)
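A short sketch of the bookkeeping __new__ establishes: the real shape, size and ndim are recorded once at creation, and every view keeps a reference back to the original. The attribute names are private internals taken directly from the code above; 'weights' is a hypothetical parameter name.

# Sketch of the _real* bookkeeping; attributes are internal.
import numpy as np
from paramz import Param

p = Param('weights', np.ones((3, 2)))
print(p._realshape_, p._realsize_, p._realndim_)  # (3, 2) 6 2
v = p[1:]
print(v._original_ is p)                          # True: views point back to the original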
722,461
paramz.param
__repr__
null
def __repr__(self, *args, **kwargs):
        name = "\033[1m{x:s}\033[0;0m:\n".format(x=self.hierarchy_name())
        return name + super(Param, self).__repr__(*args, **kwargs)
(self, *args, **kwargs)
722,464
paramz.core.parameter_core
__setstate__
null
def __setstate__(self, state):
        super(Parameterizable, self).__setstate__(state)
        self.logger = logging.getLogger(self.__class__.__name__)
        return self
(self, state)
722,465
paramz.param
__str__
null
def __str__(self, indices=None, iops=None, lx=None, li=None, lls=None, only_name=False, VT100=True):
        filter_ = self._current_slice_
        vals = self.flat
        if indices is None:
            indices = self._indices(filter_)
        if iops is None:
            ravi = self._raveled_index(filter_)
            iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
        if lls is None:
            lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
        format_spec = ' | '.join(self._format_spec(indices, iops, lx, li, lls, VT100))
        to_print = []
        if not only_name:
            to_print.append(format_spec.format(index=__index_name__, value=self.hierarchy_name(), **dict((name, name) for name in iops)))
        else:
            to_print.append(format_spec.format(index='-' * li, value=self.hierarchy_name(), **dict((name, '-' * l) for name, l in zip(iops, lls))))
        for i in range(self.size):
            to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
        return '\n'.join(to_print)
(self, indices=None, iops=None, lx=None, li=None, lls=None, only_name=False, VT100=True)
722,469
paramz.core.gradcheckable
_checkgrad
Perform the checkgrad on the model. TODO: this can be done more efficiently, when doing it inside here
def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
        """
        Perform the checkgrad on the model.
        TODO: this can be done more efficiently, when doing it inside here
        """
        raise HierarchyError("This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!")
(self, param, verbose=0, step=1e-06, tolerance=0.001, df_tolerance=1e-12)
722,471
paramz.core.parameter_core
_connect_parameters
null
def _connect_parameters(self):
        pass
(self)
722,473
paramz.param
_ensure_fixes
null
def _ensure_fixes(self):
        if (not hasattr(self, "_fixes_")) or (self._fixes_ is None) or (self._fixes_.size != self._realsize_):
            self._fixes_ = np.ones(self._realsize_, dtype=bool)
(self)
722,474
paramz.param
_format_spec
null
def _format_spec(self, indices, iops, lx=None, li=None, lls=None, VT100=True):
        if li is None:
            li = self._max_len_index(indices)
        if lx is None:
            lx = self._max_len_values()
        if lls is None:
            lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
        if VT100:
            format_spec = [" \033[1m{{index!s:<{0}}}\033[0;0m".format(li), "{{value!s:>{0}}}".format(lx)]
        else:
            format_spec = [" {{index!s:<{0}}}".format(li), "{{value!s:>{0}}}".format(lx)]
        for opname, l in zip(iops, lls):
            f = '{{{1}!s:^{0}}}'.format(l, opname)
            format_spec.append(f)
        return format_spec
(self, indices, iops, lx=None, li=None, lls=None, VT100=True)
722,475
paramz.param
_get_original
null
def _get_original(self, param):
        return self._original_
(self, param)
722,477
paramz.param
_indices
null
def _indices(self, slice_index=None):
        # Return an int array containing all indices covered by the current slice.
        if slice_index is None:
            slice_index = self._current_slice_
        indices = np.indices(self._realshape_, dtype=int)
        indices = indices[(slice(None),) + slice_index]
        indices = np.rollaxis(indices, 0, indices.ndim).reshape(-1, self._realndim_)
        return indices
(self, slice_index=None)
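A plain-numpy sketch of what _indices computes: the n-dimensional index tuples covered by the current slice. The shape and slice below are made up for illustration; the three array operations mirror the method body exactly.

# What _indices computes for a parameter of real shape (3, 2)
# restricted to the row slice [1:].
import numpy as np

realshape = (3, 2)
slice_index = (slice(1, None),)
idx = np.indices(realshape, dtype=int)           # shape (2, 3, 2): one index grid per axis
idx = idx[(slice(None),) + slice_index]          # apply the parameter's current slice
idx = np.rollaxis(idx, 0, idx.ndim).reshape(-1, len(realshape))
print(idx)   # [[1 0] [1 1] [2 0] [2 1]]: the (row, col) index of each sliced entry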
722,478
paramz.param
_max_len_index
null
def _max_len_index(self, ind):
        return reduce(lambda a, b: max(a, len(str(b))), ind, len(__index_name__))
(self, ind)
722,479
paramz.param
_max_len_names
null
def _max_len_names(self, gen, header):
        return reduce(lambda a, b: max(a, len(" ".join(map(str, b)))), gen, len(header))
(self, gen, header)
722,480
paramz.param
_max_len_values
null
def _max_len_values(self):
        return reduce(lambda a, b: max(a, len("{x:=.{0}g}".format(__precision__, x=b))), self.flat, len(self.hierarchy_name()))
(self)
722,488
paramz.param
_raveled_index
null
def _raveled_index(self, slice_index=None):
        # Return an index array on the raveled array, which is formed by the
        # current_slice of this object.
        extended_realshape = np.cumprod((1,) + self._realshape_[:0:-1])[::-1]
        ind = self._indices(slice_index)
        if ind.ndim < 2:
            ind = ind[:, None]
        return np.asarray(np.apply_along_axis(lambda x: np.sum(extended_realshape * x), 1, ind), dtype=int)
(self, slice_index=None)
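The raveling arithmetic above is a stride computation: the cumulative product of trailing dimensions, reversed, gives the element stride per axis, and the dot product with each index tuple yields the flat position in the C-order raveled array. A sketch, continuing the (3, 2) example from _indices:

# Raveled-index arithmetic for real shape (3, 2).
import numpy as np

realshape = (3, 2)
extended_realshape = np.cumprod((1,) + realshape[:0:-1])[::-1]   # [2 1]: element strides
ind = np.array([[1, 0], [1, 1], [2, 0], [2, 1]])                 # output of _indices above
flat = ind.dot(extended_realshape)
print(flat)   # [2 3 4 5]: positions in the C-order raveled array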
722,489
paramz.param
_raveled_index_for
null
def _raveled_index_for(self, obj): return self._raveled_index()
(self, obj)
722,493
paramz.param
_repr_html_
Representation of the parameter in html for notebook display.
def _repr_html_(self, indices=None, iops=None, lx=None, li=None, lls=None):
        """Representation of the parameter in html for notebook display."""
        filter_ = self._current_slice_
        vals = self.flat
        if indices is None:
            indices = self._indices(filter_)
        if iops is None:
            ravi = self._raveled_index(filter_)
            iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
        if lls is None:
            lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
        header_format = """
<tr>
  <th><b>{i}</b></th>
  <th><b>{x}</b></th>
  <th><b>{iops}</b></th>
</tr>"""
        header = header_format.format(x=self.hierarchy_name(), i=__index_name__, iops="</b></th><th><b>".join(list(iops.keys())))  # nice header for printing
        to_print = ["""<style type="text/css">
.tg {padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""]
        to_print.append('<table class="tg">')
        to_print.append(header)
        format_spec = self._format_spec(indices, iops, lx, li, lls, False)
        format_spec[:2] = ["<tr><td class=tg-left>{i}</td>".format(i=format_spec[0]), "<td class=tg-right>{i}</td>".format(i=format_spec[1])]
        for i in range(2, len(format_spec)):
            format_spec[i] = '<td class=tg-left>{c}</td>'.format(c=format_spec[i])
        format_spec = "".join(format_spec) + '</tr>'
        for i in range(self.size):
            to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
        return '\n'.join(to_print)
(self, indices=None, iops=None, lx=None, li=None, lls=None)
722,496
paramz.param
_setup_observers
Setup the default observers

1: pass through to parent, if present
def _setup_observers(self):
        """
        Setup the default observers

        1: pass through to parent, if present
        """
        if self.has_parent():
            self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)
(self)
722,503
paramz.param
build_pydot
Build a pydot representation of this model. This needs pydot installed.

Example usage::

    np.random.seed(1000)
    X = np.random.normal(0, 1, (20, 2))
    beta = np.random.uniform(0, 1, (2, 1))
    Y = X.dot(beta)
    m = RidgeRegression(X, Y)
    G = m.build_pydot()
    G.write_png('example_hierarchy_layout.png')

The output looks like:

.. image:: example_hierarchy_layout.png

Rectangles are parameterized objects (nodes or leaves of the hierarchy).

Trapezoids are param objects, which represent the arrays for parameters.

Black arrows show parameter hierarchical dependence. The arrow points from parents towards children.

Orange arrows show the observer pattern. Self references (here) are the references to the call to parameters changed and references upwards are the references to tell the parents they need to update.
def build_pydot(self, G):  # pragma: no cover
        """
        Build a pydot representation of this model. This needs pydot installed.

        Example usage::

            np.random.seed(1000)
            X = np.random.normal(0, 1, (20, 2))
            beta = np.random.uniform(0, 1, (2, 1))
            Y = X.dot(beta)
            m = RidgeRegression(X, Y)
            G = m.build_pydot()
            G.write_png('example_hierarchy_layout.png')

        The output looks like:

        .. image:: example_hierarchy_layout.png

        Rectangles are parameterized objects (nodes or leaves of the hierarchy).

        Trapezoids are param objects, which represent the arrays for parameters.

        Black arrows show parameter hierarchical dependence. The arrow points
        from parents towards children.

        Orange arrows show the observer pattern. Self references (here) are
        the references to the call to parameters changed and references upwards
        are the references to tell the parents they need to update.
        """
        import pydot
        node = pydot.Node(id(self), shape='trapezium', label=self.name)
        G.add_node(node)
        for _, o, _ in self.observers:
            label = o.name if hasattr(o, 'name') else str(o)
            observed_node = pydot.Node(id(o), label=label)
            if str(id(o)) not in G.obj_dict['nodes']:  # pragma: no cover
                G.add_node(observed_node)
            edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
            G.add_edge(edge)
        return node
(self, G)
722,511
paramz.param
copy
null
def copy(self):
        return Parameterizable.copy(self, which=self)
(self)
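A short sketch of the copy semantics: copy() yields an independent parameter detached from any hierarchy, so mutating the copy leaves the original untouched. 'noise' is a hypothetical name, and the independence shown is an assumption based on the deep-copy machinery above.

# Hedged sketch: copies are independent of the original.
import numpy as np
from paramz import Param

p = Param('noise', np.ones(2))
q = p.copy()
q[:] = 9.0
print(np.asarray(p), np.asarray(q))  # [1. 1.] [9. 9.]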
722,515
paramz.param
get_property_string
null
def get_property_string(self, propname):
        prop = self._index_operations[propname]
        return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}",
                             prop.items()))]
(self, propname)
722,520
paramz.param
parameter_names
null
def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, **kw):
        # This just overwrites the parameterized calls to parameter_names,
        # in order to maintain OOP.
        if adjust_for_printing:
            return [adjust_name_for_printing(self.name)]
        return [self.name]
(self, add_self=False, adjust_for_printing=False, recursive=True, **kw)
722,541
paramz.parameterized
Parameterized
Say m is a handle to a parameterized class.

Printing parameters::

    - print m:           prints a nice summary over all parameters
    - print m.name:      prints details for param with name 'name'
    - print m[regexp]:   prints details for all the parameters which match (!) regexp
    - print m['']:       prints details for all parameters

Fields::

    Name:       The name of the param, can be renamed!
    Value:      Shape or value, if one-valued
    Constrain:  constraint of the param, curly "{c}" brackets indicate
                some parameters are constrained by c. See detailed print
                to get exact constraints.
    Tied_to:    which parameter it is tied to.

Getting and setting parameters::

    - Set all values in param to one:    m.name.to.param = 1
    - Set all values in parameterized:   m.name[:] = 1
    - Set values to random values:       m[:] = np.random.randn(m.size)

Handling of constraining, fixing and tying parameters::

    - You can constrain parameters by calling the constrain on the param itself, e.g.:

        - m.name[:,1].constrain_positive()
        - m.name[0].tie_to(m.name[1])

    - Fixing parameters will fix them to the value they are right now. If you change
      the parameter's value, the param will be fixed to the new value!
    - If you want to operate on all parameters, use m[''] to wildcard-select all
      parameters and concatenate them. Printing m[''] will result in printing of
      all parameters in detail.
class Parameterized(with_metaclass(ParametersChangedMeta, Parameterizable)):
    """
    Say m is a handle to a parameterized class.

    Printing parameters::

        - print m:           prints a nice summary over all parameters
        - print m.name:      prints details for param with name 'name'
        - print m[regexp]:   prints details for all the parameters which match (!) regexp
        - print m['']:       prints details for all parameters

    Fields::

        Name:       The name of the param, can be renamed!
        Value:      Shape or value, if one-valued
        Constrain:  constraint of the param, curly "{c}" brackets indicate
                    some parameters are constrained by c. See detailed print
                    to get exact constraints.
        Tied_to:    which parameter it is tied to.

    Getting and setting parameters::

        - Set all values in param to one:    m.name.to.param = 1
        - Set all values in parameterized:   m.name[:] = 1
        - Set values to random values:       m[:] = np.random.randn(m.size)

    Handling of constraining, fixing and tying parameters::

        - You can constrain parameters by calling the constrain on the param itself, e.g.:

            - m.name[:,1].constrain_positive()
            - m.name[0].tie_to(m.name[1])

        - Fixing parameters will fix them to the value they are right now. If you change
          the parameter's value, the param will be fixed to the new value!
        - If you want to operate on all parameters, use m[''] to wildcard-select all
          parameters and concatenate them. Printing m[''] will result in printing of
          all parameters in detail.
    """
    #===========================================================================
    # Metaclass for parameters changed after init.
    # This makes sure that parameters_changed will always be called after __init__.
    # **Never** call parameters_changed() yourself.
    # This is ignored in Python 3 -- you need to put the metaclass in the class definition.
    # __metaclass__ = ParametersChangedMeta
    # The six module is used to support both Python 2 and 3 simultaneously.
    #===========================================================================
    def __init__(self, name=None, parameters=[]):
        super(Parameterized, self).__init__(name=name)
        self.size = sum(p.size for p in self.parameters)
        self.add_observer(self, self._parameters_changed_notification, -100)
        self._fixes_ = None
        self._param_slices_ = []
        self.link_parameters(*parameters)

    #===========================================================================
    # Add remove parameters:
    #===========================================================================
    def link_parameter(self, param, index=None):
        """
        :param param: the parameter to add
        :type param: list of or one :py:class:`paramz.param.Param`
        :param [index]: index of where to put the parameter

        Add all parameters to this param class, you can insert parameters
        at any given index using the :func:`list.insert` syntax
        """
        if param in self.parameters and index is not None:
            self.unlink_parameter(param)
            return self.link_parameter(param, index)
        elif param not in self.parameters:
            if param.has_parent():
                def visit(parent, self):
                    if parent is self:
                        raise HierarchyError("You cannot add a parameter twice into the hierarchy")
                param.traverse_parents(visit, self)
                param._parent_.unlink_parameter(param)
            # make sure the size is set
            if index is None:
                start = sum(p.size for p in self.parameters)
                for name, iop in self._index_operations.items():
                    iop.shift_right(start, param.size)
                    iop.update(param._index_operations[name], self.size)
                param._parent_ = self
                param._parent_index_ = len(self.parameters)
                self.parameters.append(param)
            else:
                start = sum(p.size for p in self.parameters[:index])
                for name, iop in self._index_operations.items():
                    iop.shift_right(start, param.size)
                    iop.update(param._index_operations[name], start)
                param._parent_ = self
                param._parent_index_ = index if index >= 0 else len(self.parameters[:index])
                for p in self.parameters[index:]:
                    p._parent_index_ += 1
                self.parameters.insert(index, param)

            param.add_observer(self, self._pass_through_notify_observers, -np.inf)

            parent = self
            while parent is not None:
                parent.size += param.size
                parent = parent._parent_
            self._notify_parent_change()

            if not self._in_init_ and self._highest_parent_._model_initialized_:
                self._highest_parent_._connect_parameters()
                self._highest_parent_._notify_parent_change()
                self._highest_parent_._connect_fixes()
            return param
        else:
            raise HierarchyError("""Parameter exists already, try making a copy""")

    def link_parameters(self, *parameters):
        """
        convenience method for adding several
        parameters without gradient specification
        """
        [self.link_parameter(p) for p in parameters]

    def unlink_parameter(self, param):
        """
        :param param: param object to remove from being a parameter of this parameterized object.
        """
        if not param in self.parameters:
            try:
                raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
            except AttributeError:
                raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))

        start = sum([p.size for p in self.parameters[:param._parent_index_]])
        self.size -= param.size
        del self.parameters[param._parent_index_]

        self._remove_parameter_name(param)

        param._disconnect_parent()
        param.remove_observer(self, self._pass_through_notify_observers)

        for name, iop in self._index_operations.items():
            iop.shift_left(start, param.size)

        self._connect_parameters()
        self._notify_parent_change()

        parent = self._parent_
        while parent is not None:
            parent.size -= param.size
            parent = parent._parent_

        self._highest_parent_._connect_parameters()
        self._highest_parent_._connect_fixes()
        self._highest_parent_._notify_parent_change()

    def _connect_parameters(self, ignore_added_names=False):
        # Connect the parameter list to this parameterized object.
        # This just sets up the right connections for the param objects
        # to be used as parameters. It also sets the constraints for each
        # parameter to the constraints of their respective parents.
        self._model_initialized_ = True
        if not hasattr(self, "parameters") or len(self.parameters) < 1:
            # no parameters for this class
            return

        old_size = 0
        self._param_slices_ = []
        for i, p in enumerate(self.parameters):
            if not p.param_array.flags['C_CONTIGUOUS']:
                raise ValueError("""
Have you added an additional dimension to a Param object?

p[:,None], where p is of type Param does not work
and is expected to fail! Try increasing the
dimensionality of the param array before making
a Param out of it:
p = Param("<name>", array[:,None])

Otherwise this should not happen!
Please write an email to the developers with the code,
which reproduces this error.
All parameter arrays must be C_CONTIGUOUS
""")
            p._parent_ = self
            p._parent_index_ = i

            pslice = slice(old_size, old_size + p.size)

            # first connect all children
            p._propagate_param_grad(self.param_array[pslice], self.gradient_full[pslice])

            # then connect children to self
            self.param_array[pslice] = p.param_array.flat
            self.gradient_full[pslice] = p.gradient_full.flat

            p.param_array.data = self.param_array[pslice].data
            p.gradient_full.data = self.gradient_full[pslice].data

            self._param_slices_.append(pslice)

            self._add_parameter_name(p)
            old_size += p.size

    #===========================================================================
    # Get/set parameters:
    #===========================================================================
    def grep_param_names(self, regexp):
        """
        Create a list of parameters matching the regular expression regexp.
        """
        if not isinstance(regexp, _pattern_type):
            regexp = compile(regexp)
        found_params = []
        def visit(innerself, regexp):
            if (innerself is not self) and regexp.match(innerself.hierarchy_name().partition('.')[2]):
                found_params.append(innerself)
        self.traverse(visit, regexp)
        return found_params

    def __getitem__(self, name, paramlist=None):
        if isinstance(name, (int, slice, tuple, np.ndarray)):
            return self.param_array[name]
        else:
            paramlist = self.grep_param_names(name)
            if len(paramlist) < 1:
                raise AttributeError(name)
            if len(paramlist) == 1:
                return paramlist[-1]
            from .param import ParamConcatenation
            return ParamConcatenation(paramlist)

    def __setitem__(self, name, value, paramlist=None):
        if not self._model_initialized_:
            raise AttributeError("""Model is not initialized, this change will only be reflected after initialization if in leaf.

If you are loading a model, set updates off, then initialize, then set the values, then update the model to be fully initialized:
>>> m.update_model(False)
>>> m.initialize_parameter()
>>> m[:] = loaded_parameters
>>> m.update_model(True)
""")
        if value is None:
            return  # nothing to do here
        if isinstance(name, (slice, tuple, np.ndarray)):
            try:
                self.param_array[name] = value
            except:
                raise ValueError("Setting by slice or index only allowed with array-like")
            self.trigger_update()
        else:
            param = self.__getitem__(name, paramlist)
            param[:] = value

    def __setattr__(self, name, val):
        # Override the default behaviour, if setting a param, so broadcasting can be used.
        if hasattr(self, "parameters"):
            pnames = self.parameter_names(False, adjust_for_printing=True, recursive=False)
            if name in pnames:
                param = self.parameters[pnames.index(name)]
                param[:] = val
                return
        return object.__setattr__(self, name, val)

    #===========================================================================
    # Pickling
    #===========================================================================
    def __setstate__(self, state):
        super(Parameterized, self).__setstate__(state)
        self._connect_parameters()
        self._connect_fixes()
        self._notify_parent_change()
        self.parameters_changed()
        return self

    def copy(self, memo=None):
        if memo is None:
            memo = {}
        memo[id(self.optimizer_array)] = None  # and param_array
        memo[id(self.param_array)] = None  # and param_array
        copy = super(Parameterized, self).copy(memo)
        copy._connect_parameters()
        copy._connect_fixes()
        copy._notify_parent_change()
        return copy

    #===========================================================================
    # Printing:
    #===========================================================================
    def _short(self):
        return self.hierarchy_name()

    @property
    def flattened_parameters(self):
        return [xi for x in self.parameters for xi in x.flattened_parameters]

    def get_property_string(self, propname):
        props = []
        for p in self.parameters:
            props.extend(p.get_property_string(propname))
        return props

    @property
    def _description_str(self):
        return [xi for x in self.parameters for xi in x._description_str]

    def _repr_html_(self, header=True):
        """Representation of the parameters in html for notebook display."""
        name = adjust_name_for_printing(self.name) + "."
        names = self.parameter_names()
        desc = self._description_str
        iops = OrderedDict()
        for opname in self._index_operations:
            iop = []
            for p in self.parameters:
                iop.extend(p.get_property_string(opname))
            iops[opname] = iop
        format_spec = self._format_spec(name, names, desc, iops, False)
        to_print = []
        if header:
            to_print.append("<tr><th><b>" + '</b></th><th><b>'.join(format_spec).format(name=name, desc='value', **dict((name, name) for name in iops)) + "</b></th></tr>")
        format_spec = "<tr><td class=tg-left>" + format_spec[0] + '</td><td class=tg-right>' + format_spec[1] + '</td><td class=tg-center>' + '</td><td class=tg-center>'.join(format_spec[2:]) + "</td></tr>"
        for i in range(len(names)):
            to_print.append(format_spec.format(name=names[i], desc=desc[i], **dict((name, iops[name][i]) for name in iops)))
        style = """<style type="text/css">
.tg {font-family:"Courier New", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-center{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:center;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""
        return style + '\n' + '<table class="tg">' + '\n'.join(to_print) + '\n</table>'

    def _format_spec(self, name, names, desc, iops, VT100=True):
        nl = max([len(str(x)) for x in names + [name]])
        sl = max([len(str(x)) for x in desc + ["value"]])
        lls = [reduce(lambda a, b: max(a, len(b)), iops[opname], len(opname)) for opname in iops]
        if VT100:
            format_spec = [" \033[1m{{name!s:<{0}}}\033[0;0m".format(nl), "{{desc!s:>{0}}}".format(sl)]
        else:
            format_spec = [" {{name!s:<{0}}}".format(nl), "{{desc!s:>{0}}}".format(sl)]
        for opname, l in zip(iops, lls):
            f = '{{{1}!s:^{0}}}'.format(l, opname)
            format_spec.append(f)
        return format_spec

    def __str__(self, header=True, VT100=True):
        name = adjust_name_for_printing(self.name) + "."
        names = self.parameter_names(adjust_for_printing=True)
        desc = self._description_str
        iops = OrderedDict()
        for opname in self._index_operations:
            iops[opname] = self.get_property_string(opname)
        format_spec = ' | '.join(self._format_spec(name, names, desc, iops, VT100))
        to_print = []
        if header:
            to_print.append(format_spec.format(name=name, desc='value', **dict((name, name) for name in iops)))
        for i in range(len(names)):
            to_print.append(format_spec.format(name=names[i], desc=desc[i], **dict((name, iops[name][i]) for name in iops)))
        return '\n'.join(to_print)

    def build_pydot(self, G=None):  # pragma: no cover
        """
        Build a pydot representation of this model. This needs pydot installed.

        Example usage::

            np.random.seed(1000)
            X = np.random.normal(0, 1, (20, 2))
            beta = np.random.uniform(0, 1, (2, 1))
            Y = X.dot(beta)
            m = RidgeRegression(X, Y)
            G = m.build_pydot()
            G.write_png('example_hierarchy_layout.png')

        The output looks like:

        .. image:: ./example_hierarchy_layout.png

        Rectangles are parameterized objects (nodes or leaves of the hierarchy).

        Trapezoids are param objects, which represent the arrays for parameters.

        Black arrows show parameter hierarchical dependence. The arrow points
        from parents towards children.

        Orange arrows show the observer pattern. Self references (here) are
        the references to the call to parameters changed and references upwards
        are the references to tell the parents they need to update.
        """
        import pydot  # @UnresolvedImport
        iamroot = False
        if G is None:
            G = pydot.Dot(graph_type='digraph', bgcolor=None)
            iamroot = True
        node = pydot.Node(id(self), shape='box', label=self.name)
        G.add_node(node)
        for child in self.parameters:
            child_node = child.build_pydot(G)
            G.add_edge(pydot.Edge(node, child_node))

        for _, o, _ in self.observers:
            label = o.name if hasattr(o, 'name') else str(o)
            observed_node = pydot.Node(id(o), label=label)
            if str(id(o)) not in G.obj_dict['nodes']:
                G.add_node(observed_node)
            edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
            G.add_edge(edge)

        if iamroot:
            return G
        return node
(*args, **kw)
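A hedged end-to-end sketch of the API documented above; 'model', 'weights' and 'bias' are made-up names, and the assumption is that a directly constructed Parameterized is initialized by its metaclass so that flat indexing works.

# Usage sketch for Parameterized; names are hypothetical.
import numpy as np
from paramz import Parameterized, Param

m = Parameterized('model')
m.link_parameter(Param('weights', np.ones((2, 3))))
m.link_parameter(Param('bias', np.zeros(3)))

print(m)                        # summary table, one row per parameter
m.weights[:, 1] = 5.0           # attribute access broadcasts via __setattr__
m['bias'][:] = 1.0              # name/regexp lookup via __getitem__
m[:] = np.random.randn(m.size)  # flat assignment across all parameters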
722,545
paramz.parameterized
__init__
null
def __init__(self, name=None, parameters=[]):
        super(Parameterized, self).__init__(name=name)
        self.size = sum(p.size for p in self.parameters)
        self.add_observer(self, self._parameters_changed_notification, -100)
        self._fixes_ = None
        self._param_slices_ = []
        self.link_parameters(*parameters)
(self, name=None, parameters=[])
722,549
paramz.parameterized
__str__
null
def __str__(self, header=True, VT100=True):
        name = adjust_name_for_printing(self.name) + "."
        names = self.parameter_names(adjust_for_printing=True)
        desc = self._description_str
        iops = OrderedDict()
        for opname in self._index_operations:
            iops[opname] = self.get_property_string(opname)
        format_spec = ' | '.join(self._format_spec(name, names, desc, iops, VT100))
        to_print = []
        if header:
            to_print.append(format_spec.format(name=name, desc='value', **dict((name, name) for name in iops)))
        for i in range(len(names)):
            to_print.append(format_spec.format(name=names[i], desc=desc[i], **dict((name, iops[name][i]) for name in iops)))
        return '\n'.join(to_print)
(self, header=True, VT100=True)
722,573
paramz.parameterized
_repr_html_
Representation of the parameters in html for notebook display.
def _repr_html_(self, header=True):
        """Representation of the parameters in html for notebook display."""
        name = adjust_name_for_printing(self.name) + "."
        names = self.parameter_names()
        desc = self._description_str
        iops = OrderedDict()
        for opname in self._index_operations:
            iop = []
            for p in self.parameters:
                iop.extend(p.get_property_string(opname))
            iops[opname] = iop
        format_spec = self._format_spec(name, names, desc, iops, False)
        to_print = []
        if header:
            to_print.append("<tr><th><b>" + '</b></th><th><b>'.join(format_spec).format(name=name, desc='value', **dict((name, name) for name in iops)) + "</b></th></tr>")
        format_spec = "<tr><td class=tg-left>" + format_spec[0] + '</td><td class=tg-right>' + format_spec[1] + '</td><td class=tg-center>' + '</td><td class=tg-center>'.join(format_spec[2:]) + "</td></tr>"
        for i in range(len(names)):
            to_print.append(format_spec.format(name=names[i], desc=desc[i], **dict((name, iops[name][i]) for name in iops)))
        style = """<style type="text/css">
.tg {font-family:"Courier New", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-center{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:center;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""
        return style + '\n' + '<table class="tg">' + '\n'.join(to_print) + '\n</table>'
(self, header=True)
722,626
paramz
_unpickle
null
def _unpickle(file_or_path, pickle, strcl, p3kw):
    if isinstance(file_or_path, strcl):
        with open(file_or_path, 'rb') as f:
            m = pickle.load(f, **p3kw)
    else:
        m = pickle.load(file_or_path, **p3kw)
    return m
(file_or_path, pickle, strcl, p3kw)
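A hedged usage sketch for the loader above: _unpickle dispatches on whether it receives a path or an open file, and is assumed to back a module-level paramz.load helper that fills in the pickle module and Python-version keywords. 'model.pkl' and m are placeholders for a saved model.

# Assumed public entry point; m is a previously built model.
import paramz

m.pickle('model.pkl')           # Pickleable mixin serializes the model
m2 = paramz.load('model.pkl')   # load from a path ...
with open('model.pkl', 'rb') as f:
    m3 = paramz.load(f)         # ... or from an open file object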