Please provide a description of the function:
def get(self, key, dt):
    try:
        return self._cache[key].unwrap(dt)
    except Expired:
        self.cleanup(self._cache[key]._unsafe_get_value())
        del self._cache[key]
        raise KeyError(key)
[ "Get the value of a cached object.\n\n Parameters\n ----------\n key : any\n The key to lookup.\n dt : datetime\n The time of the lookup.\n\n Returns\n -------\n result : any\n The value for ``key``.\n\n Raises\n ------\n KeyError\n Raised if the key is not in the cache or the value for the key\n has expired.\n " ]
Please provide a description of the function:
def set(self, key, value, expiration_dt):
    self._cache[key] = CachedObject(value, expiration_dt)
[ "Adds a new key value pair to the cache.\n\n Parameters\n ----------\n key : any\n The key to use for the pair.\n value : any\n The value to store under the name ``key``.\n expiration_dt : datetime\n When should this mapping expire? The cache is considered invalid\n for dates **strictly greater** than ``expiration_dt``.\n " ]
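The two methods above only make sense together. The following is a minimal, self-contained sketch of the same get/set pattern; the ExpiringCache, CachedObject and Expired classes here are illustrative stand-ins written for this example, not the library's own implementations.

from datetime import datetime, timedelta


class Expired(Exception):
    """Raised when a cached value is requested after its expiration date."""


class CachedObject(object):
    def __init__(self, value, expiration_dt):
        self._value = value
        self._expiration_dt = expiration_dt

    def unwrap(self, dt):
        # The cache is invalid for dates strictly greater than expiration_dt.
        if dt > self._expiration_dt:
            raise Expired(self._expiration_dt)
        return self._value

    def _unsafe_get_value(self):
        return self._value


class ExpiringCache(object):
    def __init__(self, cleanup=lambda value: None):
        self._cache = {}
        self.cleanup = cleanup

    def get(self, key, dt):
        try:
            return self._cache[key].unwrap(dt)
        except Expired:
            self.cleanup(self._cache[key]._unsafe_get_value())
            del self._cache[key]
            raise KeyError(key)

    def set(self, key, value, expiration_dt):
        self._cache[key] = CachedObject(value, expiration_dt)


cache = ExpiringCache()
now = datetime(2024, 1, 1, 12, 0)
cache.set('px', 101.5, expiration_dt=now + timedelta(minutes=1))
assert cache.get('px', now) == 101.5             # still valid
try:
    cache.get('px', now + timedelta(minutes=2))  # past expiration_dt
except KeyError:
    pass                                         # expired entries raise KeyError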
Please provide a description of the function:
def ensure_dir(self, *path_parts):
    path = self.getpath(*path_parts)
    ensure_directory(path)
    return path
[ "Ensures a subdirectory of the working directory.\n\n Parameters\n ----------\n path_parts : iterable[str]\n The parts of the path after the working directory.\n " ]
Please provide a description of the function:
def verify_frames_aligned(frames, calendar):
    indexes = [f.index for f in frames]
    check_indexes_all_same(indexes, message="DataFrame indexes don't match:")

    columns = [f.columns for f in frames]
    check_indexes_all_same(columns, message="DataFrame columns don't match:")

    start, end = indexes[0][[0, -1]]
    cal_sessions = calendar.sessions_in_range(start, end)
    check_indexes_all_same(
        [indexes[0], cal_sessions],
        "DataFrame index doesn't match {} calendar:".format(calendar.name),
    )
[ "\n Verify that DataFrames in ``frames`` have the same indexing scheme and are\n aligned to ``calendar``.\n\n Parameters\n ----------\n frames : list[pd.DataFrame]\n calendar : trading_calendars.TradingCalendar\n\n Raises\n ------\n ValueError\n If frames have different indexes/columns, or if frame indexes do not\n match a contiguous region of ``calendar``.\n " ]
Please provide a description of the function:
def get_value(self, sid, dt, field):
    return self.frames[field].loc[dt, sid]
[ "\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n field : string\n The price field. e.g. ('open', 'high', 'low', 'close', 'volume')\n\n Returns\n -------\n float\n The spot price for colname of the given sid on the given day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n Returns -1 if the day is within the date range, but the price is\n 0.\n " ]
Please provide a description of the function:
def get_last_traded_dt(self, asset, dt):
    try:
        return self.frames['close'].loc[:, asset.sid].last_valid_index()
    except IndexError:
        return NaT
[ "\n Parameters\n ----------\n asset : zipline.asset.Asset\n The asset identifier.\n dt : datetime64-like\n Midnight of the day for which data is requested.\n\n Returns\n -------\n pd.Timestamp : The last known dt for the asset and dt;\n NaT if no trade is found before the given dt.\n " ]
Please provide a description of the function:
def same(*values):
    if not values:
        return True
    first, rest = values[0], values[1:]
    return all(value == first for value in rest)
[ "\n Check if all values in a sequence are equal.\n\n Returns True on empty sequences.\n\n Examples\n --------\n >>> same(1, 1, 1, 1)\n True\n >>> same(1, 2, 1)\n False\n >>> same()\n True\n " ]
Please provide a description of the function:
def dzip_exact(*dicts):
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    return {k: tuple(d[k] for d in dicts) for k in dicts[0]}
[ "\n Parameters\n ----------\n *dicts : iterable[dict]\n A sequence of dicts all sharing the same keys.\n\n Returns\n -------\n zipped : dict\n A dict whose keys are the union of all keys in *dicts, and whose values\n are tuples of length len(dicts) containing the result of looking up\n each key in each dict.\n\n Raises\n ------\n ValueError\n If dicts don't all have the same keys.\n\n Examples\n --------\n >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})\n >>> result == {'a': (1, 3), 'b': (2, 4)}\n True\n " ]
Please provide a description of the function:
def _gen_unzip(it, elem_len):
    elem = next(it)
    first_elem_len = len(elem)

    if elem_len is not None and elem_len != first_elem_len:
        raise ValueError(
            'element at index 0 was length %d, expected %d' % (
                first_elem_len,
                elem_len,
            )
        )
    else:
        elem_len = first_elem_len

    yield elem
    for n, elem in enumerate(it, 1):
        if len(elem) != elem_len:
            raise ValueError(
                'element at index %d was length %d, expected %d' % (
                    n,
                    len(elem),
                    elem_len,
                ),
            )
        yield elem
[ "Helper for unzip which checks the lengths of each element in it.\n Parameters\n ----------\n it : iterable[tuple]\n An iterable of tuples. ``unzip`` maps ``tuple`` over its input to\n ensure that these are already tuples.\n elem_len : int or None\n The expected element length. If this is None it is inferred from the\n length of the first element.\n Yields\n ------\n elem : tuple\n Each element of ``it``.\n Raises\n ------\n ValueError\n Raised when the lengths do not match the ``elem_len``.\n " ]
Please provide a description of the function:
def unzip(seq, elem_len=None):
    ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))

    if ret:
        return ret

    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")

    return ((),) * elem_len
[ "Unzip a length n sequence of length m sequences into m seperate length\n n sequences.\n Parameters\n ----------\n seq : iterable[iterable]\n The sequence to unzip.\n elem_len : int, optional\n The expected length of each element of ``seq``. If not provided this\n will be infered from the length of the first element of ``seq``. This\n can be used to ensure that code like: ``a, b = unzip(seq)`` does not\n fail even when ``seq`` is empty.\n Returns\n -------\n seqs : iterable[iterable]\n The new sequences pulled out of the first iterable.\n Raises\n ------\n ValueError\n Raised when ``seq`` is empty and ``elem_len`` is not provided.\n Raised when elements of ``seq`` do not match the given ``elem_len`` or\n the length of the first element of ``seq``.\n Examples\n --------\n >>> seq = [('a', 1), ('b', 2), ('c', 3)]\n >>> cs, ns = unzip(seq)\n >>> cs\n ('a', 'b', 'c')\n >>> ns\n (1, 2, 3)\n\n # checks that the elements are the same length\n >>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]\n >>> cs, ns = unzip(seq)\n Traceback (most recent call last):\n ...\n ValueError: element at index 2 was length 3, expected 2\n\n # allows an explicit element length instead of infering\n >>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]\n >>> cs, ns = unzip(seq, 2)\n Traceback (most recent call last):\n ...\n ValueError: element at index 0 was length 3, expected 2\n\n # handles empty sequences when a length is given\n >>> cs, ns = unzip([], elem_len=2)\n >>> cs == ns == ()\n True\n\n Notes\n -----\n This function will force ``seq`` to completion.\n " ]
Please provide a description of the function:
def getattrs(value, attrs, default=_no_default):
    try:
        for attr in attrs:
            value = getattr(value, attr)
    except AttributeError:
        if default is _no_default:
            raise
        value = default
    return value
[ "\n Perform a chained application of ``getattr`` on ``value`` with the values\n in ``attrs``.\n\n If ``default`` is supplied, return it if any of the attribute lookups fail.\n\n Parameters\n ----------\n value : object\n Root of the lookup chain.\n attrs : iterable[str]\n Sequence of attributes to look up.\n default : object, optional\n Value to return if any of the lookups fail.\n\n Returns\n -------\n result : object\n Result of the lookup sequence.\n\n Examples\n --------\n >>> class EmptyObject(object):\n ... pass\n ...\n >>> obj = EmptyObject()\n >>> obj.foo = EmptyObject()\n >>> obj.foo.bar = \"value\"\n >>> getattrs(obj, ('foo', 'bar'))\n 'value'\n\n >>> getattrs(obj, ('foo', 'buzz'))\n Traceback (most recent call last):\n ...\n AttributeError: 'EmptyObject' object has no attribute 'buzz'\n\n >>> getattrs(obj, ('foo', 'buzz'), 'default')\n 'default'\n " ]
Please provide a description of the function:
def set_attribute(name, value):
    def decorator(f):
        setattr(f, name, value)
        return f
    return decorator
[ "\n Decorator factory for setting attributes on a function.\n\n Doesn't change the behavior of the wrapped function.\n\n Examples\n --------\n >>> @set_attribute('__name__', 'foo')\n ... def bar():\n ... return 3\n ...\n >>> bar()\n 3\n >>> bar.__name__\n 'foo'\n " ]
Please provide a description of the function:
def foldr(f, seq, default=_no_default):
    return reduce(
        flip(f),
        reversed(seq),
        *(default,) if default is not _no_default else ()
    )
[ "Fold a function over a sequence with right associativity.\n\n Parameters\n ----------\n f : callable[any, any]\n The function to reduce the sequence with.\n The first argument will be the element of the sequence; the second\n argument will be the accumulator.\n seq : iterable[any]\n The sequence to reduce.\n default : any, optional\n The starting value to reduce with. If not provided, the sequence\n cannot be empty, and the last value of the sequence will be used.\n\n Returns\n -------\n folded : any\n The folded value.\n\n Notes\n -----\n This function works by reducing the list in a right associative way.\n\n For example, imagine we are folding with ``operator.add`` or ``+``:\n\n .. code-block:: python\n\n foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))\n\n In the more general case with an arbitrary function, ``foldr`` will expand\n like so:\n\n .. code-block:: python\n\n foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))\n\n For a more in depth discussion of left and right folds, see:\n `https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_\n The images in that page are very good for showing the differences between\n ``foldr`` and ``foldl`` (``reduce``).\n\n .. note::\n\n For performance reasons it is best to pass a strict (non-lazy) sequence,\n for example, a list.\n\n See Also\n --------\n :func:`functools.reduce`\n :func:`sum`\n " ]
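Because the difference between a right fold and functools.reduce (a left fold) is easy to get backwards, here is a short self-contained check of the right-associative behaviour described above. It uses an inline flip instead of the library helper, so nothing beyond the standard library is assumed.

from functools import reduce
import operator


def foldr(f, seq, default):
    # Right fold built from reduce: reverse the sequence and flip the
    # argument order so f sees (element, accumulator).
    return reduce(lambda acc, x: f(x, acc), reversed(seq), default)


# Right fold: 1 - (2 - (3 - 0)) = 2
assert foldr(operator.sub, [1, 2, 3], 0) == 2

# Left fold (functools.reduce): ((0 - 1) - 2) - 3 = -6
assert reduce(operator.sub, [1, 2, 3], 0) == -6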
Please provide a description of the function:
def invert(d):
    out = {}
    for k, v in iteritems(d):
        try:
            out[v].add(k)
        except KeyError:
            out[v] = {k}
    return out
[ "\n Invert a dictionary into a dictionary of sets.\n\n >>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP\n {1: {'a', 'c'}, 2: {'b'}}\n " ]
Please provide a description of the function:
def simplex_projection(v, b=1):
    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w
[ "Projection of a vector onto the simplex domain\n\n Implemented according to the paper: Efficient projections onto the\n l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.\n Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg\n Optimization Problem: min_{w} \\| w - v \\|_{2}^{2}\n s.t. \\sum_{i=1}^{m} w_{i} = z, w_{i} \\geq 0\n\n Input: A vector v \\in R^{m}, and a scalar z > 0 (default=1)\n Output: Projection vector w\n\n :Example:\n >>> proj = simplex_projection([.4 ,.3, -.4, .5])\n >>> proj # doctest: +NORMALIZE_WHITESPACE\n array([ 0.33333333, 0.23333333, 0. , 0.43333333])\n >>> print(proj.sum())\n 1.0\n\n Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)\n Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com).\n " ]
Please provide a description of the function:
def run_example(example_name, environ):
    mod = EXAMPLE_MODULES[example_name]

    register_calendar("YAHOO", get_calendar("NYSE"), force=True)

    return run_algorithm(
        initialize=getattr(mod, 'initialize', None),
        handle_data=getattr(mod, 'handle_data', None),
        before_trading_start=getattr(mod, 'before_trading_start', None),
        analyze=getattr(mod, 'analyze', None),
        bundle='test',
        environ=environ,
        # Provide a default capital base, but allow the test to override.
        **merge({'capital_base': 1e7}, mod._test_args())
    )
[ "\n Run an example module from zipline.examples.\n " ]
Please provide a description of the function:
def vectorized_beta(dependents, independent, allowed_missing, out=None):
    # Cache these as locals since we're going to call them multiple times.
    nan = np.nan
    isnan = np.isnan
    N, M = dependents.shape

    if out is None:
        out = np.full(M, nan)

    # Copy N times as a column vector and fill with nans to have the same
    # missing value pattern as the dependent variable.
    #
    # PERF_TODO: We could probably avoid the space blowup by doing this in
    # Cython.

    # shape: (N, M)
    independent = np.where(
        isnan(dependents),
        nan,
        independent,
    )

    # Calculate beta as Cov(X, Y) / Cov(X, X).
    # https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line  # noqa
    #
    # NOTE: The usual formula for covariance is::
    #
    #    mean((X - mean(X)) * (Y - mean(Y)))
    #
    # However, we don't actually need to take the mean of both sides of the
    # product, because of the following equivalence::
    #
    # Let X_res = (X - mean(X)).
    # We have:
    #
    #     mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
    #                             (1) = mean((X_res * Y) - (X_res * mean(Y)))
    #                             (2) = mean(X_res * Y) - mean(X_res * mean(Y))
    #                             (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
    #                             (4) = mean(X_res * Y) - 0 * mean(Y)
    #                             (5) = mean(X_res * Y)
    #
    # The tricky step in the above derivation is step (4). We know that
    # mean(X_res) is zero because, for any X:
    #
    #     mean(X - mean(X)) = mean(X) - mean(X) = 0.
    #
    # The upshot of this is that we only have to center one of `independent`
    # and `dependent` when calculating covariances. Since we need the centered
    # `independent` to calculate its variance in the next step, we choose to
    # center `independent`.

    # shape: (N, M)
    ind_residual = independent - nanmean(independent, axis=0)

    # shape: (M,)
    covariances = nanmean(ind_residual * dependents, axis=0)

    # We end up with different variances in each column here because each
    # column may have a different subset of the data dropped due to missing
    # data in the corresponding dependent column.
    # shape: (M,)
    independent_variances = nanmean(ind_residual ** 2, axis=0)

    # shape: (M,)
    np.divide(covariances, independent_variances, out=out)

    # Write nans back to locations where we have more than the allowed number
    # of missing entries.
    nanlocs = isnan(independent).sum(axis=0) > allowed_missing
    out[nanlocs] = nan

    return out
[ "\n Compute slopes of linear regressions between columns of ``dependents`` and\n ``independent``.\n\n Parameters\n ----------\n dependents : np.array[N, M]\n Array with columns of data to be regressed against ``independent``.\n independent : np.array[N, 1]\n Independent variable of the regression\n allowed_missing : int\n Number of allowed missing (NaN) observations per column. Columns with\n more than this many missing observations in either ``dependents`` or\n ``independent`` will output NaN as the regression coefficient.\n\n Returns\n -------\n slopes : np.array[M]\n Linear regression coefficients for each column of ``dependents``.\n " ]
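As a sanity check on the covariance-ratio derivation above, here is a small self-contained NumPy sketch that applies the same formula to synthetic data and cross-checks one column against np.polyfit. The data and names here (true_betas, etc.) are made up for the illustration; np.nanmean stands in for the nanmean import used in the function.

import numpy as np

rng = np.random.RandomState(0)
N, M = 250, 4
independent = rng.randn(N, 1)                      # shared regressor, shape (N, 1)
true_betas = np.array([0.5, 1.0, -2.0, 3.0])
dependents = independent * true_betas + 0.01 * rng.randn(N, M)

# Same derivation as in vectorized_beta: beta = Cov(X, Y) / Var(X),
# centering only the independent variable.
ind_residual = independent - np.nanmean(independent, axis=0)
covariances = np.nanmean(ind_residual * dependents, axis=0)
variances = np.nanmean(ind_residual ** 2, axis=0)
betas = covariances / variances

# Cross-check one column against np.polyfit's slope estimate.
slope, _ = np.polyfit(independent[:, 0], dependents[:, 0], 1)
assert np.allclose(betas, true_betas, atol=0.05)
assert np.isclose(betas[0], slope)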
Please provide a description of the function:
def _format_url(instrument_type,
                instrument_ids,
                start_date,
                end_date,
                earliest_allowed_date):
    return (
        "http://www.bankofcanada.ca/stats/results/csv"
        "?lP=lookup_{instrument_type}_yields.php"
        "&sR={restrict}"
        "&se={instrument_ids}"
        "&dF={start}"
        "&dT={end}".format(
            instrument_type=instrument_type,
            instrument_ids='-'.join(map(prepend("L_"), instrument_ids)),
            restrict=earliest_allowed_date.strftime("%Y-%m-%d"),
            start=start_date.strftime("%Y-%m-%d"),
            end=end_date.strftime("%Y-%m-%d"),
        )
    )
[ "\n Format a URL for loading data from Bank of Canada.\n " ]
Please provide a description of the function:
def load_frame(url, skiprows):
    return pd.read_csv(
        url,
        skiprows=skiprows,
        skipinitialspace=True,
        na_values=["Bank holiday", "Not available"],
        parse_dates=["Date"],
        index_col="Date",
    ).dropna(how='all') \
     .tz_localize('UTC') \
     .rename(columns=COLUMN_NAMES)
[ "\n Load a DataFrame of data from a Bank of Canada site.\n " ]
Please provide a description of the function:
def check_known_inconsistencies(bill_data, bond_data):
    inconsistent_dates = bill_data.index.sym_diff(bond_data.index)
    known_inconsistencies = [
        # bill_data has an entry for 2010-02-15, which bond_data doesn't.
        # bond_data has an entry for 2006-09-04, which bill_data doesn't.
        # Both of these dates are bank holidays (Flag Day and Labor Day,
        # respectively).
        pd.Timestamp('2006-09-04', tz='UTC'),
        pd.Timestamp('2010-02-15', tz='UTC'),
        # 2013-07-25 comes back as "Not available" from the bills endpoint.
        # This date doesn't seem to be a bank holiday, but the previous
        # calendar implementation dropped this entry, so we drop it as well.
        # If someone cares deeply about the integrity of the Canadian trading
        # calendar, they may want to consider forward-filling here rather than
        # dropping the row.
        pd.Timestamp('2013-07-25', tz='UTC'),
    ]
    unexpected_inconsistences = inconsistent_dates.drop(known_inconsistencies)
    if len(unexpected_inconsistences):
        in_bills = bill_data.index.difference(bond_data.index).difference(
            known_inconsistencies
        )
        in_bonds = bond_data.index.difference(bill_data.index).difference(
            known_inconsistencies
        )
        raise ValueError(
            "Inconsistent dates for Canadian treasury bills vs bonds. \n"
            "Dates with bills but not bonds: {in_bills}.\n"
            "Dates with bonds but not bills: {in_bonds}.".format(
                in_bills=in_bills,
                in_bonds=in_bonds,
            )
        )
[ "\n There are a couple quirks in the data provided by Bank of Canada.\n Check that no new quirks have been introduced in the latest download.\n " ]
Please provide a description of the function:
def earliest_possible_date():
    today = pd.Timestamp('now', tz='UTC').normalize()
    # Bank of Canada only has the last 10 years of data at any given time.
    return today.replace(year=today.year - 10)
[ "\n The earliest date for which we can load data from this module.\n " ]
Please provide a description of the function:
def fill_price_worse_than_limit_price(fill_price, order):
    if order.limit:
        # this is tricky! if an order with a limit price has reached
        # the limit price, we will try to fill the order. do not fill
        # these shares if the impacted price is worse than the limit
        # price. return early to avoid creating the transaction.

        # buy order is worse if the impacted price is greater than
        # the limit price. sell order is worse if the impacted price
        # is less than the limit price
        if (order.direction > 0 and fill_price > order.limit) or \
                (order.direction < 0 and fill_price < order.limit):
            return True

    return False
[ "\n Checks whether the fill price is worse than the order's limit price.\n\n Parameters\n ----------\n fill_price: float\n The price to check.\n\n order: zipline.finance.order.Order\n The order whose limit price to check.\n\n Returns\n -------\n bool: Whether the fill price is above the limit price (for a buy) or below\n the limit price (for a sell).\n " ]
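The buy/sell asymmetry above is easy to invert by accident, so here is a stand-alone restatement of the check with a tiny stand-in order object (the real Order class has many more fields; only limit and direction matter here, and FakeOrder is invented for this sketch).

from collections import namedtuple

FakeOrder = namedtuple('FakeOrder', ['limit', 'direction'])


def fill_price_worse_than_limit_price(fill_price, order):
    if order.limit:
        if (order.direction > 0 and fill_price > order.limit) or \
                (order.direction < 0 and fill_price < order.limit):
            return True
    return False


buy = FakeOrder(limit=10.0, direction=1)    # buy: worse means paying more
sell = FakeOrder(limit=10.0, direction=-1)  # sell: worse means receiving less

assert fill_price_worse_than_limit_price(10.05, buy) is True
assert fill_price_worse_than_limit_price(9.95, buy) is False
assert fill_price_worse_than_limit_price(9.95, sell) is True
assert fill_price_worse_than_limit_price(10.05, sell) is False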
Please provide a description of the function:
def _get_window_data(self, data, asset, window_length):
    try:
        values = self._window_data_cache.get(asset, data.current_session)
    except KeyError:
        try:
            # Add a day because we want 'window_length' complete days,
            # excluding the current day.
            volume_history = data.history(
                asset, 'volume', window_length + 1, '1d',
            )
            close_history = data.history(
                asset, 'close', window_length + 1, '1d',
            )
        except HistoryWindowStartsBeforeData:
            # If there is not enough data to do a full history call, return
            # values as if there was no data.
            return 0, np.NaN

        # Exclude the first value of the percent change array because it is
        # always just NaN.
        close_volatility = close_history[:-1].pct_change()[1:].std(
            skipna=False,
        )
        values = {
            'volume': volume_history[:-1].mean(),
            'close': close_volatility * SQRT_252,
        }
        self._window_data_cache.set(asset, values, data.current_session)

    return values['volume'], values['close']
[ "\n Internal utility method to return the trailing mean volume over the\n past 'window_length' days, and volatility of close prices for a\n specific asset.\n\n Parameters\n ----------\n data : The BarData from which to fetch the daily windows.\n asset : The Asset whose data we are fetching.\n window_length : Number of days of history used to calculate the mean\n volume and close price volatility.\n\n Returns\n -------\n (mean volume, volatility)\n " ]
Please provide a description of the function:
def validate_dtype(termname, dtype, missing_value):
    if dtype is NotSpecified:
        raise DTypeNotSpecified(termname=termname)

    try:
        dtype = dtype_class(dtype)
    except TypeError:
        raise NotDType(dtype=dtype, termname=termname)

    if not can_represent_dtype(dtype):
        raise UnsupportedDType(dtype=dtype, termname=termname)

    if missing_value is NotSpecified:
        missing_value = default_missing_value_for_dtype(dtype)

    try:
        if (dtype == categorical_dtype):
            # This check is necessary because we use object dtype for
            # categoricals, and numpy will allow us to promote numerical
            # values to object even though we don't support them.
            _assert_valid_categorical_missing_value(missing_value)

        # For any other type, we can check if the missing_value is safe by
        # making an array of that value and trying to safely convert it to
        # the desired type.
        # 'same_kind' allows casting between things like float32 and
        # float64, but not str and int.
        array([missing_value]).astype(dtype=dtype, casting='same_kind')
    except TypeError as e:
        raise TypeError(
            "Missing value {value!r} is not a valid choice "
            "for term {termname} with dtype {dtype}.\n\n"
            "Coercion attempt failed with: {error}".format(
                termname=termname,
                value=missing_value,
                dtype=dtype,
                error=e,
            )
        )

    return dtype, missing_value
[ "\n Validate a `dtype` and `missing_value` passed to Term.__new__.\n\n Ensures that we know how to represent ``dtype``, and that missing_value\n is specified for types without default missing values.\n\n Returns\n -------\n validated_dtype, validated_missing_value : np.dtype, any\n The dtype and missing_value to use for the new term.\n\n Raises\n ------\n DTypeNotSpecified\n When no dtype was passed to the instance, and the class doesn't\n provide a default.\n NotDType\n When either the class or the instance provides a value not\n coercible to a numpy dtype.\n NoDefaultMissingValue\n When dtype requires an explicit missing_value, but\n ``missing_value`` is NotSpecified.\n " ]
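A quick illustration of the 'same_kind' casting rule that the check above relies on, using plain NumPy (nothing library-specific is assumed): promotions within a kind succeed, while cross-kind coercions raise the TypeError that the validator turns into its own error message.

import numpy as np

# float64 -> float32 is a 'same_kind' cast, so this succeeds.
np.array([np.nan]).astype(dtype=np.dtype('float32'), casting='same_kind')

try:
    # str -> int is not a 'same_kind' cast, so this raises TypeError.
    np.array(['missing']).astype(dtype=np.dtype('int64'), casting='same_kind')
except TypeError:
    pass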
Please provide a description of the function:
def _assert_valid_categorical_missing_value(value):
    label_types = LabelArray.SUPPORTED_SCALAR_TYPES
    if not isinstance(value, label_types):
        raise TypeError(
            "Categorical terms must have missing values of type "
            "{types}.".format(
                types=' or '.join([t.__name__ for t in label_types]),
            )
        )
[ "\n Check that value is a valid categorical missing_value.\n\n Raises a TypeError if the value is cannot be used as the missing_value for\n a categorical_dtype Term.\n " ]
Please provide a description of the function:
def _pop_params(cls, kwargs):
    params = cls.params
    if not isinstance(params, Mapping):
        params = {k: NotSpecified for k in params}
    param_values = []
    for key, default_value in params.items():
        try:
            value = kwargs.pop(key, default_value)
            if value is NotSpecified:
                raise KeyError(key)

            # Check here that the value is hashable so that we fail here
            # instead of trying to hash the param values tuple later.
            hash(value)
        except KeyError:
            raise TypeError(
                "{typename} expected a keyword parameter {name!r}.".format(
                    typename=cls.__name__,
                    name=key
                )
            )
        except TypeError:
            # Value wasn't hashable.
            raise TypeError(
                "{typename} expected a hashable value for parameter "
                "{name!r}, but got {value!r} instead.".format(
                    typename=cls.__name__,
                    name=key,
                    value=value,
                )
            )

        param_values.append((key, value))
    return tuple(param_values)
[ "\n Pop entries from the `kwargs` passed to cls.__new__ based on the values\n in `cls.params`.\n\n Parameters\n ----------\n kwargs : dict\n The kwargs passed to cls.__new__.\n\n Returns\n -------\n params : list[(str, object)]\n A list of string, value pairs containing the entries in cls.params.\n\n Raises\n ------\n TypeError\n Raised if any parameter values are not passed or not hashable.\n " ]
Please provide a description of the function:
def _static_identity(cls,
                     domain,
                     dtype,
                     missing_value,
                     window_safe,
                     ndim,
                     params):
    return (cls, domain, dtype, missing_value, window_safe, ndim, params)
[ "\n Return the identity of the Term that would be constructed from the\n given arguments.\n\n Identities that compare equal will cause us to return a cached instance\n rather than constructing a new one. We do this primarily because it\n makes dependency resolution easier.\n\n This is a classmethod so that it can be called from Term.__new__ to\n determine whether to produce a new instance.\n " ]
Please provide a description of the function:
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
    self.domain = domain
    self.dtype = dtype
    self.missing_value = missing_value
    self.window_safe = window_safe
    self.ndim = ndim

    for name, value in params:
        if hasattr(self, name):
            raise TypeError(
                "Parameter {name!r} conflicts with already-present"
                " attribute with value {value!r}.".format(
                    name=name,
                    value=getattr(self, name),
                )
            )
        # TODO: Consider setting these values as attributes and replacing
        # the boilerplate in NumericalExpression, Rank, and
        # PercentileFilter.

    self.params = dict(params)

    # Make sure that subclasses call super() in their _validate() methods
    # by setting this flag. The base class implementation of _validate
    # should set this flag to True.
    self._subclass_called_super_validate = False
    self._validate()
    assert self._subclass_called_super_validate, (
        "Term._validate() was not called.\n"
        "This probably means that you overrode _validate"
        " without calling super()."
    )
    del self._subclass_called_super_validate

    return self
[ "\n Parameters\n ----------\n domain : zipline.pipeline.domain.Domain\n The domain of this term.\n dtype : np.dtype\n Dtype of this term's output.\n missing_value : object\n Missing value for this term.\n ndim : 1 or 2\n The dimensionality of this term.\n params : tuple[(str, hashable)]\n Tuple of key/value pairs of additional parameters.\n " ]
Please provide a description of the function:
def dependencies(self):
    extra_input_rows = max(0, self.window_length - 1)
    out = {}
    for term in self.inputs:
        out[term] = extra_input_rows
    out[self.mask] = 0
    return out
[ "\n The number of extra rows needed for each of our inputs to compute this\n term.\n " ]
Please provide a description of the function:
def to_workspace_value(self, result, assets):
    return result.unstack().fillna(self.missing_value).reindex(
        columns=assets,
        fill_value=self.missing_value,
    ).values
[ "\n Called with a column of the result of a pipeline. This needs to put\n the data into a format that can be used in a workspace to continue\n doing computations.\n\n Parameters\n ----------\n result : pd.Series\n A multiindexed series with (dates, assets) whose values are the\n results of running this pipeline term over the dates.\n assets : pd.Index\n All of the assets being requested. This allows us to correctly\n shape the workspace value.\n\n Returns\n -------\n workspace_value : array-like\n An array like value that the engine can consume.\n " ]
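For the common case where the result is a (date, asset) MultiIndexed Series, a minimal pandas sketch of the unstack/reindex step looks like this; the toy data and asset ids 1-3 are made up for the illustration, with NaN playing the role of the missing value.

import numpy as np
import pandas as pd

index = pd.MultiIndex.from_product(
    [pd.to_datetime(['2016-01-04', '2016-01-05']), [1, 2]],
    names=['date', 'asset'],
)
result = pd.Series([1.0, 2.0, 3.0, 4.0], index=index)

# Pivot assets into columns, then align to the full set of requested assets,
# filling assets with no data with the missing value.
workspace = result.unstack().reindex(columns=[1, 2, 3], fill_value=np.nan).values

assert workspace.shape == (2, 3)          # 2 dates x 3 requested assets
assert np.isnan(workspace[:, 2]).all()    # asset 3 had no data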
Please provide a description of the function:
def earn_stock_dividend(self, stock_dividend):
    return {
        'payment_asset': stock_dividend.payment_asset,
        'share_count': np.floor(
            self.amount * float(stock_dividend.ratio)
        )
    }
[ "\n Register the number of shares we held at this dividend's ex date so\n that we can pay out the correct amount on the dividend's pay date.\n " ]
Please provide a description of the function:
def handle_split(self, asset, ratio):
    if self.asset != asset:
        raise Exception("updating split with the wrong asset!")

    # adjust the # of shares by the ratio
    # (if we had 100 shares, and the ratio is 3,
    #  we now have 33 shares)
    # (old_share_count / ratio = new_share_count)
    # (old_price * ratio = new_price)

    # e.g., 33.333
    raw_share_count = self.amount / float(ratio)

    # e.g., 33
    full_share_count = np.floor(raw_share_count)

    # e.g., 0.333
    fractional_share_count = raw_share_count - full_share_count

    # adjust the cost basis to the nearest cent, e.g., 60.0
    new_cost_basis = round(self.cost_basis * ratio, 2)

    self.cost_basis = new_cost_basis
    self.amount = full_share_count

    return_cash = round(float(fractional_share_count * new_cost_basis), 2)

    log.info("after split: " + str(self))
    log.info("returning cash: " + str(return_cash))

    # return the leftover cash, which will be converted into cash
    # (rounded to the nearest cent)
    return return_cash
[ "\n Update the position by the split ratio, and return the resulting\n fractional share that will be converted into cash.\n\n Returns the unused cash.\n " ]
Please provide a description of the function:
def adjust_commission_cost_basis(self, asset, cost):
    if asset != self.asset:
        raise Exception('Updating a commission for a different asset?')
    if cost == 0.0:
        return

    # If we no longer hold this position, there is no cost basis to
    # adjust.
    if self.amount == 0:
        return

    # We treat cost basis as the share price where we have broken even.
    # For longs, commissions cause a relatively straight forward increase
    # in the cost basis.
    #
    # For shorts, you actually want to decrease the cost basis because you
    # break even and earn a profit when the share price decreases.
    #
    # Shorts are represented as having a negative `amount`.
    #
    # The multiplication and division by `amount` cancel out leaving the
    # cost_basis positive, while subtracting the commission.

    prev_cost = self.cost_basis * self.amount
    if isinstance(asset, Future):
        cost_to_use = cost / asset.price_multiplier
    else:
        cost_to_use = cost
    new_cost = prev_cost + cost_to_use
    self.cost_basis = new_cost / self.amount
[ "\n A note about cost-basis in zipline: all positions are considered\n to share a cost basis, even if they were executed in different\n transactions with different commission costs, different prices, etc.\n\n Due to limitations about how zipline handles positions, zipline will\n currently spread an externally-delivered commission charge across\n all shares in a position.\n " ]
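A worked arithmetic example of the break-even adjustment described above, for a long and a short equity position; the numbers are made up and no library objects are involved, just the prev_cost/new_cost formula from the method.

# Long: 100 shares at $10.00 cost basis, $5.00 commission charged.
amount, cost_basis, commission = 100, 10.00, 5.00
new_basis = (cost_basis * amount + commission) / amount
assert new_basis == 10.05       # break-even price rises for a long

# Short: -100 shares at $10.00 cost basis, $5.00 commission charged.
amount = -100
new_basis = (cost_basis * amount + commission) / amount
assert new_basis == 9.95        # break-even price falls for a short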
Please provide a description of the function:
def to_dict(self):
    return {
        'sid': self.asset,
        'amount': self.amount,
        'cost_basis': self.cost_basis,
        'last_sale_price': self.last_sale_price
    }
[ "\n Creates a dictionary representing the state of this position.\n Returns a dict object of the form:\n " ]
Please provide a description of the function:def _make_bundle_core(): _bundles = {} # the registered bundles # Expose _bundles through a proxy so that users cannot mutate this # accidentally. Users may go through `register` to update this which will # warn when trampling another bundle. bundles = mappingproxy(_bundles) @curry def register(name, f, calendar_name='NYSE', start_session=None, end_session=None, minutes_per_day=390, create_writers=True): if name in bundles: warnings.warn( 'Overwriting bundle with name %r' % name, stacklevel=3, ) # NOTE: We don't eagerly compute calendar values here because # `register` is called at module scope in zipline, and creating a # calendar currently takes between 0.5 and 1 seconds, which causes a # noticeable delay on the zipline CLI. _bundles[name] = RegisteredBundle( calendar_name=calendar_name, start_session=start_session, end_session=end_session, minutes_per_day=minutes_per_day, ingest=f, create_writers=create_writers, ) return f def unregister(name): try: del _bundles[name] except KeyError: raise UnknownBundle(name) def ingest(name, environ=os.environ, timestamp=None, assets_versions=(), show_progress=False): try: bundle = bundles[name] except KeyError: raise UnknownBundle(name) calendar = get_calendar(bundle.calendar_name) start_session = bundle.start_session end_session = bundle.end_session if start_session is None or start_session < calendar.first_session: start_session = calendar.first_session if end_session is None or end_session > calendar.last_session: end_session = calendar.last_session if timestamp is None: timestamp = pd.Timestamp.utcnow() timestamp = timestamp.tz_convert('utc').tz_localize(None) timestr = to_bundle_ingest_dirname(timestamp) cachepath = cache_path(name, environ=environ) pth.ensure_directory(pth.data_path([name, timestr], environ=environ)) pth.ensure_directory(cachepath) with dataframe_cache(cachepath, clean_on_failure=False) as cache, \ ExitStack() as stack: # we use `cleanup_on_failure=False` so that we don't purge the # cache directory if the load fails in the middle if bundle.create_writers: wd = stack.enter_context(working_dir( pth.data_path([], environ=environ)) ) daily_bars_path = wd.ensure_dir( *daily_equity_relative( name, timestr, environ=environ, ) ) daily_bar_writer = BcolzDailyBarWriter( daily_bars_path, calendar, start_session, end_session, ) # Do an empty write to ensure that the daily ctables exist # when we create the SQLiteAdjustmentWriter below. The # SQLiteAdjustmentWriter needs to open the daily ctables so # that it can compute the adjustment ratios for the dividends. 
daily_bar_writer.write(()) minute_bar_writer = BcolzMinuteBarWriter( wd.ensure_dir(*minute_equity_relative( name, timestr, environ=environ) ), calendar, start_session, end_session, minutes_per_day=bundle.minutes_per_day, ) assets_db_path = wd.getpath(*asset_db_relative( name, timestr, environ=environ, )) asset_db_writer = AssetDBWriter(assets_db_path) adjustment_db_writer = stack.enter_context( SQLiteAdjustmentWriter( wd.getpath(*adjustment_db_relative( name, timestr, environ=environ)), BcolzDailyBarReader(daily_bars_path), overwrite=True, ) ) else: daily_bar_writer = None minute_bar_writer = None asset_db_writer = None adjustment_db_writer = None if assets_versions: raise ValueError('Need to ingest a bundle that creates ' 'writers in order to downgrade the assets' ' db.') bundle.ingest( environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_db_writer, calendar, start_session, end_session, cache, show_progress, pth.data_path([name, timestr], environ=environ), ) for version in sorted(set(assets_versions), reverse=True): version_path = wd.getpath(*asset_db_relative( name, timestr, environ=environ, db_version=version, )) with working_file(version_path) as wf: shutil.copy2(assets_db_path, wf.path) downgrade(wf.path, version) def most_recent_data(bundle_name, timestamp, environ=None): if bundle_name not in bundles: raise UnknownBundle(bundle_name) try: candidates = os.listdir( pth.data_path([bundle_name], environ=environ), ) return pth.data_path( [bundle_name, max( filter(complement(pth.hidden), candidates), key=from_bundle_ingest_dirname, )], environ=environ, ) except (ValueError, OSError) as e: if getattr(e, 'errno', errno.ENOENT) != errno.ENOENT: raise raise ValueError( 'no data for bundle {bundle!r} on or before {timestamp}\n' 'maybe you need to run: $ zipline ingest -b {bundle}'.format( bundle=bundle_name, timestamp=timestamp, ), ) def load(name, environ=os.environ, timestamp=None): if timestamp is None: timestamp = pd.Timestamp.utcnow() timestr = most_recent_data(name, timestamp, environ=environ) return BundleData( asset_finder=AssetFinder( asset_db_path(name, timestr, environ=environ), ), equity_minute_bar_reader=BcolzMinuteBarReader( minute_equity_path(name, timestr, environ=environ), ), equity_daily_bar_reader=BcolzDailyBarReader( daily_equity_path(name, timestr, environ=environ), ), adjustment_reader=SQLiteAdjustmentReader( adjustment_db_path(name, timestr, environ=environ), ), ) @preprocess( before=optionally(ensure_timestamp), after=optionally(ensure_timestamp), ) def clean(name, before=None, after=None, keep_last=None, environ=os.environ): try: all_runs = sorted( filter( complement(pth.hidden), os.listdir(pth.data_path([name], environ=environ)), ), key=from_bundle_ingest_dirname, ) except OSError as e: if e.errno != errno.ENOENT: raise raise UnknownBundle(name) if ((before is not None or after is not None) and keep_last is not None): raise BadClean(before, after, keep_last) if keep_last is None: def should_clean(name): dt = from_bundle_ingest_dirname(name) return ( (before is not None and dt < before) or (after is not None and dt > after) ) elif keep_last >= 0: last_n_dts = set(take(keep_last, reversed(all_runs))) def should_clean(name): return name not in last_n_dts else: raise BadClean(before, after, keep_last) cleaned = set() for run in all_runs: if should_clean(run): path = pth.data_path([name, run], environ=environ) shutil.rmtree(path) cleaned.add(path) return cleaned return BundleCore(bundles, register, unregister, ingest, load, clean)
[ "Create a family of data bundle functions that read from the same\n bundle mapping.\n\n Returns\n -------\n bundles : mappingproxy\n The mapping of bundles to bundle payloads.\n register : callable\n The function which registers new bundles in the ``bundles`` mapping.\n unregister : callable\n The function which deregisters bundles from the ``bundles`` mapping.\n ingest : callable\n The function which downloads and write data for a given data bundle.\n load : callable\n The function which loads the ingested bundles back into memory.\n clean : callable\n The function which cleans up data written with ``ingest``.\n ", "Register a data bundle ingest function.\n\n Parameters\n ----------\n name : str\n The name of the bundle.\n f : callable\n The ingest function. This function will be passed:\n\n environ : mapping\n The environment this is being run with.\n asset_db_writer : AssetDBWriter\n The asset db writer to write into.\n minute_bar_writer : BcolzMinuteBarWriter\n The minute bar writer to write into.\n daily_bar_writer : BcolzDailyBarWriter\n The daily bar writer to write into.\n adjustment_writer : SQLiteAdjustmentWriter\n The adjustment db writer to write into.\n calendar : trading_calendars.TradingCalendar\n The trading calendar to ingest for.\n start_session : pd.Timestamp\n The first session of data to ingest.\n end_session : pd.Timestamp\n The last session of data to ingest.\n cache : DataFrameCache\n A mapping object to temporarily store dataframes.\n This should be used to cache intermediates in case the load\n fails. This will be automatically cleaned up after a\n successful load.\n show_progress : bool\n Show the progress for the current load where possible.\n calendar_name : str, optional\n The name of a calendar used to align bundle data.\n Default is 'NYSE'.\n start_session : pd.Timestamp, optional\n The first session for which we want data. If not provided,\n or if the date lies outside the range supported by the\n calendar, the first_session of the calendar is used.\n end_session : pd.Timestamp, optional\n The last session for which we want data. If not provided,\n or if the date lies outside the range supported by the\n calendar, the last_session of the calendar is used.\n minutes_per_day : int, optional\n The number of minutes in each normal trading day.\n create_writers : bool, optional\n Should the ingest machinery create the writers for the ingest\n function. This can be disabled as an optimization for cases where\n they are not needed, like the ``quantopian-quandl`` bundle.\n\n Notes\n -----\n This function my be used as a decorator, for example:\n\n .. code-block:: python\n\n @register('quandl')\n def quandl_ingest_function(...):\n ...\n\n See Also\n --------\n zipline.data.bundles.bundles\n ", "Unregister a bundle.\n\n Parameters\n ----------\n name : str\n The name of the bundle to unregister.\n\n Raises\n ------\n UnknownBundle\n Raised when no bundle has been registered with the given name.\n\n See Also\n --------\n zipline.data.bundles.bundles\n ", "Ingest data for a given bundle.\n\n Parameters\n ----------\n name : str\n The name of the bundle.\n environ : mapping, optional\n The environment variables. 
By default this is os.environ.\n timestamp : datetime, optional\n The timestamp to use for the load.\n By default this is the current time.\n assets_versions : Iterable[int], optional\n Versions of the assets db to which to downgrade.\n show_progress : bool, optional\n Tell the ingest function to display the progress where possible.\n ", "Get the path to the most recent data after ``date``for the\n given bundle.\n\n Parameters\n ----------\n bundle_name : str\n The name of the bundle to lookup.\n timestamp : datetime\n The timestamp to begin searching on or before.\n environ : dict, optional\n An environment dict to forward to zipline_root.\n ", "Loads a previously ingested bundle.\n\n Parameters\n ----------\n name : str\n The name of the bundle.\n environ : mapping, optional\n The environment variables. Defaults of os.environ.\n timestamp : datetime, optional\n The timestamp of the data to lookup.\n Defaults to the current time.\n\n Returns\n -------\n bundle_data : BundleData\n The raw data readers for this bundle.\n ", "Clean up data that was created with ``ingest`` or\n ``$ python -m zipline ingest``\n\n Parameters\n ----------\n name : str\n The name of the bundle to remove data for.\n before : datetime, optional\n Remove data ingested before this date.\n This argument is mutually exclusive with: keep_last\n after : datetime, optional\n Remove data ingested after this date.\n This argument is mutually exclusive with: keep_last\n keep_last : int, optional\n Remove all but the last ``keep_last`` ingestions.\n This argument is mutually exclusive with:\n before\n after\n environ : mapping, optional\n The environment variables. Defaults of os.environ.\n\n Returns\n -------\n cleaned : set[str]\n The names of the runs that were removed.\n\n Raises\n ------\n BadClean\n Raised when ``before`` and or ``after`` are passed with\n ``keep_last``. This is a subclass of ``ValueError``.\n " ]
Please provide a description of the function:
def deprecated(msg=None, stacklevel=2):
    def deprecated_dec(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                msg or "Function %s is deprecated." % fn.__name__,
                category=DeprecationWarning,
                stacklevel=stacklevel
            )
            return fn(*args, **kwargs)
        return wrapper
    return deprecated_dec
[ "\n Used to mark a function as deprecated.\n\n Parameters\n ----------\n msg : str\n The message to display in the deprecation warning.\n stacklevel : int\n How far up the stack the warning needs to go, before\n showing the relevant calling lines.\n\n Examples\n --------\n @deprecated(msg='function_a is deprecated! Use function_b instead.')\n def function_a(*args, **kwargs):\n " ]
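A short usage sketch for the decorator above, capturing the emitted warning with the standard warnings machinery. It assumes deprecated is available as defined in the snippet (along with its warnings and functools.wraps imports); nothing else is library-specific.

import warnings


@deprecated(msg='function_a is deprecated! Use function_b instead.')
def function_a():
    return 42


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    assert function_a() == 42                                  # still callable
    assert issubclass(caught[0].category, DeprecationWarning)  # warning emitted
    assert 'function_a is deprecated' in str(caught[0].message)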
Please provide a description of the function:
def load_pricing_adjustments(self, columns, dts, assets):
    out = [None] * len(columns)
    for i, column in enumerate(columns):
        adjs = {}
        for asset in assets:
            adjs.update(self._get_adjustments_in_range(
                asset, dts, column))
        out[i] = adjs
    return out
[ "\n Returns\n -------\n adjustments : list[dict[int -> Adjustment]]\n A list, where each element corresponds to the `columns`, of\n mappings from index to adjustment objects to apply at that index.\n " ]
Please provide a description of the function:
def _get_adjustments_in_range(self, asset, dts, field):
    sid = int(asset)
    start = normalize_date(dts[0])
    end = normalize_date(dts[-1])
    adjs = {}
    if field != 'volume':
        mergers = self._adjustments_reader.get_adjustments_for_sid(
            'mergers', sid)
        for m in mergers:
            dt = m[0]
            if start < dt <= end:
                end_loc = dts.searchsorted(dt)
                adj_loc = end_loc
                mult = Float64Multiply(0, end_loc - 1, 0, 0, m[1])
                try:
                    adjs[adj_loc].append(mult)
                except KeyError:
                    adjs[adj_loc] = [mult]
        divs = self._adjustments_reader.get_adjustments_for_sid(
            'dividends', sid)
        for d in divs:
            dt = d[0]
            if start < dt <= end:
                end_loc = dts.searchsorted(dt)
                adj_loc = end_loc
                mult = Float64Multiply(0, end_loc - 1, 0, 0, d[1])
                try:
                    adjs[adj_loc].append(mult)
                except KeyError:
                    adjs[adj_loc] = [mult]
    splits = self._adjustments_reader.get_adjustments_for_sid(
        'splits', sid)
    for s in splits:
        dt = s[0]
        if start < dt <= end:
            if field == 'volume':
                ratio = 1.0 / s[1]
            else:
                ratio = s[1]
            end_loc = dts.searchsorted(dt)
            adj_loc = end_loc
            mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio)
            try:
                adjs[adj_loc].append(mult)
            except KeyError:
                adjs[adj_loc] = [mult]
    return adjs
[ "\n Get the Float64Multiply objects to pass to an AdjustedArrayWindow.\n\n For the use of AdjustedArrayWindow in the loader, which looks back\n from current simulation time back to a window of data the dictionary is\n structured with:\n - the key into the dictionary for adjustments is the location of the\n day from which the window is being viewed.\n - the start of all multiply objects is always 0 (in each window all\n adjustments are overlapping)\n - the end of the multiply object is the location before the calendar\n location of the adjustment action, making all days before the event\n adjusted.\n\n Parameters\n ----------\n asset : Asset\n The assets for which to get adjustments.\n dts : iterable of datetime64-like\n The dts for which adjustment data is needed.\n field : str\n OHLCV field for which to get the adjustments.\n\n Returns\n -------\n out : dict[loc -> Float64Multiply]\n The adjustments as a dict of loc -> Float64Multiply\n " ]
Please provide a description of the function:
def get(self, end_ix):
    if self.most_recent_ix == end_ix:
        return self.current

    target = end_ix - self.cal_start - self.offset + 1
    self.current = self.window.seek(target)

    self.most_recent_ix = end_ix
    return self.current
[ "\n Returns\n -------\n out : A np.ndarray of the equity pricing up to end_ix after adjustments\n and rounding have been applied.\n " ]
Please provide a description of the function:def _ensure_sliding_windows(self, assets, dts, field, is_perspective_after): end = dts[-1] size = len(dts) asset_windows = {} needed_assets = [] cal = self._calendar assets = self._asset_finder.retrieve_all(assets) end_ix = find_in_sorted_index(cal, end) for asset in assets: try: window = self._window_blocks[field].get( (asset, size, is_perspective_after), end) except KeyError: needed_assets.append(asset) else: if end_ix < window.most_recent_ix: # Window needs reset. Requested end index occurs before the # end index from the previous history call for this window. # Grab new window instead of rewinding adjustments. needed_assets.append(asset) else: asset_windows[asset] = window if needed_assets: offset = 0 start_ix = find_in_sorted_index(cal, dts[0]) prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1) prefetch_end = cal[prefetch_end_ix] prefetch_dts = cal[start_ix:prefetch_end_ix + 1] if is_perspective_after: adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1) adj_dts = cal[start_ix:adj_end_ix + 1] else: adj_dts = prefetch_dts prefetch_len = len(prefetch_dts) array = self._array(prefetch_dts, needed_assets, field) if field == 'sid': window_type = Int64Window else: window_type = Float64Window view_kwargs = {} if field == 'volume': array = array.astype(float64_dtype) for i, asset in enumerate(needed_assets): adj_reader = None try: adj_reader = self._adjustment_readers[type(asset)] except KeyError: adj_reader = None if adj_reader is not None: adjs = adj_reader.load_pricing_adjustments( [field], adj_dts, [asset])[0] else: adjs = {} window = window_type( array[:, i].reshape(prefetch_len, 1), view_kwargs, adjs, offset, size, int(is_perspective_after), self._decimal_places_for_asset(asset, dts[-1]), ) sliding_window = SlidingWindow(window, size, start_ix, offset) asset_windows[asset] = sliding_window self._window_blocks[field].set( (asset, size, is_perspective_after), sliding_window, prefetch_end) return [asset_windows[asset] for asset in assets]
[ "\n Ensure that there is a Float64Multiply window for each asset that can\n provide data for the given parameters.\n If the corresponding window for the (assets, len(dts), field) does not\n exist, then create a new one.\n If a corresponding window does exist for (assets, len(dts), field), but\n can not provide data for the current dts range, then create a new\n one and replace the expired window.\n\n Parameters\n ----------\n assets : iterable of Assets\n The assets in the window\n dts : iterable of datetime64-like\n The datetimes for which to fetch data.\n Makes an assumption that all dts are present and contiguous,\n in the calendar.\n field : str\n The OHLCV field for which to retrieve data.\n is_perspective_after : bool\n see: `PricingHistoryLoader.history`\n\n Returns\n -------\n out : list of Float64Window with sufficient data so that each asset's\n window can provide `get` for the index corresponding with the last\n value in `dts`\n " ]
Please provide a description of the function:
def history(self, assets, dts, field, is_perspective_after):
    block = self._ensure_sliding_windows(assets,
                                         dts,
                                         field,
                                         is_perspective_after)
    end_ix = self._calendar.searchsorted(dts[-1])

    return concatenate(
        [window.get(end_ix) for window in block],
        axis=1,
    )
[ "\n A window of pricing data with adjustments applied assuming that the\n end of the window is the day before the current simulation time.\n\n Parameters\n ----------\n assets : iterable of Assets\n The assets in the window.\n dts : iterable of datetime64-like\n The datetimes for which to fetch data.\n Makes an assumption that all dts are present and contiguous,\n in the calendar.\n field : str\n The OHLCV field for which to retrieve data.\n is_perspective_after : bool\n True, if the window is being viewed immediately after the last dt\n in the sliding window.\n False, if the window is viewed on the last dt.\n\n This flag is used for handling the case where the last dt in the\n requested window immediately precedes a corporate action, e.g.:\n\n - is_perspective_after is True\n\n When the viewpoint is after the last dt in the window, as when a\n daily history window is accessed from a simulation that uses a\n minute data frequency, the history call to this loader will not\n include the current simulation dt. At that point in time, the raw\n data for the last day in the window will require adjustment, so the\n most recent adjustment with respect to the simulation time is\n applied to the last dt in the requested window.\n\n An example equity which has a 0.5 split ratio dated for 05-27,\n with the dts for a history call of 5 bars with a '1d' frequency at\n 05-27 9:31. Simulation frequency is 'minute'.\n\n (In this case this function is called with 4 daily dts, and the\n calling function is responsible for stitching back on the\n 'current' dt)\n\n | | | | | last dt | <-- viewer is here |\n | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |\n | raw | 10.10 | 10.20 | 10.30 | 10.40 | |\n | adj | 5.05 | 5.10 | 5.15 | 5.25 | |\n\n The adjustment is applied to the last dt, 05-26, and all previous\n dts.\n\n - is_perspective_after is False, daily\n\n When the viewpoint is the same point in time as the last dt in the\n window, as when a daily history window is accessed from a\n simulation that uses a daily data frequency, the history call will\n include the current dt. At that point in time, the raw data for the\n last day in the window will be post-adjustment, so no adjustment\n is applied to the last dt.\n\n An example equity which has a 0.5 split ratio dated for 05-27,\n with the dts for a history call of 5 bars with a '1d' frequency at\n 05-27 0:00. Simulation frequency is 'daily'.\n\n | | | | | | <-- viewer is here |\n | | | | | | last dt |\n | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |\n | raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |\n | adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |\n\n Adjustments are applied 05-23 through 05-26 but not to the last dt,\n 05-27\n\n Returns\n -------\n out : np.ndarray with shape(len(days between start, end), len(assets))\n " ]
Please provide a description of the function:
def parse_date_str_series(format_str, tz, date_str_series, data_frequency,
                          trading_day):
    # Explicitly ignoring this parameter. See note above.
    if format_str is not None:
        logger.warn(
            "The 'format_str' parameter to fetch_csv is deprecated. "
            "Ignoring and defaulting to pandas default date parsing."
        )
        format_str = None

    tz_str = str(tz)
    if tz_str == pytz.utc.zone:
        parsed = pd.to_datetime(
            date_str_series.values,
            format=format_str,
            utc=True,
            errors='coerce',
        )
    else:
        parsed = pd.to_datetime(
            date_str_series.values,
            format=format_str,
            errors='coerce',
        ).tz_localize(tz_str).tz_convert('UTC')

    if data_frequency == 'daily':
        parsed = roll_dts_to_midnight(parsed, trading_day)
    return parsed
[ "\n Efficient parsing for a 1d Pandas/numpy object containing string\n representations of dates.\n\n Note: pd.to_datetime is significantly faster when no format string is\n passed, and in pandas 0.12.0 the %p strptime directive is not correctly\n handled if a format string is explicitly passed, but AM/PM is handled\n properly if format=None.\n\n Moreover, we were previously ignoring this parameter unintentionally\n because we were incorrectly passing it as a positional. For all these\n reasons, we ignore the format_str parameter when parsing datetimes.\n " ]
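A small pandas sketch of the two parsing branches described above, UTC strings parsed directly versus local-time strings localized and then converted; the timestamps and the US/Eastern zone are example inputs, not values from the original code.

import pandas as pd

date_strs = pd.Series(['2016-01-04 09:31', '2016-01-04 09:32'])

# UTC branch: parse straight to tz-aware UTC timestamps.
utc_parsed = pd.to_datetime(date_strs.values, utc=True, errors='coerce')

# Non-UTC branch: parse naive, localize to the source tz, convert to UTC.
local_parsed = (
    pd.to_datetime(date_strs.values, errors='coerce')
    .tz_localize('US/Eastern')
    .tz_convert('UTC')
)

assert str(utc_parsed[0]) == '2016-01-04 09:31:00+00:00'
assert str(local_parsed[0]) == '2016-01-04 14:31:00+00:00'  # 9:31 EST == 14:31 UTC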
Please provide a description of the function:
def _lookup_unconflicted_symbol(self, symbol):
    try:
        uppered = symbol.upper()
    except AttributeError:
        # The mapping fails because symbol was a non-string
        return numpy.nan

    try:
        return self.finder.lookup_symbol(
            uppered,
            as_of_date=None,
            country_code=self.country_code,
        )
    except MultipleSymbolsFound:
        # Fill conflicted entries with zeros to mark that they need to be
        # resolved by date.
        return 0
    except SymbolNotFound:
        # Fill not found entries with nans.
        return numpy.nan
[ "\n Attempt to find a unique asset whose symbol is the given string.\n\n If multiple assets have held the given symbol, return a 0.\n\n If no asset has held the given symbol, return a NaN.\n " ]
Please provide a description of the function:def transform(self): algo = self.algo metrics_tracker = algo.metrics_tracker emission_rate = metrics_tracker.emission_rate def every_bar(dt_to_use, current_data=self.current_data, handle_data=algo.event_manager.handle_data): for capital_change in calculate_minute_capital_changes(dt_to_use): yield capital_change self.simulation_dt = dt_to_use # called every tick (minute or day). algo.on_dt_changed(dt_to_use) blotter = algo.blotter # handle any transactions and commissions coming out new orders # placed in the last bar new_transactions, new_commissions, closed_orders = \ blotter.get_transactions(current_data) blotter.prune_orders(closed_orders) for transaction in new_transactions: metrics_tracker.process_transaction(transaction) # since this order was modified, record it order = blotter.orders[transaction.order_id] metrics_tracker.process_order(order) for commission in new_commissions: metrics_tracker.process_commission(commission) handle_data(algo, current_data, dt_to_use) # grab any new orders from the blotter, then clear the list. # this includes cancelled orders. new_orders = blotter.new_orders blotter.new_orders = [] # if we have any new orders, record them so that we know # in what perf period they were placed. for new_order in new_orders: metrics_tracker.process_order(new_order) def once_a_day(midnight_dt, current_data=self.current_data, data_portal=self.data_portal): # process any capital changes that came overnight for capital_change in algo.calculate_capital_changes( midnight_dt, emission_rate=emission_rate, is_interday=True): yield capital_change # set all the timestamps self.simulation_dt = midnight_dt algo.on_dt_changed(midnight_dt) metrics_tracker.handle_market_open( midnight_dt, algo.data_portal, ) # handle any splits that impact any positions or any open orders. assets_we_care_about = ( viewkeys(metrics_tracker.positions) | viewkeys(algo.blotter.open_orders) ) if assets_we_care_about: splits = data_portal.get_splits(assets_we_care_about, midnight_dt) if splits: algo.blotter.process_splits(splits) metrics_tracker.handle_splits(splits) def on_exit(): # Remove references to algo, data portal, et al to break cycles # and ensure deterministic cleanup of these objects when the # simulation finishes. self.algo = None self.benchmark_source = self.current_data = self.data_portal = None with ExitStack() as stack: stack.callback(on_exit) stack.enter_context(self.processor) stack.enter_context(ZiplineAPI(self.algo)) if algo.data_frequency == 'minute': def execute_order_cancellation_policy(): algo.blotter.execute_cancel_policy(SESSION_END) def calculate_minute_capital_changes(dt): # process any capital changes that came between the last # and current minutes return algo.calculate_capital_changes( dt, emission_rate=emission_rate, is_interday=False) else: def execute_order_cancellation_policy(): pass def calculate_minute_capital_changes(dt): return [] for dt, action in self.clock: if action == BAR: for capital_change_packet in every_bar(dt): yield capital_change_packet elif action == SESSION_START: for capital_change_packet in once_a_day(dt): yield capital_change_packet elif action == SESSION_END: # End of the session. 
positions = metrics_tracker.positions position_assets = algo.asset_finder.retrieve_all(positions) self._cleanup_expired_assets(dt, position_assets) execute_order_cancellation_policy() algo.validate_account_controls() yield self._get_daily_message(dt, algo, metrics_tracker) elif action == BEFORE_TRADING_START_BAR: self.simulation_dt = dt algo.on_dt_changed(dt) algo.before_trading_start(self.current_data) elif action == MINUTE_END: minute_msg = self._get_minute_message( dt, algo, metrics_tracker, ) yield minute_msg risk_message = metrics_tracker.handle_simulation_end( self.data_portal, ) yield risk_message
[ "\n Main generator work loop.\n " ]
Please provide a description of the function:def _cleanup_expired_assets(self, dt, position_assets): algo = self.algo def past_auto_close_date(asset): acd = asset.auto_close_date return acd is not None and acd <= dt # Remove positions in any sids that have reached their auto_close date. assets_to_clear = \ [asset for asset in position_assets if past_auto_close_date(asset)] metrics_tracker = algo.metrics_tracker data_portal = self.data_portal for asset in assets_to_clear: metrics_tracker.process_close_position(asset, dt, data_portal) # Remove open orders for any sids that have reached their auto close # date. These orders get processed immediately because otherwise they # would not be processed until the first bar of the next day. blotter = algo.blotter assets_to_cancel = [ asset for asset in blotter.open_orders if past_auto_close_date(asset) ] for asset in assets_to_cancel: blotter.cancel_all_orders_for_asset(asset) # Make a copy here so that we are not modifying the list that is being # iterated over. for order in copy(blotter.new_orders): if order.status == ORDER_STATUS.CANCELLED: metrics_tracker.process_order(order) blotter.new_orders.remove(order)
[ "\n Clear out any assets that have expired before starting a new sim day.\n\n Performs two functions:\n\n 1. Finds all assets for which we have open orders and clears any\n orders whose assets are on or after their auto_close_date.\n\n 2. Finds all assets for which we have positions and generates\n close_position events for any assets that have reached their\n auto_close_date.\n " ]
Please provide a description of the function:def _get_daily_message(self, dt, algo, metrics_tracker): perf_message = metrics_tracker.handle_market_close( dt, self.data_portal, ) perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars return perf_message
[ "\n Get a perf message for the given datetime.\n " ]
Please provide a description of the function:def _get_minute_message(self, dt, algo, metrics_tracker): rvars = algo.recorded_vars minute_message = metrics_tracker.handle_minute_close( dt, self.data_portal, ) minute_message['minute_perf']['recorded_vars'] = rvars return minute_message
[ "\n Get a perf message for the given datetime.\n " ]
Please provide a description of the function:def load_adjustments(self, dates, assets, should_include_splits, should_include_mergers, should_include_dividends, adjustment_type): return load_adjustments_from_sqlite( self.conn, dates, assets, should_include_splits, should_include_mergers, should_include_dividends, adjustment_type, )
[ "\n Load collection of Adjustment objects from underlying adjustments db.\n\n Parameters\n ----------\n dates : pd.DatetimeIndex\n Dates for which adjustments are needed.\n assets : pd.Int64Index\n Assets for which adjustments are needed.\n should_include_splits : bool\n Whether split adjustments should be included.\n should_include_mergers : bool\n Whether merger adjustments should be included.\n should_include_dividends : bool\n Whether dividend adjustments should be included.\n adjustment_type : str\n Whether price adjustments, volume adjustments, or both, should be\n included in the output.\n\n Returns\n -------\n adjustments : dict[str -> dict[int -> Adjustment]]\n A dictionary containing price and/or volume adjustment mappings\n from index to adjustment objects to apply at that index.\n " ]
Please provide a description of the function:def unpack_db_to_component_dfs(self, convert_dates=False): return { t_name: self.get_df_from_table(t_name, convert_dates) for t_name in self._datetime_int_cols }
[ "Returns the set of known tables in the adjustments file in DataFrame\n form.\n\n Parameters\n ----------\n convert_dates : bool, optional\n By default, dates are returned in seconds since EPOCH. If\n convert_dates is True, all ints in date columns will be converted\n to datetimes.\n\n Returns\n -------\n dfs : dict{str->DataFrame}\n Dictionary which maps table name to the corresponding DataFrame\n version of the table, where all date columns have been coerced back\n from int to datetime.\n " ]
Please provide a description of the function:def _df_dtypes(self, table_name, convert_dates): out = self._raw_table_dtypes[table_name] if convert_dates: out = out.copy() for date_column in self._datetime_int_cols[table_name]: out[date_column] = datetime64ns_dtype return out
[ "Get dtypes to use when unpacking sqlite tables as dataframes.\n " ]
Please provide a description of the function:def calc_dividend_ratios(self, dividends): if dividends is None or dividends.empty: return pd.DataFrame(np.array( [], dtype=[ ('sid', uint64_dtype), ('effective_date', uint32_dtype), ('ratio', float64_dtype), ], )) pricing_reader = self._equity_daily_bar_reader input_sids = dividends.sid.values unique_sids, sids_ix = np.unique(input_sids, return_inverse=True) dates = pricing_reader.sessions.values close, = pricing_reader.load_raw_arrays( ['close'], pd.Timestamp(dates[0], tz='UTC'), pd.Timestamp(dates[-1], tz='UTC'), unique_sids, ) date_ix = np.searchsorted(dates, dividends.ex_date.values) mask = date_ix > 0 date_ix = date_ix[mask] sids_ix = sids_ix[mask] input_dates = dividends.ex_date.values[mask] # subtract one day to get the close on the day prior to the merger previous_close = close[date_ix - 1, sids_ix] input_sids = input_sids[mask] amount = dividends.amount.values[mask] ratio = 1.0 - amount / previous_close non_nan_ratio_mask = ~np.isnan(ratio) for ix in np.flatnonzero(~non_nan_ratio_mask): log.warn( "Couldn't compute ratio for dividend" " sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}", sid=input_sids[ix], ex_date=pd.Timestamp(input_dates[ix]), amount=amount[ix], ) positive_ratio_mask = ratio > 0 for ix in np.flatnonzero(~positive_ratio_mask & non_nan_ratio_mask): log.warn( "Dividend ratio <= 0 for dividend" " sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}", sid=input_sids[ix], ex_date=pd.Timestamp(input_dates[ix]), amount=amount[ix], ) valid_ratio_mask = non_nan_ratio_mask & positive_ratio_mask return pd.DataFrame({ 'sid': input_sids[valid_ratio_mask], 'effective_date': input_dates[valid_ratio_mask], 'ratio': ratio[valid_ratio_mask], })
[ "\n Calculate the ratios to apply to equities when looking back at pricing\n history so that the price is smoothed over the ex_date, when the market\n adjusts to the change in equity value due to upcoming dividend.\n\n Returns\n -------\n DataFrame\n A frame in the same format as splits and mergers, with keys\n - sid, the id of the equity\n - effective_date, the date in seconds on which to apply the ratio.\n - ratio, the ratio to apply to backwards looking pricing data.\n " ]
Please provide a description of the function:def write_dividend_data(self, dividends, stock_dividends=None): # First write the dividend payouts. self._write_dividends(dividends) self._write_stock_dividends(stock_dividends) # Second from the dividend payouts, calculate ratios. dividend_ratios = self.calc_dividend_ratios(dividends) self.write_frame('dividends', dividend_ratios)
[ "\n Write both dividend payouts and the derived price adjustment ratios.\n " ]
Please provide a description of the function:def write(self, splits=None, mergers=None, dividends=None, stock_dividends=None): self.write_frame('splits', splits) self.write_frame('mergers', mergers) self.write_dividend_data(dividends, stock_dividends) # Use IF NOT EXISTS here to allow multiple writes if desired. self.conn.execute( "CREATE INDEX IF NOT EXISTS splits_sids " "ON splits(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS splits_effective_date " "ON splits(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS mergers_sids " "ON mergers(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS mergers_effective_date " "ON mergers(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_sid " "ON dividends(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_effective_date " "ON dividends(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividend_payouts_sid " "ON dividend_payouts(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date " "ON dividend_payouts(ex_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid " "ON stock_dividend_payouts(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date " "ON stock_dividend_payouts(ex_date)" )
[ "\n Writes data to a SQLite file to be read by SQLiteAdjustmentReader.\n\n Parameters\n ----------\n splits : pandas.DataFrame, optional\n Dataframe containing split data. The format of this dataframe is:\n effective_date : int\n The date, represented as seconds since Unix epoch, on which\n the adjustment should be applied.\n ratio : float\n A value to apply to all data earlier than the effective date.\n For open, high, low, and close those values are multiplied by\n the ratio. Volume is divided by this value.\n sid : int\n The asset id associated with this adjustment.\n mergers : pandas.DataFrame, optional\n DataFrame containing merger data. The format of this dataframe is:\n effective_date : int\n The date, represented as seconds since Unix epoch, on which\n the adjustment should be applied.\n ratio : float\n A value to apply to all data earlier than the effective date.\n For open, high, low, and close those values are multiplied by\n the ratio. Volume is unaffected.\n sid : int\n The asset id associated with this adjustment.\n dividends : pandas.DataFrame, optional\n DataFrame containing dividend data. The format of the dataframe is:\n sid : int\n The asset id associated with this adjustment.\n ex_date : datetime64\n The date on which an equity must be held to be eligible to\n receive payment.\n declared_date : datetime64\n The date on which the dividend is announced to the public.\n pay_date : datetime64\n The date on which the dividend is distributed.\n record_date : datetime64\n The date on which the stock ownership is checked to determine\n distribution of dividends.\n amount : float\n The cash amount paid for each share.\n\n Dividend ratios are calculated as:\n ``1.0 - (dividend_value / \"close on day prior to ex_date\")``\n stock_dividends : pandas.DataFrame, optional\n DataFrame containing stock dividend data. The format of the\n dataframe is:\n sid : int\n The asset id associated with this adjustment.\n ex_date : datetime64\n The date on which an equity must be held to be eligible to\n receive payment.\n declared_date : datetime64\n The date on which the dividend is announced to the public.\n pay_date : datetime64\n The date on which the dividend is distributed.\n record_date : datetime64\n The date on which the stock ownership is checked to determine\n distribution of dividends.\n payment_sid : int\n The asset id of the shares that should be paid instead of\n cash.\n ratio : float\n The ratio of currently held shares in the held sid that\n should be paid with new shares of the payment_sid.\n\n See Also\n --------\n zipline.data.adjustments.SQLiteAdjustmentReader\n " ]
Please provide a description of the function:def compute(self, today, assets, out, *arrays): raise NotImplementedError( "{name} must define a compute method".format( name=type(self).__name__ ) )
[ "\n Override this method with a function that writes a value into `out`.\n " ]
Please provide a description of the function:def _allocate_output(self, windows, shape): missing_value = self.missing_value outputs = self.outputs if outputs is not NotSpecified: out = recarray( shape, formats=[self.dtype.str] * len(outputs), names=outputs, ) out[:] = missing_value else: out = full(shape, missing_value, dtype=self.dtype) return out
[ "\n Allocate an output array whose rows should be passed to `self.compute`.\n\n The resulting array must have a shape of ``shape``.\n\n If we have standard outputs (i.e. self.outputs is NotSpecified), the\n default is an empty ndarray whose dtype is ``self.dtype``.\n\n If we have an outputs tuple, the default is an empty recarray with\n ``self.outputs`` as field names. Each field will have dtype\n ``self.dtype``.\n\n This can be overridden to control the kind of array constructed\n (e.g. to produce a LabelArray instead of an ndarray).\n " ]
Please provide a description of the function:def _compute(self, windows, dates, assets, mask): format_inputs = self._format_inputs compute = self.compute params = self.params ndim = self.ndim shape = (len(mask), 1) if ndim == 1 else mask.shape out = self._allocate_output(windows, shape) with self.ctx: for idx, date in enumerate(dates): # Never apply a mask to 1D outputs. out_mask = array([True]) if ndim == 1 else mask[idx] # Mask our inputs as usual. inputs_mask = mask[idx] masked_assets = assets[inputs_mask] out_row = out[idx][out_mask] inputs = format_inputs(windows, inputs_mask) compute(date, masked_assets, out_row, *inputs, **params) out[idx][out_mask] = out_row return out
[ "\n Call the user's `compute` function on each window with a pre-built\n output array.\n " ]
Please provide a description of the function:def make_aliased_type(cls, other_base): docstring = dedent( ).format(t=other_base.__name__) doc = format_docstring( owner_name=other_base.__name__, docstring=docstring, formatters={'name': PIPELINE_ALIAS_NAME_DOC}, ) return type( 'Aliased' + other_base.__name__, (cls, other_base), {'__doc__': doc, '__module__': other_base.__module__}, )
[ "\n Factory for making Aliased{Filter,Factor,Classifier}.\n ", "\n A {t} that names another {t}.\n\n Parameters\n ----------\n term : {t}\n {{name}}\n " ]
Please provide a description of the function:def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows): try: current_start_pos = all_dates.get_loc(start_date) - min_extra_rows if current_start_pos < 0: raise NoFurtherDataError.from_lookback_window( initial_message="Insufficient data to compute Pipeline:", first_date=all_dates[0], lookback_start=start_date, lookback_length=min_extra_rows, ) except KeyError: before, after = nearest_unequal_elements(all_dates, start_date) raise ValueError( "Pipeline start_date {start_date} is not in calendar.\n" "Latest date before start_date is {before}.\n" "Earliest date after start_date is {after}.".format( start_date=start_date, before=before, after=after, ) ) # Our possible target dates are all the dates on or before the current # starting position. # TODO: Consider bounding this below by self.window_length candidates = all_dates[:current_start_pos + 1] # Choose the latest date in the candidates that is the start of a new # period at our frequency. choices = select_sampling_indices(candidates, self._frequency) # If we have choices, the last choice is the first date if the # period containing current_start_date. Choose it. new_start_date = candidates[choices[-1]] # Add the difference between the new and old start dates to get the # number of rows for the new start_date. new_start_pos = all_dates.get_loc(new_start_date) assert new_start_pos <= current_start_pos, \ "Computed negative extra rows!" return min_extra_rows + (current_start_pos - new_start_pos)
[ "\n Ensure that min_extra_rows pushes us back to a computation date.\n\n Parameters\n ----------\n all_dates : pd.DatetimeIndex\n The trading sessions against which ``self`` will be computed.\n start_date : pd.Timestamp\n The first date for which final output is requested.\n end_date : pd.Timestamp\n The last date for which final output is requested.\n min_extra_rows : int\n The minimum number of extra rows required of ``self``, as\n determined by other terms that depend on ``self``.\n\n Returns\n -------\n extra_rows : int\n The number of extra rows to compute. This will be the minimum\n number of rows required to make our computed start_date fall on a\n recomputation date.\n " ]
Please provide a description of the function:def _compute(self, inputs, dates, assets, mask): to_sample = dates[select_sampling_indices(dates, self._frequency)] assert to_sample[0] == dates[0], \ "Misaligned sampling dates in %s." % type(self).__name__ real_compute = self._wrapped_term._compute # Inputs will contain different kinds of values depending on whether or # not we're a windowed computation. # If we're windowed, then `inputs` is a list of iterators of ndarrays. # If we're not windowed, then `inputs` is just a list of ndarrays. # There are two things we care about doing with the input: # 1. Preparing an input to be passed to our wrapped term. # 2. Skipping an input if we're going to use an already-computed row. # We perform these actions differently based on the expected kind of # input, and we encapsulate these actions with closures so that we # don't clutter the code below with lots of branching. if self.windowed: # If we're windowed, inputs are stateful AdjustedArrays. We don't # need to do any preparation before forwarding to real_compute, but # we need to call `next` on them if we want to skip an iteration. def prepare_inputs(): return inputs def skip_this_input(): for w in inputs: next(w) else: # If we're not windowed, inputs are just ndarrays. We need to # slice out a single row when forwarding to real_compute, but we # don't need to do anything to skip an input. def prepare_inputs(): # i is the loop iteration variable below. return [a[[i]] for a in inputs] def skip_this_input(): pass results = [] samples = iter(to_sample) next_sample = next(samples) for i, compute_date in enumerate(dates): if next_sample == compute_date: results.append( real_compute( prepare_inputs(), dates[i:i + 1], assets, mask[i:i + 1], ) ) try: next_sample = next(samples) except StopIteration: # No more samples to take. Set next_sample to Nat, which # compares False with any other datetime. next_sample = pd_NaT else: skip_this_input() # Copy results from previous sample period. results.append(results[-1]) # We should have exhausted our sample dates. try: next_sample = next(samples) except StopIteration: pass else: raise AssertionError("Unconsumed sample date: %s" % next_sample) # Concatenate stored results. return vstack(results)
[ "\n Compute by delegating to self._wrapped_term._compute on sample dates.\n\n On non-sample dates, forward-fill from previously-computed samples.\n " ]
Please provide a description of the function:def make_downsampled_type(cls, other_base): docstring = dedent( ).format(t=other_base.__name__) doc = format_docstring( owner_name=other_base.__name__, docstring=docstring, formatters={'frequency': PIPELINE_DOWNSAMPLING_FREQUENCY_DOC}, ) return type( 'Downsampled' + other_base.__name__, (cls, other_base,), {'__doc__': doc, '__module__': other_base.__module__}, )
[ "\n Factory for making Downsampled{Filter,Factor,Classifier}.\n ", "\n A {t} that defers to another {t} at lower-than-daily frequency.\n\n Parameters\n ----------\n term : {t}\n {{frequency}}\n " ]
Please provide a description of the function:def preprocess(*_unused, **processors): if _unused: raise TypeError("preprocess() doesn't accept positional arguments") def _decorator(f): args, varargs, varkw, defaults = argspec = getargspec(f) if defaults is None: defaults = () no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults)) args_defaults = list(zip(args, no_defaults + defaults)) if varargs: args_defaults.append((varargs, NO_DEFAULT)) if varkw: args_defaults.append((varkw, NO_DEFAULT)) argset = set(args) | {varargs, varkw} - {None} # Arguments can be declared as tuples in Python 2. if not all(isinstance(arg, str) for arg in args): raise TypeError( "Can't validate functions using tuple unpacking: %s" % (argspec,) ) # Ensure that all processors map to valid names. bad_names = viewkeys(processors) - argset if bad_names: raise TypeError( "Got processors for unknown arguments: %s." % bad_names ) return _build_preprocessed_function( f, processors, args_defaults, varargs, varkw, ) return _decorator
[ "\n Decorator that applies pre-processors to the arguments of a function before\n calling the function.\n\n Parameters\n ----------\n **processors : dict\n Map from argument name -> processor function.\n\n A processor function takes three arguments: (func, argname, argvalue).\n\n `func` is the the function for which we're processing args.\n `argname` is the name of the argument we're processing.\n `argvalue` is the value of the argument we're processing.\n\n Examples\n --------\n >>> def _ensure_tuple(func, argname, arg):\n ... if isinstance(arg, tuple):\n ... return argvalue\n ... try:\n ... return tuple(arg)\n ... except TypeError:\n ... raise TypeError(\n ... \"%s() expected argument '%s' to\"\n ... \" be iterable, but got %s instead.\" % (\n ... func.__name__, argname, arg,\n ... )\n ... )\n ...\n >>> @preprocess(arg=_ensure_tuple)\n ... def foo(arg):\n ... return arg\n ...\n >>> foo([1, 2, 3])\n (1, 2, 3)\n >>> foo(\"a\")\n ('a',)\n >>> foo(2)\n Traceback (most recent call last):\n ...\n TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.\n " ]
Please provide a description of the function:def call(f): @wraps(f) def processor(func, argname, arg): return f(arg) return processor
[ "\n Wrap a function in a processor that calls `f` on the argument before\n passing it along.\n\n Useful for creating simple arguments to the `@preprocess` decorator.\n\n Parameters\n ----------\n f : function\n Function accepting a single argument and returning a replacement.\n\n Examples\n --------\n >>> @preprocess(x=call(lambda x: x + 1))\n ... def foo(x):\n ... return x\n ...\n >>> foo(1)\n 2\n " ]
Please provide a description of the function:def _build_preprocessed_function(func, processors, args_defaults, varargs, varkw): format_kwargs = {'func_name': func.__name__} def mangle(name): return 'a' + uuid4().hex + name format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__) def make_processor_assignment(arg, processor_name): template = "{arg} = {processor}({func}, '{arg}', {arg})" return template.format( arg=arg, processor=processor_name, func=mangled_funcname, ) exec_globals = {mangled_funcname: func, 'wraps': wraps} defaults_seen = 0 default_name_template = 'a' + uuid4().hex + '_%d' signature = [] call_args = [] assignments = [] star_map = { varargs: '*', varkw: '**', } def name_as_arg(arg): return star_map.get(arg, '') + arg for arg, default in args_defaults: if default is NO_DEFAULT: signature.append(name_as_arg(arg)) else: default_name = default_name_template % defaults_seen exec_globals[default_name] = default signature.append('='.join([name_as_arg(arg), default_name])) defaults_seen += 1 if arg in processors: procname = mangle('_processor_' + arg) exec_globals[procname] = processors[arg] assignments.append(make_processor_assignment(arg, procname)) call_args.append(name_as_arg(arg)) exec_str = dedent( ).format( func_name=func.__name__, signature=', '.join(signature), assignments='\n '.join(assignments), wrapped_funcname=mangled_funcname, call_args=', '.join(call_args), ) compiled = compile( exec_str, func.__code__.co_filename, mode='exec', ) exec_locals = {} exec_(compiled, exec_globals, exec_locals) new_func = exec_locals[func.__name__] code = new_func.__code__ args = { attr: getattr(code, attr) for attr in dir(code) if attr.startswith('co_') } # Copy the firstlineno out of the underlying function so that exceptions # get raised with the correct traceback. # This also makes dynamic source inspection (like IPython `??` operator) # work as intended. try: # Try to get the pycode object from the underlying function. original_code = func.__code__ except AttributeError: try: # The underlying callable was not a function, try to grab the # `__func__.__code__` which exists on method objects. original_code = func.__func__.__code__ except AttributeError: # The underlying callable does not have a `__code__`. There is # nothing for us to correct. return new_func args['co_firstlineno'] = original_code.co_firstlineno new_func.__code__ = CodeType(*map(getitem(args), _code_argorder)) return new_func
[ "\n Build a preprocessed function with the same signature as `func`.\n\n Uses `exec` internally to build a function that actually has the same\n signature as `func.\n ", "\\\n @wraps({wrapped_funcname})\n def {func_name}({signature}):\n {assignments}\n return {wrapped_funcname}({call_args})\n " ]
Please provide a description of the function:def get_benchmark_returns(symbol): r = requests.get( 'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol) ) data = r.json() df = pd.DataFrame(data) df.index = pd.DatetimeIndex(df['date']) df = df['close'] return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
[ "\n Get a Series of benchmark returns from IEX associated with `symbol`.\n Default is `SPY`.\n\n Parameters\n ----------\n symbol : str\n Benchmark symbol for which we're getting the returns.\n\n The data is provided by IEX (https://iextrading.com/), and we can\n get up to 5 years worth of data.\n " ]
Please provide a description of the function:def delimit(delimiters, content): if len(delimiters) != 2: raise ValueError( "`delimiters` must be of length 2. Got %r" % delimiters ) return ''.join([delimiters[0], content, delimiters[1]])
[ "\n Surround `content` with the first and last characters of `delimiters`.\n\n >>> delimit('[]', \"foo\") # doctest: +SKIP\n '[foo]'\n >>> delimit('\"\"', \"foo\") # doctest: +SKIP\n '\"foo\"'\n " ]
Please provide a description of the function:def roots(g): "Get nodes from graph G with indegree 0" return set(n for n, d in iteritems(g.in_degree()) if d == 0)
[]
Please provide a description of the function:def _render(g, out, format_, include_asset_exists=False): graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'} cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'} in_nodes = g.loadable_terms out_nodes = list(g.outputs.values()) f = BytesIO() with graph(f, "G", **graph_attrs): # Write outputs cluster. with cluster(f, 'Output', labelloc='b', **cluster_attrs): for term in filter_nodes(include_asset_exists, out_nodes): add_term_node(f, term) # Write inputs cluster. with cluster(f, 'Input', **cluster_attrs): for term in filter_nodes(include_asset_exists, in_nodes): add_term_node(f, term) # Write intermediate results. for term in filter_nodes(include_asset_exists, topological_sort(g.graph)): if term in in_nodes or term in out_nodes: continue add_term_node(f, term) # Write edges for source, dest in g.graph.edges(): if source is AssetExists() and not include_asset_exists: continue add_edge(f, id(source), id(dest)) cmd = ['dot', '-T', format_] try: proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) except OSError as e: if e.errno == errno.ENOENT: raise RuntimeError( "Couldn't find `dot` graph layout program. " "Make sure Graphviz is installed and `dot` is on your path." ) else: raise f.seek(0) proc_stdout, proc_stderr = proc.communicate(f.read()) if proc_stderr: raise RuntimeError( "Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8') ) out.write(proc_stdout)
[ "\n Draw `g` as a graph to `out`, in format `format`.\n\n Parameters\n ----------\n g : zipline.pipeline.graph.TermGraph\n Graph to render.\n out : file-like object\n format_ : str {'png', 'svg'}\n Output format.\n include_asset_exists : bool\n Whether to filter out `AssetExists()` nodes.\n " ]
Please provide a description of the function:def display_graph(g, format='svg', include_asset_exists=False): try: import IPython.display as display except ImportError: raise NoIPython("IPython is not installed. Can't display graph.") if format == 'svg': display_cls = display.SVG elif format in ("jpeg", "png"): display_cls = partial(display.Image, format=format, embed=True) out = BytesIO() _render(g, out, format, include_asset_exists=include_asset_exists) return display_cls(data=out.getvalue())
[ "\n Display a TermGraph interactively from within IPython.\n " ]
Please provide a description of the function:def format_attrs(attrs): if not attrs: return '' entries = ['='.join((key, value)) for key, value in iteritems(attrs)] return '[' + ', '.join(entries) + ']'
[ "\n Format key, value pairs from attrs into graphviz attrs format\n\n Examples\n --------\n >>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP\n '[key1=value1, key2=value2]'\n " ]
Please provide a description of the function:def apply_async(f, args=(), kwargs=None, callback=None): try: value = (identity if callback is None else callback)( f(*args, **kwargs or {}), ) successful = True except Exception as e: value = e successful = False return ApplyAsyncResult(value, successful)
[ "Apply a function but emulate the API of an asynchronous call.\n\n Parameters\n ----------\n f : callable\n The function to call.\n args : tuple, optional\n The positional arguments.\n kwargs : dict, optional\n The keyword arguments.\n\n Returns\n -------\n future : ApplyAsyncResult\n The result of calling the function boxed in a future-like api.\n\n Notes\n -----\n This calls the function eagerly but wraps it so that ``SequentialPool``\n can be used where a :class:`multiprocessing.Pool` or\n :class:`gevent.pool.Pool` would be used.\n " ]
Please provide a description of the function:def maybe_show_progress(it, show_progress, **kwargs): if show_progress: return click.progressbar(it, **kwargs) # context manager that just return `it` when we enter it return CallbackManager(lambda it=it: it)
[ "Optionally show a progress bar for the given iterator.\n\n Parameters\n ----------\n it : iterable\n The underlying iterator.\n show_progress : bool\n Should progress be shown.\n **kwargs\n Forwarded to the click progress bar.\n\n Returns\n -------\n itercontext : context manager\n A context manager whose enter is the actual iterator to use.\n\n Examples\n --------\n .. code-block:: python\n\n with maybe_show_progress([1, 2, 3], True) as ns:\n for n in ns:\n ...\n " ]
Please provide a description of the function:def main(extension, strict_extensions, default_extension, x): # install a logbook handler before performing any other operations logbook.StderrHandler().push_application() create_args(x, zipline.extension_args) load_extensions( default_extension, extension, strict_extensions, os.environ, )
[ "Top level zipline entry point.\n " ]
Please provide a description of the function:def ipython_only(option): if __IPYTHON__: return option argname = extract_option_object(option).name def d(f): @wraps(f) def _(*args, **kwargs): kwargs[argname] = None return f(*args, **kwargs) return _ return d
[ "Mark that an option should only be exposed in IPython.\n\n Parameters\n ----------\n option : decorator\n A click.option decorator.\n\n Returns\n -------\n ipython_only_dec : decorator\n A decorator that correctly applies the argument even when not\n using IPython mode.\n " ]
Please provide a description of the function:def run(ctx, algofile, algotext, define, data_frequency, capital_base, bundle, bundle_timestamp, start, end, output, trading_calendar, print_algo, metrics_set, local_namespace, blotter): # check that the start and end dates are passed correctly if start is None and end is None: # check both at the same time to avoid the case where a user # does not pass either of these and then passes the first only # to be told they need to pass the second argument also ctx.fail( "must specify dates with '-s' / '--start' and '-e' / '--end'", ) if start is None: ctx.fail("must specify a start date with '-s' / '--start'") if end is None: ctx.fail("must specify an end date with '-e' / '--end'") if (algotext is not None) == (algofile is not None): ctx.fail( "must specify exactly one of '-f' / '--algofile' or" " '-t' / '--algotext'", ) trading_calendar = get_calendar(trading_calendar) perf = _run( initialize=None, handle_data=None, before_trading_start=None, analyze=None, algofile=algofile, algotext=algotext, defines=define, data_frequency=data_frequency, capital_base=capital_base, bundle=bundle, bundle_timestamp=bundle_timestamp, start=start, end=end, output=output, trading_calendar=trading_calendar, print_algo=print_algo, metrics_set=metrics_set, local_namespace=local_namespace, environ=os.environ, blotter=blotter, benchmark_returns=None, ) if output == '-': click.echo(str(perf)) elif output != os.devnull: # make the zipline magic not write any data perf.to_pickle(output) return perf
[ "Run a backtest for the given algorithm.\n " ]
Please provide a description of the function:def zipline_magic(line, cell=None):
    load_extensions(
        default=True,
        extensions=[],
        strict=True,
        environ=os.environ,
    )
    try:
        return run.main(
            # put our overrides at the start of the parameter list so that
            # users may pass values with higher precedence
            [
                '--algotext', cell,
                '--output', os.devnull,  # don't write the results by default
            ] + ([
                # these options are set when running in line magic mode
                # set a non None algo text to use the ipython user_ns
                '--algotext', '',
                '--local-namespace',
            ] if cell is None else []) + line.split(),
            '%s%%zipline' % ((cell or '') and '%'),
            # don't use system exit and propagate errors to the caller
            standalone_mode=False,
        )
    except SystemExit as e:
        # https://github.com/mitsuhiko/click/pull/533
        # even in standalone_mode=False `--help` really wants to kill us ;_;
        if e.code:
            raise ValueError('main returned non-zero status code: %d' % e.code)
[ "The zipline IPython cell magic.\n " ]
Please provide a description of the function:def ingest(bundle, assets_version, show_progress): bundles_module.ingest( bundle, os.environ, pd.Timestamp.utcnow(), assets_version, show_progress, )
[ "Ingest the data for the given bundle.\n " ]
Please provide a description of the function:def clean(bundle, before, after, keep_last): bundles_module.clean( bundle, before, after, keep_last, )
[ "Clean up data downloaded with the ingest command.\n " ]
Please provide a description of the function:def bundles(): for bundle in sorted(bundles_module.bundles.keys()): if bundle.startswith('.'): # hide the test data continue try: ingestions = list( map(text_type, bundles_module.ingestions_for_bundle(bundle)) ) except OSError as e: if e.errno != errno.ENOENT: raise ingestions = [] # If we got no ingestions, either because the directory didn't exist or # because there were no entries, print a single message indicating that # no ingestions have yet been made. for timestamp in ingestions or ["<no ingestions>"]: click.echo("%s %s" % (bundle, timestamp))
[ "List all of the available data bundles.\n " ]
Please provide a description of the function:def binary_operator(op): # When combining a Filter with a NumericalExpression, we use this # attrgetter instance to defer to the commuted interpretation of the # NumericalExpression operator. commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) def binary_operator(self, other): if isinstance(self, NumericalExpression): self_expr, other_expr, new_inputs = self.build_binary_op( op, other, ) return NumExprFilter.create( "({left}) {op} ({right})".format( left=self_expr, op=op, right=other_expr, ), new_inputs, ) elif isinstance(other, NumericalExpression): # NumericalExpression overrides numerical ops to correctly handle # merging of inputs. Look up and call the appropriate # right-binding operator with ourself as the input. return commuted_method_getter(other)(self) elif isinstance(other, Term): if other.dtype != bool_dtype: raise BadBinaryOperator(op, self, other) if self is other: return NumExprFilter.create( "x_0 {op} x_0".format(op=op), (self,), ) return NumExprFilter.create( "x_0 {op} x_1".format(op=op), (self, other), ) elif isinstance(other, int): # Note that this is true for bool as well return NumExprFilter.create( "x_0 {op} {constant}".format(op=op, constant=int(other)), binds=(self,), ) raise BadBinaryOperator(op, self, other) binary_operator.__doc__ = "Binary Operator: '%s'" % op return binary_operator
[ "\n Factory function for making binary operator methods on a Filter subclass.\n\n Returns a function \"binary_operator\" suitable for implementing functions\n like __and__ or __or__.\n " ]
Please provide a description of the function:def unary_operator(op): valid_ops = {'~'} if op not in valid_ops: raise ValueError("Invalid unary operator %s." % op) def unary_operator(self): # This can't be hoisted up a scope because the types returned by # unary_op_return_type aren't defined when the top-level function is # invoked. if isinstance(self, NumericalExpression): return NumExprFilter.create( "{op}({expr})".format(op=op, expr=self._expr), self.inputs, ) else: return NumExprFilter.create("{op}x_0".format(op=op), (self,)) unary_operator.__doc__ = "Unary Operator: '%s'" % op return unary_operator
[ "\n Factory function for making unary operator methods for Filters.\n " ]
Please provide a description of the function:def create(cls, expr, binds): return cls(expr=expr, binds=binds, dtype=bool_dtype)
[ "\n Helper for creating new NumExprFactors.\n\n This is just a wrapper around NumericalExpression.__new__ that always\n forwards `bool` as the dtype, since Filters can only be of boolean\n dtype.\n " ]
Please provide a description of the function:def _compute(self, arrays, dates, assets, mask): return super(NumExprFilter, self)._compute( arrays, dates, assets, mask, ) & mask
[ "\n Compute our result with numexpr, then re-apply `mask`.\n " ]
Please provide a description of the function:def _validate(self): if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0: raise BadPercentileBounds( min_percentile=self._min_percentile, max_percentile=self._max_percentile, upper_bound=100.0 ) return super(PercentileFilter, self)._validate()
[ "\n Ensure that our percentile bounds are well-formed.\n " ]
Please provide a description of the function:def _compute(self, arrays, dates, assets, mask): # TODO: Review whether there's a better way of handling small numbers # of columns. data = arrays[0].copy().astype(float64) data[~mask] = nan # FIXME: np.nanpercentile **should** support computing multiple bounds # at once, but there's a bug in the logic for multiple bounds in numpy # 1.9.2. It will be fixed in 1.10. # c.f. https://github.com/numpy/numpy/pull/5981 lower_bounds = nanpercentile( data, self._min_percentile, axis=1, keepdims=True, ) upper_bounds = nanpercentile( data, self._max_percentile, axis=1, keepdims=True, ) return (lower_bounds <= data) & (data <= upper_bounds)
[ "\n For each row in the input, compute a mask of all values falling between\n the given percentiles.\n " ]
Please provide a description of the function:def parse_treasury_csv_column(column): column_re = re.compile( r"^(?P<prefix>RIFLGFC)" "(?P<unit>[YM])" "(?P<periods>[0-9]{2})" "(?P<suffix>_N.B)$" ) match = column_re.match(column) if match is None: raise ValueError("Couldn't parse CSV column %r." % column) unit, periods = get_unit_and_periods(match.groupdict()) # Roundtrip through int to coerce '06' into '6'. return str(int(periods)) + ('year' if unit == 'Y' else 'month')
[ "\n Parse a treasury CSV column into a more human-readable format.\n\n Columns start with 'RIFLGFC', followed by Y or M (year or month), followed\n by a two-digit number signifying number of years/months, followed by _N.B.\n We only care about the middle two entries, which we turn into a string like\n 3month or 30year.\n " ]
Please provide a description of the function:def get_daily_10yr_treasury_data(): url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \ "&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \ "&filetype=csv&label=include&layout=seriescolumn" return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'], parse_dates=True, converters={1: dataconverter}, squeeze=True)
[ "Download daily 10 year treasury rates from the Federal Reserve and\n return a pandas.Series." ]
Please provide a description of the function:def _sid_subdir_path(sid): padded_sid = format(sid, '06') return os.path.join( # subdir 1 00/XX padded_sid[0:2], # subdir 2 XX/00 padded_sid[2:4], "{0}.bcolz".format(str(padded_sid)) )
[ "\n Format subdir path to limit the number directories in any given\n subdirectory to 100.\n\n The number in each directory is designed to support at least 100000\n equities.\n\n Parameters\n ----------\n sid : int\n Asset identifier.\n\n Returns\n -------\n out : string\n A path for the bcolz rootdir, including subdirectory prefixes based on\n the padded string representation of the given sid.\n\n e.g. 1 is formatted as 00/00/000001.bcolz\n " ]
Please provide a description of the function:def convert_cols(cols, scale_factor, sid, invalid_data_behavior): scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round() scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round() scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round() scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round() exclude_mask = np.zeros_like(scaled_opens, dtype=bool) for col_name, scaled_col in [ ('open', scaled_opens), ('high', scaled_highs), ('low', scaled_lows), ('close', scaled_closes), ]: max_val = scaled_col.max() try: check_uint32_safe(max_val, col_name) except ValueError: if invalid_data_behavior == 'raise': raise if invalid_data_behavior == 'warn': logger.warn( 'Values for sid={}, col={} contain some too large for ' 'uint32 (max={}), filtering them out', sid, col_name, max_val, ) # We want to exclude all rows that have an unsafe value in # this column. exclude_mask &= (scaled_col >= np.iinfo(np.uint32).max) # Convert all cols to uint32. opens = scaled_opens.astype(np.uint32) highs = scaled_highs.astype(np.uint32) lows = scaled_lows.astype(np.uint32) closes = scaled_closes.astype(np.uint32) volumes = cols['volume'].astype(np.uint32) # Exclude rows with unsafe values by setting to zero. opens[exclude_mask] = 0 highs[exclude_mask] = 0 lows[exclude_mask] = 0 closes[exclude_mask] = 0 volumes[exclude_mask] = 0 return opens, highs, lows, closes, volumes
[ "Adapt OHLCV columns into uint32 columns.\n\n Parameters\n ----------\n cols : dict\n A dict mapping each column name (open, high, low, close, volume)\n to a float column to convert to uint32.\n scale_factor : int\n Factor to use to scale float values before converting to uint32.\n sid : int\n Sid of the relevant asset, for logging.\n invalid_data_behavior : str\n Specifies behavior when data cannot be converted to uint32.\n If 'raise', raises an exception.\n If 'warn', logs a warning and filters out incompatible values.\n If 'ignore', silently filters out incompatible values.\n " ]
Please provide a description of the function:def write(self, rootdir): calendar = self.calendar slicer = calendar.schedule.index.slice_indexer( self.start_session, self.end_session, ) schedule = calendar.schedule[slicer] market_opens = schedule.market_open market_closes = schedule.market_close metadata = { 'version': self.version, 'ohlc_ratio': self.default_ohlc_ratio, 'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid, 'minutes_per_day': self.minutes_per_day, 'calendar_name': self.calendar.name, 'start_session': str(self.start_session.date()), 'end_session': str(self.end_session.date()), # Write these values for backwards compatibility 'first_trading_day': str(self.start_session.date()), 'market_opens': ( market_opens.values.astype('datetime64[m]'). astype(np.int64).tolist()), 'market_closes': ( market_closes.values.astype('datetime64[m]'). astype(np.int64).tolist()), } with open(self.metadata_path(rootdir), 'w+') as fp: json.dump(metadata, fp)
[ "\n Write the metadata to a JSON file in the rootdir.\n\n Values contained in the metadata are:\n\n version : int\n The value of FORMAT_VERSION of this class.\n ohlc_ratio : int\n The default ratio by which to multiply the pricing data to\n convert the floats from floats to an integer to fit within\n the np.uint32. If ohlc_ratios_per_sid is None or does not\n contain a mapping for a given sid, this ratio is used.\n ohlc_ratios_per_sid : dict\n A dict mapping each sid in the output to the factor by\n which the pricing data is multiplied so that the float data\n can be stored as an integer.\n minutes_per_day : int\n The number of minutes per each period.\n calendar_name : str\n The name of the TradingCalendar on which the minute bars are\n based.\n start_session : datetime\n 'YYYY-MM-DD' formatted representation of the first trading\n session in the data set.\n end_session : datetime\n 'YYYY-MM-DD' formatted representation of the last trading\n session in the data set.\n\n Deprecated, but included for backwards compatibility:\n\n first_trading_day : string\n 'YYYY-MM-DD' formatted representation of the first trading day\n available in the dataset.\n market_opens : list\n List of int64 values representing UTC market opens as\n minutes since epoch.\n market_closes : list\n List of int64 values representing UTC market closes as\n minutes since epoch.\n " ]
Please provide a description of the function:def open(cls, rootdir, end_session=None): metadata = BcolzMinuteBarMetadata.read(rootdir) return BcolzMinuteBarWriter( rootdir, metadata.calendar, metadata.start_session, end_session if end_session is not None else metadata.end_session, metadata.minutes_per_day, metadata.default_ohlc_ratio, metadata.ohlc_ratios_per_sid, write_metadata=end_session is not None )
[ "\n Open an existing ``rootdir`` for writing.\n\n Parameters\n ----------\n end_session : Timestamp (optional)\n When appending, the intended new ``end_session``.\n " ]
Please provide a description of the function:def sidpath(self, sid): sid_subdir = _sid_subdir_path(sid) return join(self._rootdir, sid_subdir)
[ "\n Parameters\n ----------\n sid : int\n Asset identifier.\n\n Returns\n -------\n out : string\n Full path to the bcolz rootdir for the given sid.\n " ]
Please provide a description of the function:def last_date_in_output_for_sid(self, sid): sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid)) if not os.path.exists(sizes_path): return pd.NaT with open(sizes_path, mode='r') as f: sizes = f.read() data = json.loads(sizes) # use integer division so that the result is an int # for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa num_days = data['shape'][0] // self._minutes_per_day if num_days == 0: # empty container return pd.NaT return self._session_labels[num_days - 1]
[ "\n Parameters\n ----------\n sid : int\n Asset identifier.\n\n Returns\n -------\n out : pd.Timestamp\n The midnight of the last date written in to the output for the\n given sid.\n " ]
Please provide a description of the function:def _init_ctable(self, path): # Only create the containing subdir on creation. # This is not to be confused with the `.bcolz` directory, but is the # directory up one level from the `.bcolz` directories. sid_containing_dirname = os.path.dirname(path) if not os.path.exists(sid_containing_dirname): # Other sids may have already created the containing directory. os.makedirs(sid_containing_dirname) initial_array = np.empty(0, np.uint32) table = ctable( rootdir=path, columns=[ initial_array, initial_array, initial_array, initial_array, initial_array, ], names=[ 'open', 'high', 'low', 'close', 'volume' ], expectedlen=self._expectedlen, mode='w', ) table.flush() return table
[ "\n Create empty ctable for given path.\n\n Parameters\n ----------\n path : string\n The path to rootdir of the new ctable.\n " ]
Please provide a description of the function:def _ensure_ctable(self, sid): sidpath = self.sidpath(sid) if not os.path.exists(sidpath): return self._init_ctable(sidpath) return bcolz.ctable(rootdir=sidpath, mode='a')
[ "Ensure that a ctable exists for ``sid``, then return it." ]
Please provide a description of the function:def pad(self, sid, date): table = self._ensure_ctable(sid) last_date = self.last_date_in_output_for_sid(sid) tds = self._session_labels if date <= last_date or date < tds[0]: # No need to pad. return if last_date == pd.NaT: # If there is no data, determine how many days to add so that # desired days are written to the correct slots. days_to_zerofill = tds[tds.slice_indexer(end=date)] else: days_to_zerofill = tds[tds.slice_indexer( start=last_date + tds.freq, end=date)] self._zerofill(table, len(days_to_zerofill)) new_last_date = self.last_date_in_output_for_sid(sid) assert new_last_date == date, "new_last_date={0} != date={1}".format( new_last_date, date)
[ "\n Fill sid container with empty data through the specified date.\n\n If the last recorded trade is not at the close, then that day will be\n padded with zeros until its close. Any day after that (up to and\n including the specified date) will be padded with `minute_per_day`\n worth of zeros\n\n Parameters\n ----------\n sid : int\n The asset identifier for the data being written.\n date : datetime-like\n The date used to calculate how many slots to be pad.\n The padding is done through the date, i.e. after the padding is\n done the `last_date_in_output_for_sid` will be equal to `date`\n " ]
Please provide a description of the function:def set_sid_attrs(self, sid, **kwargs): table = self._ensure_ctable(sid) for k, v in kwargs.items(): table.attrs[k] = v
[ "Write all the supplied kwargs as attributes of the sid's file.\n " ]
Please provide a description of the function:def write(self, data, show_progress=False, invalid_data_behavior='warn'): ctx = maybe_show_progress( data, show_progress=show_progress, item_show_func=lambda e: e if e is None else str(e[0]), label="Merging minute equity files:", ) write_sid = self.write_sid with ctx as it: for e in it: write_sid(*e, invalid_data_behavior=invalid_data_behavior)
[ "Write a stream of minute data.\n\n Parameters\n ----------\n data : iterable[(int, pd.DataFrame)]\n The data to write. Each element should be a tuple of sid, data\n where data has the following format:\n columns : ('open', 'high', 'low', 'close', 'volume')\n open : float64\n high : float64\n low : float64\n close : float64\n volume : float64|int64\n index : DatetimeIndex of market minutes.\n A given sid may appear more than once in ``data``; however,\n the dates must be strictly increasing.\n show_progress : bool, optional\n Whether or not to show a progress bar while writing.\n " ]
Please provide a description of the function:def write_sid(self, sid, df, invalid_data_behavior='warn'): cols = { 'open': df.open.values, 'high': df.high.values, 'low': df.low.values, 'close': df.close.values, 'volume': df.volume.values, } dts = df.index.values # Call internal method, since DataFrame has already ensured matching # index and value lengths. self._write_cols(sid, dts, cols, invalid_data_behavior)
[ "\n Write the OHLCV data for the given sid.\n If there is no bcolz ctable yet created for the sid, create it.\n If the length of the bcolz ctable is not exactly to the date before\n the first day provided, fill the ctable with 0s up to that date.\n\n Parameters\n ----------\n sid : int\n The asset identifer for the data being written.\n df : pd.DataFrame\n DataFrame of market data with the following characteristics.\n columns : ('open', 'high', 'low', 'close', 'volume')\n open : float64\n high : float64\n low : float64\n close : float64\n volume : float64|int64\n index : DatetimeIndex of market minutes.\n " ]
Please provide a description of the function:def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'): if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES): raise BcolzMinuteWriterColumnMismatch( "Length of dts={0} should match cols: {1}".format( len(dts), " ".join("{0}={1}".format(name, len(cols[name])) for name in self.COL_NAMES))) self._write_cols(sid, dts, cols, invalid_data_behavior)
[ "\n Write the OHLCV data for the given sid.\n If there is no bcolz ctable yet created for the sid, create it.\n If the length of the bcolz ctable is not exactly to the date before\n the first day provided, fill the ctable with 0s up to that date.\n\n Parameters\n ----------\n sid : int\n The asset identifier for the data being written.\n dts : datetime64 array\n The dts corresponding to values in cols.\n cols : dict of str -> np.array\n dict of market data with the following characteristics.\n keys are ('open', 'high', 'low', 'close', 'volume')\n open : float64\n high : float64\n low : float64\n close : float64\n volume : float64|int64\n " ]
Please provide a description of the function:def _write_cols(self, sid, dts, cols, invalid_data_behavior): table = self._ensure_ctable(sid) tds = self._session_labels input_first_day = self._calendar.minute_to_session_label( pd.Timestamp(dts[0]), direction='previous') last_date = self.last_date_in_output_for_sid(sid) day_before_input = input_first_day - tds.freq self.pad(sid, day_before_input) table = self._ensure_ctable(sid) # Get the number of minutes already recorded in this sid's ctable num_rec_mins = table.size all_minutes = self._minute_index # Get the latest minute we wish to write to the ctable last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC') # In the event that we've already written some minutely data to the # ctable, guard against overwriting that data. if num_rec_mins > 0: last_recorded_minute = all_minutes[num_rec_mins - 1] if last_minute_to_write <= last_recorded_minute: raise BcolzMinuteOverlappingData(dedent(.strip()).format(last_date, input_first_day, sid)) latest_min_count = all_minutes.get_loc(last_minute_to_write) # Get all the minutes we wish to write (all market minutes after the # latest currently written, up to and including last_minute_to_write) all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1] minutes_count = all_minutes_in_window.size open_col = np.zeros(minutes_count, dtype=np.uint32) high_col = np.zeros(minutes_count, dtype=np.uint32) low_col = np.zeros(minutes_count, dtype=np.uint32) close_col = np.zeros(minutes_count, dtype=np.uint32) vol_col = np.zeros(minutes_count, dtype=np.uint32) dt_ixs = np.searchsorted(all_minutes_in_window.values, dts.astype('datetime64[ns]')) ohlc_ratio = self.ohlc_ratio_for_sid(sid) ( open_col[dt_ixs], high_col[dt_ixs], low_col[dt_ixs], close_col[dt_ixs], vol_col[dt_ixs], ) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior) table.append([ open_col, high_col, low_col, close_col, vol_col ]) table.flush()
[ "\n Internal method for `write_cols` and `write`.\n\n Parameters\n ----------\n sid : int\n The asset identifier for the data being written.\n dts : datetime64 array\n The dts corresponding to values in cols.\n cols : dict of str -> np.array\n dict of market data with the following characteristics.\n keys are ('open', 'high', 'low', 'close', 'volume')\n open : float64\n high : float64\n low : float64\n close : float64\n volume : float64|int64\n ", "\n Data with last_date={0} already includes input start={1} for\n sid={2}" ]