query | language | code | url
---|---|---|---|
linear regression
|
python
|
def fit_linear(X, y):
    """
    Uses OLS to fit the regression.
    """
    model = linear_model.LinearRegression()
    model.fit(X, y)
    return model
|
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/bestfit.py#L175-L181
|
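A minimal usage sketch for the row above, assuming scikit-learn and NumPy are installed; the data and names below are synthetic and not part of the source repo:
import numpy as np
from sklearn import linear_model

def fit_linear(X, y):
    """Same body as above, with its import made explicit."""
    model = linear_model.LinearRegression()
    model.fit(X, y)
    return model

X = np.arange(10, dtype=float).reshape(-1, 1)   # one feature
y = 3.0 * X.ravel() + 1.0                       # exact line y = 3x + 1
m = fit_linear(X, y)
print(m.coef_, m.intercept_)                    # ~[3.] and ~1.0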
linear regression
|
python
|
def linregress(x, y, return_stats=False):
    """linear regression calculation
    Parameters
    ----
    x : independent variable (series)
    y : dependent variable (series)
    return_stats : returns statistical values as well if required (bool)
    Returns
    ----
    list of parameters (and statistics)
    """
    a1, a0, r_value, p_value, stderr = scipy.stats.linregress(x, y)
    retval = a1, a0
    if return_stats:
        retval += r_value, p_value, stderr
    return retval
|
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L122-L142
|
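The wrapper above relies on scipy.stats being imported in its module; a quick check of the same call, assuming SciPy and NumPy are available (synthetic data):
import numpy as np
import scipy.stats

x = np.arange(20, dtype=float)
y = 2.5 * x - 1.0                                   # exact line, so the fit is perfect
a1, a0, r_value, p_value, stderr = scipy.stats.linregress(x, y)
print(a1, a0, r_value)                              # slope ~2.5, intercept ~-1.0, r ~1.0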
linear regression
|
python
|
def linear_regression(X, y, add_intercept=True, coef_only=False, alpha=0.05,
as_dataframe=True, remove_na=False):
"""(Multiple) Linear regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
add_intercept : bool
If False, assume that the data are already centered. If True, add a
constant term to the model. In this case, the first value in the
output dict is the intercept of the model.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
Returns
-------
stats : dataframe or dict
Linear regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error of the estimate
'T' : T-values
'pval' : p-values
'r2' : coefficient of determination (R2)
'adj_r2' : adjusted R2
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
The beta coefficients of the regression are estimated using the
:py:func:`numpy.linalg.lstsq` function.
It is generally recommended to include a constant term (intercept) in the
model to limit the bias and force the residual mean to equal zero.
Note that the intercept coefficient and p-values are, however, rarely meaningful.
The standard error of the estimates is a measure of the accuracy of the
prediction defined as:
.. math:: se = \\sqrt{MSE \\cdot (X^TX)^{-1}}
where :math:`MSE` is the mean squared error,
.. math:: MSE = \\frac{\\sum{(true - pred)^2}}{n - p - 1}
:math:`p` is the total number of explanatory variables in the model
(excluding the intercept) and :math:`n` is the sample size.
Using the coefficients and the standard errors, the T-values can be
obtained:
.. math:: T = \\frac{coef}{se}
and the p-values can then be approximated using a T-distribution
with :math:`n - p - 1` degrees of freedom.
The coefficient of determination (:math:`R^2`) is defined as:
.. math:: R^2 = 1 - (\\frac{SS_{resid}}{SS_{total}})
The adjusted :math:`R^2` is defined as:
.. math:: \\overline{R}^2 = 1 - (1 - R^2) \\frac{n - 1}{n - p - 1}
Results have been compared against sklearn, statsmodels and JASP.
This function will not run if NaN values are present in the target
or predictor variables. Please remove them before running the function.
Examples
--------
1. Simple linear regression
>>> import numpy as np
>>> from pingouin import linear_regression
>>> np.random.seed(123)
>>> mean, cov, n = [4, 6], [[1, 0.5], [0.5, 1]], 30
>>> x, y = np.random.multivariate_normal(mean, cov, n).T
>>> lm = linear_regression(x, y)
>>> lm.round(2)
names coef se T pval r2 adj_r2 CI[2.5%] CI[97.5%]
0 Intercept 4.40 0.54 8.16 0.00 0.24 0.21 3.29 5.50
1 x1 0.39 0.13 2.99 0.01 0.24 0.21 0.12 0.67
2. Multiple linear regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=n)
>>> X = np.column_stack((x, z))
>>> lm = linear_regression(X, y)
>>> print(lm['coef'].values)
[4.54123324 0.36628301 0.17709451]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lm = linear_regression(df[['x', 'z']], df['y'])
>>> print(lm['coef'].values)
[4.54123324 0.36628301 0.17709451]
4. No intercept and return coef only
>>> linear_regression(X, y, add_intercept=False, coef_only=True)
array([ 1.40935593, -0.2916508 ])
5. Return a dictionary instead of a DataFrame
>>> lm_dict = linear_regression(X, y, as_dataframe=False)
6. Remove missing values
>>> X[4, 1] = np.nan
>>> y[7] = np.nan
>>> linear_regression(X, y, remove_na=True, coef_only=True)
array([4.64069731, 0.35455398, 0.1888135 ])
"""
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist()
elif isinstance(X, pd.Series):
names = [X.name]
else:
names = []
assert 0 < alpha < 1
assert y.ndim == 1, 'y must be one-dimensional.'
# Convert input to numpy array
X = np.asarray(X)
y = np.asarray(y)
if X.ndim == 1:
# Convert to (n_samples, n_features) shape
X = X[..., np.newaxis]
# Check for NaN / Inf
if remove_na:
X, y = rm_na(X, y[..., np.newaxis], paired=True, axis='rows')
y = np.squeeze(y)
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, 'Target (y) contains NaN or Inf. Please remove them.'
assert X_gd, 'Predictors (X) contain NaN or Inf. Please remove them.'
# Check that X and y have same length
assert y.shape[0] == X.shape[0], 'X and y must have same number of samples'
if not names:
names = ['x' + str(i + 1) for i in range(X.shape[1])]
if add_intercept:
# Add intercept
X = np.column_stack((np.ones(X.shape[0]), X))
names.insert(0, "Intercept")
# Compute beta coefficient and predictions
coef = np.linalg.lstsq(X, y, rcond=None)[0]
if coef_only:
return coef
pred = np.dot(X, coef)
resid = np.square(y - pred)
ss_res = resid.sum()
n, p = X.shape[0], X.shape[1]
# Degrees of freedom should not include the intercept
dof = n - p if add_intercept else n - p - 1
# Compute mean squared error, variance and SE
MSE = ss_res / dof
beta_var = MSE * (np.linalg.pinv(np.dot(X.T, X)).diagonal())
beta_se = np.sqrt(beta_var)
# Compute R2, adjusted R2 and RMSE
ss_tot = np.square(y - y.mean()).sum()
# ss_exp = np.square(pred - y.mean()).sum()
r2 = 1 - (ss_res / ss_tot)
adj_r2 = 1 - (1 - r2) * (n - 1) / dof
# Compute T and p-values
T = coef / beta_se
pval = np.array([2 * t.sf(np.abs(i), dof) for i in T])
# Compute confidence intervals
crit = t.ppf(1 - alpha / 2, dof)
marg_error = crit * beta_se
ll = coef - marg_error
ul = coef + marg_error
# Rename CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Create dict
stats = {'names': names, 'coef': coef, 'se': beta_se, 'T': T,
'pval': pval, 'r2': r2, 'adj_r2': adj_r2, ll_name: ll,
ul_name: ul}
if as_dataframe:
return pd.DataFrame.from_dict(stats)
else:
return stats
|
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/regression.py#L10-L223
|
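A compact sketch of the algebra the docstring above describes (lstsq coefficients, standard errors from MSE·(XᵀX)⁻¹, T-values and two-sided p-values), using only NumPy and SciPy; the variable names are illustrative and not part of pingouin's API:
import numpy as np
from scipy.stats import t

rng = np.random.default_rng(0)
n = 50
x = rng.normal(size=n)
y = 2.0 + 0.5 * x + rng.normal(scale=0.3, size=n)

X = np.column_stack((np.ones(n), x))              # add intercept column
coef, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
resid = y - X @ coef
dof = n - X.shape[1]                              # n - p - 1, with p = 1 predictor plus intercept
mse = (resid ** 2).sum() / dof
se = np.sqrt(mse * np.linalg.pinv(X.T @ X).diagonal())
T = coef / se
pval = 2 * t.sf(np.abs(T), dof)
print(np.round(coef, 2), np.round(pval, 3))       # intercept ~2.0, slope ~0.5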
linear regression
|
python
|
def linear_regression(self):
""" Linear Regression.
This function runs linear regression and stores the,
1. Model
2. Model name
3. Mean score of cross validation
4. Metrics
"""
model = LinearRegression()
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = sum(scores) / len(scores)
self.models.append(model)
self.model_names.append('Linear Regression')
self.max_scores.append(mean_score)
self.metrics['Linear Regression'] = {}
self.metrics['Linear Regression']['R2'] = mean_score
self.metrics['Linear Regression']['Adj R2'] = self.adj_r2(mean_score, self.baseline_in.shape[0], self.baseline_in.shape[1])
|
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L176-L203
|
linear regression
|
python
|
def linreg(x, y):
    """
    does a linear regression
    """
    if len(x) != len(y):
        print('x and y must be same length')
        return
    xx, yy, xsum, ysum, xy, n, sum = 0, 0, 0, 0, 0, len(x), 0
    linpars = {}
    for i in range(n):
        xx += x[i] * x[i]
        yy += y[i] * y[i]
        xy += x[i] * y[i]
        xsum += x[i]
        ysum += y[i]
    xsig = np.sqrt(old_div((xx - old_div(xsum**2, n)), (n - 1.)))
    ysig = np.sqrt(old_div((yy - old_div(ysum**2, n)), (n - 1.)))
    linpars['slope'] = old_div(
        (xy - (xsum * ysum / n)), (xx - old_div((xsum**2), n)))
    linpars['b'] = old_div((ysum - linpars['slope'] * xsum), n)
    linpars['r'] = old_div((linpars['slope'] * xsig), ysig)
    for i in range(n):
        a = y[i] - linpars['b'] - linpars['slope'] * x[i]
        sum += a
    linpars['sigma'] = old_div(sum, (n - 2.))
    linpars['n'] = n
    return linpars
|
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9589-L9615
|
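The accumulated sums above reduce to the usual closed-form slope and intercept; a quick NumPy cross-check on toy data (illustrative only, not part of PmagPy):
import numpy as np

x = np.array([0., 1., 2., 3., 4.])
y = np.array([1., 3.1, 4.9, 7.2, 9.0])

slope = (np.mean(x * y) - x.mean() * y.mean()) / (np.mean(x * x) - x.mean() ** 2)
intercept = y.mean() - slope * x.mean()
print(round(slope, 3), round(intercept, 3))
print(np.polyfit(x, y, 1))    # same slope and intercept from NumPy's least squares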
linear regression
|
python
|
def _linear_seaborn_(self, label=None, style=None, opts=None):
"""
Returns a Seaborn linear regression plot
"""
xticks, yticks = self._get_ticks(opts)
try:
fig = sns.lmplot(self.x, self.y, data=self.df)
fig = self._set_with_height(fig, opts)
return fig
except Exception as e:
self.err(e, self.linear_,
"Can not draw linear regression chart")
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/seaborn.py#L57-L68
|
linear regression
|
python
|
def logx_linear(x, a, b):
"""logx linear
Parameters
----------
x: int
a: float
b: float
Returns
-------
float
a * np.log(x) + b
"""
x = np.log(x)
return a*x + b
|
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py#L89-L104
|
linear regression
|
python
|
def lasso_regression(self):
""" Lasso Regression.
This function runs lasso regression and stores the,
1. Model
2. Model name
3. Max score
4. Metrics
"""
score_list = []
max_score = float('-inf')
best_alpha = None
for alpha in self.alphas:
# model = Lasso(normalize=True, alpha=alpha, max_iter=5000)
model = Lasso(alpha=alpha, max_iter=5000)
model.fit(self.baseline_in, self.baseline_out.values.ravel())
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = np.mean(scores)
score_list.append(mean_score)
if mean_score > max_score:
max_score = mean_score
best_alpha = alpha
# self.models.append(Lasso(normalize=True, alpha=best_alpha, max_iter=5000))
self.models.append(Lasso(alpha=best_alpha, max_iter=5000))
self.model_names.append('Lasso Regression')
self.max_scores.append(max_score)
self.metrics['Lasso Regression'] = {}
self.metrics['Lasso Regression']['R2'] = max_score
self.metrics['Lasso Regression']['Adj R2'] = self.adj_r2(max_score, self.baseline_in.shape[0], self.baseline_in.shape[1])
|
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L206-L246
|
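For comparison, scikit-learn's LassoCV performs the same kind of alpha search with built-in cross-validation; a small sketch on synthetic data, not part of the XBOS code above:
import numpy as np
from sklearn.linear_model import LassoCV

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=100)

model = LassoCV(alphas=np.logspace(-4, 0, 20), cv=5).fit(X, y)
print(model.alpha_, np.round(model.coef_, 2))       # chosen alpha and mostly-zero coefficients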
linear regression
|
python
|
def regressionplot(x, y, poly=None):
"""
Plot a 2-D linear regression (y = slope * x + offset) overlayed over the raw data samples
"""
if not isinstance(x[0], (float, int, np.float64, np.float32)):
x = [row[0] for row in x]
y_regression = poly[0] * np.array(x) + poly[-1]
try:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x, y_regression, 'r-', x, y, 'o', markersize=5)
plt.legend(['%+.2g * x + %.2g' % poly, 'Samples'])
ax.grid(True)
plt.draw()
except:
logger.warn('No display available')
return y_regression
|
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L228-L245
|
linear regression
|
python
|
def linearRegression(requestContext, seriesList, startSourceAt=None,
endSourceAt=None):
"""
Graphs the linear regression function by the least squares method.
Takes one metric or a wildcard seriesList, followed by a quoted string
with the time to start the line and another quoted string with the time
to end the line. The start and end times are inclusive (default range is
from to until). See ``from / until`` in the render\_api_ for examples of
time formats. Datapoints in the range are used for the regression.
Example::
&target=linearRegression(Server.instance01.threads.busy,'-1d')
&target=linearRegression(Server.instance*.threads.busy,
"00:00 20140101","11:59 20140630")
"""
from .app import evaluateTarget
results = []
sourceContext = requestContext.copy()
if startSourceAt is not None:
sourceContext['startTime'] = parseATTime(startSourceAt)
if endSourceAt is not None:
sourceContext['endTime'] = parseATTime(endSourceAt)
sourceList = []
for series in seriesList:
source = evaluateTarget(sourceContext, series.pathExpression)
sourceList.extend(source)
for source, series in zip(sourceList, seriesList):
newName = 'linearRegression(%s, %s, %s)' % (
series.name,
int(epoch(sourceContext['startTime'])),
int(epoch(sourceContext['endTime'])))
forecast = linearRegressionAnalysis(source)
if forecast is None:
continue
factor, offset = forecast
values = [offset + (series.start + i * series.step) * factor
for i in range(len(series))]
newSeries = TimeSeries(newName, series.start, series.end,
series.step, values)
newSeries.pathExpression = newSeries.name
results.append(newSeries)
return results
|
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2998-L3044
|
linear regression
|
python
|
def regression(fname="regression.png"):
"""
Create figures for regression models
"""
_, axes = plt.subplots(ncols=2, figsize=(18, 6))
alphas = np.logspace(-10, 1, 300)
data = load_concrete(split=True)
# Plot prediction error in the middle
oz = PredictionError(LassoCV(alphas=alphas), ax=axes[0])
oz.fit(data.X.train, data.y.train)
oz.score(data.X.test, data.y.test)
oz.finalize()
# Plot residuals on the right
oz = ResidualsPlot(RidgeCV(alphas=alphas), ax=axes[1])
oz.fit(data.X.train, data.y.train)
oz.score(data.X.test, data.y.test)
oz.finalize()
# Save figure
path = os.path.join(FIGURES, fname)
plt.tight_layout()
plt.savefig(path)
|
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/paper/figures/figures.py#L104-L127
|
linear regression
|
python
|
def interpolate_linear(self, lons, lats, data):
"""
Interpolate using linear approximation
Returns the same as interpolate(lons,lats,data,order=1)
"""
return self.interpolate(lons, lats, data, order=1)
|
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/spherical.py#L559-L564
|
linear regression
|
python
|
def regress(self, method='lstsq'):
    """regress performs linear least squares regression of the designmatrix on the data.
    :param method: method, or backend to be used for the regression analysis.
    :type method: string, one of ['lstsq', 'sm_ols']
    :returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
    """
    if method == 'lstsq':
        self.betas, residuals_sum, rank, s = LA.lstsq(self.design_matrix.T, self.resampled_signal.T)
        self.residuals = self.resampled_signal - self.predict_from_design_matrix(self.design_matrix)
    elif method == 'sm_ols':
        import statsmodels.api as sm
        assert self.resampled_signal.shape[0] == 1, \
            'signal input into statsmodels OLS cannot contain multiple signals at once, present shape %s' % str(self.resampled_signal.shape)
        model = sm.OLS(np.squeeze(self.resampled_signal), self.design_matrix.T)
        results = model.fit()
        # make betas and residuals that are compatible with the LA.lstsq type.
        self.betas = np.array(results.params).reshape((self.design_matrix.shape[0], self.resampled_signal.shape[0]))
        self.residuals = np.array(results.resid).reshape(self.resampled_signal.shape)
    self.logger.debug('performed %s regression on %s design_matrix and %s signal' % (method, str(self.design_matrix.shape), str(self.resampled_signal.shape)))
|
https://github.com/tknapen/FIRDeconvolution/blob/6263496a356c449062fe4c216fef56541f6dc151/src/FIRDeconvolution.py#L217-L239
|
linear regression
|
python
|
def linreg_ols_svd(y, X, rcond=1e-15):
"""Linear Regression, OLS, inv by SVD
Properties
----------
* Numpy's lstsq is based on LAPACK's _gelsd, which applies SVD
* SVD inverse might be slow (complex Landau O)
* speed might decline during forward selection
* no overhead or other computations
Example:
--------
beta = lin_ols_svd(y,X)
"""
import numpy as np
try: # solve OLS formula
beta, _, _, singu = np.linalg.lstsq(b=y, a=X, rcond=rcond)
except np.linalg.LinAlgError:
print("LinAlgError: computation does not converge.")
return None
# check singu
if np.any(singu < 0.0):
print("Error: A singular value of X is numerically not well-behaved.")
return None
# return estimated model parameters
return beta
|
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/linreg_ols_svd.py#L2-L29
|
linear regression
|
python
|
def _linear_predictor(self, X=None, modelmat=None, b=None, term=-1):
"""linear predictor
compute the linear predictor portion of the model
ie multiply the model matrix by the spline basis coefficients
Parameters
---------
at least 1 of (X, modelmat)
and
at least 1 of (b, feature)
X : array-like of shape (n_samples, m_features) or None, optional
containing the input dataset
if None, will attempt to use modelmat
modelmat : array-like or None, optional
contains the spline basis for each feature evaluated at the input
values for each feature, ie model matrix
if None, will attempt to construct the model matrix from X
b : array-like or None, optional
contains the spline coefficients
if None, will use current model coefficients
feature : int, optional
feature for which to compute the linear prediction
if -1, will compute for all features
Returns
-------
lp : np.array of shape (n_samples,)
"""
if modelmat is None:
modelmat = self._modelmat(X, term=term)
if b is None:
b = self.coef_[self.terms.get_coef_indices(term)]
return modelmat.dot(b).flatten()
|
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L357-L393
|
linear regression
|
python
|
def progression_linear(week, start_weight, final_weight, start_week, end_week):
"""A linear progression function going through the points
('start_week', 'start_weight') and ('end_week', 'final_weight'), evaluated
in 'week'.
Parameters
----------
week
The week to evaluate the linear function at.
start_weight
The weight at 'start_week'.
final_weight
The weight at 'end_week'.
start_week
The number of the first week, typically 1.
end_week
The number of the final week, e.g. 8.
Returns
-------
weight
The weight at 'week'.
Examples
-------
>>> progression_linear(week = 2, start_weight = 100, final_weight = 120,
... start_week = 1, end_week = 3)
110.0
>>> progression_linear(3, 100, 140, 1, 5)
120.0
"""
# Calculate the slope of the linear function
slope = (start_weight - final_weight) / (start_week - end_week)
# Return the answer y = slope (x - x_0) + y_0
return slope * (week - start_week) + start_weight
|
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/modeling.py#L156-L194
|
linear regression
|
python
|
def linearRegressionAnalysis(series):
    """
    Returns factor and offset of linear regression function by least
    squares method.
    """
    n = safeLen(series)
    sumI = sum([i for i, v in enumerate(series) if v is not None])
    sumV = sum([v for i, v in enumerate(series) if v is not None])
    sumII = sum([i * i for i, v in enumerate(series) if v is not None])
    sumIV = sum([i * v for i, v in enumerate(series) if v is not None])
    denominator = float(n * sumII - sumI * sumI)
    if denominator == 0:
        return None
    else:
        factor = (n * sumIV - sumI * sumV) / denominator / series.step
        offset = sumII * sumV - sumIV * sumI
        offset = offset / denominator - factor * series.start
        return factor, offset
|
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2977-L2995
|
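Ignoring the TimeSeries step/start rescaling, the sums above are the standard least-squares normal equations over the index; a NumPy sanity check (illustrative only):
import numpy as np

v = np.array([3.0, 5.0, 7.0, 9.0, 11.0])       # values at indices 0..4
i = np.arange(len(v), dtype=float)
n = len(v)
denom = n * (i * i).sum() - i.sum() ** 2
factor = (n * (i * v).sum() - i.sum() * v.sum()) / denom
offset = ((i * i).sum() * v.sum() - (i * v).sum() * i.sum()) / denom
print(factor, offset)                           # 2.0 3.0
print(np.polyfit(i, v, 1))                      # same line: slope 2, intercept 3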
linear regression
|
python
|
def _lmder1_linear_r1zcr(n, m, factor, target_fnorm1, target_fnorm2, target_params):
"""A rank-1 linear function with zero columns and rows (lmder test #3)"""
def func(params, vec):
s = 0
for j in range(1, n - 1):
s += (j + 1) * params[j]
for i in range(m):
vec[i] = i * s - 1
vec[m-1] = -1
def jac(params, jac):
jac.fill(0)
for i in range(1, n - 1):
for j in range(1, m - 1):
jac[i,j] = j * (i + 1)
guess = np.ones(n) * factor
#_lmder1_test(m, func, jac, guess)
_lmder1_driver(m, func, jac, guess,
target_fnorm1, target_fnorm2, None)
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L2472-L2494
|
linear regression
|
python
|
def fit_linear(xdata, ydata):
    """
    Returns slope and intercept of line of best fit:
    y = a*x + b
    through the supplied data.
    Parameters
    ----------
    xdata, ydata:
        Arrays of x data and y data (having matching lengths).
    """
    x = _n.array(xdata)
    y = _n.array(ydata)
    ax = _n.average(x)
    ay = _n.average(y)
    axx = _n.average(x*x)
    ayx = _n.average(y*x)
    slope = (ayx - ay*ax) / (axx - ax*ax)
    intercept = ay - slope*ax
    return slope, intercept
|
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L738-L763
|
linear regression
|
python
|
def linear(target, X, A1='', A2=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of `X`:
.. math::
r = A_{1} X + A_{2}
Parameters
----------
A1 -> A2 : string
The dictionary keys on the target object containing the coefficients
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follow:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=1.0)
B = _parse_args(target=target, key=A2, default=0.0)
X = target[X]
r = A * X + B
S1 = A
S2 = B
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
|
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/physics/generic_source_term.py#L28-L71
|
linear regression
|
python
|
def linear_model(x, y):
    """
    Returns a linear model transformation function fitted on the two supplied points.
    y = m*x + b
    Note: Assumes that x2 != x1, otherwise division by zero occurs.
    """
    x1, x2 = x
    y1, y2 = y
    m = (y2 - y1) / (x2 - x1)
    b = y1 - (m * x1)
    return lambda x: m * x + b
|
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/IntensityRangeStandardization.py#L524-L534
|
linear regression
|
python
|
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
)
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L786-L843
|
linear regression
|
python
|
def linreg_mle(y, X, algorithm='Nelder-Mead', debug=False):
"""MLE for Linear Regression Model
Parameters:
-----------
y : ndarray
target variable with N observations
X : ndarray
The <N x C> design matrix with C independent
variables, features, factors, etc.
algorithm : str
Optional. Default 'Nelder-Mead' (Simplex).
The algorithm used in scipy.optimize.minimize
debug : bool
Optional.
Returns:
--------
beta : ndarray
Estimated regression coefficients.
results : scipy.optimize.optimize.OptimizeResult
Optional. If debug=True then only scipy's
optimization result variable is returned.
"""
import numpy as np
import scipy.stats as sstat
import scipy.optimize as sopt
def objective_nll_linreg(theta, y, X):
yhat = np.dot(X, theta[:-1]) # =X*beta
return -1.0 * sstat.norm.logpdf(y, loc=yhat, scale=theta[-1]).sum()
# check eligible algorithm
if algorithm not in ('Nelder-Mead', 'CG', 'BFGS'):
raise Exception('Optimization Algorithm not supported.')
# set start values
theta0 = np.ones((X.shape[1] + 1, ))
# run solver
results = sopt.minimize(
objective_nll_linreg,
theta0,
args=(y, X),
method=algorithm,
options={'disp': False})
# debug?
if debug:
return results
# done
return results.x[:-1]
|
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/linreg_mle.py#L2-L59
|
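A small sanity check of the approach above: maximizing the Gaussian log-likelihood recovers (approximately) the OLS coefficients. Sketch only, on synthetic data; the abs() guard on the scale parameter is an addition for robustness of the simplex search, not part of the oxyba snippet:
import numpy as np
import scipy.stats as sstat
import scipy.optimize as sopt

rng = np.random.default_rng(1)
X = np.column_stack((np.ones(80), rng.normal(size=80)))
y = X @ np.array([1.0, 2.0]) + rng.normal(scale=0.5, size=80)

def nll(theta):
    yhat = X @ theta[:-1]
    return -sstat.norm.logpdf(y, loc=yhat, scale=np.abs(theta[-1])).sum()

res = sopt.minimize(nll, np.ones(X.shape[1] + 1), method='Nelder-Mead')
print(np.round(res.x[:-1], 2))                              # MLE betas
print(np.round(np.linalg.lstsq(X, y, rcond=None)[0], 2))    # OLS betas, nearly identical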
linear regression
|
python
|
def ols_matrix(A, norm_func=None):
"""
Generate the matrix used to solve OLS regression.
Parameters
----------
A: float array
The design matrix
norm: callable, optional
A normalization function to apply to the matrix, before extracting the
OLS matrix.
Notes
-----
The matrix needed for OLS regression for the equation:
..math ::
y = A \beta
is given by:
..math ::
\hat{\beta} = (A' x A)^{-1} A' y
See also
--------
http://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation
"""
A = np.asarray(A)
if norm_func is not None:
X = np.matrix(unit_vector(A.copy(), norm_func=norm_func))
else:
X = np.matrix(A.copy())
return la.pinv(X.T * X) * X.T
|
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/utils.py#L235-L276
|
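The matrix the function above returns left-multiplies y to give the OLS coefficients; a quick NumPy check without np.matrix (illustrative only):
import numpy as np

rng = np.random.default_rng(2)
A = np.column_stack((np.ones(30), rng.normal(size=30)))
y = A @ np.array([0.5, -1.2]) + rng.normal(scale=0.05, size=30)

ols_mat = np.linalg.pinv(A.T @ A) @ A.T        # (A'A)^{-1} A'
print(np.round(ols_mat @ y, 2))                # ~[0.5, -1.2]
print(np.round(np.linalg.lstsq(A, y, rcond=None)[0], 2))   # same from lstsq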
linear regression
|
python
|
def interpolation_linear(x, x1, x2, y1, y2):
    """
    Linear interpolation
    returns (y2 - y1) / (x2 - x1) * (x - x1) + y1
    """
    m = (y2 - y1) / (x2 - x1)
    t = (x - x1)
    return m * t + y1
|
https://github.com/pbrisk/mathtoolspy/blob/d0d35b45d20f346ba8a755e53ed0aa182fab43dd/mathtoolspy/utils/math_fcts.py#L51-L58
|
linear regression
|
python
|
def run(features, labels, regularization=0., constfeat=True):
"""
Run linear regression on the given data.
.. versionadded:: 0.5.0
If a regularization parameter is provided, this function
is a simplification and specialization of ridge
regression, as implemented in `scikit-learn
<http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge>`_.
Setting `solver` to `'svd'` in :class:`sklearn.linear_model.Ridge` and equating
our `regularization` with their `alpha` will yield the same results.
Parameters
----------
features : ndarray
Features on which to run linear regression.
labels : ndarray
Labels for the given features. Multiple columns
of labels are allowed.
regularization : float, optional
Regularization parameter. Defaults to 0.
constfeat : bool, optional
Whether or not the first column of features is
the constant feature 1. If True, the first column
will be excluded from regularization. Defaults to True.
Returns
-------
model : ndarray
Regression model for the given data.
"""
n_col = (features.shape[1] if len(features.shape) > 1 else 1)
reg_matrix = regularization * np.identity(n_col, dtype='float64')
if constfeat:
reg_matrix[0, 0] = 0.
# http://stackoverflow.com/questions/27476933/numpy-linear-regression-with-regularization
return np.linalg.lstsq(features.T.dot(features) + reg_matrix, features.T.dot(labels))[0]
|
https://github.com/aisthesis/pynance/blob/9eb0d78b60fe2a324ed328d026fedb6dbe8f7f41/pynance/learn/linreg.py#L13-L53
|
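A short numeric sketch of the regularized normal-equations form used above, on synthetic data (the helper name here is illustrative, not part of pynance):
import numpy as np

rng = np.random.default_rng(3)
features = np.column_stack((np.ones(60), rng.normal(size=(60, 2))))
labels = features @ np.array([1.0, 2.0, -1.0]) + rng.normal(scale=0.1, size=60)

def ridge_normal_eq(features, labels, regularization=0.0, constfeat=True):
    n_col = features.shape[1]
    reg = regularization * np.identity(n_col)
    if constfeat:
        reg[0, 0] = 0.0                        # do not penalize the constant feature
    return np.linalg.lstsq(features.T @ features + reg, features.T @ labels, rcond=None)[0]

print(np.round(ridge_normal_eq(features, labels, 0.0), 2))    # plain OLS
print(np.round(ridge_normal_eq(features, labels, 10.0), 2))   # shrunken slopes, intact intercept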
linear regression
|
python
|
def lreg(self, xcol, ycol, name="Regression"):
"""
Add a column to the main dataframe populted with
the model's linear regression for a column
"""
try:
x = self.df[xcol].values.reshape(-1, 1)
y = self.df[ycol]
lm = linear_model.LinearRegression()
lm.fit(x, y)
predictions = lm.predict(x)
self.df[name] = predictions
except Exception as e:
self.err(e, "Can not calculate linear regression")
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/stats.py#L8-L21
|
linear regression
|
python
|
def def_linear(fun):
"""Flags that a function is linear wrt all args"""
defjvp_argnum(fun, lambda argnum, g, ans, args, kwargs:
fun(*subval(args, argnum, g), **kwargs))
|
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/autograd/core.py#L151-L154
|
linear regression
|
python
|
def linreg_ols_pinv(y, X, rcond=1e-15):
    """Linear Regression, OLS, by multiplying with Pseudoinverse"""
    import numpy as np
    try:  # multiply with inverse to compute coefficients
        return np.dot(np.linalg.pinv(
            np.dot(X.T, X), rcond=rcond), np.dot(X.T, y))
    except np.linalg.LinAlgError:
        print("LinAlgError: SVD does not converge")
        return None
|
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/linreg_ols_pinv.py#L2-L10
|
linear regression
|
python
|
def linreg_ols_lu(y, X):
    """Linear Regression, OLS, by solving linear equations and LU decomposition
    Properties
    ----------
    * based on LAPACK's _gesv, which applies LU decomposition
    * avoids using python's inverse functions
    * should be stable
    * no overhead or other computations
    Example:
    --------
    beta = linreg_ols_lu(y,X)
    Links:
    ------
    * http://oxyba.de/docs/linreg_ols_lu
    """
    import numpy as np
    try:  # solve OLS formula
        return np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
    except np.linalg.LinAlgError:
        print("LinAlgError: X*X' is singular or not square.")
        return None
|
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/linreg_ols_lu.py#L2-L26
|
linear regression
|
python
|
def _dlinear_seaborn_(self, label=None, style=None, opts=None):
"""
Returns a Seaborn linear regression plot with marginal distribution
"""
color, size = self._get_color_size(style)
try:
fig = sns.jointplot(self.x, self.y, color=color,
size=size, data=self.df, kind="reg")
fig = self._set_with_height(fig, opts)
return fig
except Exception as e:
self.err(e, self.dlinear_,
"Can not draw linear regression chart with distribution")
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/seaborn.py#L70-L82
|
linear regression
|
python
|
def linear_trend(x, param):
"""
Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"attr": x} with x a string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: pandas.Series
"""
# todo: we could use the index of the DataFrame here
linReg = linregress(range(len(x)), x)
return [("attr_\"{}\"".format(config["attr"]), getattr(linReg, config["attr"]))
for config in param]
|
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1152-L1173
|
linear regression
|
python
|
def agg_linear_trend(self, x, param=None):
"""
As in tsfresh `agg_linear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1727>`_
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\
versus the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are\
"pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \
information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
Further, the aggregation function is controlled by "f_agg", which can be "max", "min", "mean" or "median"
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'attr': 'intercept', 'chunk_len': 5, 'f_agg': 'min'},
{'attr': 'rvalue', 'chunk_len': 10, 'f_agg': 'var'},
{'attr': 'intercept', 'chunk_len': 10, 'f_agg': 'min'}]
agg = feature_calculators.agg_linear_trend(x, param)
logging.debug("agg linear trend by tsfresh calculated")
return list(agg)
|
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/tremor_processor.py#L396-L427
|
linear regression
|
python
|
def fit(self, X, y):
""" Compute average slope and intercept for all X, y pairs
Arguments:
X (np.array): model input (independent variable)
y (np.array): model output (dependent variable)
Returns:
Linear Regression instance with `slope` and `intercept` attributes
References:
Based on: https://github.com/justmarkham/DAT4/blob/master/notebooks/08_linear_regression.ipynb
>>> n_samples = 100
>>> X = np.arange(100).reshape((n_samples, 1))
>>> slope, intercept = 3.14159, -4.242
>>> y = 3.14 * X + np.random.randn(*X.shape) + intercept
>>> line = LinearRegressor()
>>> line.fit(X, y)
<nlpia.models.LinearRegressor object ...
>>> abs(line.slope - slope) < abs(0.02 * (slope + 1))
True
>>> abs(line.intercept - intercept) < 0.2 * (abs(intercept) + 1)
True
"""
# initial sums
n = float(len(X))
sum_x = X.sum()
sum_y = y.sum()
sum_xy = (X * y).sum()
sum_xx = (X**2).sum()
# formula for w0
self.slope = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x * sum_x) / n)
# formula for w1
self.intercept = sum_y / n - self.slope * (sum_x / n)
return self
|
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/models.py#L15-L54
|
linear regression
|
python
|
def simulate_linear_model(A, x0, v, ts_length):
    r"""
    This is a separate function for simulating a vector linear system of
    the form
    .. math::
        x_{t+1} = A x_t + v_t
    given :math:`x_0` = x0
    Here :math:`x_t` and :math:`v_t` are both n x 1 and :math:`A` is n x n.
    The purpose of separating this functionality out is to target it for
    optimization by Numba. For the same reason, matrix multiplication is
    broken down into for loops.
    Parameters
    ----------
    A : array_like or scalar(float)
        Should be n x n
    x0 : array_like
        Should be n x 1. Initial condition
    v : np.ndarray
        Should be n x ts_length-1. Its t-th column is used as the time t
        shock :math:`v_t`
    ts_length : int
        The length of the time series
    Returns
    --------
    x : np.ndarray
        Time series with ts_length columns, the t-th column being :math:`x_t`
    """
    A = np.asarray(A)
    n = A.shape[0]
    x = np.empty((n, ts_length))
    x[:, 0] = x0
    for t in range(ts_length-1):
        # x[:, t+1] = A.dot(x[:, t]) + v[:, t]
        for i in range(n):
            x[i, t+1] = v[i, t]  # Shock
            for j in range(n):
                x[i, t+1] += A[i, j] * x[j, t]  # Dot Product
    return x
|
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/lss.py#L20-L64
|
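The inner loops above unroll a matrix-vector product so Numba can compile them; an equivalent vectorized sketch for a small deterministic check (illustrative only):
import numpy as np

A = np.array([[0.9, 0.1], [0.0, 0.8]])
x0 = np.array([1.0, 1.0])
ts_length = 5
v = np.zeros((2, ts_length - 1))            # no shocks

x = np.empty((2, ts_length))
x[:, 0] = x0
for t in range(ts_length - 1):
    x[:, t + 1] = A @ x[:, t] + v[:, t]     # same recursion as the loops above
print(np.round(x, 3))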
linear regression
|
python
|
def linear_constraints(self):
""" Returns the linear constraints.
"""
if self.lin_N == 0:
return None, array([]), array([])
A = lil_matrix((self.lin_N, self.var_N), dtype=float64)
l = -Inf * ones(self.lin_N)
u = -l
for lin in self.lin_constraints:
if lin.N: # non-zero number of rows to add
Ak = lin.A # A for kth linear constrain set
i1 = lin.i1 # starting row index
iN = lin.iN # ending row index
vsl = lin.vs # var set list
kN = -1 # initialize last col of Ak used
Ai = lil_matrix((lin.N, self.var_N), dtype=float64)
for v in vsl:
var = self.get_var(v)
j1 = var.i1 # starting column in A
jN = var.iN # ending column in A
k1 = kN + 1 # starting column in Ak
kN = kN + var.N # ending column in Ak
if j1 == jN:
# FIXME: Single column slicing broken in lil.
for i in range(Ai.shape[0]):
Ai[i, j1] = Ak[i, k1]
else:
Ai[:, j1:jN + 1] = Ak[:, k1:kN + 1]
A[i1:iN + 1, :] = Ai
l[i1:iN + 1] = lin.l
u[i1:iN + 1] = lin.u
return A.tocsr(), l, u
|
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/opf.py#L782-L818
|
linear regression
|
python
|
def calculate_linear_predictor(model_matrix, model_coefficients, offset=None,
name=None):
"""Computes `model_matrix @ model_coefficients + offset`."""
with tf.compat.v1.name_scope(name, 'calculate_linear_predictor',
[model_matrix, model_coefficients, offset]):
predicted_linear_response = tf.linalg.matvec(model_matrix,
model_coefficients)
if offset is not None:
predicted_linear_response += offset
return predicted_linear_response
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L623-L632
|
linear regression
|
python
|
def linear_trend_timewise(x, param):
"""
Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature uses the index of the time series to fit the model, which must be of a datetime
dtype.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of. The index must be datetime.
:type x: pandas.Series
:param param: contains dictionaries {"attr": x} with x a string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: list
"""
ix = x.index
# Get differences between each timestamp and the first timestamp in seconds.
# Then convert to hours and reshape for linear regression
times_seconds = (ix - ix[0]).total_seconds()
times_hours = np.asarray(times_seconds / float(3600))
linReg = linregress(times_hours, x.values)
return [("attr_\"{}\"".format(config["attr"]), getattr(linReg, config["attr"]))
for config in param]
|
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1899-L1927
|
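A small sketch of the same idea on a synthetic hourly series (pandas and SciPy assumed; data illustrative):
import numpy as np
import pandas as pd
from scipy.stats import linregress

idx = pd.date_range("2021-01-01", periods=24, freq="h")
x = pd.Series(np.arange(24, dtype=float) * 0.5 + 1.0, index=idx)   # slope 0.5 per hour

hours = np.asarray((x.index - x.index[0]).total_seconds() / 3600.0)
fit = linregress(hours, x.values)
print(round(fit.slope, 2), round(fit.intercept, 2))                 # 0.5 1.0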
linear regression
|
python
|
def _fit_regression(self, y):
"""
fit regression using pseudo-inverse
or supplied regressor
"""
if self.regressor is None:
self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
else:
self.regressor.fit(self.hidden_activations_, y)
self.fitted_ = True
|
https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/extreme_learning_machines/elm.py#L149-L160
|
linear regression
|
python
|
def regression_tikhonov(G, y, M, tau=0):
r"""Solve a regression problem on graph via Tikhonov minimization.
The function solves
.. math:: \operatorname*{arg min}_x \| M x - y \|_2^2 + \tau \ x^T L x
if :math:`\tau > 0`, and
.. math:: \operatorname*{arg min}_x x^T L x \ \text{ s. t. } \ y = M x
otherwise.
Parameters
----------
G : :class:`pygsp.graphs.Graph`
y : array, length G.n_vertices
Measurements.
M : array of boolean, length G.n_vertices
Masking vector.
tau : float
Regularization parameter.
Returns
-------
x : array, length G.n_vertices
Recovered values :math:`x`.
Examples
--------
>>> from pygsp import graphs, filters, learning
>>> import matplotlib.pyplot as plt
>>>
>>> G = graphs.Sensor(N=100, seed=42)
>>> G.estimate_lmax()
Create a smooth ground truth signal:
>>> filt = lambda x: 1 / (1 + 10*x)
>>> filt = filters.Filter(G, filt)
>>> rs = np.random.RandomState(42)
>>> signal = filt.analyze(rs.normal(size=G.n_vertices))
Construct a measurement signal from a binary mask:
>>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
>>> measures = signal.copy()
>>> measures[~mask] = np.nan
Solve the regression problem by reconstructing the signal:
>>> recovery = learning.regression_tikhonov(G, measures, mask, tau=0)
Plot the results:
>>> fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(10, 3))
>>> limits = [signal.min(), signal.max()]
>>> _ = G.plot_signal(signal, ax=ax1, limits=limits, title='Ground truth')
>>> _ = G.plot_signal(measures, ax=ax2, limits=limits, title='Measures')
>>> _ = G.plot_signal(recovery, ax=ax3, limits=limits, title='Recovery')
>>> _ = fig.tight_layout()
"""
if tau > 0:
y[M == False] = 0
if sparse.issparse(G.L):
def Op(x):
return (M * x.T).T + tau * (G.L.dot(x))
LinearOp = sparse.linalg.LinearOperator([G.N, G.N], Op)
if y.ndim > 1:
sol = np.empty(shape=y.shape)
res = np.empty(shape=y.shape[1])
for i in range(y.shape[1]):
sol[:, i], res[i] = sparse.linalg.cg(
LinearOp, y[:, i])
else:
sol, res = sparse.linalg.cg(LinearOp, y)
# TODO: do something with the residual...
return sol
else:
# Creating this matrix may be problematic in term of memory.
# Consider using an operator instead...
if type(G.L).__module__ == np.__name__:
LinearOp = np.diag(M*1) + tau * G.L
return np.linalg.solve(LinearOp, M * y)
else:
if np.prod(M.shape) != G.n_vertices:
raise ValueError("M should be of size [G.n_vertices,]")
indl = M
indu = (M == False)
Luu = G.L[indu, :][:, indu]
Wul = - G.L[indu, :][:, indl]
if sparse.issparse(G.L):
sol_part = sparse.linalg.spsolve(Luu, Wul.dot(y[indl]))
else:
sol_part = np.linalg.solve(Luu, np.matmul(Wul, y[indl]))
sol = y.copy()
sol[indu] = sol_part
return sol
|
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/learning.py#L254-L368
|
linear regression
|
python
|
def linear_variogram_model(m, d):
"""Linear model, m is [slope, nugget]"""
slope = float(m[0])
nugget = float(m[1])
return slope * d + nugget
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/variogram_models.py#L30-L34
|
linear regression
|
python
|
def regression_and_plot(x, y=None):
"""
Fit a line to the x, y data supplied and plot it along with the raw samples
>>> age = [25, 26, 33, 29, 27, 21, 26, 35, 21, 37, 21, 38, 18, 19, 36, 30, 29, 24, 24, 36, 36, 27,
... 33, 23, 21, 26, 27, 27, 24, 26, 25, 24, 22, 25, 40, 39, 19, 31, 33, 30, 33, 27, 40, 32,
... 31, 35, 26, 34, 27, 34, 33, 20, 19, 40, 39, 39, 37, 18, 35, 20, 28, 31, 30, 29, 31, 18,
... 40, 20, 32, 20, 34, 34, 25, 29, 40, 40, 39, 36, 39, 34, 34, 35, 39, 38, 33, 32, 21, 29,
... 36, 33, 30, 39, 21, 19, 38, 30, 40, 36, 34, 28, 37, 29, 39, 25, 36, 33, 37, 19, 28, 26, 18, 22,
... 40, 20, 40, 20, 39, 29, 26, 26, 22, 37, 34, 29, 24, 23, 21, 19, 29, 30, 23, 40, 30, 30, 19, 39,
... 39, 25, 36, 38, 24, 32, 34, 33, 36, 30, 35, 26, 28, 23, 25, 23, 40, 20, 26, 26, 22, 23, 18, 36,
... 34, 36, 35, 40, 39, 39, 33, 22, 37, 20, 37, 35, 20, 23, 37, 32, 25, 35, 35, 22, 21, 31, 40, 26,
... 24, 29, 37, 19, 33, 31, 29, 27, 21, 19, 39, 34, 34, 40, 26, 39, 35, 31, 35, 24, 19, 27, 27, 20,
... 28, 30, 23, 21, 20, 26, 31, 24, 25, 25, 22, 32, 28, 36, 21, 38, 18, 25, 21, 33, 40, 19, 38, 33,
... 37, 32, 31, 31, 38, 19, 37, 37, 32, 36, 34, 35, 35, 35, 37, 35, 39, 34, 24, 25, 18, 40, 33, 32,
... 23, 25, 19, 39, 38, 36, 32, 27, 22, 40, 28, 29, 25, 36, 26, 28, 32, 34, 34, 21, 21, 32, 19, 35,
... 30, 35, 26, 31, 38, 34, 33, 35, 37, 38, 36, 40, 22, 30, 28, 28, 29, 36, 24, 28, 28, 28, 26, 21,
... 35, 22, 32, 28, 19, 33, 18, 22, 36, 26, 19, 26, 30, 27, 28, 24, 36, 37, 20, 32, 38, 39, 38, 30,
... 32, 30, 26, 23, 19, 29, 33, 34, 23, 30, 32, 40, 36, 29, 39, 34, 34, 22, 22, 22, 36, 38, 38, 30,
... 26, 40, 34, 21, 34, 38, 32, 35, 35, 26, 28, 20, 40, 23, 24, 26, 24, 39, 21, 33, 31, 39, 39, 20,
... 22, 18, 23, 36, 32, 37, 36, 26, 30, 30, 30, 21, 22, 40, 38, 22, 27, 23, 21, 22, 20, 30, 31, 40,
... 19, 32, 24, 21, 27, 32, 30, 34, 18, 25, 22, 40, 23, 19, 24, 24, 25, 40, 27, 29, 22, 39, 38, 34,
... 39, 30, 31, 33, 34, 25, 20, 20, 20, 20, 24, 19, 21, 31, 31, 29, 38, 39, 33, 40, 24, 38, 37, 18,
... 24, 38, 38, 22, 40, 21, 36, 30, 21, 30, 35, 20, 25, 25, 29, 30, 20, 29, 29, 31, 20, 26, 26, 38,
... 37, 39, 31, 35, 36, 30, 38, 36, 23, 39, 39, 20, 30, 34, 21, 23, 21, 33, 30, 33, 32, 36, 18, 31,
... 32, 25, 23, 23, 21, 34, 18, 40, 21, 29, 29, 21, 38, 35, 38, 32, 38, 27, 23, 33, 29, 19, 20, 35,
... 29, 27, 28, 20, 40, 35, 40, 40, 20, 36, 38, 28, 30, 30, 36, 29, 27, 25, 33, 19, 27, 28, 34, 36,
... 27, 40, 38, 37, 31, 33, 38, 36, 25, 23, 22, 23, 34, 26, 24, 28, 32, 22, 18, 29, 19, 21, 27, 28,
... 35, 30, 40, 28, 37, 34, 24, 40, 33, 29, 30, 36, 25, 26, 26, 28, 34, 39, 34, 26, 24, 33, 38, 37,
... 36, 34, 37, 33, 25, 27, 30, 26, 21, 40, 26, 25, 25, 40, 28, 35, 36, 39, 33, 36, 40, 32, 36, 26,
... 24, 36, 27, 28, 26, 37, 36, 37, 36, 20, 34, 30, 32, 40, 20, 31, 23, 27, 19, 24, 23, 24, 25, 36,
... 26, 33, 30, 27, 26, 28, 28, 21, 31, 24, 27, 24, 29, 29, 28, 22, 20, 23, 35, 30, 37, 31, 31, 21,
... 32, 29, 27, 27, 30, 39, 34, 23, 35, 39, 27, 40, 28, 36, 35, 38, 21, 18, 21, 38, 37, 24, 21, 25,
... 35, 27, 35, 24, 36, 32, 20]
>>> wage = [17000, 13000, 28000, 45000, 28000, 1200, 15500, 26400, 14000, 35000, 16400, 50000, 2600, 9000,
... 27000, 150000, 32000, 22000, 65000, 56000, 6500, 30000, 70000, 9000, 6000, 34000, 40000, 30000,
... 6400, 87000, 20000, 45000, 4800, 34000, 75000, 26000, 4000, 50000, 63000, 14700, 45000, 42000,
... 10000, 40000, 70000, 14000, 54000, 14000, 23000, 24400, 27900, 4700, 8000, 19000, 17300, 45000,
... 3900, 2900, 138000, 2100, 60000, 55000, 45000, 40000, 45700, 90000, 40000, 13000, 30000, 2000,
... 75000, 60000, 70000, 41000, 42000, 31000, 39000, 104000, 52000, 20000, 59000, 66000, 63000, 32000,
... 11000, 16000, 6400, 17000, 47700, 5000, 25000, 35000, 20000, 14000, 29000, 267000, 31000, 27000,
... 64000, 39600, 267000, 7100, 33000, 31500, 40000, 23000, 3000, 14000, 44000, 15100, 2600, 6200,
... 50000, 3000, 25000, 2000, 38000, 22000, 20000, 2500, 1500, 42000, 30000, 27000, 7000, 11900, 27000,
... 24000, 4300, 30200, 2500, 30000, 70000, 38700, 8000, 36000, 66000, 24000, 95000, 39000, 20000, 23000,
... 56000, 25200, 62000, 12000, 13000, 35000, 35000, 14000, 24000, 12000, 14000, 31000, 40000, 22900, 12000,
... 14000, 1600, 12000, 80000, 90000, 126000, 1600, 100000, 8000, 71000, 40000, 42000, 40000, 120000, 35000,
... 1200, 4000, 32000, 8000, 14500, 65000, 15000, 3000, 2000, 23900, 1000, 22000, 18200, 8000, 30000, 23000,
... 30000, 27000, 70000, 40000, 18000, 3100, 57000, 25000, 32000, 10000, 4000, 49000, 93000, 35000, 49000,
... 40000, 5500, 30000, 25000, 5700, 6000, 30000, 42900, 8000, 5300, 90000, 85000, 15000, 17000, 5600,
... 11500, 52000, 1000, 42000, 2100, 50000, 1500, 40000, 28000, 5300, 149000, 3200, 12000, 83000, 45000,
... 31200, 25000, 72000, 70000, 7000, 23000, 40000, 40000, 28000, 10000, 48000, 20000, 60000, 19000, 25000,
... 39000, 68000, 2300, 23900, 5000, 16300, 80000, 45000, 12000, 9000, 1300, 35000, 35000, 47000, 32000,
... 18000, 20000, 20000, 23400, 48000, 8000, 5200, 33500, 22000, 22000, 52000, 104000, 28000, 13000, 12000,
... 15000, 53000, 27000, 50000, 13900, 23000, 28100, 23000, 12000, 55000, 83000, 31000, 33200, 45000, 3000,
... 18000, 11000, 41000, 36000, 33600, 38000, 45000, 53000, 24000, 3000, 37500, 7700, 4800, 29000, 6600,
... 12400, 20000, 2000, 1100, 55000, 13400, 10000, 6000, 6000, 16000, 19000, 8300, 52000, 58000, 27000,
... 25000, 80000, 10000, 22000, 18000, 21000, 8000, 15200, 15000, 5000, 50000, 89000, 7000, 65000, 58000,
... 42000, 55000, 40000, 14000, 36000, 30000, 7900, 6000, 1200, 10000, 54000, 12800, 35000, 34000, 40000,
... 45000, 9600, 3300, 39000, 22000, 40000, 68000, 24400, 1000, 10800, 8400, 50000, 22000, 20000, 20000,
... 1300, 9000, 14200, 32000, 65000, 18000, 18000, 3000, 16700, 1500, 1400, 15000, 55000, 42000, 70000,
... 35000, 21600, 5800, 35000, 5700, 1700, 40000, 40000, 45000, 25000, 13000, 6400, 11000, 4200, 30000,
... 32000, 120000, 10000, 19000, 12000, 13000, 37000, 40000, 38000, 60000, 3100, 16000, 18000, 130000,
... 5000, 5000, 35000, 1000, 14300, 100000, 20000, 33000, 8000, 9400, 87000, 2500, 12000, 12000, 33000,
... 16500, 25500, 7200, 2300, 3100, 2100, 3200, 45000, 40000, 3800, 30000, 12000, 62000, 45000, 46000,
... 50000, 40000, 13000, 50000, 23000, 4000, 40000, 25000, 16000, 3000, 80000, 27000, 68000, 3500,
... 1300, 10000, 46000, 5800, 24000, 12500, 50000, 48000, 29000, 19000, 26000, 30000, 10000, 10000,
... 20000, 43000, 105000, 55000, 5000, 65000, 68000, 38000, 47000, 48700, 6100, 55000, 30000, 5000, 3500,
... 23400, 11400, 7000, 1300, 80000, 65000, 45000, 19000, 3000, 17100, 22900, 31200, 35000, 3000, 5000,
... 1000, 36000, 4800, 60000, 9800, 30000, 85000, 18000, 24000, 60000, 30000, 2000, 39000, 12000, 10500,
... 60000, 36000, 10500, 3600, 1200, 28600, 48000, 20800, 5400, 9600, 30000, 30000, 20000, 6700, 30000,
... 3200, 42000, 37000, 5000, 18000, 20000, 14000, 12000, 18000, 3000, 13500, 35000, 38000, 30000, 36000,
... 66000, 45000, 32000, 46000, 80000, 27000, 4000, 21000, 7600, 16000, 10300, 27000, 19000, 14000, 19000,
... 3100, 20000, 2700, 27000, 7000, 13600, 75000, 35000, 36000, 25000, 6000, 36000, 50000, 46000, 3000,
... 37000, 40000, 30000, 48800, 19700, 16000, 14000, 12000, 25000, 25000, 28600, 17000, 31200, 57000,
... 23000, 23500, 46000, 18700, 26700, 9900, 16000, 3000, 52000, 51000, 14000, 14400, 27000, 26000, 60000,
... 25000, 6000, 20000, 3000, 69000, 24800, 12000, 3100, 18000, 20000, 267000, 28000, 9800, 18200, 80000,
... 6800, 21100, 20000, 68000, 20000, 45000, 8000, 40000, 31900, 28000, 24000, 2000, 32000, 11000, 20000,
... 5900, 16100, 23900, 40000, 37500, 11000, 55000, 37500, 60000, 23000, 9500, 34500, 4000, 9000, 11200,
... 35200, 30000, 18000, 21800, 19700, 16700, 12500, 11300, 4000, 39000, 32000, 14000, 65000, 50000,
... 2000, 30400, 22000, 1600, 56000, 40000, 85000, 9000, 10000, 19000, 5300, 5200, 43000, 60000, 50000,
... 38000, 267000, 15600, 1800, 17000, 45000, 31000, 5000, 8000, 43000, 103000, 45000, 8800, 26000, 47000,
... 40000, 8000]
>>> # Udacity data shows that people earn $1.8K more for each year of age and start with a $21K deficit
>>> regress(age, wage) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([22214.93338944, ...)
>> # Gainesville, FL census data shows 14 more new homes are built each year, starting with 517 completed in 1991
>> poly = regress([483, 576, 529, 551, 529, 551, 663, 639, 704, 675, 601, 621, 630, 778, 831, 610])
"""
if y is None:
y = x
x = range(len(x))
if not isinstance(x[0], (float, int, np.float64, np.float32)):
x = [row[0] for row in x]
A = np.vstack([np.array(x), np.ones(len(x))]).T
fit = np.linalg.lstsq(A, y, rcond=None)
# if fit is None:
# fit = [(1, 0), None, None, None]
poly = fit[0][0], fit[0][-1]
poly = regressionplot(x, y, poly)
return poly
|
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L125-L225
|
linear regression
|
python
|
def linear_sym(target, X, A1='', A2=''):
r"""
Calculates the rate, as well as slope and intercept of the following
function at the given value of *x*:
.. math::
r = A_{1} x + A_{2}
Parameters
----------
A1 -> A2 : string
The dictionary keys on the target object containing the coefficients
values to be used in the source term model
X : string
The dictionary key on the target object containing the quantity
of interest
Returns
-------
A dictionary containing the following three items:
**'rate'** - The value of the source term function at the given X.
**'S1'** - The slope of the source term function at the given X.
**'S2'** - The intercept of the source term function at the given X.
The slope and intercept provide a linearized source term equation about the
current value of X as follow:
.. math::
rate = S_{1} X + S_{2}
"""
A = _parse_args(target=target, key=A1, default=1.0)
B = _parse_args(target=target, key=A2, default=0.0)
X = target[X]
# Symbols used in symbolic function
a, b, x = _syp.symbols('a,b,x')
# Equation
y = a*x + b
# Callable functions
r, s1, s2 = _build_func(eq=y, a=a, b=b, x=x)
# Values
r_val = r(A, B, X)
s1_val = s1(A, B, X)
s2_val = s2(A, B, X)
values = {'S1': s1_val, 'S2': s2_val, 'rate': r_val}
return values
|
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/physics/generic_source_term.py#L334-L383
|
linear regression
|
python
|
def linregressIgnoringOutliers(x, y, n_iter=3, nstd=2):
    """
    do linear regression [n_iter] times
    successively removing [outliers]
    return result of normal linregress
    """
    for _ in range(n_iter):
        m, n = linregress(x, y)[:2]
        y_fit = x * m + n
        dy = y - y_fit
        std = (dy**2).mean()**0.5
        inliers = abs(dy) < nstd * std
        if inliers.sum() > 2:
            x = x[inliers]
            y = y[inliers]
        else:
            break
    return linregress(x, y)
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/fit/linregressIgnoringOutliers.py#L5-L22
|
linear regression
|
python
|
def lpc_ref(signal, order):
"""Compute the Linear Prediction Coefficients.
Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
find the k+1 coefficients of a k order linear filter:
xp[n] = -c[1] * x[n-2] - ... - c[k-1] * x[n-k-1]
Such as the sum of the squared-error e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Notes
----
This is just for reference, as it is using the direct inversion of the
toeplitz matrix, which is really slow"""
if signal.ndim > 1:
raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
raise ValueError("Input signal must have a lenght >= lpc order")
if order > 0:
p = order + 1
r = np.zeros(p, 'float32')
# Number of non zero values in autocorrelation one needs for p LPC
# coefficients
nx = np.min([p, signal.size])
x = np.correlate(signal, signal, 'full')
r[:nx] = x[signal.size - 1:signal.size + order]
phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
return np.concatenate(([1.], phi))
else:
return np.ones(1, dtype='float32')
|
https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/formants/lpc.py#L13-L50
|
linear regression
|
python
|
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
initialWeights=None, regParam=0.0, regType=None, intercept=False,
validateData=True, convergenceTol=0.001):
"""
Train a linear regression model using Stochastic Gradient
Descent (SGD). This solves the least squares regression
formulation
f(weights) = 1/(2n) ||A weights - y||^2
which is the mean squared error. Here the data matrix has n rows,
and the input RDD holds the set of rows of A, each with its
corresponding right hand side label y.
See also the documentation for the precise formulation.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param step:
The step parameter used in SGD.
(default: 1.0)
:param miniBatchFraction:
Fraction of data to be used for each SGD iteration.
(default: 1.0)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.0)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization
- None for no regularization (default)
:param intercept:
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param convergenceTol:
A condition which decides iteration termination.
(default: 0.001)
"""
warnings.warn(
"Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning)
def train(rdd, i):
return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations),
float(step), float(miniBatchFraction), i, float(regParam),
regType, bool(intercept), bool(validateData),
float(convergenceTol))
return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights)
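# A minimal usage sketch (left as comments because it needs a live SparkContext ``sc``):
#
#     from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
#     data = sc.parallelize([LabeledPoint(0.0, [0.0]), LabeledPoint(1.0, [1.0]),
#                            LabeledPoint(3.0, [2.0]), LabeledPoint(2.0, [3.0])])
#     model = LinearRegressionWithSGD.train(data, iterations=100, step=0.1,
#                                           intercept=True)
#     model.predict([2.5])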
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L230-L291
|
linear regression
|
python
|
def linear(self, fnct, x, y, sd=None, wt=1.0, fid=0):
"""Make a linear least squares solution.
Makes a linear least squares solution for the points through the
ordinates at the x values, using the specified fnct. The x can be of
any dimension, depending on the number of arguments needed in the
functional evaluation. The values should be given in the order:
x0[1], x0[2], ..., x1[1], ..., xn[m] if there are n observations,
and m arguments. x should be a vector of m*n length; y (the
observations) a vector of length n.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical)
"""
self._fit(fitfunc="linear", fnct=fnct, x=x, y=y, sd=sd, wt=wt, fid=fid)
|
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L355-L375
|
linear regression
|
python
|
def fit(self, n_iter, eta=0.01):
"""Linear regression for n_iter"""
for _ in range(n_iter):
gradient = self.compute_gradient()
self.gradient_step(gradient, eta)
|
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/examples/federated_learning_with_encryption.py#L160-L164
|
linear regression
|
python
|
def _parse_linear_expression(expression, expanded=False, **kwargs):
"""
Parse the coefficients of a linear expression (linearity is assumed).
    Returns a tuple (offset, coefficients), where coefficients is a dictionary of variable: coefficient pairs.
"""
offset = 0
constant = None
if expression.is_Add:
coefficients = expression.as_coefficients_dict()
elif expression.is_Mul:
coefficients = {expression.args[1]: expression.args[0]}
elif expression.is_Symbol:
coefficients = {expression: 1}
elif expression.is_Number:
coefficients = {}
else:
raise ValueError("Expression {} seems to be invalid".format(expression))
for var in coefficients:
if not (var.is_Symbol):
if var == one:
constant = var
offset = float(coefficients[var])
elif expanded:
raise ValueError("Expression {} seems to be invalid".format(expression))
else:
coefficients = _parse_linear_expression(expression, expanded=True, **kwargs)
if constant is not None:
del coefficients[constant]
return offset, coefficients
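# A minimal usage sketch, assuming `one` above is sympy's S.One as defined
# elsewhere in optlang:
import sympy
one = sympy.S.One
x1, x2 = sympy.symbols('x1 x2')
offset, coeffs = _parse_linear_expression(2 * x1 + 3 * x2 + 5)
# offset == 5.0 and coeffs == {x1: 2, x2: 3}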
|
https://github.com/biosustain/optlang/blob/13673ac26f6b3ba37a2ef392489722c52e3c5ff1/optlang/expression_parsing.py#L67-L98
|
linear regression
|
python
|
def linear(m=1, b=0):
''' Return a driver function that can advance a sequence of linear values.
.. code-block:: none
value = m * i + b
Args:
m (float) : a slope for the linear driver
        b (float) : an offset for the linear driver
'''
def f(i):
return m * i + b
return partial(force, sequence=_advance(f))
|
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/driving.py#L137-L151
|
linear regression
|
python
|
def regress_fit(fqdn, result, *argl, **argd):
"""Analyzes the result of a regression algorithm's fitting. See also
:func:`fit` for explanation of arguments.
"""
    if len(argl) > 2:
        yP = argl[2]
    else:
        yP = None
    out = _generic_fit(fqdn, result, regress_predict, yP, *argl, **argd)
return out
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L223-L230
|
linear regression
|
python
|
def _compute_non_linear_slope(self, vs30, C):
"""
Compute non-linear slope factor,
equations (13a) to (13d), pag 108-109.
"""
V1 = 180.0
V2 = 300.0
Vref = 760.0
# equation (13d), values are zero for vs30 >= Vref = 760.0
bnl = np.zeros(vs30.shape)
# equation (13a)
idx = vs30 <= V1
bnl[idx] = C['b1']
# equation (13b)
idx = np.where((vs30 > V1) & (vs30 <= V2))
bnl[idx] = (C['b1'] - C['b2']) * \
np.log(vs30[idx] / V2) / np.log(V1 / V2) + C['b2']
# equation (13c)
idx = np.where((vs30 > V2) & (vs30 < Vref))
bnl[idx] = C['b2'] * np.log(vs30[idx] / Vref) / np.log(V2 / Vref)
return bnl
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/boore_atkinson_2008.py#L218-L242
|
linear regression
|
python
|
def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None):
"""
        computes the linear response matrix (m x n), with n being the data size and m being the coefficients
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return:
"""
A = []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
A_i = self._imageModel_list[i].linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
if A == []:
A = A_i
else:
A = np.append(A, A_i, axis=1)
return A
|
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/MultiBand/multi_exposures.py#L38-L56
|
linear regression
|
python
|
def linear(self, x):
"""Computes logits by running x through a linear layer.
Args:
x: A float32 tensor with shape [batch_size, length, hidden_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
with tf.name_scope("presoftmax_linear"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
x = tf.reshape(x, [-1, self.hidden_size])
logits = tf.matmul(x, self.shared_weights, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
|
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/model/embedding_layer.py#L77-L92
|
linear regression
|
python
|
def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0,
random_state=None):
"""
Make dataset for a regression problem.
Examples
--------
>>> f = lambda x: 0.5*x + np.sin(2*x)
>>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
>>> X.shape
(100, 1)
>>> y.shape
(100,)
>>> X[:5].round(2)
array([[ 1.62],
[-0.61],
[-0.53],
[-1.07],
[ 0.87]])
>>> y[:5].round(2)
array([ 0.76, 0.48, -0.23, -0.28, 0.83])
"""
generator = check_random_state(random_state)
X = generator.randn(n_samples, n_features)
# unpack the columns of X
y = func(*X.T) + bias
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
return X, y
|
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/utils/datasets.py#L23-L54
|
linear regression
|
python
|
def lpc(signal, order, axis=-1):
"""Compute the Linear Prediction Coefficients.
    Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will
    find the k+1 coefficients of a k-th order linear predictor:
    xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]
    such that the sum of the squared errors e[i] = xp[i] - x[i] is minimized.
Parameters
----------
signal: array_like
input signal
order : int
LPC order (the output will have order + 1 items)
Returns
-------
a : array-like
the solution of the inversion.
e : array-like
the prediction error.
k : array-like
reflection coefficients.
Notes
-----
This uses Levinson-Durbin recursion for the autocorrelation matrix
inversion, and fft for the autocorrelation computation.
For small order, particularly if order << signal size, direct computation
of the autocorrelation is faster: use levinson and correlate in this case."""
n = signal.shape[axis]
if order > n:
raise ValueError("Input signal must have length >= order")
r = acorr_lpc(signal, axis)
return levinson_1d(r, order)
|
https://github.com/mmcauliffe/Conch-sounds/blob/e05535fd08e4b0e47e37a77ef521d05eff1d6bc5/conch/analysis/formants/lpc.py#L157-L195
|
linear regression
|
python
|
def agg_linear_trend(x, param):
"""
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks versus
the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are "pvalue",
"rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
    Further, the aggregation function is controlled by "f_agg", which can be "max", "min", "mean" or "median".
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f an string and l an int
:type param: list
:return: the different feature values
:return type: pandas.Series
"""
# todo: we could use the index of the DataFrame here
calculated_agg = {}
res_data = []
res_index = []
for parameter_combination in param:
chunk_len = parameter_combination["chunk_len"]
f_agg = parameter_combination["f_agg"]
aggregate_result = _aggregate_on_chunks(x, f_agg, chunk_len)
if f_agg not in calculated_agg or chunk_len not in calculated_agg[f_agg]:
if chunk_len >= len(x):
calculated_agg[f_agg] = {chunk_len: np.NaN}
else:
lin_reg_result = linregress(range(len(aggregate_result)), aggregate_result)
calculated_agg[f_agg] = {chunk_len: lin_reg_result}
attr = parameter_combination["attr"]
if chunk_len >= len(x):
res_data.append(np.NaN)
else:
res_data.append(getattr(calculated_agg[f_agg][chunk_len], attr))
res_index.append("f_agg_\"{}\"__chunk_len_{}__attr_\"{}\"".format(f_agg, chunk_len, attr))
return zip(res_index, res_data)
|
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1805-L1854
|
linear regression
|
python
|
def is_linear(self):
"""
Tests whether all filters in the list are linear. CascadeFilter and
ParallelFilter instances are also linear if all filters they group are
linear.
"""
return all(isinstance(filt, LinearFilter) or
(hasattr(filt, "is_linear") and filt.is_linear())
for filt in self.callables)
|
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_filters.py#L916-L925
|
linear regression
|
python
|
def regression(self, slope=None):
"""regress tip values against branch values
Parameters
----------
slope : None, optional
if given, the slope isn't optimized
Returns
-------
dict
regression parameters
"""
self._calculate_averages()
clock_model = base_regression(self.tree.root.Q, slope)
clock_model['r_val'] = self.explained_variance()
return clock_model
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L292-L310
|
linear regression
|
python
|
def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs):
"""Non-optimized version of ``admm_linearized``.
This function is intended for debugging. It makes a lot of copies and
performs no error checking.
"""
callback = kwargs.pop('callback', None)
z = L.range.zero()
u = L.range.zero()
for _ in range(niter):
x[:] = f.proximal(tau)(x - tau / sigma * L.adjoint(L(x) + u - z))
z = g.proximal(sigma)(L(x) + u)
u = L(x) + u - z
if callback is not None:
callback(x)
|
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/admm.py#L157-L171
|
linear regression
|
python
|
def lpc(x, N=None):
"""Linear Predictor Coefficients.
:param x:
:param int N: default is length(X) - 1
:Details:
Finds the coefficients :math:`A=(1, a(2), \dots a(N+1))`, of an Nth order
    forward linear predictor that predicts the current value of the
real-valued time series x based on past samples:
.. math:: \hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)
such that the sum of the squares of the errors
.. math:: err(n) = X(n) - Xp(n)
is minimized. This function uses the Levinson-Durbin recursion to
solve the normal equations that arise from the least-squares formulation.
.. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`
.. todo:: matrix case, references
:Example:
::
from scipy.signal import lfilter
noise = randn(50000,1); % Normalized white Gaussian noise
x = filter([1], [1 1/2 1/3 1/4], noise)
x = x[45904:50000]
x.reshape(4096, 1)
x = x[0]
Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:
1.00000 + 0.00000i 0.51711 - 0.00000i 0.33908 - 0.00000i 0.24410 - 0.00000i
::
a = lpc(x, 3)
est_x = lfilter([0 -a(2:end)],1,x); % Estimated signal
e = x - est_x; % Prediction error
[acs,lags] = xcorr(e,'coeff'); % ACS of prediction error
"""
m = len(x)
if N is None:
N = m - 1 #default value if N is not provided
elif N > m-1:
#disp('Warning: zero-padding short input sequence')
x.resize(N+1)
#todo: check this zero-padding.
X = fft(x, 2**nextpow2(2.*len(x)-1))
R = real(ifft(abs(X)**2))
R = R/(m-1.) #Biased autocorrelation estimate
a, e, ref = LEVINSON(R, N)
return a, e
|
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/lpc.py#L10-L72
|
linear regression
|
python
|
def plsr(df, v, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Regression Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a partial least squares regression (PLS-R) on the supplied dataframe ``df``
against the provided continuous variable ``v``, selecting the first ``n_components``.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param v: Continuous variable to perform regression against
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLS-R scores n_components x n_samples
weights ``DataFrame`` of PLS-R weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This function requires scikit-learn (sklearn) to perform PLS regression')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
#TODO: Extract values if v is DataFrame?
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(df.values.T, v)
scores = pd.DataFrame(plsr.x_scores_.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(plsr.x_weights_)
weights.index = df.index
weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])]
loadings = pd.DataFrame(plsr.x_loadings_)
loadings.index = df.index
loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])]
#r = plsr.score(df.values.T, v)
predicted = plsr.predict(df.values.T)
return scores, weights, loadings, predicted
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L164-L216
|
linear regression
|
python
|
def piecewise_linear_scalar(params, xval):
'''Piecewise linear function for a scalar variable xval (float).
:param params:
Piecewise linear parameters (numpy.ndarray) in the following form:
[slope_i,... slope_n, turning_point_i, ..., turning_point_n, intercept]
        Length of params = 2 * number_segments, e.g.
[slope_1, slope_2, slope_3, turning_point1, turning_point_2, intercept]
:param xval:
Value for evaluation of function (float)
:returns:
Piecewise linear function evaluated at point xval (float)
'''
n_params = len(params)
n_seg, remainder = divmod(n_params, 2)
if remainder:
raise ValueError(
'Piecewise Function requires 2 * nsegments parameters')
if n_seg == 1:
return params[1] + params[0] * xval
gradients = params[0:n_seg]
turning_points = params[n_seg: -1]
c_val = np.array([params[-1]])
for iloc in range(1, n_seg):
c_val = np.hstack(
[c_val, (c_val[iloc - 1] + gradients[iloc - 1] *
turning_points[iloc - 1]) - (gradients[iloc] *
turning_points[iloc - 1])])
if xval <= turning_points[0]:
return gradients[0] * xval + c_val[0]
elif xval > turning_points[-1]:
return gradients[-1] * xval + c_val[-1]
else:
select = np.nonzero(turning_points <= xval)[0][-1] + 1
return gradients[select] * xval + c_val[select]
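# A minimal worked example: two segments with slopes 1.0 and 0.5, one turning
# point at x = 2.0 and intercept 0.0, so params = [slope_1, slope_2, turning_point_1, intercept]:
import numpy as np
demo_params = np.array([1.0, 0.5, 2.0, 0.0])
piecewise_linear_scalar(demo_params, 1.0)  # -> 1.0 (first segment: 1.0 * x + 0.0)
piecewise_linear_scalar(demo_params, 4.0)  # -> 3.0 (second segment: 0.5 * x + 1.0)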
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/utils.py#L292-L330
|
linear regression
|
python
|
def linreg_ols_qr(y, X):
"""Linear Regression, OLS, inverse by QR Factoring"""
import numpy as np
try: # multiply with inverse to compute coefficients
q, r = np.linalg.qr(np.dot(X.T, X))
return np.dot(np.dot(np.linalg.inv(r), q.T), np.dot(X.T, y))
except np.linalg.LinAlgError:
print("LinAlgError: Factoring failed")
return None
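# A minimal usage sketch (note the argument order is y first, then X):
import numpy as np
np.random.seed(42)
X = np.column_stack([np.ones(100), np.random.randn(100)])  # intercept column + one feature
beta_true = np.array([1.5, -2.0])
y = X.dot(beta_true) + 0.1 * np.random.randn(100)
beta_hat = linreg_ols_qr(y, X)  # roughly array([ 1.5, -2.0])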
|
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/linreg_ols_qr.py#L2-L10
|
linear regression
|
python
|
def _lmder1_linear_full_rank(n, m, factor, target_fnorm1, target_fnorm2):
"""A full-rank linear function (lmder test #1)"""
def func(params, vec):
s = params.sum()
temp = 2. * s / m + 1
vec[:] = -temp
vec[:params.size] += params
def jac(params, jac):
# jac.shape = (n, m) by LMDER standards
jac.fill(-2. / m)
for i in range(n):
jac[i,i] += 1
guess = np.ones(n) * factor
#_lmder1_test(m, func, jac, guess)
_lmder1_driver(m, func, jac, guess,
target_fnorm1, target_fnorm2,
[-1] * n)
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lmmin.py#L2395-L2415
|
linear regression
|
python
|
def lerp(self, a, t):
""" Lerp. Linear interpolation from self to a"""
        return self.plus(a.minus(self).times(t))
|
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L85-L87
|
linear regression
|
python
|
def regression():
"""
Run regression testing - lint and then run all tests.
"""
lint()
doctests()
storybook = _storybook({}).only_uninherited()
storybook.with_params(**{"python version": "2.7.14"}).filter(
lambda story: not story.info.get("fails_on_python_2")
).ordered_by_name().play()
storybook.with_params(**{"python version": "3.7.0"}).ordered_by_name().play()
|
https://github.com/crdoconnor/strictyaml/blob/efdac7f89e81679fc95686288cd32b9563fde609/hitch/key.py#L101-L111
|
linear regression
|
python
|
def linOriginRegression(points):
"""
computes a linear regression starting at zero
"""
j = sum([ i[0] for i in points ])
k = sum([ i[1] for i in points ])
if j != 0:
return k/j, j, k
return 1, j, k
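# A minimal usage sketch; points are (x, y) pairs and the slope through the
# origin is computed as sum(y) / sum(x):
slope, sum_x, sum_y = linOriginRegression([(1, 2), (2, 4), (3, 6)])
# slope == 2.0, sum_x == 6, sum_y == 12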
|
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/misc.py#L28-L36
|
linear regression
|
python
|
def linear(X, n, *args, **kwargs):
"""Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.
The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.
Parameters
----------
X : array, (`M`, `D`)
The points to evaluate the model at.
n : array of non-negative int, (`D`)
The derivative order to take, specified as an integer order for each
dimension in `X`.
*args : num_dim+1 floats
The slopes for each dimension, plus the constant term. Must be of the
form `m0, m1, ..., b`.
"""
hyper_deriv = kwargs.pop('hyper_deriv', None)
m = scipy.asarray(args[:-1])
b = args[-1]
if sum(n) > 1:
return scipy.zeros(X.shape[0])
elif sum(n) == 0:
if hyper_deriv is not None:
if hyper_deriv < len(m):
return X[:, hyper_deriv]
elif hyper_deriv == len(m):
return scipy.ones(X.shape[0])
else:
raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
else:
return (m * X).sum(axis=1) + b
else:
# sum(n) == 1:
if hyper_deriv is not None:
if n[hyper_deriv] == 1:
return scipy.ones(X.shape[0])
else:
return scipy.zeros(X.shape[0])
return m[n == 1] * scipy.ones(X.shape[0])
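# A minimal usage sketch for m(X) = 2*x0 - 1*x1 + 3, assuming a scipy version
# that still re-exports numpy helpers (asarray, ones, zeros), as the snippet itself does:
import scipy
X_demo = scipy.asarray([[0.0, 0.0], [1.0, 2.0]])
linear(X_demo, scipy.asarray([0, 0]), 2.0, -1.0, 3.0)  # values   -> array([3., 3.])
linear(X_demo, scipy.asarray([0, 1]), 2.0, -1.0, 3.0)  # d/d(x1)  -> array([-1., -1.])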
|
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/mean.py#L446-L484
|
linear regression
|
python
|
def logistic_regression(X, y, coef_only=False, alpha=0.05,
as_dataframe=True, remove_na=False, **kwargs):
"""(Multiple) Binary logistic regression.
Parameters
----------
X : np.array or list
Predictor(s). Shape = (n_samples, n_features) or (n_samples,).
y : np.array or list
Dependent variable. Shape = (n_samples).
Must be binary.
coef_only : bool
If True, return only the regression coefficients.
alpha : float
Alpha value used for the confidence intervals.
CI = [alpha / 2 ; 1 - alpha / 2]
as_dataframe : bool
        If True, returns a pandas DataFrame. If False, returns a dictionary.
remove_na : bool
If True, apply a listwise deletion of missing values (i.e. the entire
row is removed).
**kwargs : optional
Optional arguments passed to sklearn.linear_model.LogisticRegression.
Returns
-------
stats : dataframe or dict
Logistic regression summary::
'names' : name of variable(s) in the model (e.g. x1, x2...)
'coef' : regression coefficients
'se' : standard error
'z' : z-scores
'pval' : two-tailed p-values
'CI[2.5%]' : lower confidence interval
'CI[97.5%]' : upper confidence interval
Notes
-----
This is a wrapper around the
:py:class:`sklearn.linear_model.LogisticRegression` class.
Results have been compared against statsmodels and JASP.
Note that the first coefficient is always the constant term (intercept) of
the model.
This function will not run if NaN values are either present in the target
    or predictor variables. Please remove them before running the function.
Adapted from a code found at
https://gist.github.com/rspeare/77061e6e317896be29c6de9a85db301d
Examples
--------
1. Simple binary logistic regression
>>> import numpy as np
>>> from pingouin import logistic_regression
>>> np.random.seed(123)
>>> x = np.random.normal(size=30)
>>> y = np.random.randint(0, 2, size=30)
>>> lom = logistic_regression(x, y)
>>> lom.round(2)
names coef se z pval CI[2.5%] CI[97.5%]
0 Intercept -0.27 0.37 -0.73 0.46 -0.99 0.45
1 x1 0.06 0.32 0.19 0.85 -0.56 0.68
2. Multiple binary logistic regression
>>> np.random.seed(42)
>>> z = np.random.normal(size=30)
>>> X = np.column_stack((x, z))
>>> lom = logistic_regression(X, y)
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
3. Using a Pandas DataFrame
>>> import pandas as pd
>>> df = pd.DataFrame({'x': x, 'y': y, 'z': z})
>>> lom = logistic_regression(df[['x', 'z']], df['y'])
>>> print(lom['coef'].values)
[-0.34933805 -0.0226106 -0.39453532]
4. Return only the coefficients
>>> logistic_regression(X, y, coef_only=True)
array([-0.34933805, -0.0226106 , -0.39453532])
    5. Passing custom parameters to sklearn
>>> lom = logistic_regression(X, y, solver='sag', max_iter=10000)
>>> print(lom['coef'].values)
[-0.34941889 -0.02261911 -0.39451064]
"""
# Check that sklearn is installed
from pingouin.utils import _is_sklearn_installed
_is_sklearn_installed(raise_error=True)
from sklearn.linear_model import LogisticRegression
# Extract names if X is a Dataframe or Series
if isinstance(X, pd.DataFrame):
names = X.keys().tolist()
elif isinstance(X, pd.Series):
names = [X.name]
else:
names = []
assert 0 < alpha < 1
assert y.ndim == 1, 'y must be one-dimensional.'
# Convert to numpy array
X = np.asarray(X)
y = np.asarray(y)
# Add axis if only one-dimensional array
if X.ndim == 1:
X = X[..., np.newaxis]
# Check for NaN / Inf
if remove_na:
X, y = rm_na(X, y[..., np.newaxis], paired=True, axis='rows')
y = np.squeeze(y)
y_gd = np.isfinite(y).all()
X_gd = np.isfinite(X).all()
assert y_gd, 'Target variable contains NaN or Inf. Please remove them.'
assert X_gd, 'Predictors contains NaN or Inf. Please remove them.'
# Check that X and y have same length
assert y.shape[0] == X.shape[0], 'X and y must have same number of samples'
# Check that y is binary
if np.unique(y).size != 2:
raise ValueError('Dependent variable must be binary.')
if not names:
names = ['x' + str(i + 1) for i in range(X.shape[1])]
# Add intercept in names
names.insert(0, "Intercept")
# Initialize and fit
if 'solver' not in kwargs:
kwargs['solver'] = 'lbfgs'
if 'multi_class' not in kwargs:
kwargs['multi_class'] = 'auto'
lom = LogisticRegression(**kwargs)
lom.fit(X, y)
coef = np.append(lom.intercept_, lom.coef_)
if coef_only:
return coef
# Design matrix -- add intercept
X_design = np.column_stack((np.ones(X.shape[0]), X))
n, p = X_design.shape
# Fisher Information Matrix
denom = (2 * (1 + np.cosh(lom.decision_function(X))))
denom = np.tile(denom, (p, 1)).T
fim = np.dot((X_design / denom).T, X_design)
crao = np.linalg.inv(fim)
# Standard error and Z-scores
se = np.sqrt(np.diag(crao))
z_scores = coef / se
# Two-tailed p-values
pval = np.array([2 * norm.sf(abs(z)) for z in z_scores])
# Confidence intervals
crit = norm.ppf(1 - alpha / 2)
ll = coef - crit * se
ul = coef + crit * se
# Rename CI
ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
# Create dict
stats = {'names': names, 'coef': coef, 'se': se, 'z': z_scores,
'pval': pval, ll_name: ll, ul_name: ul}
if as_dataframe:
return pd.DataFrame.from_dict(stats)
else:
return stats
|
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/regression.py#L226-L411
|
linear regression
|
python
|
def llinregress(x,y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L928-L953
|
linear regression
|
python
|
def get_logistic_regression_coefs_l1(self, category,
clf=LassoCV(alphas=[0.1, 0.001],
max_iter=10000,
n_jobs=-1)):
''' Computes l1-penalized logistic regression score.
Parameters
----------
category : str
category name to score
Returns
-------
(coefficient array, accuracy, majority class baseline accuracy)
'''
try:
from sklearn.cross_validation import cross_val_predict
        except ImportError:
from sklearn.model_selection import cross_val_predict
y = self._get_mask_from_category(category)
y_continuous = self._get_continuous_version_boolean_y(y)
# X = TfidfTransformer().fit_transform(self._X)
X = self._X
clf.fit(X, y_continuous)
y_hat = (cross_val_predict(clf, X, y_continuous) > 0)
acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)
clf.fit(X, y_continuous)
return clf.coef_, acc, baseline
|
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrix.py#L421-L448
|
linear regression
|
python
|
def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None):
"""
        computes the linear response matrix (m x n), with n being the data size and m being the coefficients
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return:
"""
A = self._response_matrix(self.ImageNumerics.ra_grid_ray_shooting, self.ImageNumerics.dec_grid_ray_shooting,
kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, self.ImageNumerics.mask)
return A
|
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/image_model.py#L163-L175
|
linear regression
|
python
|
def regression(self, y, y_mask, smoothness_penalty=0, kernel='rbf'):
'''Perform vertex-valued regression, given partial labels.
y : (n,d) array of known labels
y_mask : index object such that all_labels[y_mask] == y
From "Regularization and Semi-supervised Learning on Large Graphs"
by Belkin, Matveeva, and Niyogi in 2004.
Doesn't support multiple labels per vertex, unlike the paper's algorithm.
To allow provided y values to change, use a (small) smoothness_penalty.
'''
n = self.num_vertices()
# input validation for y
y = np.array(y, copy=True)
ravel_f = False
if y.ndim == 1:
y = y[:,None]
ravel_f = True
if y.ndim != 2 or y.size == 0:
raise ValueError('Invalid shape of y array: %s' % (y.shape,))
k, d = y.shape
# input validation for y_mask
if not hasattr(y_mask, 'dtype') or y_mask.dtype != 'bool':
tmp = np.zeros(n, dtype=bool)
tmp[y_mask] = True
y_mask = tmp
# mean-center known y for stability
y_mean = y.mean(axis=0)
y -= y_mean
# use the normalized Laplacian for the smoothness matrix
S = self.kernelize(kernel).laplacian(normed=True)
if ss.issparse(S):
S = S.tocsr()
if smoothness_penalty == 0:
# see Algorithm 2: Interpolated Regularization
unlabeled_mask = ~y_mask
S_23 = S[unlabeled_mask, :]
S_3 = S_23[:, unlabeled_mask]
rhs = S_23[:, y_mask].dot(y)
if ss.issparse(S):
f_unlabeled = ss.linalg.spsolve(S_3, rhs)
if f_unlabeled.ndim == 1:
f_unlabeled = f_unlabeled[:,None]
else:
f_unlabeled = sl.solve(S_3, rhs, sym_pos=True, overwrite_a=True,
overwrite_b=True)
f = np.zeros((n, d))
f[y_mask] = y
f[unlabeled_mask] = -f_unlabeled
else:
# see Algorithm 1: Tikhonov Regularization in the paper
y_hat = np.zeros((n, d))
y_hat[y_mask] = y
I = y_mask.astype(float) # only one label per vertex
lhs = k * smoothness_penalty * S
if ss.issparse(lhs):
lhs.setdiag(lhs.diagonal() + I)
f = ss.linalg.lsqr(lhs, y_hat)[0]
else:
lhs.flat[::n+1] += I
f = sl.solve(lhs, y_hat, sym_pos=True, overwrite_a=True,
overwrite_b=True)
# re-add the mean
f += y_mean
if ravel_f:
return f.ravel()
return f
|
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/label.py#L153-L224
|
linear regression
|
python
|
def l2_regression(
input_, target, name=PROVIDED, loss_weight=None,
per_example_weights=None):
"""Applies an L2 Regression (Sum of Squared Error) to the target."""
target = _convert_and_assert_tensors_compatible(input_, target)
return apply_regression(input_,
functions.l2_regression_sq_loss,
target,
[],
name='%s_loss' % name,
loss_weight=loss_weight,
per_example_weights=per_example_weights)
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_loss_methods.py#L134-L145
|
linear regression
|
python
|
def _lreg_bokeh(self, **kwargs):
"""
Returns a Bokeh linear regression line
"""
try:
ds2 = self._duplicate_()
ds2.timestamps(ds2.x)
ds2.lreg("Timestamps", ds2.y)
ds2.drop(ds2.y)
ds2.df = ds2.df.rename(columns={'Regression': ds2.y})
if "date_format" in self.chart_style:
ds2.date("Date", format=self.chart_style["date_format"])
c = ds2.line_()
return c
except Exception as e:
self.err(e, "Can not draw linear regression chart")
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/bokeh.py#L90-L105
|
linear regression
|
python
|
def explain_prediction_linear_regressor(reg, doc,
vec=None,
top=None,
top_targets=None,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
vectorized=False):
"""
Explain prediction of a linear regressor.
See :func:`eli5.explain_prediction` for description of
``top``, ``top_targets``, ``target_names``, ``targets``,
``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
``vec`` is a vectorizer instance used to transform
raw features to the input of the classifier ``clf``;
you can pass it instead of ``feature_names``.
``vectorized`` is a flag which tells eli5 if ``doc`` should be
passed through ``vec`` or not. By default it is False, meaning that
if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
regressor ``reg``. Set it to True if you're passing ``vec``,
but ``doc`` is already vectorized.
"""
if isinstance(reg, (SVR, NuSVR)) and reg.kernel != 'linear':
return explain_prediction_sklearn_not_supported(reg, doc)
vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)
score, = reg.predict(X)
if has_intercept(reg):
X = add_intercept(X)
x = get_X0(X)
feature_names, flt_indices = feature_names.handle_filter(
feature_filter, feature_re, x)
res = Explanation(
estimator=repr(reg),
method='linear model',
targets=[],
is_regression=True,
)
assert res.targets is not None
_weights = _linear_weights(reg, x, top, feature_names, flt_indices)
names = get_default_target_names(reg)
display_names = get_target_display_names(names, target_names, targets,
top_targets, score)
if is_multitarget_regressor(reg):
for label_id, label in display_names:
target_expl = TargetExplanation(
target=label,
feature_weights=_weights(label_id),
score=score[label_id],
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
else:
target_expl = TargetExplanation(
target=display_names[0][1],
feature_weights=_weights(0),
score=score,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
return res
|
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/explain_prediction.py#L258-L331
|
linear regression
|
python
|
def LR_train(w, b, X, Y, num_iterations, learning_rate, print_cost=True):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w weights, a numpy array of size (dim, 1)
b bias, a scalar
X data of shape (dim, number of examples)
Y true "label" vector, of shape (1, number of examples)
num_iterations number of iterations of the optimization loop
learning_rate learning rate of the gradient descent update rule
print_cost True to print the loss every 100 steps
Returns:
params dictionary containing the weights w and bias b
grads dictionary containing the gradients of the weights and bias with respect to the cost function
costs list of all the costs computed during the optimization, this will be used to plot the learning curve.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation
grads, cost = propagate(w, b, X, Y)
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule
w = w - learning_rate * dw
b = b - learning_rate * db
# Record the costs
if i % 100 == 0:
costs.append(cost)
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
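# A minimal usage sketch; `propagate` lives elsewhere in the module, so a
# hypothetical stand-in for binary logistic regression is defined here:
import numpy as np

def propagate(w, b, X, Y):
    """Hypothetical stand-in: sigmoid forward pass, cross-entropy cost, gradients."""
    m = X.shape[1]
    A = 1.0 / (1.0 + np.exp(-(np.dot(w.T, X) + b)))            # activations, shape (1, m)
    cost = -np.mean(Y * np.log(A) + (1 - Y) * np.log(1 - A))   # cross-entropy cost
    dw = np.dot(X, (A - Y).T) / m
    db = np.mean(A - Y)
    return {"dw": dw, "db": db}, cost

np.random.seed(1)
X_demo = np.random.randn(2, 200)                               # (dim, number of examples)
Y_demo = (X_demo[0:1, :] + X_demo[1:2, :] > 0).astype(float)
w0, b0 = np.zeros((2, 1)), 0.0
params, grads, costs = LR_train(w0, b0, X_demo, Y_demo, num_iterations=500,
                                learning_rate=0.1, print_cost=False)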
|
https://github.com/zengbin93/zb/blob/ccdb384a0b5801b459933220efcb71972c2b89a7/zb/algorithms/logistic_regression.py#L71-L119
|
linear regression
|
python
|
def to_linear(self, index=None):
"""
Transforms the StepColormap into a LinearColormap.
Parameters
----------
index : list of floats, default None
The values corresponding to each color in the output colormap.
It has to be sorted.
If None, a regular grid between `vmin` and `vmax` is created.
"""
if index is None:
n = len(self.index)-1
index = [self.index[i]*(1.-i/(n-1.))+self.index[i+1]*i/(n-1.) for
i in range(n)]
colors = [self.rgba_floats_tuple(x) for x in index]
return LinearColormap(colors, index=index,
vmin=self.vmin, vmax=self.vmax)
|
https://github.com/python-visualization/branca/blob/4e89e88a5a7ff3586f0852249c2c125f72316da8/branca/colormap.py#L390-L409
|
linear regression
|
python
|
def as_nonlinear(self, params=None):
"""Return a `Model` equivalent to this object. The nonlinear solver is less
efficient, but lets you freeze parameters, compute uncertainties, etc.
If the `params` argument is provided, solve() will be called on the
returned object with those parameters. If it is `None` and this object
    has parameters in `self.params`, those will be used. Otherwise, solve()
will not be called on the returned object.
"""
if params is None:
params = self.params
nlm = Model(None, self.data, self.invsigma)
nlm.set_func(lambda p, x: npoly.polyval(x, p),
self.pnames,
args=(self.x,))
if params is not None:
nlm.solve(params)
return nlm
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/lsqmdl.py#L444-L464
|
linear regression
|
python
|
def linear(Ks, dim, num_q, rhos, nus):
r'''
Estimates the linear inner product \int p q between two distributions,
based on kNN distances.
'''
return _get_linear(Ks, dim)(num_q, rhos, nus)
|
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L560-L565
|
linear regression
|
python
|
def eval_linear_approx(self, Dxy, gradY):
r"""Compute term :math:`\langle \nabla f(\mathbf{y}),
\mathbf{x} - \mathbf{y} \rangle` (in frequency domain) that is
part of the quadratic function :math:`Q_L` used for
backtracking. Since this class computes the backtracking in
the DFT, it is important to preserve the DFT scaling.
"""
return np.sum(np.real(np.conj(Dxy) * gradY))
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/fista/fista.py#L930-L938
|
linear regression
|
python
|
def _linear_interp(curve, test_x, round_result=False):
"""
Take a series of points and interpolate between them at ``test_x``.
Args:
curve (list[tuple]): A list of ``(x, y)`` points sorted in
nondecreasing ``x`` value. If multiple points have the same
``x`` value, all but the last will be ignored.
test_x (float): The ``x`` value to find the ``y`` value of
Returns:
float: The ``y`` value of the curve at ``test_x``
if ``round_result is False``
int: if ``round_result is True`` or the result is a whole number,
the ``y`` value of the curve at ``test_x`` rounded to the
nearest whole number.
Raises:
ProbabilityUndefinedError: if ``test_x`` is out of the
domain of ``curve``
Example:
>>> curve = [(0, 0), (2, 1)]
>>> _linear_interp(curve, 0.5)
0.25
>>> _linear_interp(curve, 0.5, round_result=True)
0
"""
index = 0
for index in range(len(curve) - 1):
# Ignore points which share an x value with the following point
if curve[index][0] == curve[index + 1][0]:
continue
if curve[index][0] <= test_x <= curve[index + 1][0]:
slope = ((curve[index + 1][1] - curve[index][1]) /
(curve[index + 1][0] - curve[index][0]))
y_intercept = curve[index][1] - (slope * curve[index][0])
result = (slope * test_x) + y_intercept
if round_result:
return int(round(result))
else:
if result.is_integer():
return int(result)
else:
return result
else:
raise ProbabilityUndefinedError
|
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/blur/rand.py#L27-L74
|
linear regression
|
python
|
def optimize_linear(grad, eps, ord=np.inf):
"""
Solves for the optimal input to a linear function under a norm constraint.
Optimal_perturbation = argmax_{eta, ||eta||_{ord} < eps} dot(eta, grad)
:param grad: Tensor, shape (N, d_1, ...). Batch of gradients
:param eps: float. Scalar specifying size of constraint region
:param ord: np.inf, 1, or 2. Order of norm constraint.
:returns: Tensor, shape (N, d_1, ...). Optimal perturbation
"""
red_ind = list(range(1, len(grad.size())))
avoid_zero_div = torch.tensor(1e-12, dtype=grad.dtype, device=grad.device)
if ord == np.inf:
# Take sign of gradient
optimal_perturbation = torch.sign(grad)
elif ord == 1:
abs_grad = torch.abs(grad)
sign = torch.sign(grad)
red_ind = list(range(1, len(grad.size())))
abs_grad = torch.abs(grad)
ori_shape = [1]*len(grad.size())
ori_shape[0] = grad.size(0)
max_abs_grad, _ = torch.max(abs_grad.view(grad.size(0), -1), 1)
max_mask = abs_grad.eq(max_abs_grad.view(ori_shape)).to(torch.float)
num_ties = max_mask
for red_scalar in red_ind:
num_ties = torch.sum(num_ties, red_scalar, keepdim=True)
optimal_perturbation = sign * max_mask / num_ties
# TODO integrate below to a test file
# check that the optimal perturbations have been correctly computed
opt_pert_norm = optimal_perturbation.abs().sum(dim=red_ind)
assert torch.all(opt_pert_norm == torch.ones_like(opt_pert_norm))
elif ord == 2:
square = torch.max(
avoid_zero_div,
torch.sum(grad ** 2, red_ind, keepdim=True)
)
optimal_perturbation = grad / torch.sqrt(square)
# TODO integrate below to a test file
# check that the optimal perturbations have been correctly computed
opt_pert_norm = optimal_perturbation.pow(2).sum(dim=red_ind, keepdim=True).sqrt()
one_mask = (square <= avoid_zero_div).to(torch.float) * opt_pert_norm + \
(square > avoid_zero_div).to(torch.float)
assert torch.allclose(opt_pert_norm, one_mask, rtol=1e-05, atol=1e-08)
else:
raise NotImplementedError("Only L-inf, L1 and L2 norms are "
"currently implemented.")
# Scale perturbation to be the solution for the norm=eps rather than
# norm=1 problem
scaled_perturbation = eps * optimal_perturbation
return scaled_perturbation
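# A minimal usage sketch (torch and numpy assumed available, as in the snippet):
import numpy as np
import torch

grad = torch.randn(4, 3, 8, 8)                      # a batch of 4 gradient tensors
eta = optimize_linear(grad, eps=0.3, ord=np.inf)    # FGSM-style signed step
assert float(eta.abs().max()) <= 0.3 + 1e-6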
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_pytorch.py#L159-L213
|
linear regression
|
python
|
def plsr(df, v, n_components=2, mean_center=False, scale=True, fcol=None, ecol=None, marker='o', markersize=40, threshold=None, label_threshold=None, label_weights=None, label_scores=None, return_df=False, show_covariance_ellipse=False, *args, **kwargs):
"""
Partial Least Squares Regression Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a partial least squares regression (PLS-R) on the supplied dataframe ``df``
against the provided continuous variable ``v``, selecting the first ``n_components``.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
Resulting scores, weights and regression plots are generated showing the distribution of samples within the resulting
PCA space. Sample color and marker size can be controlled by label, lookup and calculation (lambda) to
generate complex plots highlighting sample separation.
For further information see the examples included in the documentation.
:param df: Pandas `DataFrame`
:param v: Continuous variable to perform regression against
:param n_components: `int` number of Principal components to return
:param mean_center: `bool` mean center the data before performing PCA
:param fcol: `dict` of indexers:colors, where colors are hex colors or matplotlib color names
:param ecol: `dict` of indexers:colors, where colors are hex colors or matplotlib color names
:param marker: `str` matplotlib marker name (default "o")
:param markersize: `int` or `callable` which returns an `int` for a given indexer
:param threshold: `float` weight threshold for plot (horizontal line)
:param label_threshold: `float` weight threshold over which to draw labels
:param label_weights: `list` of `str`
:param label_scores: `list` of `str`
:param return_df: `bool` return the resulting scores, weights as pandas DataFrames
:param show_covariance_ellipse: `bool` show the covariance ellipse around each group
:param args: additional arguments passed to analysis.pca
:param kwargs: additional arguments passed to analysis.pca
:return:
"""
scores, weights, loadings, predicted = analysis.plsr(df, v, n_components=n_components, scale=scale, *args, **kwargs)
scores_ax = _pca_scores(scores, fcol=fcol, ecol=ecol, marker=marker, markersize=markersize, label_scores=label_scores, show_covariance_ellipse=show_covariance_ellipse)
weights_ax = []
for pc in range(0, weights.shape[1]):
weights_ax.append( _pca_weights(weights, pc, threshold=threshold, label_threshold=label_threshold, label_weights=label_weights) )
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1,1,1)
slope, intercept, r, p, se = sp.stats.linregress(v, predicted.flatten())
# Add regression line
xmin, xmax = np.min(v), np.max(v)
ax.plot([xmin, xmax],[xmin*slope+intercept, xmax*slope+intercept], lw=1, c='k')
ax.scatter(v, predicted, s=50, alpha=0.5)
ax.set_xlabel("Actual values")
ax.set_ylabel("Predicted values")
ax.set_aspect(1./ax.get_data_ratio())
ax.text(0.05, 0.95, '$y = %.2f+%.2fx$' % (intercept, slope), horizontalalignment='left', transform=ax.transAxes, color='black', fontsize=14)
ax.text(0.95, 0.15, '$r^2$ = %.2f' % (r**2), horizontalalignment='right', transform=ax.transAxes, color='black', fontsize=14)
ax.text(0.95, 0.10, '$p$ = %.2f' % p, horizontalalignment='right', transform=ax.transAxes, color='black', fontsize=14)
ax.text(0.95, 0.05, '$SE$ = %.2f' % se, horizontalalignment='right', transform=ax.transAxes, color='black', fontsize=14)
if return_df:
return scores, weights, loadings, predicted
else:
return scores_ax, weights_ax, ax
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L362-L429
|
linear regression
|
python
|
def nonlinear_odr(x, y, dx, dy, func, params_init, **kwargs):
"""Perform a non-linear orthogonal distance regression, return the results as
ErrorValue() instances.
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dx: absolute error (square root of the variance) of the independent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled. Non-finite (NaN or inf) elements signify
that the corresponding element in x is to be treated as fixed by
ODRPACK.
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. If None,
weighting is disabled.
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
        other optional keyword arguments will be passed to scipy.odr.ODR().
Outputs: par1, par2, par3, ... , statdict
par1, par2, par3, ...: fitted values of par1, par2, par3 etc
as instances of ErrorValue.
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, the module scipy.odr is used.
"""
odrmodel=odr.Model(lambda pars, x: func(x,*pars))
if dx is not None:
# treat non-finite values as fixed
xfixed=np.isfinite(dx)
else:
xfixed=None
odrdata=odr.RealData(x, y, sx=dx,sy=dy, fix=xfixed)
odrodr=odr.ODR(odrdata,odrmodel,params_init,ifixb=[not(isinstance(p,FixedParameter)) for p in params_init],
**kwargs)
odroutput=odrodr.run()
statdict=odroutput.__dict__.copy()
statdict['Covariance']=odroutput.cov_beta
statdict['Correlation_coeffs']=odroutput.cov_beta/np.outer(odroutput.sd_beta,odroutput.sd_beta)
statdict['DoF']=len(x)-len(odroutput.beta)
statdict['Chi2_reduced']=statdict['res_var']
statdict['func_value']=statdict['y']
statdict['Chi2']=statdict['sum_square']
def convert(p_, dp_, pi):
if isinstance(pi, FixedParameter):
return FixedParameter(p_)
else:
return ErrorValue(p_, dp_)
return tuple([convert(p_, dp_, pi) for (p_, dp_, pi) in zip(odroutput.beta, odroutput.sd_beta, params_init)] + [statdict])
|
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/misc/easylsq.py#L116-L179
|
linear regression
|
python
|
def linear_smooth(t, y, dy, span=None, cv=True,
t_out=None, span_out=None, period=None):
"""Perform a linear smooth of the data
Parameters
----------
t, y, dy : array_like
time, value, and error in value of the input data
span : array_like
the integer spans of the data
cv : boolean (default=True)
if True, treat the problem as a cross-validation, i.e. don't use
each point in the evaluation of its own smoothing.
t_out : array_like (optional)
the output times for the moving averages
span_out : array_like (optional)
the spans associated with the output times t_out
period : float
if provided, then consider the inputs periodic with the given period
Returns
-------
y_smooth : array_like
smoothed y values at each time t or t_out
"""
t_input = t
prep = _prep_smooth(t, y, dy, span, t_out, span_out, period)
t, y, dy, span, t_out, span_out, indices = prep
if period:
t_input = np.asarray(t_input) % period
w = 1. / (dy ** 2)
w, yw, tw, tyw, ttw = windowed_sum([w, y * w, w, y * w, w], t=t,
tpowers=[0, 0, 1, 1, 2],
span=span, indices=indices,
subtract_mid=cv, period=period)
denominator = (w * ttw - tw * tw)
slope = (tyw * w - tw * yw)
intercept = (ttw * yw - tyw * tw)
if np.any(denominator == 0):
raise ValueError("Zero denominator in linear smooth. This usually "
"indicates that the input contains duplicate points.")
if t_out is None:
return (slope * t_input + intercept) / denominator
elif span_out is not None:
return (slope * t_out + intercept) / denominator
else:
i = np.minimum(len(t) - 1, np.searchsorted(t, t_out))
return (slope[i] * t_out + intercept[i]) / denominator[i]
|
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L141-L192
|
linear regression
|
python
|
def _compute_non_linear_term(self, pga4nl, bnl):
"""
Compute non-linear term,
equation (8a) to (8c), pag 108.
"""
fnl = np.zeros(pga4nl.shape)
a1 = 0.03
a2 = 0.09
pga_low = 0.06
# equation (8a)
idx = pga4nl <= a1
fnl[idx] = bnl[idx] * np.log(pga_low / 0.1)
# equation (8b)
idx = np.where((pga4nl > a1) & (pga4nl <= a2))
delta_x = np.log(a2 / a1)
delta_y = bnl[idx] * np.log(a2 / pga_low)
c = (3 * delta_y - bnl[idx] * delta_x) / delta_x ** 2
d = -(2 * delta_y - bnl[idx] * delta_x) / delta_x ** 3
fnl[idx] = bnl[idx] * np.log(pga_low / 0.1) +\
c * (np.log(pga4nl[idx] / a1) ** 2) + \
d * (np.log(pga4nl[idx] / a1) ** 3)
# equation (8c)
idx = pga4nl > a2
fnl[idx] = np.squeeze(bnl[idx]) * np.log(pga4nl[idx] / 0.1)
return fnl
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/boore_atkinson_2008.py#L244-L273
|
linear regression
|
python
|
def c3(x, lag):
"""
This function calculates the value of
.. math::
\\frac{1}{n-2lag} \sum_{i=0}^{n-2lag} x_{i + 2 \cdot lag}^2 \cdot x_{i + lag} \cdot x_{i}
which is
.. math::
\\mathbb{E}[L^2(X)^2 \cdot L(X) \cdot X]
where :math:`\\mathbb{E}` is the mean and :math:`L` is the lag operator. It was proposed in [1] as a measure of
non linearity in the time series.
.. rubric:: References
| [1] Schreiber, T. and Schmitz, A. (1997).
| Discrimination power of measures for nonlinearity in a time series
| PHYSICAL REVIEW E, VOLUME 55, NUMBER 5
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param lag: the lag that should be used in the calculation of the feature
:type lag: int
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
n = x.size
if 2 * lag >= n:
return 0
else:
return np.mean((_roll(x, 2 * -lag) * _roll(x, -lag) * x)[0:(n - 2 * lag)])
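# A minimal usage sketch; `_roll` is a tsfresh helper equivalent to numpy.roll
# for 1-d arrays, so numpy.roll is used as a stand-in here:
import numpy as np
import pandas as pd
_roll = np.roll
x_demo = np.sin(np.linspace(0, 10 * np.pi, 1000))
c3(x_demo, lag=1)  # a single float; larger magnitude suggests stronger non-linearity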
|
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L1399-L1435
|
linear regression
|
python
|
def linearize(wcsim, wcsima, wcsref, imcrpix, f, shift, hx=1.0, hy=1.0):
""" linearization using 5-point formula for first order derivative
"""
x0 = imcrpix[0]
y0 = imcrpix[1]
p = np.asarray([[x0, y0],
[x0 - hx, y0],
[x0 - hx * 0.5, y0],
[x0 + hx * 0.5, y0],
[x0 + hx, y0],
[x0, y0 - hy],
[x0, y0 - hy * 0.5],
[x0, y0 + hy * 0.5],
[x0, y0 + hy]],
dtype=np.float64)
# convert image coordinates to reference image coordinates:
p = wcsref.wcs_world2pix(wcsim.wcs_pix2world(p, 1), 1).astype(ndfloat128)
# apply linear fit transformation:
p = np.dot(f, (p - shift).T).T
# convert back to image coordinate system:
p = wcsima.wcs_world2pix(
wcsref.wcs_pix2world(p.astype(np.float64), 1), 1).astype(ndfloat128)
# derivative with regard to x:
u1 = ((p[1] - p[4]) + 8 * (p[3] - p[2])) / (6*hx)
# derivative with regard to y:
u2 = ((p[5] - p[8]) + 8 * (p[7] - p[6])) / (6*hy)
return (np.asarray([u1, u2]).T, p[0])
|
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/updatehdr.py#L283-L312
|
linear regression
|
python
|
def apply_regression(input_,
regression_fn,
target,
regression_args=(),
regression_kwargs=None,
name=PROVIDED,
loss_weight=None,
per_example_weights=None):
"""Applies the given regression and adds the loss to the bookkeeper.
This does not change tensor.
Args:
input_: A Tensor or a Pretty Tensor holding the input.
regression_fn: A function that takes (in order) tensor, labels.
    target: The target of the regression.
regression_args: Other arguments for the regression.
regression_kwargs: Keyword args for the regression.
name: The name, also added to regression_kwargs.
loss_weight: A scalar multiplier for the loss.
per_example_weights: A Tensor with a weight per example.
Returns:
The loss tensor's name.
Raises:
ValueError: If the target is not a compatible shape with input_.
"""
if regression_kwargs is None:
regression_kwargs = {}
if name is not None and 'name' not in regression_kwargs:
regression_kwargs['name'] = name
elif name is None:
name = input_.tensor.op.name
tensor = input_.tensor
loss = regression_fn(tensor, target, *regression_args, **regression_kwargs)
if loss_weight is not None:
loss *= loss_weight
if per_example_weights is not None:
per_example_weights = _convert_and_assert_per_example_weights_compatible(
input_,
per_example_weights,
dtype=loss.dtype)
loss *= per_example_weights
# Use mean so that the learning rate is independent of the batch size.
if name is None:
name = loss.op.name
if tensor.get_shape()[0].value is not None:
# Try to use division instead of reduce_mean because reduce_mean doesn't
# work on GPU.
avg_loss = tf.reduce_sum(loss) / tensor.get_shape()[0].value
else:
avg_loss = tf.reduce_mean(loss)
return input_.add_loss(avg_loss, name=name)
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_loss_methods.py#L79-L130
|
linear regression
|
python
|
def l1_regression(
input_, target, name=PROVIDED, loss_weight=None,
per_example_weights=None):
"""Applies an L1 Regression (Sum of Absolute Error) to the target."""
target = _convert_and_assert_tensors_compatible(input_, target)
return apply_regression(input_,
functions.l1_regression_loss,
target,
[],
name='%s_loss' % name,
loss_weight=loss_weight,
per_example_weights=per_example_weights)
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_loss_methods.py#L149-L160
|
linear regression
|
python
|
def get_linearoperator(shape, A, timer=None):
"""Enhances aslinearoperator if A is None."""
ret = None
import scipy.sparse.linalg as scipylinalg
if isinstance(A, LinearOperator):
ret = A
elif A is None:
ret = IdentityLinearOperator(shape)
elif isinstance(A, numpy.ndarray) or isspmatrix(A):
ret = MatrixLinearOperator(A)
elif isinstance(A, numpy.matrix):
ret = MatrixLinearOperator(numpy.atleast_2d(numpy.asarray(A)))
elif isinstance(A, scipylinalg.LinearOperator):
if not hasattr(A, 'dtype'):
raise ArgumentError('scipy LinearOperator has no dtype.')
ret = LinearOperator(A.shape, dot=A.matvec, dot_adj=A.rmatvec,
dtype=A.dtype)
else:
raise TypeError('type not understood')
# set up timer if requested
if A is not None and not isinstance(A, IdentityLinearOperator) \
and timer is not None:
ret = TimedLinearOperator(ret, timer)
# check shape
if shape != ret.shape:
raise LinearOperatorError('shape mismatch')
return ret
|
https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/utils.py#L207-L236
|
linear regression
|
python
|
def cric__lasso():
    """ L1-penalized (lasso) logistic regression
"""
model = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002)
    # we want to explain the raw probability outputs of the model
model.predict = lambda X: model.predict_proba(X)[:,1]
return model
|
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/models.py#L133-L141
|
linear regression
|
python
|
def var_regression_matrix(H, x, model, sigma=1):
"""
Compute the variance of the 'regression error'.
Parameters
----------
H : 2d-array
The regression matrix
x : 2d-array
The coordinates to calculate the regression error variance at.
model : str
A string of tokens that define the regression model (e.g.
'1 x1 x2 x1*x2')
sigma : scalar
An estimate of the variance (default: 1).
Returns
-------
var : scalar
The variance of the regression error, evaluated at ``x``.
"""
x = np.atleast_2d(x)
H = np.atleast_2d(H)
if x.shape[0]==1:
x = x.T
if np.linalg.matrix_rank(H) < (np.dot(H.T, H)).shape[0]:
raise ValueError("model and DOE don't suit together")
x_mod = build_regression_matrix(x, model)
var = sigma**2*np.dot(np.dot(x_mod.T, np.linalg.inv(np.dot(H.T, H))), x_mod)
return var
|
https://github.com/tisimst/pyDOE/blob/436143702507a5c8ff87b361223eee8171d6a1d7/pyDOE/var_regression_matrix.py#L18-L51
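The core formula, written out with NumPy for the simple model '1 x1' so that build_regression_matrix is not needed; the design matrix and evaluation point below are illustrative assumptions.

import numpy as np

#     var(x) = sigma**2 * f(x)^T (H^T H)^{-1} f(x),  with f(x) = [1, x1]
H = np.array([[1.0, -1.0],
              [1.0,  0.0],
              [1.0,  1.0]])          # regression matrix of a three-run design
x_mod = np.array([[1.0], [0.5]])     # model terms evaluated at x1 = 0.5
sigma = 1.0

var = sigma**2 * (x_mod.T @ np.linalg.inv(H.T @ H) @ x_mod)   # ~0.458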
|
linear regression
|
python
|
def analog_linear2_ramp(ramp_data, start_time, end_time, value_final,
time_subarray):
"""Use this when you want a discontinuous jump at the end of the linear ramp."""
value_initial = ramp_data["value"]
value_final2 = ramp_data["value_final"]
interp = (time_subarray - start_time)/(end_time - start_time)
return value_initial*(1.0 - interp) + value_final2*interp
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/ramps.py#L477-L483
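A usage sketch, assuming analog_linear2_ramp (from rampage.ramps) is in scope. The ramp_data dict is a made-up example; note the function reads "value" and "value_final" from ramp_data, so the value_final argument itself is effectively unused.

import numpy as np

ramp_data = {"value": 0.0, "value_final": 5.0}
time_subarray = np.linspace(0.0, 1.0, 5)

values = analog_linear2_ramp(ramp_data, start_time=0.0, end_time=1.0,
                             value_final=5.0, time_subarray=time_subarray)
# -> array([0.  , 1.25, 2.5 , 3.75, 5.  ])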
|
linear regression
|
python
|
def _log_linear_interpolation(predictions):
"""
Returns averaged and re-normalized log probabilities
"""
log_probs = utils.average_arrays([p.log() for p in predictions])
# pylint: disable=invalid-unary-operand-type
return -log_probs.log_softmax()
|
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L1489-L1495
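A stand-alone NumPy sketch of the same idea with two made-up model distributions: average the log-probabilities, then re-normalize. Sockeye additionally negates the log-softmax because its beam search minimizes costs.

import numpy as np

p1 = np.array([0.7, 0.2, 0.1])
p2 = np.array([0.5, 0.3, 0.2])

avg_log = (np.log(p1) + np.log(p2)) / 2.0
combined = np.exp(avg_log - avg_log.max())
combined /= combined.sum()            # the re-normalized ensemble distribution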
|
linear regression
|
python
|
def plsda(df, a, b, n_components=2, mean_center=False, scale=True, fcol=None, ecol=None, marker='o', markersize=40, threshold=None, label_threshold=None, label_weights=None, label_scores=None, return_df=False, show_covariance_ellipse=False, *args, **kwargs):
"""
Partial Least Squares Regression Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a partial least squares regression (PLS-R) on the supplied dataframe ``df``
against the group membership defined by ``a`` and ``b``, selecting the first ``n_components``.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
Resulting scores and weights plots are generated showing the distribution of samples within the resulting
PLS space. Sample color and marker size can be controlled by label, lookup and calculation (lambda) to
generate complex plots highlighting sample separation.
For further information see the examples included in the documentation.
:param df: Pandas `DataFrame`
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: `int` number of components to return
:param mean_center: `bool` mean center the data before fitting the model
:param scale: `bool` scale the data before fitting the model (default ``True``)
:param fcol: `dict` of indexers:colors, where colors are hex colors or matplotlib color names
:param ecol: `dict` of indexers:colors, where colors are hex colors or matplotlib color names
:param marker: `str` matplotlib marker name (default "o")
:param markersize: `int` or `callable` which returns an `int` for a given indexer
:param threshold: `float` weight threshold for plot (horizontal line)
:param label_threshold: `float` weight threshold over which to draw labels
:param label_weights: `list` of `str`
:param label_scores: `list` of `str`
:param return_df: `bool` return the resulting scores, weights as pandas DataFrames
:param show_covariance_ellipse: `bool` show the covariance ellipse around each group
:param args: additional arguments passed to analysis.plsda
:param kwargs: additional arguments passed to analysis.plsda
:return:
"""
scores, weights, loadings = analysis.plsda(df, a, b, n_components=n_components, scale=scale, *args, **kwargs)
scores_ax = _pca_scores(scores, fcol=fcol, ecol=ecol, marker=marker, markersize=markersize, label_scores=label_scores, show_covariance_ellipse=show_covariance_ellipse)
weights_ax = []
for pc in range(0, weights.shape[1]):
weights_ax.append( _pca_weights(weights, pc, threshold=threshold, label_threshold=label_threshold, label_weights=label_weights) )
if return_df:
return scores, weights
else:
return scores_ax, weights_ax
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L312-L358
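A minimal two-group PLS sketch using scikit-learn directly; padua's plsda wraps the same decomposition with DataFrame handling and plotting. The data are made up, and group membership is encoded as 0/1 in y to mirror the a/b column selectors.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

X = np.vstack([np.random.randn(20, 10) + 1.0,    # group a
               np.random.randn(20, 10) - 1.0])   # group b
y = np.array([0] * 20 + [1] * 20)

pls = PLSRegression(n_components=2, scale=True)
x_scores, _ = pls.fit_transform(X, y)            # sample scores in PLS space
weights = pls.x_weights_                         # variable weights per component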
|
linear regression
|
python
|
def add_linear_obj(model):
"""Add a linear version of a minimal medium to the model solver.
Changes the optimization objective to finding the growth medium requiring
the smallest total import flux::
minimize sum |r_i| for r_i in import_reactions
Arguments
---------
model : cobra.Model
The model to modify.
"""
coefs = {}
for rxn in find_boundary_types(model, "exchange"):
export = len(rxn.reactants) == 1
if export:
coefs[rxn.reverse_variable] = 1
else:
coefs[rxn.forward_variable] = 1
model.objective.set_linear_coefficients(coefs)
model.objective.direction = "min"
|
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/medium/minimal_medium.py#L17-L38
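A hedged usage sketch: pin the growth of a bundled test model, then ask for the smallest total import flux (without a growth constraint the minimum is trivially zero). The loader name and the biomass reaction id are assumptions that vary between cobrapy versions.

import cobra
from cobra.medium.minimal_medium import add_linear_obj

model = cobra.io.load_model("textbook")                            # assumed loader
model.reactions.get_by_id("Biomass_Ecoli_core").lower_bound = 0.1  # assumed reaction id
add_linear_obj(model)
solution = model.optimize()
print(solution.objective_value)   # total exchange influx needed for that growth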
|
linear regression
|
python
|
def linear(self, limits=None, k=5):
"""Returns an ndarray of linear breaks."""
start, stop = limits or (self.minval, self.maxval)
return np.linspace(start, stop, k)
|
https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/models.py#L81-L84
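The breaks are plain linspace output; a quick illustration with assumed limits of 0 and 100:

import numpy as np

np.linspace(0, 100, 5)   # -> array([  0.,  25.,  50.,  75., 100.])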
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.