Unnamed: 0 (int64, 0-2.44k) | repo (string, 32-81 chars) | hash (string, 40 chars) | diff (string, 113-1.17k chars) | old_path (string, 5-84 chars) | rewrite (string, 34-79 chars) | initial_state (string, 75-980 chars) | final_state (string, 76-980 chars)
---|---|---|---|---|---|---|---|
1,800 |
https://:@github.com/Infinidat/infi.pypi_manager.git
|
d7b669eaf58a03e887135683264dd475d67f2e39
|
@@ -115,7 +115,7 @@ def mirror_file(repository_config, filename, package_name, package_version, meta
data.update(metadata)
for key, value in list(data.items()):
- if isinstance(value, str):
+ if not isinstance(value, str):
data[key] = value.encode("utf-8")
repository = repository_config["repository"]
|
src/infi/pypi_manager/mirror/mirror_all.py
|
ReplaceText(target='not ' @(118,11)->(118,11))
|
def mirror_file(repository_config, filename, package_name, package_version, meta
data.update(metadata)
for key, value in list(data.items()):
if isinstance(value, str):
data[key] = value.encode("utf-8")
repository = repository_config["repository"]
|
def mirror_file(repository_config, filename, package_name, package_version, meta
data.update(metadata)
for key, value in list(data.items()):
if not isinstance(value, str):
data[key] = value.encode("utf-8")
repository = repository_config["repository"]
|
1,801 |
https://:@github.com/novopl/fabops.git
|
ea5c44f518554a239afd8650d216ea1f7e23db9e
|
@@ -105,7 +105,7 @@ def lint(exclude, include_untracked, commit_only, pretend):
exclude = list(exclude) # Convert from tuple to easily concatenate.
if commit_only:
- include = ['*' + f for f in git.staged() if f.endswith('.py')]
+ include += ['*' + f for f in git.staged() if f.endswith('.py')]
exclude += git.ignore()
if not include_untracked:
|
src/peltak/commands/lint.py
|
ReplaceText(target='+=' @(108,16)->(108,17))
|
def lint(exclude, include_untracked, commit_only, pretend):
exclude = list(exclude) # Convert from tuple to easily concatenate.
if commit_only:
include = ['*' + f for f in git.staged() if f.endswith('.py')]
exclude += git.ignore()
if not include_untracked:
|
def lint(exclude, include_untracked, commit_only, pretend):
exclude = list(exclude) # Convert from tuple to easily concatenate.
if commit_only:
include += ['*' + f for f in git.staged() if f.endswith('.py')]
exclude += git.ignore()
if not include_untracked:
|
1,802 |
https://:@github.com/novopl/fabops.git
|
94bb4222594c1be8cb1d760d21abccf8393fbe57
|
@@ -95,7 +95,7 @@ def finish():
common.git_branch_delete(branch.name)
common.git_prune()
- common.git_checkout(develop)
+ common.git_checkout(master)
def merged():
|
src/peltak/extra/gitflow/logic/release.py
|
ReplaceText(target='master' @(98,24)->(98,31))
|
def finish():
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(develop)
def merged():
|
def finish():
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(master)
def merged():
|
1,803 |
https://:@github.com/novopl/fabops.git
|
4ace52dda2d85cb734ee74e126530e805777a000
|
@@ -72,7 +72,7 @@ def add_hooks():
# Write pre-push hook
log.info("Adding pre-push hook: <33>{}", push_hook)
- fs.write_file(commit_hook, util.remove_indent('''
+ fs.write_file(push_hook, util.remove_indent('''
#!/bin/bash
PATH="/opt/local/libexec/gnubin:$PATH"
|
src/peltak/logic/git.py
|
ReplaceText(target='push_hook' @(75,18)->(75,29))
|
def add_hooks():
# Write pre-push hook
log.info("Adding pre-push hook: <33>{}", push_hook)
fs.write_file(commit_hook, util.remove_indent('''
#!/bin/bash
PATH="/opt/local/libexec/gnubin:$PATH"
|
def add_hooks():
# Write pre-push hook
log.info("Adding pre-push hook: <33>{}", push_hook)
fs.write_file(push_hook, util.remove_indent('''
#!/bin/bash
PATH="/opt/local/libexec/gnubin:$PATH"
|
1,804 |
https://:@github.com/ome/omero-scripts.git
|
bcf0a97fecbc46da12cd0fc1dc679d6eb1a002d4
|
@@ -186,7 +186,7 @@ def createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end, width,
plane_def = omero.romio.PlaneDef()
plane_def.z = pro_start
plane_def.t = time
- plane_def = re.renderCompressed(plane_def)
+ rendered_img = re.renderCompressed(plane_def)
# create images and resize, add to list
image = Image.open(io.BytesIO(rendered_img))
resized_image = imgUtil.resizeImage(image, width, height)
|
omero/figure_scripts/Movie_Figure.py
|
ReplaceText(target='rendered_img' @(189,20)->(189,29))
|
def createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end, width,
plane_def = omero.romio.PlaneDef()
plane_def.z = pro_start
plane_def.t = time
plane_def = re.renderCompressed(plane_def)
# create images and resize, add to list
image = Image.open(io.BytesIO(rendered_img))
resized_image = imgUtil.resizeImage(image, width, height)
|
def createmovie_figure(conn, pixel_ids, t_indexes, z_start, z_end, width,
plane_def = omero.romio.PlaneDef()
plane_def.z = pro_start
plane_def.t = time
rendered_img = re.renderCompressed(plane_def)
# create images and resize, add to list
image = Image.open(io.BytesIO(rendered_img))
resized_image = imgUtil.resizeImage(image, width, height)
|
1,805 |
https://:@github.com/MozillaSecurity/grizzly.git
|
5ff196df7b3e4cb6fafece51f3a92dd6fe240639
|
@@ -71,7 +71,7 @@ def test_adapter_04(tmp_path):
# path to file
file1 = (tmp_path / "test1.txt")
file1.touch()
- found = tuple(SimpleAdapter.scan_path(str(tmp_path)))
+ found = tuple(SimpleAdapter.scan_path(str(file1)))
assert len(found) == 1
assert str(file1) in found
# path to directory
|
grizzly/common/test_adapter.py
|
ReplaceText(target='file1' @(74,46)->(74,54))
|
def test_adapter_04(tmp_path):
# path to file
file1 = (tmp_path / "test1.txt")
file1.touch()
found = tuple(SimpleAdapter.scan_path(str(tmp_path)))
assert len(found) == 1
assert str(file1) in found
# path to directory
|
def test_adapter_04(tmp_path):
# path to file
file1 = (tmp_path / "test1.txt")
file1.touch()
found = tuple(SimpleAdapter.scan_path(str(file1)))
assert len(found) == 1
assert str(file1) in found
# path to directory
|
1,806 |
https://:@github.com/CIMAC-CIDC/schemas.git
|
e7ecb7443f00d666cd8e67b7e643f5837fc2f8f2
|
@@ -743,7 +743,7 @@ def merge_artifact(
_set_data_format(ct, existing_artifact)
# return new object and the artifact that was merged
- return ct, artifact
+ return ct, existing_artifact
class InvalidMergeTargetException(ValueError):
"""Exception raised for target of merge_clinical_trial_metadata being non schema compliant."""
|
cidc_schemas/prism.py
|
ReplaceText(target='existing_artifact' @(746,15)->(746,23))
|
def merge_artifact(
_set_data_format(ct, existing_artifact)
# return new object and the artifact that was merged
return ct, artifact
class InvalidMergeTargetException(ValueError):
"""Exception raised for target of merge_clinical_trial_metadata being non schema compliant."""
|
def merge_artifact(
_set_data_format(ct, existing_artifact)
# return new object and the artifact that was merged
return ct, existing_artifact
class InvalidMergeTargetException(ValueError):
"""Exception raised for target of merge_clinical_trial_metadata being non schema compliant."""
|
1,807 |
https://:@github.com/CIMAC-CIDC/schemas.git
|
c7ddda9fbcc6fed237e11e3024e6ea610b08ecfa
|
@@ -535,7 +535,7 @@ class Template:
raise Exception(e)
changes.extend(chs)
- fs.extend(fs)
+ files.extend(fs)
return changes, files
|
cidc_schemas/template.py
|
ReplaceText(target='files' @(538,12)->(538,14))
|
class Template:
raise Exception(e)
changes.extend(chs)
fs.extend(fs)
return changes, files
|
class Template:
raise Exception(e)
changes.extend(chs)
files.extend(fs)
return changes, files
|
1,808 |
https://:@github.com/ggstuart/pyEMIS.git
|
05b6ac16ca87ee3a9e5696ecc27b9bb4024cc7f1
|
@@ -29,7 +29,7 @@ class SimpleProfile(AnalysisBase):
result = {}
pred = self.baseline_model.prediction(this_week)
for p in percentiles:
- result[p] = pred + self.baseline_model.percentile_in_place(p, this_week)
+ result[p] = pred + self.baseline_model.percentile_in_place(this_week, p)
return result
|
lib/analysis/profile.py
|
ArgSwap(idxs=0<->1 @(32,31)->(32,70))
|
class SimpleProfile(AnalysisBase):
result = {}
pred = self.baseline_model.prediction(this_week)
for p in percentiles:
result[p] = pred + self.baseline_model.percentile_in_place(p, this_week)
return result
|
class SimpleProfile(AnalysisBase):
result = {}
pred = self.baseline_model.prediction(this_week)
for p in percentiles:
result[p] = pred + self.baseline_model.percentile_in_place(this_week, p)
return result
|
1,809 |
https://:@github.com/scipion-em/scipion-em.git
|
aab3b1e42dbced18cebf4b6e8fd15265288693d0
|
@@ -122,7 +122,7 @@ class ProtFrealignClassify(ProtFrealignBase, ProtClassify3D):
for ref in range(1, self.numberOfRef + 1):
refVol = self._getFileName('ref_vol_class', iter=iter, ref=ref) # reference volume of the step.
iterVol = self._getFileName('iter_vol_class', iter=iter, ref=ref) # refined volumes of the step
- if iter != 1:
+ if iter == 1:
copyFile(volFn, iterVol) #Copy the initial volume in the current directory.
else:
self._splitParFile(iter, ref, cpusRef[ref-1])
|
pwem/packages/brandeis/protocol_ml_classification.py
|
ReplaceText(target='==' @(125,20)->(125,22))
|
class ProtFrealignClassify(ProtFrealignBase, ProtClassify3D):
for ref in range(1, self.numberOfRef + 1):
refVol = self._getFileName('ref_vol_class', iter=iter, ref=ref) # reference volume of the step.
iterVol = self._getFileName('iter_vol_class', iter=iter, ref=ref) # refined volumes of the step
if iter != 1:
copyFile(volFn, iterVol) #Copy the initial volume in the current directory.
else:
self._splitParFile(iter, ref, cpusRef[ref-1])
|
class ProtFrealignClassify(ProtFrealignBase, ProtClassify3D):
for ref in range(1, self.numberOfRef + 1):
refVol = self._getFileName('ref_vol_class', iter=iter, ref=ref) # reference volume of the step.
iterVol = self._getFileName('iter_vol_class', iter=iter, ref=ref) # refined volumes of the step
if iter == 1:
copyFile(volFn, iterVol) #Copy the initial volume in the current directory.
else:
self._splitParFile(iter, ref, cpusRef[ref-1])
|
1,810 |
https://:@github.com/scipion-em/scipion-em.git
|
79434578b0e8d5d29e900b60aa9fadddcc75d1bf
|
@@ -177,7 +177,7 @@ class ProtPrime(em.ProtInitialVolume):
vol.append(aux)
self._defineOutputs(outputVolumes=vol)
- self._defineSourceRelation(vol, self.inputClasses)
+ self._defineSourceRelation(self.inputClasses, vol)
#--------------------------- INFO functions --------------------------------------------
def _summary(self):
|
pwem/packages/simple/protocol_prime.py
|
ArgSwap(idxs=0<->1 @(180,8)->(180,34))
|
class ProtPrime(em.ProtInitialVolume):
vol.append(aux)
self._defineOutputs(outputVolumes=vol)
self._defineSourceRelation(vol, self.inputClasses)
#--------------------------- INFO functions --------------------------------------------
def _summary(self):
|
class ProtPrime(em.ProtInitialVolume):
vol.append(aux)
self._defineOutputs(outputVolumes=vol)
self._defineSourceRelation(self.inputClasses, vol)
#--------------------------- INFO functions --------------------------------------------
def _summary(self):
|
1,811 |
https://:@github.com/scipion-em/scipion-em.git
|
14b6a308b1710c111cdc515b1d4367b0d6289c77
|
@@ -121,7 +121,7 @@ class ProtSummovie(ProtProcessMovies):
# special case is mrc but ends in mrcs
inMovieName= os.path.join(movieFolder,movieName)
if movieName.endswith('.mrc'):
- movieNameAux = inMovieName
+ movieNameAux = movieName
elif movieName.endswith('.mrcs'):
movieNameAux= pwutils.replaceExt(inMovieName, "mrc")
createLink(inMovieName,movieNameAux)
|
pwem/packages/grigoriefflab/protocol_summovie.py
|
ReplaceText(target='movieName' @(124,27)->(124,38))
|
class ProtSummovie(ProtProcessMovies):
# special case is mrc but ends in mrcs
inMovieName= os.path.join(movieFolder,movieName)
if movieName.endswith('.mrc'):
movieNameAux = inMovieName
elif movieName.endswith('.mrcs'):
movieNameAux= pwutils.replaceExt(inMovieName, "mrc")
createLink(inMovieName,movieNameAux)
|
class ProtSummovie(ProtProcessMovies):
# special case is mrc but ends in mrcs
inMovieName= os.path.join(movieFolder,movieName)
if movieName.endswith('.mrc'):
movieNameAux = movieName
elif movieName.endswith('.mrcs'):
movieNameAux= pwutils.replaceExt(inMovieName, "mrc")
createLink(inMovieName,movieNameAux)
|
1,812 |
https://:@github.com/scipion-em/scipion-em.git
|
9c9bbe991ab901a16c11eece48cb54468b1b663a
|
@@ -90,7 +90,7 @@ class ChimeraViewerBase(Viewer):
if _showVol.hasOrigin():
x, y, z = _showVol.getOrigin().getShifts()
else:
- x, y, z = outputVol.getOrigin(force=True).getShifts()
+ x, y, z = _showVol.getOrigin(force=True).getShifts()
f.write("volume #1 style surface voxelSize %f origin "
"%0.2f,%0.2f,%0.2f\n"
|
pwem/packages/chimera/viewer.py
|
ReplaceText(target='_showVol' @(93,26)->(93,35))
|
class ChimeraViewerBase(Viewer):
if _showVol.hasOrigin():
x, y, z = _showVol.getOrigin().getShifts()
else:
x, y, z = outputVol.getOrigin(force=True).getShifts()
f.write("volume #1 style surface voxelSize %f origin "
"%0.2f,%0.2f,%0.2f\n"
|
class ChimeraViewerBase(Viewer):
if _showVol.hasOrigin():
x, y, z = _showVol.getOrigin().getShifts()
else:
x, y, z = _showVol.getOrigin(force=True).getShifts()
f.write("volume #1 style surface voxelSize %f origin "
"%0.2f,%0.2f,%0.2f\n"
|
1,813 |
https://:@github.com/scipion-em/scipion-em.git
|
2d9ba7dfcd59de3b77e19cb23d5fa2ad34c80d55
|
@@ -110,7 +110,7 @@ class scipionMMCIFIO(MMCIFIO):
hetfield, resseq, icode = residue.get_id()
if hetfield == " ":
residue_type = "ATOM"
- label_seq_id = str(residue_number)
+ label_seq_id = str(resseq)
residue_number += 1
else:
residue_type = "HETATM"
|
pwem/convert/atom_struct.py
|
ReplaceText(target='resseq' @(113,47)->(113,61))
|
class scipionMMCIFIO(MMCIFIO):
hetfield, resseq, icode = residue.get_id()
if hetfield == " ":
residue_type = "ATOM"
label_seq_id = str(residue_number)
residue_number += 1
else:
residue_type = "HETATM"
|
class scipionMMCIFIO(MMCIFIO):
hetfield, resseq, icode = residue.get_id()
if hetfield == " ":
residue_type = "ATOM"
label_seq_id = str(resseq)
residue_number += 1
else:
residue_type = "HETATM"
|
1,814 |
https://:@github.com/scipion-em/scipion-em.git
|
18a1a11d22aa9a2e8e046e685264fb8d53a96af5
|
@@ -419,7 +419,7 @@ class ImageHandler(object):
def createEmptyImage(cls, fnOut, xDim=1, yDim=1, zDim=1, nDim=1,
dataType=None):
dt = dataType or cls.DT_FLOAT
- xmippLib.createEmptyFile(fnOut, xDim, yDim, zDim, nDim, dataType)
+ xmippLib.createEmptyFile(fnOut, xDim, yDim, zDim, nDim, dt)
@classmethod
def isImageFile(cls, imgFn):
|
pwem/convert/image_handler.py
|
ReplaceText(target='dt' @(422,64)->(422,72))
|
class ImageHandler(object):
def createEmptyImage(cls, fnOut, xDim=1, yDim=1, zDim=1, nDim=1,
dataType=None):
dt = dataType or cls.DT_FLOAT
xmippLib.createEmptyFile(fnOut, xDim, yDim, zDim, nDim, dataType)
@classmethod
def isImageFile(cls, imgFn):
|
class ImageHandler(object):
def createEmptyImage(cls, fnOut, xDim=1, yDim=1, zDim=1, nDim=1,
dataType=None):
dt = dataType or cls.DT_FLOAT
xmippLib.createEmptyFile(fnOut, xDim, yDim, zDim, nDim, dt)
@classmethod
def isImageFile(cls, imgFn):
|
1,815 |
https://:@github.com/gedaskir/qmeq.git
|
347cea42998433ead153e7701cf69caa5086ee13
|
@@ -364,8 +364,8 @@ class LeadsTunneling(object):
self.Tba0 = construct_Tba(tleadsp, self.stateind, self.mtype, self.Tba0)
if updateq:
for j0 in tleadsp:
- try: self.tleads[j0] += tleadsp[j0] # if tleads[j0] != 0:
- except: self.tleads.update({j0:tleads[j0]}) # if tleads[j0] != 0:
+ try: self.tleads[j0] += tleadsp[j0] # if tleads[j0] != 0:
+ except: self.tleads.update({j0:tleadsp[j0]}) # if tleads[j0] != 0:
def change(self, tleads=None, mulst=None, tlst=None, dlst=None, updateq=True):
"""
|
qmeq/leadstun.py
|
ReplaceText(target='tleadsp' @(368,51)->(368,57))
|
class LeadsTunneling(object):
self.Tba0 = construct_Tba(tleadsp, self.stateind, self.mtype, self.Tba0)
if updateq:
for j0 in tleadsp:
try: self.tleads[j0] += tleadsp[j0] # if tleads[j0] != 0:
except: self.tleads.update({j0:tleads[j0]}) # if tleads[j0] != 0:
def change(self, tleads=None, mulst=None, tlst=None, dlst=None, updateq=True):
"""
|
class LeadsTunneling(object):
self.Tba0 = construct_Tba(tleadsp, self.stateind, self.mtype, self.Tba0)
if updateq:
for j0 in tleadsp:
try: self.tleads[j0] += tleadsp[j0] # if tleads[j0] != 0:
except: self.tleads.update({j0:tleadsp[j0]}) # if tleads[j0] != 0:
def change(self, tleads=None, mulst=None, tlst=None, dlst=None, updateq=True):
"""
|
1,816 |
https://:@github.com/caleb-easterly/metaquant.git
|
745e72d4ef68c944db5395e09bd85070b8e2652a
|
@@ -17,7 +17,7 @@ def common_hierarchical_analysis(db, df, annot_colname, samp_grps, min_peptides,
# filter
int_all_ranks_filt = stats.filter_min_observed(intensity_all_ranks, threshold, samp_grps)
- int_all_ranks_filt['id'] = intensity_all_ranks.index
+ int_all_ranks_filt['id'] = int_all_ranks_filt.index
# calculate means
int_w_means = stats.calc_means(int_all_ranks_filt, samp_grps)
|
metaquant/analysis/common.py
|
ReplaceText(target='int_all_ranks_filt' @(20,31)->(20,50))
|
def common_hierarchical_analysis(db, df, annot_colname, samp_grps, min_peptides,
# filter
int_all_ranks_filt = stats.filter_min_observed(intensity_all_ranks, threshold, samp_grps)
int_all_ranks_filt['id'] = intensity_all_ranks.index
# calculate means
int_w_means = stats.calc_means(int_all_ranks_filt, samp_grps)
|
def common_hierarchical_analysis(db, df, annot_colname, samp_grps, min_peptides,
# filter
int_all_ranks_filt = stats.filter_min_observed(intensity_all_ranks, threshold, samp_grps)
int_all_ranks_filt['id'] = int_all_ranks_filt.index
# calculate means
int_w_means = stats.calc_means(int_all_ranks_filt, samp_grps)
|
1,817 |
https://:@github.com/matthiask/django-keyed-urls.git
|
85701797d220d26d175a834250969d86aa8a08c6
|
@@ -55,7 +55,7 @@ def get_url(key, language=None, fail_silently=False):
url = None
if url is None and not fail_silently:
- raise KeyDoesNotExist('No match found for key "%s".' % url)
+ raise KeyDoesNotExist('No match found for key "%s".' % key)
return None if url == _none_type else url
|
keyed_urls/__init__.py
|
ReplaceText(target='key' @(58,63)->(58,66))
|
def get_url(key, language=None, fail_silently=False):
url = None
if url is None and not fail_silently:
raise KeyDoesNotExist('No match found for key "%s".' % url)
return None if url == _none_type else url
|
def get_url(key, language=None, fail_silently=False):
url = None
if url is None and not fail_silently:
raise KeyDoesNotExist('No match found for key "%s".' % key)
return None if url == _none_type else url
|
1,818 |
https://:@github.com/sommalia/moco-wrapper.git
|
8463f534ca51588a6619f61f59bb5d0d064511ab
|
@@ -42,7 +42,7 @@ class Unit(MWRAPBase):
if value is not None:
params[key] = value
- if sort_order is not None:
+ if sort_by is not None:
params["sort_by"] = "{} {}".format(sort_by, sort_order)
return self._moco.get(API_PATH["unit_getlist"], params=params)
|
moco_wrapper/models/unit.py
|
ReplaceText(target='sort_by' @(45,11)->(45,21))
|
class Unit(MWRAPBase):
if value is not None:
params[key] = value
if sort_order is not None:
params["sort_by"] = "{} {}".format(sort_by, sort_order)
return self._moco.get(API_PATH["unit_getlist"], params=params)
|
class Unit(MWRAPBase):
if value is not None:
params[key] = value
if sort_by is not None:
params["sort_by"] = "{} {}".format(sort_by, sort_order)
return self._moco.get(API_PATH["unit_getlist"], params=params)
|
1,819 |
https://:@github.com/scupid-admin/morph-python-sdk.git
|
0739734905cb5e49f54807792dc9426884be6eae
|
@@ -11,7 +11,7 @@ def read_and_set_attribute(event, context):
"9711xxx400": True,
"8130xxx599": True
}
- phone_number = context["userVariables"]["_PHONE_NUMBER"]
+ phone_number = event["userVariables"]["_PHONE_NUMBER"]
if phone_number is None:
phone_number = ""
else:
|
morph/examples.py
|
ReplaceText(target='event' @(14,19)->(14,26))
|
def read_and_set_attribute(event, context):
"9711xxx400": True,
"8130xxx599": True
}
phone_number = context["userVariables"]["_PHONE_NUMBER"]
if phone_number is None:
phone_number = ""
else:
|
def read_and_set_attribute(event, context):
"9711xxx400": True,
"8130xxx599": True
}
phone_number = event["userVariables"]["_PHONE_NUMBER"]
if phone_number is None:
phone_number = ""
else:
|
1,820 |
https://:@github.com/Scorpi000/QuantStudio.git
|
c158be9b67b0058be35cc3b7e8b1c04e3c06d252
|
@@ -315,7 +315,7 @@ class FactorTurnover(BaseModule):
HTML += "</ul>"
else:
HTML = ""
- iHTML += self._Output["统计数据"].to_html(formatters=[_QS_formatPandasPercentage]*5)
+ iHTML = self._Output["统计数据"].to_html(formatters=[_QS_formatPandasPercentage]*5)
Pos = iHTML.find(">")
HTML += iHTML[:Pos]+' align="center"'+iHTML[Pos:]
Fig = self.genMatplotlibFig()
|
QuantStudio/BackTest/SectionFactor/Correlation.py
|
ReplaceText(target='=' @(318,14)->(318,16))
|
class FactorTurnover(BaseModule):
HTML += "</ul>"
else:
HTML = ""
iHTML += self._Output["统计数据"].to_html(formatters=[_QS_formatPandasPercentage]*5)
Pos = iHTML.find(">")
HTML += iHTML[:Pos]+' align="center"'+iHTML[Pos:]
Fig = self.genMatplotlibFig()
|
class FactorTurnover(BaseModule):
HTML += "</ul>"
else:
HTML = ""
iHTML = self._Output["统计数据"].to_html(formatters=[_QS_formatPandasPercentage]*5)
Pos = iHTML.find(">")
HTML += iHTML[:Pos]+' align="center"'+iHTML[Pos:]
Fig = self.genMatplotlibFig()
|
1,821 |
https://:@github.com/Scorpi000/QuantStudio.git
|
2d2788d5d5192b8b90868c30e247ef6533fb1164
|
@@ -667,7 +667,7 @@ class SQLDB(QSSQLObject, WritableFactorDB):
if (DataLenMax!=DataLenMin).sum()>0:
self._QS_Logger.warning("'%s' 在写入因子 '%s' 时出现因子值长度不一致的情况, 将填充缺失!" % (self.Name, str(data.columns.tolist())))
for i in range(data.shape[0]):
- iDataLen = DataLen.iloc[i]
+ iDataLen = DataLenMax.iloc[i]
if iDataLen>0:
iData = data.iloc[i].apply(lambda x: [None]*(iDataLen-len(x))+x if isinstance(x, list) else [x]*iDataLen).tolist()
NewData.extend(zip(*iData))
|
QuantStudio/FactorDataBase/SQLDB.py
|
ReplaceText(target='DataLenMax' @(670,23)->(670,30))
|
class SQLDB(QSSQLObject, WritableFactorDB):
if (DataLenMax!=DataLenMin).sum()>0:
self._QS_Logger.warning("'%s' 在写入因子 '%s' 时出现因子值长度不一致的情况, 将填充缺失!" % (self.Name, str(data.columns.tolist())))
for i in range(data.shape[0]):
iDataLen = DataLen.iloc[i]
if iDataLen>0:
iData = data.iloc[i].apply(lambda x: [None]*(iDataLen-len(x))+x if isinstance(x, list) else [x]*iDataLen).tolist()
NewData.extend(zip(*iData))
|
class SQLDB(QSSQLObject, WritableFactorDB):
if (DataLenMax!=DataLenMin).sum()>0:
self._QS_Logger.warning("'%s' 在写入因子 '%s' 时出现因子值长度不一致的情况, 将填充缺失!" % (self.Name, str(data.columns.tolist())))
for i in range(data.shape[0]):
iDataLen = DataLenMax.iloc[i]
if iDataLen>0:
iData = data.iloc[i].apply(lambda x: [None]*(iDataLen-len(x))+x if isinstance(x, list) else [x]*iDataLen).tolist()
NewData.extend(zip(*iData))
|
1,822 |
https://:@github.com/servir-mekong/hydra-floods.git
|
f7de9b4516deb7ac5055624fb9012596ac25374c
|
@@ -129,7 +129,7 @@ def globalOtsu(collection,target_date,region,
imageEdge = target.mask(edges)
histogram_image = target.mask(edgeBuffer)
- histogram = histogram_image.reduceRegion(ee.Reducer.histogram(255, 2)\
+ histogram = target.reduceRegion(ee.Reducer.histogram(255, 2)\
.combine('mean', None, True)\
.combine('variance', None,True),sampleRegion,reductionScale,bestEffort=True)
|
hydrafloods/geeutils.py
|
ReplaceText(target='target' @(132,17)->(132,32))
|
def globalOtsu(collection,target_date,region,
imageEdge = target.mask(edges)
histogram_image = target.mask(edgeBuffer)
histogram = histogram_image.reduceRegion(ee.Reducer.histogram(255, 2)\
.combine('mean', None, True)\
.combine('variance', None,True),sampleRegion,reductionScale,bestEffort=True)
|
def globalOtsu(collection,target_date,region,
imageEdge = target.mask(edges)
histogram_image = target.mask(edgeBuffer)
histogram = target.reduceRegion(ee.Reducer.histogram(255, 2)\
.combine('mean', None, True)\
.combine('variance', None,True),sampleRegion,reductionScale,bestEffort=True)
|
1,823 |
https://:@github.com/pytorchbearer/visual.git
|
c2d6f39bebb1754a92d3499ff0a5eca64b206ca4
|
@@ -265,7 +265,7 @@ class CPPNImage(Image):
x_coord_range = torch.linspace(-r, r, steps=self.width)
y_coord_range = torch.linspace(-r, r, steps=self.height)
- x, y = torch.meshgrid(x_coord_range, y_coord_range)
+ x, y = torch.meshgrid(y_coord_range, x_coord_range)
self.loc = nn.Parameter(torch.stack((x, y), dim=0).unsqueeze(0), requires_grad=False)
|
visual/images.py
|
ArgSwap(idxs=0<->1 @(268,15)->(268,29))
|
class CPPNImage(Image):
x_coord_range = torch.linspace(-r, r, steps=self.width)
y_coord_range = torch.linspace(-r, r, steps=self.height)
x, y = torch.meshgrid(x_coord_range, y_coord_range)
self.loc = nn.Parameter(torch.stack((x, y), dim=0).unsqueeze(0), requires_grad=False)
|
class CPPNImage(Image):
x_coord_range = torch.linspace(-r, r, steps=self.width)
y_coord_range = torch.linspace(-r, r, steps=self.height)
x, y = torch.meshgrid(y_coord_range, x_coord_range)
self.loc = nn.Parameter(torch.stack((x, y), dim=0).unsqueeze(0), requires_grad=False)
|
1,824 |
https://:@github.com/ocsmit/raster-indices-calc.git
|
d8eb85db236c048cccf1506180e37d1bbd2734ff
|
@@ -473,7 +473,7 @@ def NDBaI(landsat_dir, ndbai_out):
swir1_band = SWIR1_path.GetRasterBand(1).ReadAsArray().astype(np.float32)
TIR_path = gdal.Open(os.path.join(landsat_dir, tir[0]))
tir_band = TIR_path.GetRasterBand(1).ReadAsArray().astype(np.float32)
- snap = gdal.Open(os.path.join(landsat_dir, swir1[0]))
+ snap = gdal.Open(os.path.join(landsat_dir, tir[0]))
# Perform Calculation
ndbai = ((swir1_band - tir_band) / (swir1_band + tir_band))
|
rindcalc/index_utils.py
|
ReplaceText(target='tir' @(476,47)->(476,52))
|
def NDBaI(landsat_dir, ndbai_out):
swir1_band = SWIR1_path.GetRasterBand(1).ReadAsArray().astype(np.float32)
TIR_path = gdal.Open(os.path.join(landsat_dir, tir[0]))
tir_band = TIR_path.GetRasterBand(1).ReadAsArray().astype(np.float32)
snap = gdal.Open(os.path.join(landsat_dir, swir1[0]))
# Perform Calculation
ndbai = ((swir1_band - tir_band) / (swir1_band + tir_band))
|
def NDBaI(landsat_dir, ndbai_out):
swir1_band = SWIR1_path.GetRasterBand(1).ReadAsArray().astype(np.float32)
TIR_path = gdal.Open(os.path.join(landsat_dir, tir[0]))
tir_band = TIR_path.GetRasterBand(1).ReadAsArray().astype(np.float32)
snap = gdal.Open(os.path.join(landsat_dir, tir[0]))
# Perform Calculation
ndbai = ((swir1_band - tir_band) / (swir1_band + tir_band))
|
1,825 |
https://:@github.com/spacegraphcats/spacegraphcats.git
|
99052c4fbbebc852ccc6e6eca6f3732b054ed274
|
@@ -75,7 +75,7 @@ def load_and_compute_augg(project):
changed = True
d = 1
print("Augmenting", end=" ", flush=True)
- while changed and d <= project.radius:
+ while changed and d < project.radius:
if d in augs:
print("({})".format(d), end=" ", flush=True)
with open(augname.format(d), 'r') as f:
|
build-catlas.py
|
ReplaceText(target='<' @(78,24)->(78,26))
|
def load_and_compute_augg(project):
changed = True
d = 1
print("Augmenting", end=" ", flush=True)
while changed and d <= project.radius:
if d in augs:
print("({})".format(d), end=" ", flush=True)
with open(augname.format(d), 'r') as f:
|
def load_and_compute_augg(project):
changed = True
d = 1
print("Augmenting", end=" ", flush=True)
while changed and d < project.radius:
if d in augs:
print("({})".format(d), end=" ", flush=True)
with open(augname.format(d), 'r') as f:
|
1,826 |
https://:@github.com/spacegraphcats/spacegraphcats.git
|
5267323b50570258de758732d04239fef6cc0a93
|
@@ -55,7 +55,7 @@ def main():
query_mh = query_sig.minhash
query_mh = query_mh.downsample_max_hash(frontier_mh)
- frontier_mh = query_mh.downsample_max_hash(query_mh)
+ frontier_mh = frontier_mh.downsample_max_hash(query_mh)
containment = query_mh.contained_by(frontier_mh)
similarity = query_mh.similarity(frontier_mh)
|
search/frontier_search_batch.py
|
ReplaceText(target='frontier_mh' @(58,22)->(58,30))
|
def main():
query_mh = query_sig.minhash
query_mh = query_mh.downsample_max_hash(frontier_mh)
frontier_mh = query_mh.downsample_max_hash(query_mh)
containment = query_mh.contained_by(frontier_mh)
similarity = query_mh.similarity(frontier_mh)
|
def main():
query_mh = query_sig.minhash
query_mh = query_mh.downsample_max_hash(frontier_mh)
frontier_mh = frontier_mh.downsample_max_hash(query_mh)
containment = query_mh.contained_by(frontier_mh)
similarity = query_mh.similarity(frontier_mh)
|
1,827 |
https://:@github.com/spacegraphcats/spacegraphcats.git
|
df62df921fd822029c1ae7a139f0d75c8b576295
|
@@ -120,7 +120,7 @@ def main(args=sys.argv[1:]):
if 2**ratio < 10:
new_node_set.add(node_id)
- if mh_size > 1:
+ if mh > 1:
n_merged += 1
merge_mh.merge(mh)
|
search/extract_nodes_by_shadow_ratio.py
|
ReplaceText(target='mh' @(123,15)->(123,22))
|
def main(args=sys.argv[1:]):
if 2**ratio < 10:
new_node_set.add(node_id)
if mh_size > 1:
n_merged += 1
merge_mh.merge(mh)
|
def main(args=sys.argv[1:]):
if 2**ratio < 10:
new_node_set.add(node_id)
if mh > 1:
n_merged += 1
merge_mh.merge(mh)
|
1,828 |
https://:@github.com/spacegraphcats/spacegraphcats.git
|
73bf57a0d3b45aa1c5f524e361676a1297e85db0
|
@@ -90,7 +90,7 @@ def main(args=sys.argv[1:]):
if 1:
terminal = set()
for subnode in dag[top_node_id]:
- mh = load_minhash(node_id, minhash_db)
+ mh = load_minhash(subnode, minhash_db)
if mh:
terminal.update(find_terminal_nodes(subnode, args.maxsize))
|
search/extract_nodes_by_shadow_ratio.py
|
ReplaceText(target='subnode' @(93,30)->(93,37))
|
def main(args=sys.argv[1:]):
if 1:
terminal = set()
for subnode in dag[top_node_id]:
mh = load_minhash(node_id, minhash_db)
if mh:
terminal.update(find_terminal_nodes(subnode, args.maxsize))
|
def main(args=sys.argv[1:]):
if 1:
terminal = set()
for subnode in dag[top_node_id]:
mh = load_minhash(subnode, minhash_db)
if mh:
terminal.update(find_terminal_nodes(subnode, args.maxsize))
|
1,829 |
https://:@github.com/podhmo/cssdiff.git
|
fbe359fe1837b98694e9c83a96af2ed7cde53083
|
@@ -108,7 +108,7 @@ def difference(s1, s2, op="+", iterate=lambda s: sorted(s.items())):
if another_value is None:
d[style].append(add(name, value))
elif value != another_value:
- d[style].append(change(name, value, another_value))
+ d[style].append(change(name, another_value, value))
return d
|
cssdiff/__init__.py
|
ArgSwap(idxs=1<->2 @(111,32)->(111,38))
|
def difference(s1, s2, op="+", iterate=lambda s: sorted(s.items())):
if another_value is None:
d[style].append(add(name, value))
elif value != another_value:
d[style].append(change(name, value, another_value))
return d
|
def difference(s1, s2, op="+", iterate=lambda s: sorted(s.items())):
if another_value is None:
d[style].append(add(name, value))
elif value != another_value:
d[style].append(change(name, another_value, value))
return d
|
1,830 |
https://:@github.com/bodylabs/capysule.git
|
d28605af4e7b127dd3e823252bb817666e41fe00
|
@@ -10,4 +10,4 @@ class Collection(WrenCollection):
if response.status_code == requests.codes.not_found and response.json() == {'message': 'Could not find resource'}:
raise NotFound(response.text)
else:
- super(self, Collection).handle_error(response)
+ super(Collection, self).handle_error(response)
|
capysule/collection.py
|
ArgSwap(idxs=0<->1 @(13,12)->(13,17))
|
class Collection(WrenCollection):
if response.status_code == requests.codes.not_found and response.json() == {'message': 'Could not find resource'}:
raise NotFound(response.text)
else:
super(self, Collection).handle_error(response)
|
class Collection(WrenCollection):
if response.status_code == requests.codes.not_found and response.json() == {'message': 'Could not find resource'}:
raise NotFound(response.text)
else:
super(Collection, self).handle_error(response)
|
1,831 |
https://:@github.com/kaaengine/kaa.git
|
172bc5493ac61b92b4acf49ff6e124f10b6e9c2c
|
@@ -79,7 +79,7 @@ class DemoScene(Scene):
z_index=10,
))
- self.spawn_timer = Timer(20, self._spawn_heartbeat,
+ self.spawn_timer = Timer(self._spawn_heartbeat, 20,
single_shot=False)
self.spawn_timer.start()
|
demos/physics3/main.py
|
ArgSwap(idxs=0<->1 @(82,27)->(82,32))
|
class DemoScene(Scene):
z_index=10,
))
self.spawn_timer = Timer(20, self._spawn_heartbeat,
single_shot=False)
self.spawn_timer.start()
|
class DemoScene(Scene):
z_index=10,
))
self.spawn_timer = Timer(self._spawn_heartbeat, 20,
single_shot=False)
self.spawn_timer.start()
|
1,832 |
https://:@github.com/oladotunr/cosine.git
|
2497192592fd0709608c0b6b16dc342b7f645cdc
|
@@ -98,7 +98,7 @@ class CosineAlgo(object):
instrument = CosineInstrument.load(self.instr_cache, **instr_defs[instr])
self._cxt.instruments[instrument.name] = instrument
order_worker = CosineOrderWorker(self._cfg.orders.ActiveDepth, instrument, venue, logger=self.logger)
- self._cxt.orders[k][instr.symbol] = order_worker
+ self._cxt.orders[k][instrument.symbol] = order_worker
venue_instruments += 1
if venue_instruments == 0:
raise LookupError("No instruments loaded for any of the provided venues")
|
cosine/core/algo.py
|
ReplaceText(target='instrument' @(101,36)->(101,41))
|
class CosineAlgo(object):
instrument = CosineInstrument.load(self.instr_cache, **instr_defs[instr])
self._cxt.instruments[instrument.name] = instrument
order_worker = CosineOrderWorker(self._cfg.orders.ActiveDepth, instrument, venue, logger=self.logger)
self._cxt.orders[k][instr.symbol] = order_worker
venue_instruments += 1
if venue_instruments == 0:
raise LookupError("No instruments loaded for any of the provided venues")
|
class CosineAlgo(object):
instrument = CosineInstrument.load(self.instr_cache, **instr_defs[instr])
self._cxt.instruments[instrument.name] = instrument
order_worker = CosineOrderWorker(self._cfg.orders.ActiveDepth, instrument, venue, logger=self.logger)
self._cxt.orders[k][instrument.symbol] = order_worker
venue_instruments += 1
if venue_instruments == 0:
raise LookupError("No instruments loaded for any of the provided venues")
|
1,833 |
https://:@github.com/hcji/CTSgetPy.git
|
93ac966118febd2bd7120f1149eb1f8572939afa
|
@@ -52,7 +52,7 @@ def CTSget(source, targets, identifiers, top_only=True, timeout=60, server="http
result = {}
if type(targets) is str:
result[targets] = CTS_translate_multi(source, targets, identifiers, top_only, timeout, server)
- elif type(identifiers) is list:
+ elif type(targets) is list:
for i in range(len(targets)):
target = targets[i]
print ('translating from ' + source + ' to ' + target)
|
CTSgetPy/CTSgetPy.py
|
ReplaceText(target='targets' @(55,14)->(55,25))
|
def CTSget(source, targets, identifiers, top_only=True, timeout=60, server="http
result = {}
if type(targets) is str:
result[targets] = CTS_translate_multi(source, targets, identifiers, top_only, timeout, server)
elif type(identifiers) is list:
for i in range(len(targets)):
target = targets[i]
print ('translating from ' + source + ' to ' + target)
|
def CTSget(source, targets, identifiers, top_only=True, timeout=60, server="http
result = {}
if type(targets) is str:
result[targets] = CTS_translate_multi(source, targets, identifiers, top_only, timeout, server)
elif type(targets) is list:
for i in range(len(targets)):
target = targets[i]
print ('translating from ' + source + ' to ' + target)
|
1,834 |
https://:@github.com/phylliade/vinci.git
|
8b1940c5613e124a55d11ba257a8800bc12d65a5
|
@@ -31,7 +31,7 @@ class SARSAAgent(Agent):
self.model = model
self.nb_actions = nb_actions
self.policy = policy
- self.test_policy = policy
+ self.test_policy = test_policy
self.gamma = gamma
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
|
rl/agents/sarsa.py
|
ReplaceText(target='test_policy' @(34,27)->(34,33))
|
class SARSAAgent(Agent):
self.model = model
self.nb_actions = nb_actions
self.policy = policy
self.test_policy = policy
self.gamma = gamma
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
|
class SARSAAgent(Agent):
self.model = model
self.nb_actions = nb_actions
self.policy = policy
self.test_policy = test_policy
self.gamma = gamma
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
|
1,835 |
https://:@github.com/phelimb/atlas.git
|
dfacc38b17582837e32e711ecc23926476c19b66
|
@@ -297,7 +297,7 @@ class Genotyper(object):
for probe_name, probe_coverages in self.variant_covgs.items():
probe_id = self._name_to_id(probe_name)
variant = None
- call = gt.type(probe_coverages, variant=variant)
+ call = gt.type(probe_coverages, variant=probe_name)
genotypes.append(sum(call["genotype"]))
filters.append(int(call["info"]["filter"] == "PASS"))
if sum(call["genotype"]) > 0 or not call[
|
mykatlas/typing/typer/genotyper.py
|
ReplaceText(target='probe_name' @(300,52)->(300,59))
|
class Genotyper(object):
for probe_name, probe_coverages in self.variant_covgs.items():
probe_id = self._name_to_id(probe_name)
variant = None
call = gt.type(probe_coverages, variant=variant)
genotypes.append(sum(call["genotype"]))
filters.append(int(call["info"]["filter"] == "PASS"))
if sum(call["genotype"]) > 0 or not call[
|
class Genotyper(object):
for probe_name, probe_coverages in self.variant_covgs.items():
probe_id = self._name_to_id(probe_name)
variant = None
call = gt.type(probe_coverages, variant=probe_name)
genotypes.append(sum(call["genotype"]))
filters.append(int(call["info"]["filter"] == "PASS"))
if sum(call["genotype"]) > 0 or not call[
|
1,836 |
https://:@github.com/munisisazade/startmicro.git
|
4c74f41a31a6bef9aaa09586628e8427a7d68851
|
@@ -54,7 +54,7 @@ class Command(object):
self.write_file(self.folder_name, "docker-compose.yml", docker_compose)
self.write_file(self.folder_name, "Dockerfile", Dockerfile)
self.write_file(self.folder_name, "README.md", readme)
- if answers.get("type") == "Restful" and not answers:
+ if answers.get("type") == "Restful" or not answers:
self.write_file(self.api_path, "producer.py", producer_restful)
self.write_file(self.folder_name, "run.py", run_restful)
elif answers.get("type") == "Redis pubsub":
|
startmicro/core/base.py
|
ReplaceText(target='or' @(57,44)->(57,47))
|
class Command(object):
self.write_file(self.folder_name, "docker-compose.yml", docker_compose)
self.write_file(self.folder_name, "Dockerfile", Dockerfile)
self.write_file(self.folder_name, "README.md", readme)
if answers.get("type") == "Restful" and not answers:
self.write_file(self.api_path, "producer.py", producer_restful)
self.write_file(self.folder_name, "run.py", run_restful)
elif answers.get("type") == "Redis pubsub":
|
class Command(object):
self.write_file(self.folder_name, "docker-compose.yml", docker_compose)
self.write_file(self.folder_name, "Dockerfile", Dockerfile)
self.write_file(self.folder_name, "README.md", readme)
if answers.get("type") == "Restful" or not answers:
self.write_file(self.api_path, "producer.py", producer_restful)
self.write_file(self.folder_name, "run.py", run_restful)
elif answers.get("type") == "Redis pubsub":
|
1,837 |
https://:@github.com/EdMan1022/PySpot.git
|
c5624d03bc367d65859ec67e2bee62464e212996
|
@@ -17,4 +17,4 @@ class Auth(object):
:return: (Bool) True if expired, False if not
"""
- return self.expires_at > datetime.datetime.now()
+ return self.expires_at < datetime.datetime.now()
|
pyspot/auth.py
|
ReplaceText(target='<' @(20,31)->(20,32))
|
class Auth(object):
:return: (Bool) True if expired, False if not
"""
return self.expires_at > datetime.datetime.now()
|
class Auth(object):
:return: (Bool) True if expired, False if not
"""
return self.expires_at < datetime.datetime.now()
|
1,838 |
https://:@github.com/jolyonb/olxcleaner.git
|
775eed61ebf7b79e304f9012c5e9264bb7ec505f
|
@@ -121,7 +121,7 @@ def traverse_course(edxobj, node, filename, errorstore, pointer=False):
try:
new_node = etree.parse(new_file).getroot()
except XMLSyntaxError as e:
- errorstore.add_error(InvalidXML(filename, e.args[0]))
+ errorstore.add_error(InvalidXML(new_file, e.args[0]))
return
else:
traverse_course(edxobj, new_node, new_file, errorstore, pointer=True)
|
edx-xml-clean/loader/xml.py
|
ReplaceText(target='new_file' @(124,44)->(124,52))
|
def traverse_course(edxobj, node, filename, errorstore, pointer=False):
try:
new_node = etree.parse(new_file).getroot()
except XMLSyntaxError as e:
errorstore.add_error(InvalidXML(filename, e.args[0]))
return
else:
traverse_course(edxobj, new_node, new_file, errorstore, pointer=True)
|
def traverse_course(edxobj, node, filename, errorstore, pointer=False):
try:
new_node = etree.parse(new_file).getroot()
except XMLSyntaxError as e:
errorstore.add_error(InvalidXML(new_file, e.args[0]))
return
else:
traverse_course(edxobj, new_node, new_file, errorstore, pointer=True)
|
1,839 |
https://:@github.com/socek/confsave.git
|
6cb0b0b47e72f019cd92a4eddf8b2794c01b1e6e
|
@@ -46,7 +46,7 @@ class Commands(object):
self._init_repo()
for filename in glob(self.app.get_home_path() + '/.*'):
endpoint = Endpoint(self.app, filename)
- if not endpoint.is_visible():
+ if endpoint.is_visible():
print(endpoint.path)
def ignore(self, filename):
|
confsave/commands.py
|
ReplaceText(target='' @(49,15)->(49,19))
|
class Commands(object):
self._init_repo()
for filename in glob(self.app.get_home_path() + '/.*'):
endpoint = Endpoint(self.app, filename)
if not endpoint.is_visible():
print(endpoint.path)
def ignore(self, filename):
|
class Commands(object):
self._init_repo()
for filename in glob(self.app.get_home_path() + '/.*'):
endpoint = Endpoint(self.app, filename)
if endpoint.is_visible():
print(endpoint.path)
def ignore(self, filename):
|
1,840 |
https://:@github.com/asulibraries/django-asutheme.git
|
ab1982643a4145db1954d38d0a7088b8478bdbc6
|
@@ -3,6 +3,6 @@ from django.conf import settings
def container_style(request):
classname = 'container'
- if getattr('ASU_THEME_FLUID', settings, False):
+ if getattr(settings, 'ASU_THEME_FLUID', False):
classname += '-fluid'
return {'asutheme_container_class': classname}
|
asutheme/context_processors.py
|
ArgSwap(idxs=0<->1 @(6,7)->(6,14))
|
from django.conf import settings
def container_style(request):
classname = 'container'
if getattr('ASU_THEME_FLUID', settings, False):
classname += '-fluid'
return {'asutheme_container_class': classname}
|
from django.conf import settings
def container_style(request):
classname = 'container'
if getattr(settings, 'ASU_THEME_FLUID', False):
classname += '-fluid'
return {'asutheme_container_class': classname}
|
1,841 |
https://:@github.com/neuropoly/bids_neuropoly.git
|
aa37aa05e2ebff34631a0ec101749587d1d4b372
|
@@ -73,7 +73,7 @@ def convert_dcm2nii(path_data, subject, path_out='./'):
# Build output file name
fname_out = os.path.join(subject, contrast_dict[contrast][1],
subject + '_' + contrast_dict[contrast][0] + '.'
- + nii_file.split(os.extsep, 1)[1])
+ + nii_file_all_ext.split(os.extsep, 1)[1])
os.makedirs(os.path.abspath(os.path.dirname(fname_out)), exist_ok=True)
# Move
shutil.move(nii_file_all_ext, fname_out)
|
scripts/convert_dcm2nii.py
|
ReplaceText(target='nii_file_all_ext' @(76,47)->(76,55))
|
def convert_dcm2nii(path_data, subject, path_out='./'):
# Build output file name
fname_out = os.path.join(subject, contrast_dict[contrast][1],
subject + '_' + contrast_dict[contrast][0] + '.'
+ nii_file.split(os.extsep, 1)[1])
os.makedirs(os.path.abspath(os.path.dirname(fname_out)), exist_ok=True)
# Move
shutil.move(nii_file_all_ext, fname_out)
|
def convert_dcm2nii(path_data, subject, path_out='./'):
# Build output file name
fname_out = os.path.join(subject, contrast_dict[contrast][1],
subject + '_' + contrast_dict[contrast][0] + '.'
+ nii_file_all_ext.split(os.extsep, 1)[1])
os.makedirs(os.path.abspath(os.path.dirname(fname_out)), exist_ok=True)
# Move
shutil.move(nii_file_all_ext, fname_out)
|
1,842 |
https://:@github.com/cea-ufmg/sym2num.git
|
49bd276097ec6efaf8b5e33541f75f5cdae58d25
|
@@ -152,7 +152,7 @@ def isstatic(arguments):
if len(arguments) == 0:
return True
elif not isinstance(arguments[0], var.SymbolObject):
- return False
+ return True
else:
return 'cls' != arguments[0].name != 'self'
|
sym2num/function.py
|
ReplaceText(target='True' @(155,15)->(155,20))
|
def isstatic(arguments):
if len(arguments) == 0:
return True
elif not isinstance(arguments[0], var.SymbolObject):
return False
else:
return 'cls' != arguments[0].name != 'self'
|
def isstatic(arguments):
if len(arguments) == 0:
return True
elif not isinstance(arguments[0], var.SymbolObject):
return True
else:
return 'cls' != arguments[0].name != 'self'
|
1,843 |
https://:@github.com/python-odin/baldr.git
|
fbce55b525f2fa4b171dc1b688bea4ca42b8a099
|
@@ -212,7 +212,7 @@ class ResourceApiCommon(object):
except Exception as e:
# Special case when a request raises a 500 error. If we are in debug mode and a default is used (ie
# request does not explicitly specify a content type) fall back to the Django default exception page.
- if settings.DEBUG and getattr(response_codec, 'is_default', False):
+ if settings.DEBUG and getattr(response_type, 'is_default', False):
raise
# Catch any other exceptions and pass them to the 500 handler for evaluation.
resource = self.handle_500(request, e)
|
baldr/api.py
|
ReplaceText(target='response_type' @(215,46)->(215,60))
|
class ResourceApiCommon(object):
except Exception as e:
# Special case when a request raises a 500 error. If we are in debug mode and a default is used (ie
# request does not explicitly specify a content type) fall back to the Django default exception page.
if settings.DEBUG and getattr(response_codec, 'is_default', False):
raise
# Catch any other exceptions and pass them to the 500 handler for evaluation.
resource = self.handle_500(request, e)
|
class ResourceApiCommon(object):
except Exception as e:
# Special case when a request raises a 500 error. If we are in debug mode and a default is used (ie
# request does not explicitly specify a content type) fall back to the Django default exception page.
if settings.DEBUG and getattr(response_type, 'is_default', False):
raise
# Catch any other exceptions and pass them to the 500 handler for evaluation.
resource = self.handle_500(request, e)
|
1,844 |
https://:@github.com/CallmeNezha/xmldiffs.git
|
51dab2722f963206b71bde197c283efb63079b49
|
@@ -130,7 +130,7 @@ else:
def write_sorted_file(fpath, outdir=None, cfg=None):
if outdir is not None:
fbasename = os.path.splitext(os.path.basename(fpath))[0]
- sorted_fpath = os.path.join(outdir, "{}.cmp.xml".format(fpath))
+ sorted_fpath = os.path.join(outdir, "{}.cmp.xml".format(fbasename))
tmp = unicode_writer(open(sorted_fpath, 'w'))
else:
tmp = unicode_writer(NamedTemporaryFile('w'))
|
xmldiffs/command_line.py
|
ReplaceText(target='fbasename' @(133,64)->(133,69))
|
else:
def write_sorted_file(fpath, outdir=None, cfg=None):
if outdir is not None:
fbasename = os.path.splitext(os.path.basename(fpath))[0]
sorted_fpath = os.path.join(outdir, "{}.cmp.xml".format(fpath))
tmp = unicode_writer(open(sorted_fpath, 'w'))
else:
tmp = unicode_writer(NamedTemporaryFile('w'))
|
else:
def write_sorted_file(fpath, outdir=None, cfg=None):
if outdir is not None:
fbasename = os.path.splitext(os.path.basename(fpath))[0]
sorted_fpath = os.path.join(outdir, "{}.cmp.xml".format(fbasename))
tmp = unicode_writer(open(sorted_fpath, 'w'))
else:
tmp = unicode_writer(NamedTemporaryFile('w'))
|
1,845 |
https://:@github.com/ahmed-shariff/ml-pipeline.git
|
2aa119fe99d2e96857754b56b601a82463f13c9c
|
@@ -418,7 +418,7 @@ class Metric():
return 0
def get_tracking_delta(self):
- if len(self.track_value_list) > self.track_average_epoc_count:
+ if len(self.track_value_list) == self.track_average_epoc_count:
return sum(
[self.track_value_list[idx + 1] -
self.track_value_list[idx]
|
mlpipeline/utils/_utils.py
|
ReplaceText(target='==' @(421,38)->(421,39))
|
class Metric():
return 0
def get_tracking_delta(self):
if len(self.track_value_list) > self.track_average_epoc_count:
return sum(
[self.track_value_list[idx + 1] -
self.track_value_list[idx]
|
class Metric():
return 0
def get_tracking_delta(self):
if len(self.track_value_list) == self.track_average_epoc_count:
return sum(
[self.track_value_list[idx + 1] -
self.track_value_list[idx]
|
1,846 |
https://:@github.com/rshk/jobcontrol.git
|
41187069493848c41741b1217b32aeb21e442c43
|
@@ -112,7 +112,7 @@ class MemoryJobControl(JobControlBase):
if jrdef['job_id'] == job_id)
for jrid, jrdef in sorted(list(runs)):
- yield jrdef
+ yield jrid
# ------------------------------------------------------------
# Logging
|
jobcontrol/ext/memory.py
|
ReplaceText(target='jrid' @(115,18)->(115,23))
|
class MemoryJobControl(JobControlBase):
if jrdef['job_id'] == job_id)
for jrid, jrdef in sorted(list(runs)):
yield jrdef
# ------------------------------------------------------------
# Logging
|
class MemoryJobControl(JobControlBase):
if jrdef['job_id'] == job_id)
for jrid, jrdef in sorted(list(runs)):
yield jrid
# ------------------------------------------------------------
# Logging
|
1,847 |
https://:@github.com/spatialucr/geosnap.git
|
73d2b50077271c9f3c530a52ef97890200c29751
|
@@ -171,7 +171,7 @@ def harmonize(
profiles.append(profile)
if len(intensive_variables) > 0:
- profile = pd.DataFrame(interpolation[1], columns=extensive_variables)
+ profile = pd.DataFrame(interpolation[1], columns=intensive_variables)
profiles.append(profile)
profile = pd.concat(profiles, sort=True)
|
geosnap/harmonize/harmonize.py
|
ReplaceText(target='intensive_variables' @(174,61)->(174,80))
|
def harmonize(
profiles.append(profile)
if len(intensive_variables) > 0:
profile = pd.DataFrame(interpolation[1], columns=extensive_variables)
profiles.append(profile)
profile = pd.concat(profiles, sort=True)
|
def harmonize(
profiles.append(profile)
if len(intensive_variables) > 0:
profile = pd.DataFrame(interpolation[1], columns=intensive_variables)
profiles.append(profile)
profile = pd.concat(profiles, sort=True)
|
1,848 |
https://:@github.com/stefanseefeld/faber.git
|
3cb344b107e599396c70e3e72488abbeff4af738
|
@@ -416,7 +416,7 @@ def extend (name, values):
if __implicit_features.has_key(v):
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
- __implicit_features[v] = name
+ __implicit_features[v] = feature
if len (feature.values()) == 0 and len (values) > 0:
# This is the first value specified for this feature,
|
src/build/feature.py
|
ReplaceText(target='feature' @(419,37)->(419,41))
|
def extend (name, values):
if __implicit_features.has_key(v):
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = name
if len (feature.values()) == 0 and len (values) > 0:
# This is the first value specified for this feature,
|
def extend (name, values):
if __implicit_features.has_key(v):
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if len (feature.values()) == 0 and len (values) > 0:
# This is the first value specified for this feature,
|
1,849 |
https://:@github.com/stefanseefeld/faber.git
|
475c635167c32ee61b5951e2c1ba6a5613723437
|
@@ -160,7 +160,7 @@ def refine (properties, requirements):
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
- if r.condition():
+ if not r.condition():
required[r.feature()] = r
for p in properties:
|
src/build/property.py
|
ReplaceText(target='not ' @(163,11)->(163,11))
|
def refine (properties, requirements):
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if r.condition():
required[r.feature()] = r
for p in properties:
|
def refine (properties, requirements):
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if not r.condition():
required[r.feature()] = r
for p in properties:
|
1,850 |
https://:@github.com/stefanseefeld/faber.git
|
632ab9c8665f96399b75d4c36b7e50db9cd05812
|
@@ -362,7 +362,7 @@ def __add_flag (rule_or_module, variable_name, condition, values):
assert m
module = m.group(1)
- __module_flags.setdefault(m, []).append(f)
+ __module_flags.setdefault(module, []).append(f)
__flags.setdefault(rule_or_module, []).append(f)
__requirements = []
|
src/build/toolset.py
|
ReplaceText(target='module' @(365,30)->(365,31))
|
def __add_flag (rule_or_module, variable_name, condition, values):
assert m
module = m.group(1)
__module_flags.setdefault(m, []).append(f)
__flags.setdefault(rule_or_module, []).append(f)
__requirements = []
|
def __add_flag (rule_or_module, variable_name, condition, values):
assert m
module = m.group(1)
__module_flags.setdefault(module, []).append(f)
__flags.setdefault(rule_or_module, []).append(f)
__requirements = []
|
1,851 |
https://:@github.com/ivirshup/ConsistentClusters.git
|
c2ad610430164fe7a56ce5d3f1f8357e1dd336bc
|
@@ -518,7 +518,7 @@ def _call_get_edges(args):
def _get_edges(clustering1: np.array, clustering2: np.array):
edges = []
offset1 = clustering1.min()
- offset2 = clustering1.min()
+ offset2 = clustering2.min()
# Because of how I've done unique node names, potentially this
# could be done in a more generic way by creating a mapping here.
offset_clusts1 = clustering1 - offset1
|
constclust/aggregate.py
|
ReplaceText(target='clustering2' @(521,14)->(521,25))
|
def _call_get_edges(args):
def _get_edges(clustering1: np.array, clustering2: np.array):
edges = []
offset1 = clustering1.min()
offset2 = clustering1.min()
# Because of how I've done unique node names, potentially this
# could be done in a more generic way by creating a mapping here.
offset_clusts1 = clustering1 - offset1
|
def _call_get_edges(args):
def _get_edges(clustering1: np.array, clustering2: np.array):
edges = []
offset1 = clustering1.min()
offset2 = clustering2.min()
# Because of how I've done unique node names, potentially this
# could be done in a more generic way by creating a mapping here.
offset_clusts1 = clustering1 - offset1
|
1,852 |
https://:@github.com/nowells/python-wellrested.git
|
8d016faf90f3a0d833cf9b1c52aaa66ee7de9514
|
@@ -35,7 +35,7 @@ class RestClient(object):
request_body = self._serialize(data)
response_headers, response_content = self._connection.request(resource, method, args=args, body=request_body, headers=headers, content_type=self.content_type)
if response_headers.get('status') == HTTP_STATUS_OK:
- data = self._deserialize(response_content)
+ response_data = self._deserialize(response_content)
return Response(response_headers, response_content, response_data)
def _serialize(self, data):
|
wellrested/connections/__init__.py
|
ReplaceText(target='response_data' @(38,12)->(38,16))
|
class RestClient(object):
request_body = self._serialize(data)
response_headers, response_content = self._connection.request(resource, method, args=args, body=request_body, headers=headers, content_type=self.content_type)
if response_headers.get('status') == HTTP_STATUS_OK:
data = self._deserialize(response_content)
return Response(response_headers, response_content, response_data)
def _serialize(self, data):
|
class RestClient(object):
request_body = self._serialize(data)
response_headers, response_content = self._connection.request(resource, method, args=args, body=request_body, headers=headers, content_type=self.content_type)
if response_headers.get('status') == HTTP_STATUS_OK:
response_data = self._deserialize(response_content)
return Response(response_headers, response_content, response_data)
def _serialize(self, data):
|
1,853 |
https://:@github.com/packagecontrol/st_package_reviewer.git
|
85b67bc0d381d382b2805e6464ab80eb31e2d484
|
@@ -45,7 +45,7 @@ def main():
help="URL to the repository or path to the package to be checked.")
parser.add_argument("--repo-only", action='store_true',
help="Do not check the package itself and only its repository.")
- parser.add_argument("--verbose", "-v", action='store_true',
+ parser.add_argument("-v", "--verbose", action='store_true',
help="Increase verbosity.")
parser.add_argument("--debug", action='store_true',
help="Enter pdb on excpetions. Implies --verbose.")
|
package_reviewer/__main__.py
|
ArgSwap(idxs=0<->1 @(48,4)->(48,23))
|
def main():
help="URL to the repository or path to the package to be checked.")
parser.add_argument("--repo-only", action='store_true',
help="Do not check the package itself and only its repository.")
parser.add_argument("--verbose", "-v", action='store_true',
help="Increase verbosity.")
parser.add_argument("--debug", action='store_true',
help="Enter pdb on excpetions. Implies --verbose.")
|
def main():
help="URL to the repository or path to the package to be checked.")
parser.add_argument("--repo-only", action='store_true',
help="Do not check the package itself and only its repository.")
parser.add_argument("-v", "--verbose", action='store_true',
help="Increase verbosity.")
parser.add_argument("--debug", action='store_true',
help="Enter pdb on excpetions. Implies --verbose.")
|
1,854 |
https://:@github.com/emilbjorklund/django-template-shortcodes.git
|
b2788b9d7fd1211de9666fd5c977274f8f343e30
|
@@ -31,7 +31,7 @@ def parse(value, request):
try:
if cache.get(cache_key):
try:
- parsed = re.sub(r'\[' + item + r'\]', cache.get(item), parsed)
+ parsed = re.sub(r'\[' + item + r'\]', cache.get(cache_key), parsed)
except:
pass
else:
|
shortcodes/parser.py
|
ReplaceText(target='cache_key' @(34,58)->(34,62))
|
def parse(value, request):
try:
if cache.get(cache_key):
try:
parsed = re.sub(r'\[' + item + r'\]', cache.get(item), parsed)
except:
pass
else:
|
def parse(value, request):
try:
if cache.get(cache_key):
try:
parsed = re.sub(r'\[' + item + r'\]', cache.get(cache_key), parsed)
except:
pass
else:
|
1,855 |
https://:@github.com/ods/aiochsa.git
|
24dcfdbc52b0a1009ce4a7b7ebcf72a1b26be18b
|
@@ -61,7 +61,7 @@ class Client:
if response.status != 200:
body = await response.read()
raise DBException.from_message(
- statement, body.decode(errors='replace'),
+ query, body.decode(errors='replace'),
)
if response.content_type == 'application/json':
|
aiochsa/client.py
|
ReplaceText(target='query' @(64,20)->(64,29))
|
class Client:
if response.status != 200:
body = await response.read()
raise DBException.from_message(
statement, body.decode(errors='replace'),
)
if response.content_type == 'application/json':
|
class Client:
if response.status != 200:
body = await response.read()
raise DBException.from_message(
query, body.decode(errors='replace'),
)
if response.content_type == 'application/json':
|
1,856 |
https://:@github.com/lega911/sqlmapper.git
|
cf7f674b0186e12ce37c5a9deb84e6b8d4d58919
|
@@ -116,7 +116,7 @@ class MysqlTable(Table):
scolumn += ' AUTO_INCREMENT'
if default != NoValue:
- if not_null or primary:
+ if auto_increment or primary:
raise ValueError('Can''t have default value')
scolumn += ' DEFAULT %s'
values.append(default)
|
sqlmapper/mysql.py
|
ReplaceText(target='auto_increment' @(119,15)->(119,23))
|
class MysqlTable(Table):
scolumn += ' AUTO_INCREMENT'
if default != NoValue:
if not_null or primary:
raise ValueError('Can''t have default value')
scolumn += ' DEFAULT %s'
values.append(default)
|
class MysqlTable(Table):
scolumn += ' AUTO_INCREMENT'
if default != NoValue:
if auto_increment or primary:
raise ValueError('Can''t have default value')
scolumn += ' DEFAULT %s'
values.append(default)
|
1,857 |
https://:@github.com/vanceeasleaf/aces.git
|
c4097285794c957a7242162570b41412be547ce0
|
@@ -34,5 +34,5 @@ class Device(Material):
#atoms.center()
x=atoms.positions[:,0]
- return atoms
+ return center
|
aces/runners/negf/device/device.py
|
ReplaceText(target='center' @(37,9)->(37,14))
|
class Device(Material):
#atoms.center()
x=atoms.positions[:,0]
return atoms
|
class Device(Material):
#atoms.center()
x=atoms.positions[:,0]
return center
|
1,858 |
https://:@github.com/marcofavorito/temprl.git
|
1b0cb65d54ba2696fea2d41401990e406a8859f0
|
@@ -195,7 +195,7 @@ def _compute_levels(dfa: DFA, property_states):
# levels for failure state (i.e. that cannot reach a final state)
failure_states = set()
for s in filter(lambda x: x not in z_current, dfa.states):
- state2level[s] = max_level
+ state2level[s] = level
failure_states.add(s)
return state2level, max_level, failure_states
|
temprl/automata.py
|
ReplaceText(target='level' @(198,25)->(198,34))
|
def _compute_levels(dfa: DFA, property_states):
# levels for failure state (i.e. that cannot reach a final state)
failure_states = set()
for s in filter(lambda x: x not in z_current, dfa.states):
state2level[s] = max_level
failure_states.add(s)
return state2level, max_level, failure_states
|
def _compute_levels(dfa: DFA, property_states):
# levels for failure state (i.e. that cannot reach a final state)
failure_states = set()
for s in filter(lambda x: x not in z_current, dfa.states):
state2level[s] = level
failure_states.add(s)
return state2level, max_level, failure_states
|
1,859 |
https://:@github.com/equinor/stea.git
|
bdebfe3c0cb939db2dd7b1a39a029d00ffe555a9
|
@@ -82,5 +82,5 @@ def calculate(stea_input):
request.add_profile(profile_id, start_year, data)
- return SteaResult(client.calculate(request), project)
+ return SteaResult(client.calculate(request), stea_input)
|
stea/__init__.py
|
ReplaceText(target='stea_input' @(85,49)->(85,56))
|
def calculate(stea_input):
request.add_profile(profile_id, start_year, data)
return SteaResult(client.calculate(request), project)
|
def calculate(stea_input):
request.add_profile(profile_id, start_year, data)
return SteaResult(client.calculate(request), stea_input)
|
1,860 |
https://:@github.com/tandonneur/AdvancedAnalytics.git
|
4a73a8c616db33f69c5361eca1f4ca18ca7a2b17
|
@@ -710,7 +710,7 @@ class tree_classifier(object):
print(fstr2.format('Class ', dt.classes_[i]), end="")
for j in range(n_classes):
- print("{:>10d}".format(conf_mat_t[i][j]), end="")
+ print("{:>10d}".format(conf_mat_v[i][j]), end="")
print("")
print("")
|
AdvancedAnalytics/Tree.py
|
ReplaceText(target='conf_mat_v' @(713,43)->(713,53))
|
class tree_classifier(object):
print(fstr2.format('Class ', dt.classes_[i]), end="")
for j in range(n_classes):
print("{:>10d}".format(conf_mat_t[i][j]), end="")
print("")
print("")
|
class tree_classifier(object):
print(fstr2.format('Class ', dt.classes_[i]), end="")
for j in range(n_classes):
print("{:>10d}".format(conf_mat_v[i][j]), end="")
print("")
print("")
|
1,861 |
https://:@github.com/usc-isi-i2/dsbox-cleaning.git
|
961d92886916dfbc0a0e1bfd2a51e9c4677301f7
|
@@ -342,7 +342,7 @@ class Profiler(TransformerPrimitiveBase[Input, Output, Hyperparams]):
inputs.iloc[:, col] = numerics
else:
if "http://schema.org/Float" not in old_metadata['semantic_types']:
- old_metadata['semantic_types'] = ("http://schema.org/Float",)
+ old_metadata['semantic_types'] += ("http://schema.org/Float",)
old_metadata['structural_type'] = type(10.2)
inputs.iloc[:, col] = numerics
|
dsbox/datapreprocessing/cleaner/data_profile.py
|
ReplaceText(target='+=' @(345,63)->(345,64))
|
class Profiler(TransformerPrimitiveBase[Input, Output, Hyperparams]):
inputs.iloc[:, col] = numerics
else:
if "http://schema.org/Float" not in old_metadata['semantic_types']:
old_metadata['semantic_types'] = ("http://schema.org/Float",)
old_metadata['structural_type'] = type(10.2)
inputs.iloc[:, col] = numerics
|
class Profiler(TransformerPrimitiveBase[Input, Output, Hyperparams]):
inputs.iloc[:, col] = numerics
else:
if "http://schema.org/Float" not in old_metadata['semantic_types']:
old_metadata['semantic_types'] += ("http://schema.org/Float",)
old_metadata['structural_type'] = type(10.2)
inputs.iloc[:, col] = numerics
|
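Illustration for record 1,861 (placeholder type URL, standalone sketch): the one-character change from `=` to `+=` preserves the existing semantic types instead of replacing them; on a tuple, `+=` rebinds the key to a new concatenated tuple.

old_metadata = {"semantic_types": ("http://schema.org/Integer",)}   # placeholder existing entry

if "http://schema.org/Float" not in old_metadata["semantic_types"]:
    old_metadata["semantic_types"] += ("http://schema.org/Float",)  # concatenates, keeps old entries

print(old_metadata["semantic_types"])
# ('http://schema.org/Integer', 'http://schema.org/Float')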
1,862 |
https://:@github.com/azavea/djsonb.git
|
90e97fc29ca5df0cc56b3193c9f7a4a1543111b5
|
@@ -72,7 +72,7 @@ class FilterTree:
sql_tuple = FilterTree.text_similarity_filter(rule[0], pattern, True)
else:
sql_tuple = FilterTree.text_similarity_filter(rule[0], pattern, False)
- pattern_specs.append(sql_tuple)
+ rule_specs.append(sql_tuple)
rule_strings = [' AND '.join([rule[0] for rule in rule_specs]),
' OR '.join([rule[0] for rule in pattern_specs])]
|
djsonb/lookups.py
|
ReplaceText(target='rule_specs' @(75,20)->(75,33))
|
class FilterTree:
sql_tuple = FilterTree.text_similarity_filter(rule[0], pattern, True)
else:
sql_tuple = FilterTree.text_similarity_filter(rule[0], pattern, False)
pattern_specs.append(sql_tuple)
rule_strings = [' AND '.join([rule[0] for rule in rule_specs]),
' OR '.join([rule[0] for rule in pattern_specs])]
|
class FilterTree:
sql_tuple = FilterTree.text_similarity_filter(rule[0], pattern, True)
else:
sql_tuple = FilterTree.text_similarity_filter(rule[0], pattern, False)
rule_specs.append(sql_tuple)
rule_strings = [' AND '.join([rule[0] for rule in rule_specs]),
' OR '.join([rule[0] for rule in pattern_specs])]
|
1,863 |
https://:@github.com/ionelmc/python-pth.git
|
5e8cbdd87050b06018ef04cc994d8dc155931e98
|
@@ -235,7 +235,7 @@ class Path(AbstractPath):
normpath = property(lambda self: pth(ospath.normpath(self)))
norm = property(lambda self: pth(ospath.normcase(ospath.normpath(self))))
real = realpath = property(lambda self: pth(ospath.realpath(self)))
- rel = relpath = lambda self, start: pth(ospath.relpath(self, start))
+ rel = relpath = lambda self, start: pth(ospath.relpath(start, self))
same = samefile = lambda self, other: ospath.samefile(self, other)
if hasattr(os, 'link'):
if PY33:
|
src/pth.py
|
ArgSwap(idxs=0<->1 @(238,44)->(238,58))
|
class Path(AbstractPath):
normpath = property(lambda self: pth(ospath.normpath(self)))
norm = property(lambda self: pth(ospath.normcase(ospath.normpath(self))))
real = realpath = property(lambda self: pth(ospath.realpath(self)))
rel = relpath = lambda self, start: pth(ospath.relpath(self, start))
same = samefile = lambda self, other: ospath.samefile(self, other)
if hasattr(os, 'link'):
if PY33:
|
class Path(AbstractPath):
normpath = property(lambda self: pth(ospath.normpath(self)))
norm = property(lambda self: pth(ospath.normcase(ospath.normpath(self))))
real = realpath = property(lambda self: pth(ospath.realpath(self)))
rel = relpath = lambda self, start: pth(ospath.relpath(start, self))
same = samefile = lambda self, other: ospath.samefile(self, other)
if hasattr(os, 'link'):
if PY33:
|
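Illustration for record 1,863: the commit swaps the operands of `os.path.relpath`, whose signature is `relpath(path, start)`: it answers "where does `path` sit relative to `start`", so the two orders give different results.

import os.path as ospath

print(ospath.relpath("/a/b/c", "/a"))   # b/c    (POSIX-style output)
print(ospath.relpath("/a", "/a/b/c"))   # ../..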
1,864 |
https://:@github.com/savex/tempest-parser.git
|
8c2ad3b8d13573924408bf3d2bf50ddc05fdd3d8
|
@@ -33,7 +33,7 @@ def get_date_from_source(source):
# _ctime = time.strftime("%d/%m/%Y %H:%M", time.gmtime(ctime))
return time.strftime(
"%d/%m/%Y %H:%M GMT",
- time.gmtime(ctime)
+ time.gmtime(mtime)
)
|
tempest_parser/manager/importers.py
|
ReplaceText(target='mtime' @(36,20)->(36,25))
|
def get_date_from_source(source):
# _ctime = time.strftime("%d/%m/%Y %H:%M", time.gmtime(ctime))
return time.strftime(
"%d/%m/%Y %H:%M GMT",
time.gmtime(ctime)
)
|
def get_date_from_source(source):
# _ctime = time.strftime("%d/%m/%Y %H:%M", time.gmtime(ctime))
return time.strftime(
"%d/%m/%Y %H:%M GMT",
time.gmtime(mtime)
)
|
1,865 |
https://:@github.com/solidfire/solidfire-cli.git
|
fcb6c5f4abbe9cc7ef2c6dded6d2d7fb2492f931
|
@@ -88,5 +88,5 @@ def remove(ctx, name=None, index=None):
if(name is None and index is not None):
cli_utils.print_result(connections[int(index)], ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
if(name is not None and index is None):
- connections = [connection for connection in connections if connection["name"]!=name]
+ connections = [connection for connection in connections if connection["name"]==name]
cli_utils.print_result(connections, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
|
element/cli/commands/cmd_connection.py
|
ReplaceText(target='==' @(91,85)->(91,87))
|
def remove(ctx, name=None, index=None):
if(name is None and index is not None):
cli_utils.print_result(connections[int(index)], ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
if(name is not None and index is None):
connections = [connection for connection in connections if connection["name"]!=name]
cli_utils.print_result(connections, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
|
def remove(ctx, name=None, index=None):
if(name is None and index is not None):
cli_utils.print_result(connections[int(index)], ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
if(name is not None and index is None):
connections = [connection for connection in connections if connection["name"]==name]
cli_utils.print_result(connections, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
|
1,866 |
https://:@github.com/glimix/numpy-sugar.git
|
59fb36f9110b7ac9ae2ce6e06d443c7d44aac42f
|
@@ -50,7 +50,7 @@ def ddot(L, R, left=True, out=None):
else:
if out is None:
out = copy(L)
- return multiply(out, R, out=out)
+ return multiply(L, R, out=out)
def cdot(L, out=None):
|
numpy_sugar/linalg/dot.py
|
ReplaceText(target='L' @(53,24)->(53,27))
|
def ddot(L, R, left=True, out=None):
else:
if out is None:
out = copy(L)
return multiply(out, R, out=out)
def cdot(L, out=None):
|
def ddot(L, R, left=True, out=None):
else:
if out is None:
out = copy(L)
return multiply(L, R, out=out)
def cdot(L, out=None):
|
1,867 |
https://:@github.com/kuzmoyev/Google-Calendar-Simple-API.git
|
9a902a5ce43d8dc2b18c53b8140443a2a99c2810
|
@@ -474,7 +474,7 @@ class Recurrence:
if freq not in (HOURLY, MINUTELY, DAILY, WEEKLY, MONTHLY, YEARLY):
raise ValueError('"freq" parameter must be one of HOURLY, MINUTELY, DAILY, WEEKLY, MONTHLY or YEARLY. '
'{} was provided'.format(freq))
- if interval and (isinstance(interval, int) or interval < 1):
+ if interval and (not isinstance(interval, int) or interval < 1):
raise ValueError('"interval" parameter must be a positive int. '
'{} was provided'.format(interval))
if count and (not isinstance(count, int) or count < 1):
|
gcsa/recurrence.py
|
ReplaceText(target='not ' @(477,25)->(477,25))
|
class Recurrence:
if freq not in (HOURLY, MINUTELY, DAILY, WEEKLY, MONTHLY, YEARLY):
raise ValueError('"freq" parameter must be one of HOURLY, MINUTELY, DAILY, WEEKLY, MONTHLY or YEARLY. '
'{} was provided'.format(freq))
if interval and (isinstance(interval, int) or interval < 1):
raise ValueError('"interval" parameter must be a positive int. '
'{} was provided'.format(interval))
if count and (not isinstance(count, int) or count < 1):
|
class Recurrence:
if freq not in (HOURLY, MINUTELY, DAILY, WEEKLY, MONTHLY, YEARLY):
raise ValueError('"freq" parameter must be one of HOURLY, MINUTELY, DAILY, WEEKLY, MONTHLY or YEARLY. '
'{} was provided'.format(freq))
if interval and (not isinstance(interval, int) or interval < 1):
raise ValueError('"interval" parameter must be a positive int. '
'{} was provided'.format(interval))
if count and (not isinstance(count, int) or count < 1):
|
1,868 |
https://:@github.com/NFontrodona/Lazyparser.git
|
8e3888ea5248c4f1e599bff0bd40b4e96c58e92f
|
@@ -383,7 +383,7 @@ def set_env(delim1, delim2, hd, tb):
:param hd: (string) the header of parameter
:param tb: (int) the number of space/tab that bedore the docstring
"""
- if isinstance(int, tb):
+ if isinstance(tb, int):
global tab
tab = tb
else:
|
src/lazyparser.py
|
ArgSwap(idxs=0<->1 @(386,7)->(386,17))
|
def set_env(delim1, delim2, hd, tb):
:param hd: (string) the header of parameter
:param tb: (int) the number of space/tab that bedore the docstring
"""
if isinstance(int, tb):
global tab
tab = tb
else:
|
def set_env(delim1, delim2, hd, tb):
:param hd: (string) the header of parameter
:param tb: (int) the number of space/tab that bedore the docstring
"""
if isinstance(tb, int):
global tab
tab = tb
else:
|
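Illustration for record 1,868: the classic swapped `isinstance` call. The signature is `isinstance(obj, classinfo)` and the second argument must be a type (or tuple of types), so the original order raises TypeError whenever `tb` is an int.

tb = 4
print(isinstance(tb, int))        # True: correct argument order
try:
    isinstance(int, tb)           # swapped order
except TypeError as exc:
    print(exc)                    # isinstance() arg 2 must be a type...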
1,869 |
https://:@github.com/mikemill/rq_retry_scheduler.git
|
d279c059418831f33d588d963252de0610cb17a7
|
@@ -59,7 +59,7 @@ class Scheduler(object):
def delay_job(self, job, time_delta):
amount = int(time_delta.total_seconds())
- self.connection.zincrby(self.scheduler_jobs_key, job.id, amount)
+ self.connection.zincrby(self.scheduler_jobs_key, amount, job.id)
def should_repeat_job(self, job):
max_runs = job.meta['max_runs']
|
rq_retry_scheduler/scheduler.py
|
ArgSwap(idxs=1<->2 @(62,8)->(62,31))
|
class Scheduler(object):
def delay_job(self, job, time_delta):
amount = int(time_delta.total_seconds())
self.connection.zincrby(self.scheduler_jobs_key, job.id, amount)
def should_repeat_job(self, job):
max_runs = job.meta['max_runs']
|
class Scheduler(object):
def delay_job(self, job, time_delta):
amount = int(time_delta.total_seconds())
self.connection.zincrby(self.scheduler_jobs_key, amount, job.id)
def should_repeat_job(self, job):
max_runs = job.meta['max_runs']
|
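Illustration for record 1,869: the reordering matches redis-py 3.0, where `zincrby` changed to `(name, amount, value)` to mirror the Redis command itself, `ZINCRBY key increment member`. Sketch only (assumes a reachable Redis server on localhost; key and member names here are made up):

import redis

r = redis.Redis()
r.zincrby("scheduler_jobs", 30, "job-id")        # (name, amount, value)
print(r.zscore("scheduler_jobs", "job-id"))      # 30.0 on a fresh key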
1,870 |
https://:@github.com/jbaber/pedigree.git
|
d4f17753d97d279e2845e1b5569c8f5f99fa9939
|
@@ -633,7 +633,7 @@ def toml_to_family(toml_filename):
for relation in big_dict['spouse']
if relation[0] == spouse_uid
]
- family.add_spouses(spouse, children)
+ family.add_spouses(spouse, spouses)
return family
|
src/pedigree/pedigree_lib.py
|
ReplaceText(target='spouses' @(636,31)->(636,39))
|
def toml_to_family(toml_filename):
for relation in big_dict['spouse']
if relation[0] == spouse_uid
]
family.add_spouses(spouse, children)
return family
|
def toml_to_family(toml_filename):
for relation in big_dict['spouse']
if relation[0] == spouse_uid
]
family.add_spouses(spouse, spouses)
return family
|
1,871 |
https://:@github.com/wayne-li2/Flask-User.git
|
928e09cff2d773d5f2cbfa1ed847e32b1b5eae07
|
@@ -83,7 +83,7 @@ def test_roles(db):
role1 = db_adapter.find_first_object(RoleClass, name='Role 1')
db_adapter.delete_object(role1)
role2 = db_adapter.find_first_object(RoleClass, name='Role 2')
- db_adapter.delete_object(role1)
+ db_adapter.delete_object(role2)
db_adapter.commit()
|
flask_user/tests/test_roles.py
|
ReplaceText(target='role2' @(86,33)->(86,38))
|
def test_roles(db):
role1 = db_adapter.find_first_object(RoleClass, name='Role 1')
db_adapter.delete_object(role1)
role2 = db_adapter.find_first_object(RoleClass, name='Role 2')
db_adapter.delete_object(role1)
db_adapter.commit()
|
def test_roles(db):
role1 = db_adapter.find_first_object(RoleClass, name='Role 1')
db_adapter.delete_object(role1)
role2 = db_adapter.find_first_object(RoleClass, name='Role 2')
db_adapter.delete_object(role2)
db_adapter.commit()
|
1,872 |
https://:@github.com/manodeep/astro3D.git
|
38bca8c32c783779f5ddd2bfab66ec69427f4d12
|
@@ -117,7 +117,7 @@ def test_sorted_order(opt):
return False
if (outer_sort == outer_sort_next):
- if (inner_sort > inner_sort_next):
+ if (inner_sort < inner_sort_next):
print("For Halo ID {0} we had a {1} of {2}. After sorting via lexsort "
"inner-key {1}, the next Halo has ID {3} and a {1} of {4}"
.format(halo_id, opt["sort_mass"], inner_sort, halo_id_next,
|
tests/tests.py
|
ReplaceText(target='<' @(120,35)->(120,36))
|
def test_sorted_order(opt):
return False
if (outer_sort == outer_sort_next):
if (inner_sort > inner_sort_next):
print("For Halo ID {0} we had a {1} of {2}. After sorting via lexsort "
"inner-key {1}, the next Halo has ID {3} and a {1} of {4}"
.format(halo_id, opt["sort_mass"], inner_sort, halo_id_next,
|
def test_sorted_order(opt):
return False
if (outer_sort == outer_sort_next):
if (inner_sort < inner_sort_next):
print("For Halo ID {0} we had a {1} of {2}. After sorting via lexsort "
"inner-key {1}, the next Halo has ID {3} and a {1} of {4}"
.format(halo_id, opt["sort_mass"], inner_sort, halo_id_next,
|
1,873 |
https://:@github.com/manodeep/astro3D.git
|
69e10a4b7b50701e65a9ca0f6782fc2654987588
|
@@ -119,7 +119,7 @@ def my_test_sorted_order(opt):
pytest.fail()
if (outer_sort == outer_sort_next):
- if (inner_sort < inner_sort_next):
+ if (inner_sort > inner_sort_next):
print("For Halo ID {0} we had a {1} of {2}. After sorting via lexsort "
"inner-key {1}, the next Halo has ID {3} and a {1} of {4}"
.format(halo_id, opt["sort_mass"], inner_sort, halo_id_next,
|
tests/forest_sorter_test.py
|
ReplaceText(target='>' @(122,35)->(122,36))
|
def my_test_sorted_order(opt):
pytest.fail()
if (outer_sort == outer_sort_next):
if (inner_sort < inner_sort_next):
print("For Halo ID {0} we had a {1} of {2}. After sorting via lexsort "
"inner-key {1}, the next Halo has ID {3} and a {1} of {4}"
.format(halo_id, opt["sort_mass"], inner_sort, halo_id_next,
|
def my_test_sorted_order(opt):
pytest.fail()
if (outer_sort == outer_sort_next):
if (inner_sort > inner_sort_next):
print("For Halo ID {0} we had a {1} of {2}. After sorting via lexsort "
"inner-key {1}, the next Halo has ID {3} and a {1} of {4}"
.format(halo_id, opt["sort_mass"], inner_sort, halo_id_next,
|
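Illustration for records 1,872 and 1,873, which flip the same comparison in opposite directions in different test files. The semantics being tested: the last key passed to `np.lexsort` is the primary key, and within ties the earlier keys come out ascending.

import numpy as np

outer = np.array([1, 0, 1, 0])
inner = np.array([5, 9, 3, 2])
order = np.lexsort((inner, outer))   # 'outer' (last key) is primary
print(outer[order])                  # [0 0 1 1]
print(inner[order])                  # [2 9 3 5]: ascending within each outer tie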
1,874 |
https://:@github.com/kazenniy/atapt.git
|
993a7734447b17a5d96db5428b267556f90b10b4
|
@@ -416,7 +416,7 @@ class atapt:
# word 83 "Commands and feature sets supported"
features = int.from_bytes(buf[166] + buf[167], byteorder='little')
- if major & 0x400:
+ if features & 0x400:
self.lba48bit = True
else:
self.lba48bit = False
|
atapt/atapt.py
|
ReplaceText(target='features' @(419,11)->(419,16))
|
class atapt:
# word 83 "Commands and feature sets supported"
features = int.from_bytes(buf[166] + buf[167], byteorder='little')
if major & 0x400:
self.lba48bit = True
else:
self.lba48bit = False
|
class atapt:
# word 83 "Commands and feature sets supported"
features = int.from_bytes(buf[166] + buf[167], byteorder='little')
if features & 0x400:
self.lba48bit = True
else:
self.lba48bit = False
|
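Illustration for record 1,874 (the word-83 value below is made up): the bug tested the wrong variable against the mask. Per the record, ATA identify word 83 carries the feature bits and bit 10 (0x400) flags 48-bit LBA support, so the freshly parsed `features` word is the one to test.

word_83 = 0x0405                # pretend identify-device word 83
LBA48 = 0x400                   # bit 10: 48-bit address feature set
lba48bit = bool(word_83 & LBA48)
print(lba48bit)                 # True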
1,875 |
https://:@github.com/RI-imaging/DryMass.git
|
d92c19314359ae4e3b4feb0136bc649840cf57a7
|
@@ -221,7 +221,7 @@ def plot_qpi_sphere(qpi_real, qpi_sim, path=None, simtype="simulation"):
plt.tight_layout(rect=(0, 0, 1, .95))
# add identifier
- fig.text(x=.5, y=.99, s=qpi_real["identifier"],
+ fig.text(x=.5, y=.99, s=qpi_sim["identifier"],
verticalalignment="top",
horizontalalignment="center",
fontsize=14)
|
drymass/plot.py
|
ReplaceText(target='qpi_sim' @(224,28)->(224,36))
|
def plot_qpi_sphere(qpi_real, qpi_sim, path=None, simtype="simulation"):
plt.tight_layout(rect=(0, 0, 1, .95))
# add identifier
fig.text(x=.5, y=.99, s=qpi_real["identifier"],
verticalalignment="top",
horizontalalignment="center",
fontsize=14)
|
def plot_qpi_sphere(qpi_real, qpi_sim, path=None, simtype="simulation"):
plt.tight_layout(rect=(0, 0, 1, .95))
# add identifier
fig.text(x=.5, y=.99, s=qpi_sim["identifier"],
verticalalignment="top",
horizontalalignment="center",
fontsize=14)
|
1,876 |
https://:@github.com/lazaret/anuket.git
|
6f069e0d5d6498048990cacd743cd5d63e0e84fa
|
@@ -59,7 +59,7 @@ class UniqueAuthEmail(validators.FancyValidator):
user_id = values['user_id']
else:
user_id = None
- if email and (user.user_id != user_id):
+ if user and (user.user_id != user_id):
errors = {'email': self.message('not_unique_email', state)}
raise Invalid(self.message('not_unique_email', state),
values, state, error_dict=errors)
|
wepwawet/lib/validators.py
|
ReplaceText(target='user' @(62,15)->(62,20))
|
class UniqueAuthEmail(validators.FancyValidator):
user_id = values['user_id']
else:
user_id = None
if email and (user.user_id != user_id):
errors = {'email': self.message('not_unique_email', state)}
raise Invalid(self.message('not_unique_email', state),
values, state, error_dict=errors)
|
class UniqueAuthEmail(validators.FancyValidator):
user_id = values['user_id']
else:
user_id = None
if user and (user.user_id != user_id):
errors = {'email': self.message('not_unique_email', state)}
raise Invalid(self.message('not_unique_email', state),
values, state, error_dict=errors)
|
1,877 |
https://:@github.com/esteinig/dartqc.git
|
b98781ae43cc97f8df60a703a594395ddca18d87
|
@@ -292,7 +292,7 @@ class CommandLine:
for value in [command["maf"], command["call"], command["rep"], command["seq_identity"]]:
if value != -1:
- if value < 1 or value > 1:
+ if value < 0 or value > 1:
raise ValueError("Filter and identity thresholds must be larger >= 0 and <= 1.")
for value in [command["clone_selector"], command["identity_selector"]]:
|
dart_qc.py
|
ReplaceText(target='0' @(295,27)->(295,28))
|
class CommandLine:
for value in [command["maf"], command["call"], command["rep"], command["seq_identity"]]:
if value != -1:
if value < 1 or value > 1:
raise ValueError("Filter and identity thresholds must be larger >= 0 and <= 1.")
for value in [command["clone_selector"], command["identity_selector"]]:
|
class CommandLine:
for value in [command["maf"], command["call"], command["rep"], command["seq_identity"]]:
if value != -1:
if value < 0 or value > 1:
raise ValueError("Filter and identity thresholds must be larger >= 0 and <= 1.")
for value in [command["clone_selector"], command["identity_selector"]]:
|
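Illustration for record 1,877: the old lower bound made the check reject every threshold except exactly 1, since `value < 1 or value > 1` holds for everything else; the intended range per the error message is [0, 1].

def check(value):
    if value != -1:                        # -1 is the 'unset' sentinel
        if value < 0 or value > 1:
            raise ValueError("thresholds must be >= 0 and <= 1")

check(0.25)        # ok
check(-1)          # ok (sentinel)
try:
    check(1.5)
except ValueError as exc:
    print(exc)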
1,878 |
https://:@github.com/dmentipl/phantom-build.git
|
09e3fcdf5c2ca013f55ec9d9d4af396c99b655f8
|
@@ -387,7 +387,7 @@ def setup_calculation(
raise SetupError(msg)
else:
logger.info('Successfully set up Phantom calculation')
- logger.info(f'run_path: {run_path}')
+ logger.info(f'run_path: {_run_path}')
shutil.copy(_in_file, _run_path)
|
phantombuild/phantombuild.py
|
ReplaceText(target='_run_path' @(390,33)->(390,41))
|
def setup_calculation(
raise SetupError(msg)
else:
logger.info('Successfully set up Phantom calculation')
logger.info(f'run_path: {run_path}')
shutil.copy(_in_file, _run_path)
|
def setup_calculation(
raise SetupError(msg)
else:
logger.info('Successfully set up Phantom calculation')
logger.info(f'run_path: {_run_path}')
shutil.copy(_in_file, _run_path)
|
1,879 |
https://:@github.com/shane-breeze/zinv-analysis.git
|
206e736a62c914f577717679adc3f16b9fa1d6b7
|
@@ -36,7 +36,7 @@ class EventSumsProducer(object):
event.METnoX_diMuonParaProjPt_Minus_DiMuon_pt = dimu_para - dimu_pt
event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt = dimu_perp + dimu_pt
event.METnoX_diMuonParaProjPt_Div_DiMuon_pt = dimu_para / dimu_pt
- event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt_Div_DiMuon_pt = (dimu_para + dimu_pt) / dimu_pt
+ event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt_Div_DiMuon_pt = (dimu_perp + dimu_pt) / dimu_pt
# MHT
ht, mht, mhphi = create_mht(
|
sequence/Readers/EventSumsProducer.py
|
ReplaceText(target='dimu_perp' @(39,70)->(39,79))
|
class EventSumsProducer(object):
event.METnoX_diMuonParaProjPt_Minus_DiMuon_pt = dimu_para - dimu_pt
event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt = dimu_perp + dimu_pt
event.METnoX_diMuonParaProjPt_Div_DiMuon_pt = dimu_para / dimu_pt
event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt_Div_DiMuon_pt = (dimu_para + dimu_pt) / dimu_pt
# MHT
ht, mht, mhphi = create_mht(
|
class EventSumsProducer(object):
event.METnoX_diMuonParaProjPt_Minus_DiMuon_pt = dimu_para - dimu_pt
event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt = dimu_perp + dimu_pt
event.METnoX_diMuonParaProjPt_Div_DiMuon_pt = dimu_para / dimu_pt
event.METnoX_diMuonPerpProjPt_Plus_DiMuon_pt_Div_DiMuon_pt = (dimu_perp + dimu_pt) / dimu_pt
# MHT
ht, mht, mhphi = create_mht(
|
1,880 |
https://:@github.com/mbr/ragstoriches.git
|
a99ce338a03bc04bf4f6045b5e18c590177bb3ef
|
@@ -78,7 +78,7 @@ def run_scraper():
scraper = obj
for name, obj in getattr(mod, '_rr_export', {}).iteritems():
- scope[name] = name
+ scope[name] = obj
scraper.scrape(url=args.url,
scraper_name=args.scraper,
|
ragstoriches/apps.py
|
ReplaceText(target='obj' @(81,29)->(81,33))
|
def run_scraper():
scraper = obj
for name, obj in getattr(mod, '_rr_export', {}).iteritems():
scope[name] = name
scraper.scrape(url=args.url,
scraper_name=args.scraper,
|
def run_scraper():
scraper = obj
for name, obj in getattr(mod, '_rr_export', {}).iteritems():
scope[name] = obj
scraper.scrape(url=args.url,
scraper_name=args.scraper,
|
1,881 |
https://:@github.com/NickYi1990/Kaggle_Buddy.git
|
e461b1afe43f676923157628cb528833cf480882
|
@@ -83,7 +83,7 @@ class callbacks_keras:
if epoch%self.decay_after_n_epoch==0 and epoch!=0:
lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, lr*self.decay_rate)
- print("lr changed to {}".format(lr**self.decay_rate))
+ print("lr changed to {}".format(lr*self.decay_rate))
return K.get_value(self.model.optimizer.lr)
def ka_xgb_r2_error(preds, dtrain):
|
Utils/KA_utils.py
|
ReplaceText(target='*' @(86,46)->(86,48))
|
class callbacks_keras:
if epoch%self.decay_after_n_epoch==0 and epoch!=0:
lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, lr*self.decay_rate)
print("lr changed to {}".format(lr**self.decay_rate))
return K.get_value(self.model.optimizer.lr)
def ka_xgb_r2_error(preds, dtrain):
|
class callbacks_keras:
if epoch%self.decay_after_n_epoch==0 and epoch!=0:
lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, lr*self.decay_rate)
print("lr changed to {}".format(lr*self.decay_rate))
return K.get_value(self.model.optimizer.lr)
def ka_xgb_r2_error(preds, dtrain):
|
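Illustration for record 1,881: the fix only affects the log line. The optimizer was updated with `lr * decay_rate`, but the message printed `lr ** decay_rate` (exponentiation), reporting a value that was never set.

lr, decay_rate = 0.1, 0.5
print(lr * decay_rate)    # 0.05             -> value actually set
print(lr ** decay_rate)   # 0.31622776601... -> value the buggy log claimed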
1,882 |
https://:@github.com/Qingluan/QmongoHelper.git
|
67bf4fbd01beddd456583429cc428b4aeb1c2025
|
@@ -69,7 +69,7 @@ class dbhelper(object):
@_run
def update(self,document,target,**kargs):
- self._db[document].update(kargs,target,callback=self.callback)
+ self._db[document].update(target,kargs,callback=self.callback)
# def to_list_callback(self,infos,error):
|
__db.py
|
ArgSwap(idxs=0<->1 @(72,8)->(72,33))
|
class dbhelper(object):
@_run
def update(self,document,target,**kargs):
self._db[document].update(kargs,target,callback=self.callback)
# def to_list_callback(self,infos,error):
|
class dbhelper(object):
@_run
def update(self,document,target,**kargs):
self._db[document].update(target,kargs,callback=self.callback)
# def to_list_callback(self,infos,error):
|
1,883 |
https://:@github.com/abelcarreras/aiida_extensions.git
|
90d585773141882b4c16bca8aa6ecdea3ca34072
|
@@ -224,7 +224,7 @@ class OptimizeCalculation(JobCalculation):
structure_txt = generate_LAMMPS_structure(structure)
- input_txt = generate_LAMMPS_input(potential_data,
+ input_txt = generate_LAMMPS_input(potential_object,
parameters_data,
structure_file=self._INPUT_STRUCTURE,
optimize_path_file=self._OUTPUT_TRAJECTORY_FILE_NAME)
|
plugins/jobs/lammps/optimize.py
|
ReplaceText(target='potential_object' @(227,42)->(227,56))
|
class OptimizeCalculation(JobCalculation):
structure_txt = generate_LAMMPS_structure(structure)
input_txt = generate_LAMMPS_input(potential_data,
parameters_data,
structure_file=self._INPUT_STRUCTURE,
optimize_path_file=self._OUTPUT_TRAJECTORY_FILE_NAME)
|
class OptimizeCalculation(JobCalculation):
structure_txt = generate_LAMMPS_structure(structure)
input_txt = generate_LAMMPS_input(potential_object,
parameters_data,
structure_file=self._INPUT_STRUCTURE,
optimize_path_file=self._OUTPUT_TRAJECTORY_FILE_NAME)
|
1,884 |
https://:@github.com/abelcarreras/aiida_extensions.git
|
22dc45e2580529558e5e80e1f9fbd24e2540c201
|
@@ -547,7 +547,7 @@ class WorkflowQHA(Workflow):
test_range[0] -= np.ceil(np.min([total_range/2, abs(test_range[0] - min_stress)]) / interval) * interval
# test_range[0] -= np.ceil(abs(test_range[0] - min_stress) / interval) * interval
- if max_stress < test_range[1] or min_stress > test_range[0]:
+ if max_stress < test_range[1] and min_stress > test_range[0]:
if abs(max_stress - test_range[1]) < interval * 2 or abs(test_range[0] - min_stress) < interval * 2:
interval *= 0.5
|
workflows/wf_qha.py
|
ReplaceText(target='and' @(550,42)->(550,44))
|
class WorkflowQHA(Workflow):
test_range[0] -= np.ceil(np.min([total_range/2, abs(test_range[0] - min_stress)]) / interval) * interval
# test_range[0] -= np.ceil(abs(test_range[0] - min_stress) / interval) * interval
if max_stress < test_range[1] or min_stress > test_range[0]:
if abs(max_stress - test_range[1]) < interval * 2 or abs(test_range[0] - min_stress) < interval * 2:
interval *= 0.5
|
class WorkflowQHA(Workflow):
test_range[0] -= np.ceil(np.min([total_range/2, abs(test_range[0] - min_stress)]) / interval) * interval
# test_range[0] -= np.ceil(abs(test_range[0] - min_stress) / interval) * interval
if max_stress < test_range[1] and min_stress > test_range[0]:
if abs(max_stress - test_range[1]) < interval * 2 or abs(test_range[0] - min_stress) < interval * 2:
interval *= 0.5
|
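Illustration for record 1,884: with `or`, the interval test holds for any value below the upper bound or above the lower one, i.e. essentially always; membership in an open interval needs both bounds at once.

def strictly_inside(x, low, high):
    return low < x < high          # equivalent to: x > low and x < high

print(strictly_inside(5, 0, 10))   # True
print(strictly_inside(15, 0, 10))  # False ('or' would have said True)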
1,885 |
https://:@github.com/GeoStat-Framework/welltestpy.git
|
1f9ba2a6af846ae9ef0004c885d807a45a26a911
|
@@ -833,7 +833,7 @@ class Well(object):
)
if not self._radius.scalar:
raise ValueError("Well: 'radius' needs to be scalar")
- if self.radius <= 0.0:
+ if self.radius < 0.0:
raise ValueError("Well: 'radius' needs to be positiv")
if isinstance(coordinates, Variable):
|
welltestpy/data/varlib.py
|
ReplaceText(target='<' @(836,23)->(836,25))
|
class Well(object):
)
if not self._radius.scalar:
raise ValueError("Well: 'radius' needs to be scalar")
if self.radius <= 0.0:
raise ValueError("Well: 'radius' needs to be positiv")
if isinstance(coordinates, Variable):
|
class Well(object):
)
if not self._radius.scalar:
raise ValueError("Well: 'radius' needs to be scalar")
if self.radius < 0.0:
raise ValueError("Well: 'radius' needs to be positiv")
if isinstance(coordinates, Variable):
|
1,886 |
https://:@github.com/GeoStat-Framework/welltestpy.git
|
412f42ecf1bf2f9ae284aefd96802ee99cacdf2a
|
@@ -544,7 +544,7 @@ def load_obs(obsfile):
obs = load_var(TxtIO(zfile.open(obsf)))
- observation = varlib.Observation(name, time, obs, description)
+ observation = varlib.Observation(name, obs, time, description)
except Exception:
raise Exception("loadObs: loading the observation was not possible")
return observation
|
welltestpy/data/data_io.py
|
ArgSwap(idxs=1<->2 @(547,22)->(547,40))
|
def load_obs(obsfile):
obs = load_var(TxtIO(zfile.open(obsf)))
observation = varlib.Observation(name, time, obs, description)
except Exception:
raise Exception("loadObs: loading the observation was not possible")
return observation
|
def load_obs(obsfile):
obs = load_var(TxtIO(zfile.open(obsf)))
observation = varlib.Observation(name, obs, time, description)
except Exception:
raise Exception("loadObs: loading the observation was not possible")
return observation
|
1,887 |
https://:@github.com/GeoStat-Framework/welltestpy.git
|
412f42ecf1bf2f9ae284aefd96802ee99cacdf2a
|
@@ -370,7 +370,7 @@ class PumpingTest(Test):
description : :class:`str`, optional
Description of the Variable. Default: ``"Drawdown observation"``
"""
- obs = varlib.DrawdownObs(well, time, observation, description)
+ obs = varlib.DrawdownObs(well, observation, time, description)
self.add_observations(obs)
def add_observations(self, obs):
|
welltestpy/data/testslib.py
|
ArgSwap(idxs=1<->2 @(373,14)->(373,32))
|
class PumpingTest(Test):
description : :class:`str`, optional
Description of the Variable. Default: ``"Drawdown observation"``
"""
obs = varlib.DrawdownObs(well, time, observation, description)
self.add_observations(obs)
def add_observations(self, obs):
|
class PumpingTest(Test):
description : :class:`str`, optional
Description of the Variable. Default: ``"Drawdown observation"``
"""
obs = varlib.DrawdownObs(well, observation, time, description)
self.add_observations(obs)
def add_observations(self, obs):
|
1,888 |
https://:@github.com/biosustain/venom.git
|
94aab380cb41a3923248adc51e3bbe312fe98cf0
|
@@ -48,7 +48,7 @@ def _route_handler(venom: 'venom.rpc.Venom', method: Method, protocol_factory: T
http_request_query.decode(http_request.url.query, request)
http_request_path.decode(http_request.match_info, request)
- response = await venom.invoke(method, request, context=AioHTTPRequestContext(request))
+ response = await venom.invoke(method, request, context=AioHTTPRequestContext(http_request))
return web.Response(body=rpc_response.pack(response),
content_type=rpc_response.mime,
status=http_status)
|
venom/rpc/comms/aiohttp.py
|
ReplaceText(target='http_request' @(51,89)->(51,96))
|
def _route_handler(venom: 'venom.rpc.Venom', method: Method, protocol_factory: T
http_request_query.decode(http_request.url.query, request)
http_request_path.decode(http_request.match_info, request)
response = await venom.invoke(method, request, context=AioHTTPRequestContext(request))
return web.Response(body=rpc_response.pack(response),
content_type=rpc_response.mime,
status=http_status)
|
def _route_handler(venom: 'venom.rpc.Venom', method: Method, protocol_factory: T
http_request_query.decode(http_request.url.query, request)
http_request_path.decode(http_request.match_info, request)
response = await venom.invoke(method, request, context=AioHTTPRequestContext(http_request))
return web.Response(body=rpc_response.pack(response),
content_type=rpc_response.mime,
status=http_status)
|
1,889 |
https://:@github.com/altio/foundation.git
|
e39b13a5046467ebed3014bb2b5b4a47c5cd0e80
|
@@ -117,7 +117,7 @@ class Backend(six.with_metaclass(MediaDefiningClass, Router)):
# set app_index_class on app to "None" to skip creation
app_index_class = getattr(app_config, 'app_index_class', None)
if app_index_class:
- template_name = getattr(app_config, 'template_name', 'app_index.html')
+ template_name = getattr(app_index_class, 'template_name', 'app_index.html')
app_index = app_index_class.as_view(
app_config=app_config, backend=self, template_name=template_name
)
|
foundation/backend/base.py
|
ReplaceText(target='app_index_class' @(120,36)->(120,46))
|
class Backend(six.with_metaclass(MediaDefiningClass, Router)):
# set app_index_class on app to "None" to skip creation
app_index_class = getattr(app_config, 'app_index_class', None)
if app_index_class:
template_name = getattr(app_config, 'template_name', 'app_index.html')
app_index = app_index_class.as_view(
app_config=app_config, backend=self, template_name=template_name
)
|
class Backend(six.with_metaclass(MediaDefiningClass, Router)):
# set app_index_class on app to "None" to skip creation
app_index_class = getattr(app_config, 'app_index_class', None)
if app_index_class:
template_name = getattr(app_index_class, 'template_name', 'app_index.html')
app_index = app_index_class.as_view(
app_config=app_config, backend=self, template_name=template_name
)
|
1,890 |
https://:@github.com/privacyidea/crontabparser.git
|
19e8fe34a9f9f1a2156c17d3b4b756f3aca17cdc
|
@@ -30,7 +30,7 @@ class CronJob(object):
assert len(time) <= 5
padded_time = tuple(time) + ('*',) * (5 - len(time))
assert len(padded_time) == 5
- return cls(command, time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
+ return cls(command, padded_time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
@property
def time(self):
|
cronjobparser.py
|
ReplaceText(target='padded_time' @(33,28)->(33,32))
|
class CronJob(object):
assert len(time) <= 5
padded_time = tuple(time) + ('*',) * (5 - len(time))
assert len(padded_time) == 5
return cls(command, time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
@property
def time(self):
|
class CronJob(object):
assert len(time) <= 5
padded_time = tuple(time) + ('*',) * (5 - len(time))
assert len(padded_time) == 5
return cls(command, padded_time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
@property
def time(self):
|
1,891 |
https://:@github.com/privacyidea/crontabparser.git
|
8a6a177a47b83938946ea7bf76741d96352eab08
|
@@ -30,7 +30,7 @@ class CronJob(object):
if len(time) > 5:
raise RuntimeError("Malformed cronjob time: {!r}".format(time))
padded_time = tuple(time) + ('*',) * (5 - len(time))
- if len(padded_time) > 5:
+ if len(padded_time) != 5:
raise RuntimeError("Malformed cronjob time: {!r}".format(padded_time))
return cls(command, padded_time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
|
cronjobparser.py
|
ReplaceText(target='!=' @(33,28)->(33,29))
|
class CronJob(object):
if len(time) > 5:
raise RuntimeError("Malformed cronjob time: {!r}".format(time))
padded_time = tuple(time) + ('*',) * (5 - len(time))
if len(padded_time) > 5:
raise RuntimeError("Malformed cronjob time: {!r}".format(padded_time))
return cls(command, padded_time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
|
class CronJob(object):
if len(time) > 5:
raise RuntimeError("Malformed cronjob time: {!r}".format(time))
padded_time = tuple(time) + ('*',) * (5 - len(time))
if len(padded_time) != 5:
raise RuntimeError("Malformed cronjob time: {!r}".format(padded_time))
return cls(command, padded_time[0], user, padded_time[1], padded_time[2], padded_time[3], padded_time[4])
|
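Illustration for records 1,890 and 1,891, which harden the same constructor: take the padded tuple's first element (not the unpadded one), and verify the padded length with `!=`, since after padding to 5 the `> 5` branch can never fire. The padding idiom on its own:

def pad_time(time):
    if len(time) > 5:
        raise RuntimeError("Malformed cronjob time: {!r}".format(time))
    padded_time = tuple(time) + ("*",) * (5 - len(time))
    if len(padded_time) != 5:
        raise RuntimeError("Malformed cronjob time: {!r}".format(padded_time))
    return padded_time

print(pad_time(["0", "3"]))   # ('0', '3', '*', '*', '*')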
1,892 |
https://:@github.com/smurn/sourblossom.git
|
9071c9cc86f2755254ed3d36b7a08080c64ac19f
|
@@ -116,7 +116,7 @@ class MsgConnection(protocol.Protocol):
def done(result):
self._sending = False
return result
- d.addBoth(d)
+ d.addBoth(done)
def _frame_received(self, frameid, blob):
return self.frame_received(frameid, blob)
|
sourblossom/router.py
|
ReplaceText(target='done' @(119,18)->(119,19))
|
class MsgConnection(protocol.Protocol):
def done(result):
self._sending = False
return result
d.addBoth(d)
def _frame_received(self, frameid, blob):
return self.frame_received(frameid, blob)
|
class MsgConnection(protocol.Protocol):
def done(result):
self._sending = False
return result
d.addBoth(done)
def _frame_received(self, frameid, blob):
return self.frame_received(frameid, blob)
|
1,893 |
https://:@github.com/PMCC-BioinformaticsCore/pipelines.git
|
44406718107c18b4fe00e2a3abb4077dd158e298
|
@@ -770,7 +770,7 @@ class Workflow(Tool):
wtools[s.id()] = wf_wdl
wtools.update(wf_tools)
else:
- wtools[s.id()] = t.wdl(with_docker=with_docker)
+ wtools[t.id()] = t.wdl(with_docker=with_docker)
w.calls.append(
s.wdl(tool_aliases[t.id().lower()].upper() + "." + t.id(), s.id())
|
Pipeline/workflow/workflow.py
|
ReplaceText(target='t' @(773,23)->(773,24))
|
class Workflow(Tool):
wtools[s.id()] = wf_wdl
wtools.update(wf_tools)
else:
wtools[s.id()] = t.wdl(with_docker=with_docker)
w.calls.append(
s.wdl(tool_aliases[t.id().lower()].upper() + "." + t.id(), s.id())
|
class Workflow(Tool):
wtools[s.id()] = wf_wdl
wtools.update(wf_tools)
else:
wtools[t.id()] = t.wdl(with_docker=with_docker)
w.calls.append(
s.wdl(tool_aliases[t.id().lower()].upper() + "." + t.id(), s.id())
|
1,894 |
https://:@github.com/nvaytet/visens.git
|
320e6fed81a09d158e3728b7de54233657a24e0d
|
@@ -13,7 +13,7 @@ def image(filename, colormap="viridis", vmin=None, vmax=None, log=False,
z, edges = np.histogram(data.ids,
bins=np.arange(-0.5, data.nx * data.ny + 0.5))
- z = z.reshape(data.nx, data.ny)
+ z = z.reshape(data.ny, data.nx)
if side_panels:
z_sumx = np.sum(z, axis=1)
z_sumy = np.sum(z, axis=0)
|
src/visens/image.py
|
ArgSwap(idxs=0<->1 @(16,8)->(16,17))
|
def image(filename, colormap="viridis", vmin=None, vmax=None, log=False,
z, edges = np.histogram(data.ids,
bins=np.arange(-0.5, data.nx * data.ny + 0.5))
z = z.reshape(data.nx, data.ny)
if side_panels:
z_sumx = np.sum(z, axis=1)
z_sumy = np.sum(z, axis=0)
|
def image(filename, colormap="viridis", vmin=None, vmax=None, log=False,
z, edges = np.histogram(data.ids,
bins=np.arange(-0.5, data.nx * data.ny + 0.5))
z = z.reshape(data.ny, data.nx)
if side_panels:
z_sumx = np.sum(z, axis=1)
z_sumy = np.sum(z, axis=0)
|
1,895 |
https://:@github.com/nvaytet/visens.git
|
320e6fed81a09d158e3728b7de54233657a24e0d
|
@@ -65,7 +65,7 @@ def slicer(filename, colormap="viridis", vmin=None, vmax=None, log=False,
z, xe, ye = np.histogram2d(data.ids, data.tofs/1.0e3,
bins=[np.arange(-0.5, data.nx * data.ny + 0.5),
t])
- z = z.reshape(data.nx, data.ny, nbins)
+ z = z.reshape(data.ny, data.nx, nbins)
# Transpose should be True for old December 2018 files
if transpose:
z = np.transpose(z, axes=[1, 0, 2])
|
src/visens/slicer.py
|
ArgSwap(idxs=0<->1 @(68,8)->(68,17))
|
def slicer(filename, colormap="viridis", vmin=None, vmax=None, log=False,
z, xe, ye = np.histogram2d(data.ids, data.tofs/1.0e3,
bins=[np.arange(-0.5, data.nx * data.ny + 0.5),
t])
z = z.reshape(data.nx, data.ny, nbins)
# Transpose should be True for old December 2018 files
if transpose:
z = np.transpose(z, axes=[1, 0, 2])
|
def slicer(filename, colormap="viridis", vmin=None, vmax=None, log=False,
z, xe, ye = np.histogram2d(data.ids, data.tofs/1.0e3,
bins=[np.arange(-0.5, data.nx * data.ny + 0.5),
t])
z = z.reshape(data.ny, data.nx, nbins)
# Transpose should be True for old December 2018 files
if transpose:
z = np.transpose(z, axes=[1, 0, 2])
|
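Illustration for records 1,894 and 1,895, which fix the same detector-image bug: NumPy's row-major `reshape` takes rows first, so an `ny`-by-`nx` image is `reshape(ny, nx)`; the swapped order lays pixels out correctly only when the detector happens to be square.

import numpy as np

nx, ny = 4, 3                     # 4 pixels wide, 3 tall
z = np.arange(nx * ny).reshape(ny, nx)
print(z.shape)                    # (3, 4)
print(np.sum(z, axis=1))          # one sum per row: [ 6 22 38]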
1,896 |
https://:@github.com/compmech/structmanager.git
|
3082bac86d0c1ad2826111bde8c2e5d5976dd7c8
|
@@ -200,7 +200,7 @@ class Model(object):
print('Building stringers...')
for s in stringers.values():
- s.elements = [bdf.elements[eid] for eid in p.eids]
+ s.elements = [bdf.elements[eid] for eid in s.eids]
setelements = set(s.elements)
print('finished!')
|
structMan/model.py
|
ReplaceText(target='s' @(203,55)->(203,56))
|
class Model(object):
print('Building stringers...')
for s in stringers.values():
s.elements = [bdf.elements[eid] for eid in p.eids]
setelements = set(s.elements)
print('finished!')
|
class Model(object):
print('Building stringers...')
for s in stringers.values():
s.elements = [bdf.elements[eid] for eid in s.eids]
setelements = set(s.elements)
print('finished!')
|
1,897 |
https://:@github.com/al-fontes-jr/bardolph.git
|
c4429c7a1453ff8088bffa4066b8fe9ff7d4c164
|
@@ -61,7 +61,7 @@ def as_raw(reg, logical_value, use_float=False):
else:
value = (logical_value % 360.0) / 360.0 * 65535.0
elif reg in (Register.BRIGHTNESS, Register.SATURATION):
- if logical_value == 100.0:
+ if logical_value >= 100.0:
value = 65535.0
else:
value = logical_value / 100.0 * 65535.0
|
bardolph/controller/units.py
|
ReplaceText(target='>=' @(64,25)->(64,27))
|
def as_raw(reg, logical_value, use_float=False):
else:
value = (logical_value % 360.0) / 360.0 * 65535.0
elif reg in (Register.BRIGHTNESS, Register.SATURATION):
if logical_value == 100.0:
value = 65535.0
else:
value = logical_value / 100.0 * 65535.0
|
def as_raw(reg, logical_value, use_float=False):
else:
value = (logical_value % 360.0) / 360.0 * 65535.0
elif reg in (Register.BRIGHTNESS, Register.SATURATION):
if logical_value >= 100.0:
value = 65535.0
else:
value = logical_value / 100.0 * 65535.0
|
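Illustration for record 1,897: the fix turns an exact-equality special case into a clamp. With `==`, an input of 120.0 sailed past the cap and produced a raw value above the 16-bit maximum. Sketch of the fixed branch:

def as_raw(logical_value):
    if logical_value >= 100.0:          # clamp, not just the exact value
        return 65535.0
    return logical_value / 100.0 * 65535.0

print(as_raw(100.0))   # 65535.0
print(as_raw(120.0))   # 65535.0  (the '==' version returned 78642.0)
print(as_raw(50.0))    # 32767.5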
1,898 |
https://:@github.com/bluecoveltd/contracts.git
|
f5b74e088920520642500d4da990580c841cbb22
|
@@ -25,7 +25,7 @@ class SeparateContext(Contract):
return SeparateContext(tokens[0]['child'], where=where)
-sepcon = (Group(Literal('$') - Literal('(') -
+sepcon = (Group(Literal('$') + Literal('(') -
contract_expression('child') - Literal(')')))
sepcon.setParseAction(SeparateContext.parse_action)
sepcon.setName('Context separation construct')
|
src/contracts/library/separate_context.py
|
ReplaceText(target='+' @(28,29)->(28,30))
|
class SeparateContext(Contract):
return SeparateContext(tokens[0]['child'], where=where)
sepcon = (Group(Literal('$') - Literal('(') -
contract_expression('child') - Literal(')')))
sepcon.setParseAction(SeparateContext.parse_action)
sepcon.setName('Context separation construct')
|
class SeparateContext(Contract):
return SeparateContext(tokens[0]['child'], where=where)
sepcon = (Group(Literal('$') + Literal('(') -
contract_expression('child') - Literal(')')))
sepcon.setParseAction(SeparateContext.parse_action)
sepcon.setName('Context separation construct')
|
1,899 |
https://:@github.com/TimHessels/WaporTranslator.git
|
58f1b770f5b60b2468677294c51c52460305b12f
|
@@ -111,7 +111,7 @@ class Rasterdata_tiffs:
time_or = ''
# Apply gapfilling if needed
- if gap_filling != None and ~np.isnan(np.nanmean(Array)):
+ if gap_filling != None and ~np.isnan(np.nanmean(Array_end)):
Array_end[np.isnan(Array_end)] = -9999
Array_end = RC.gap_filling(Array_end, -9999, gap_filling)
Array_end = Array_end * MASK
|
LEVEL_1/DataCube.py
|
ReplaceText(target='Array_end' @(114,60)->(114,65))
|
class Rasterdata_tiffs:
time_or = ''
# Apply gapfilling if needed
if gap_filling != None and ~np.isnan(np.nanmean(Array)):
Array_end[np.isnan(Array_end)] = -9999
Array_end = RC.gap_filling(Array_end, -9999, gap_filling)
Array_end = Array_end * MASK
|
class Rasterdata_tiffs:
time_or = ''
# Apply gapfilling if needed
if gap_filling != None and ~np.isnan(np.nanmean(Array_end)):
Array_end[np.isnan(Array_end)] = -9999
Array_end = RC.gap_filling(Array_end, -9999, gap_filling)
Array_end = Array_end * MASK
|