Dataset slice: bug-fix edit records, rows 2,100 through 2,155. Column schema from the dataset viewer (`Unnamed: 0` is the row index):

| column | dtype | range / string length |
|---|---|---|
| Unnamed: 0 | int64 | 0 - 2.44k |
| repo | string | 32 - 81 chars |
| hash | string | 40 chars |
| diff | string | 113 - 1.17k chars |
| old_path | string | 5 - 84 chars |
| rewrite | string | 34 - 79 chars |
| initial_state | string | 75 - 980 chars |
| final_state | string | 76 - 980 chars |
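Each record's `rewrite` field is a compact edit spec against the old file. `ReplaceText(target='<new-text>' @(line,c0)->(line,c1))` gives the span of the old token and its replacement; across the rows below, the line number matches the 1-indexed old line of the unified diff, and the columns look like a 0-indexed half-open span. `ArgSwap(idxs=i<->j @(line,c0)->(line,c1))` marks the callee of a call whose positional arguments i and j were swapped. Below is a minimal sketch of applying a `ReplaceText` spec, assuming that indexing convention; the helper names are illustrative, not part of the dataset:

```python
import re

# Parses specs such as: ReplaceText(target='vane_vertical' @(159,11)->(159,26))
REPLACE_TEXT = re.compile(
    r"ReplaceText\(target='(?P<target>[^']*)' "
    r"@\((?P<line>\d+),(?P<c0>\d+)\)->\(\d+,(?P<c1>\d+)\)\)"
)

def apply_replace_text(spec: str, lines: list[str]) -> list[str]:
    """Apply one ReplaceText spec to the old file's source lines."""
    m = REPLACE_TEXT.match(spec)
    if m is None:
        raise ValueError(f"not a ReplaceText spec: {spec!r}")
    row = int(m["line"]) - 1             # assumed 1-indexed, like the diff hunk
    c0, c1 = int(m["c0"]), int(m["c1"])  # assumed 0-indexed column span
    out = list(lines)
    out[row] = out[row][:c0] + m["target"] + out[row][c1:]
    return out

# Worked example from row 2,100, where old line 159 is "        if vane_horizontal:"
old = [""] * 158 + ["        if vane_horizontal:"]
new = apply_replace_text("ReplaceText(target='vane_vertical' @(159,11)->(159,26))", old)
assert new[158] == "        if vane_vertical:"
```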
row: 2,100
repo: https://:@github.com/sviete/home-assistant.git
hash: 066254afdfcc7cd513b9450e0693c1d188d12c4d
diff:
@@ -156,7 +156,7 @@ class AtaDeviceClimate(MelCloudClimate):
)
vane_vertical = self._device.vane_vertical
- if vane_horizontal:
+ if vane_vertical:
attr.update(
{
ATTR_VANE_VERTICAL: vane_vertical,
old_path: homeassistant/components/melcloud/climate.py
rewrite: ReplaceText(target='vane_vertical' @(159,11)->(159,26))
initial_state:
class AtaDeviceClimate(MelCloudClimate):
)
vane_vertical = self._device.vane_vertical
if vane_horizontal:
attr.update(
{
ATTR_VANE_VERTICAL: vane_vertical,
final_state:
class AtaDeviceClimate(MelCloudClimate):
)
vane_vertical = self._device.vane_vertical
if vane_vertical:
attr.update(
{
ATTR_VANE_VERTICAL: vane_vertical,
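Note that `initial_state` and `final_state` are whitespace-normalized before/after snippets, so the column offsets in `rewrite` index into the original file rather than into these two fields. A quick way to surface what a record changed is a line diff of the two states; a small sketch (the function name is illustrative):

```python
import difflib

def changed_lines(initial_state: str, final_state: str) -> list[str]:
    """Unified-diff lines between a record's two state snippets."""
    return list(difflib.unified_diff(
        initial_state.splitlines(),
        final_state.splitlines(),
        n=0,           # emit only the changed lines, no context
        lineterm="",
    ))

# On row 2,100 this reduces to the single-token change:
#   -if vane_horizontal:
#   +if vane_vertical:
```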

row: 2,101
repo: https://:@github.com/sviete/home-assistant.git
hash: a6b407d706a0ed00d740a9c50eeb5107acccddc1
diff:
@@ -58,7 +58,7 @@ def setup(hass, config):
success = True
for conf in config[DOMAIN]:
- protocol = "https" if config[CONF_SSL] else "http"
+ protocol = "https" if conf[CONF_SSL] else "http"
host_name = conf[CONF_HOST]
server_origin = f"{protocol}://{host_name}"
old_path: homeassistant/components/zoneminder/__init__.py
rewrite: ReplaceText(target='conf' @(61,30)->(61,36))
initial_state:
def setup(hass, config):
success = True
for conf in config[DOMAIN]:
protocol = "https" if config[CONF_SSL] else "http"
host_name = conf[CONF_HOST]
server_origin = f"{protocol}://{host_name}"
final_state:
def setup(hass, config):
success = True
for conf in config[DOMAIN]:
protocol = "https" if conf[CONF_SSL] else "http"
host_name = conf[CONF_HOST]
server_origin = f"{protocol}://{host_name}"

row: 2,102
repo: https://:@github.com/sviete/home-assistant.git
hash: 6c3ea2a904ab38afcb0de424592949ff2d26e99d
diff:
@@ -153,7 +153,7 @@ async def async_setup(hass, config):
entity_filter = conf[CONF_FILTER]
entity_config = conf[CONF_ENTITY_CONFIG]
interface_choice = (
- InterfaceChoice.Default if config.get(CONF_ZEROCONF_DEFAULT_INTERFACE) else None
+ InterfaceChoice.Default if conf.get(CONF_ZEROCONF_DEFAULT_INTERFACE) else None
)
homekit = HomeKit(
old_path: homeassistant/components/homekit/__init__.py
rewrite: ReplaceText(target='conf' @(156,35)->(156,41))
initial_state:
async def async_setup(hass, config):
entity_filter = conf[CONF_FILTER]
entity_config = conf[CONF_ENTITY_CONFIG]
interface_choice = (
InterfaceChoice.Default if config.get(CONF_ZEROCONF_DEFAULT_INTERFACE) else None
)
homekit = HomeKit(
final_state:
async def async_setup(hass, config):
entity_filter = conf[CONF_FILTER]
entity_config = conf[CONF_ENTITY_CONFIG]
interface_choice = (
InterfaceChoice.Default if conf.get(CONF_ZEROCONF_DEFAULT_INTERFACE) else None
)
homekit = HomeKit(

row: 2,103
repo: https://:@github.com/sviete/home-assistant.git
hash: 233284056ae02d0423ef590d2c931dad450a6447
diff:
@@ -119,5 +119,5 @@ class XS1ThermostatEntity(XS1DeviceEntity, ClimateEntity):
async def async_update(self):
"""Also update the sensor when available."""
await super().async_update()
- if self.sensor is None:
+ if self.sensor is not None:
await self.hass.async_add_executor_job(self.sensor.update)
old_path: homeassistant/components/xs1/climate.py
rewrite: ReplaceText(target=' is not ' @(122,22)->(122,26))
initial_state:
class XS1ThermostatEntity(XS1DeviceEntity, ClimateEntity):
async def async_update(self):
"""Also update the sensor when available."""
await super().async_update()
if self.sensor is None:
await self.hass.async_add_executor_job(self.sensor.update)
final_state:
class XS1ThermostatEntity(XS1DeviceEntity, ClimateEntity):
async def async_update(self):
"""Also update the sensor when available."""
await super().async_update()
if self.sensor is not None:
await self.hass.async_add_executor_job(self.sensor.update)

row: 2,104
repo: https://:@github.com/sviete/home-assistant.git
hash: 01bac9f433f26440eff3a4f1365cda9480dc971c
diff:
@@ -60,7 +60,7 @@ async def async_setup_entry(hass, config_entry):
shark_vacs = await ayla_api.async_get_devices(False)
device_names = ", ".join([d.name for d in shark_vacs])
- LOGGER.debug("Found %d Shark IQ device(s): %s", len(device_names), device_names)
+ LOGGER.debug("Found %d Shark IQ device(s): %s", len(shark_vacs), device_names)
coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs)
await coordinator.async_refresh()
old_path: homeassistant/components/sharkiq/__init__.py
rewrite: ReplaceText(target='shark_vacs' @(63,56)->(63,68))
initial_state:
async def async_setup_entry(hass, config_entry):
shark_vacs = await ayla_api.async_get_devices(False)
device_names = ", ".join([d.name for d in shark_vacs])
LOGGER.debug("Found %d Shark IQ device(s): %s", len(device_names), device_names)
coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs)
await coordinator.async_refresh()
final_state:
async def async_setup_entry(hass, config_entry):
shark_vacs = await ayla_api.async_get_devices(False)
device_names = ", ".join([d.name for d in shark_vacs])
LOGGER.debug("Found %d Shark IQ device(s): %s", len(shark_vacs), device_names)
coordinator = SharkIqUpdateCoordinator(hass, config_entry, ayla_api, shark_vacs)
await coordinator.async_refresh()

row: 2,105
repo: https://:@github.com/Melevir/flake8-super-mario.git
hash: 1aba55ed7cd233a511372c6bfa552684c7243136
diff:
@@ -14,6 +14,6 @@ def run_validator_for_test_file(filename: str) -> List:
with open(test_file_path, 'r') as file_handler:
raw_content = file_handler.read()
tree = ast.parse(raw_content)
- checker = SuperMarionChecker(tree=tree, filename=filename)
+ checker = SuperMarionChecker(tree=tree, filename=test_file_path)
return list(checker.run())
old_path: tests/conftest.py
rewrite: ReplaceText(target='test_file_path' @(17,53)->(17,61))
initial_state:
def run_validator_for_test_file(filename: str) -> List:
with open(test_file_path, 'r') as file_handler:
raw_content = file_handler.read()
tree = ast.parse(raw_content)
checker = SuperMarionChecker(tree=tree, filename=filename)
return list(checker.run())
final_state:
def run_validator_for_test_file(filename: str) -> List:
with open(test_file_path, 'r') as file_handler:
raw_content = file_handler.read()
tree = ast.parse(raw_content)
checker = SuperMarionChecker(tree=tree, filename=test_file_path)
return list(checker.run())

row: 2,106
repo: https://:@github.com/OxfordHED/sunbear.git
hash: f7d00ff0471d6e1bd301a965f4550fa2e8f81f7a
diff:
@@ -78,7 +78,7 @@ class Momentum(GradOptInterface):
# check the stopping conditions
if self._is_stop():
break
- return x
+ return xmin
def _is_stop(self):
def disp(s):
old_path: sunbear/gradopt/momentum.py
rewrite: ReplaceText(target='xmin' @(81,15)->(81,16))
initial_state:
class Momentum(GradOptInterface):
# check the stopping conditions
if self._is_stop():
break
return x
def _is_stop(self):
def disp(s):
final_state:
class Momentum(GradOptInterface):
# check the stopping conditions
if self._is_stop():
break
return xmin
def _is_stop(self):
def disp(s):

row: 2,107
repo: https://:@github.com/The-Politico/django-politico-staff.git
hash: 119c1cd3ba70d18e733d4c0cb2d4dc2f89be5428
diff:
@@ -64,7 +64,7 @@ def sync_slack_users(pks):
profile, created = Profile.objects.update_or_create(
user=user,
defaults={
- "slack_api_id": slack_profile["id"],
+ "slack_api_id": slack_user["id"],
"politico_title": slack_profile.get("title", "Staff writer"),
},
)
old_path: staff/tasks/user.py
rewrite: ReplaceText(target='slack_user' @(67,32)->(67,45))
initial_state:
def sync_slack_users(pks):
profile, created = Profile.objects.update_or_create(
user=user,
defaults={
"slack_api_id": slack_profile["id"],
"politico_title": slack_profile.get("title", "Staff writer"),
},
)
final_state:
def sync_slack_users(pks):
profile, created = Profile.objects.update_or_create(
user=user,
defaults={
"slack_api_id": slack_user["id"],
"politico_title": slack_profile.get("title", "Staff writer"),
},
)

row: 2,108
repo: https://:@bitbucket.org/verisage/python-harvest-oauth.git
hash: e31145b870253beda1cf37671a84036864c05544
diff:
@@ -135,7 +135,7 @@ class TestHarvest(unittest.TestCase):
with patch('add_time.json') as oauth2_mock:
r = self.oauth_client.add(add_data, params={'access_token': self.TEST_ACCESS})
- add_mock.assert_called_once_with('POST', '/daily/add', add_data, params={'access_token':self.TEST_ACCESS})
+ oauth2_mock.assert_called_once_with('POST', '/daily/add', add_data, params={'access_token':self.TEST_ACCESS})
self.assertTrue(r)
if __name__ == '__main__':
old_path: tests/harvest_test.py
rewrite: ReplaceText(target='oauth2_mock' @(138,12)->(138,20))
initial_state:
class TestHarvest(unittest.TestCase):
with patch('add_time.json') as oauth2_mock:
r = self.oauth_client.add(add_data, params={'access_token': self.TEST_ACCESS})
add_mock.assert_called_once_with('POST', '/daily/add', add_data, params={'access_token':self.TEST_ACCESS})
self.assertTrue(r)
if __name__ == '__main__':
final_state:
class TestHarvest(unittest.TestCase):
with patch('add_time.json') as oauth2_mock:
r = self.oauth_client.add(add_data, params={'access_token': self.TEST_ACCESS})
oauth2_mock.assert_called_once_with('POST', '/daily/add', add_data, params={'access_token':self.TEST_ACCESS})
self.assertTrue(r)
if __name__ == '__main__':

row: 2,109
repo: https://:@github.com/zadorlab/KinBot.git
hash: 47ebac0df7af70d23b52bd2681a42f91b99c824a
diff:
@@ -16,7 +16,7 @@ class IntraRAddExocyclicF(GeneralReac):
self.fix_bonds(fix)
if step < self.dihstep:
- self.set_dihedrals(change, fix, cut=1)
+ self.set_dihedrals(change, step, cut=1)
elif step == self.dihstep:
self.fix_dihedrals(fix)
old_path: kinbot/reac_Intra_R_Add_Exocyclic_F.py
rewrite: ReplaceText(target='step' @(19,39)->(19,42))
initial_state:
class IntraRAddExocyclicF(GeneralReac):
self.fix_bonds(fix)
if step < self.dihstep:
self.set_dihedrals(change, fix, cut=1)
elif step == self.dihstep:
self.fix_dihedrals(fix)
final_state:
class IntraRAddExocyclicF(GeneralReac):
self.fix_bonds(fix)
if step < self.dihstep:
self.set_dihedrals(change, step, cut=1)
elif step == self.dihstep:
self.fix_dihedrals(fix)

row: 2,110
repo: https://:@github.com/zadorlab/KinBot.git
hash: 11b23a25b4b4e814ac43dcda3d395687498b076d
diff:
@@ -1037,7 +1037,7 @@ class ReactionFinder:
if k not in korcek_chain_filt and l not in korcek_chain_filt:
korcek_chain_filt.append(kch)
- for ins in korcek_chain:
+ for ins in korcek_chain_filt:
if bond[ins[0]][ins[-1]] == 1: # it is a ring
rxns += [ins]
old_path: kinbot/reaction_finder.py
rewrite: ReplaceText(target='korcek_chain_filt' @(1040,23)->(1040,35))
initial_state:
class ReactionFinder:
if k not in korcek_chain_filt and l not in korcek_chain_filt:
korcek_chain_filt.append(kch)
for ins in korcek_chain:
if bond[ins[0]][ins[-1]] == 1: # it is a ring
rxns += [ins]
final_state:
class ReactionFinder:
if k not in korcek_chain_filt and l not in korcek_chain_filt:
korcek_chain_filt.append(kch)
for ins in korcek_chain_filt:
if bond[ins[0]][ins[-1]] == 1: # it is a ring
rxns += [ins]

row: 2,111
repo: https://:@github.com/fabiommendes/kpop.git
hash: 67feb4ed935ad5e52840c410ec9a61958c2e65d0
diff:
@@ -126,7 +126,7 @@ class Plot(Attr):
kwargs.pop('self', None)
pop = self._population
- coords = pop.projection(which, 2, **coords_kwargs)
+ coords = pop.projection(2, which, **coords_kwargs)
return self.scatter_coords(coords, **scatter_kwargs)
def scatter_coords(self, coords, merge=False, colors=None, title=None,
old_path: src/kpop/population/plot.py
rewrite: ArgSwap(idxs=0<->1 @(129,17)->(129,31))
initial_state:
class Plot(Attr):
kwargs.pop('self', None)
pop = self._population
coords = pop.projection(which, 2, **coords_kwargs)
return self.scatter_coords(coords, **scatter_kwargs)
def scatter_coords(self, coords, merge=False, colors=None, title=None,
final_state:
class Plot(Attr):
kwargs.pop('self', None)
pop = self._population
coords = pop.projection(2, which, **coords_kwargs)
return self.scatter_coords(coords, **scatter_kwargs)
def scatter_coords(self, coords, merge=False, colors=None, title=None,
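Row 2,111 above is the first `ArgSwap` record: its span @(129,17)->(129,31) covers the callee `pop.projection`, and `idxs=0<->1` says the first two positional arguments of that call were swapped. Below is a sketch of applying such a spec to a single-line call, assuming the same indexing convention as `ReplaceText`; the splitting is naive top-level comma handling (it ignores commas inside string literals, which none of these rows need), and the names are illustrative:

```python
import re

# Parses specs such as: ArgSwap(idxs=0<->1 @(129,17)->(129,31))
ARG_SWAP = re.compile(
    r"ArgSwap\(idxs=(?P<i>\d+)<->(?P<j>\d+) "
    r"@\((?P<line>\d+),\d+\)->\(\d+,(?P<end>\d+)\)\)"
)

def apply_arg_swap(spec: str, lines: list[str]) -> list[str]:
    m = ARG_SWAP.match(spec)
    if m is None:
        raise ValueError(f"not an ArgSwap spec: {spec!r}")
    i, j = int(m["i"]), int(m["j"])
    row = int(m["line"]) - 1                 # assumed 1-indexed, like the diff
    line = lines[row]
    lparen = line.index("(", int(m["end"]))  # call opens right after the callee
    depth, rparen = 0, lparen                # find the matching close paren
    for pos in range(lparen, len(line)):
        depth += line[pos] == "("
        depth -= line[pos] == ")"
        if depth == 0:
            rparen = pos
            break
    args, level, start = [], 0, lparen + 1   # split on top-level commas only
    for pos in range(lparen + 1, rparen):
        level += line[pos] in "([{"
        level -= line[pos] in ")]}"
        if line[pos] == "," and level == 0:
            args.append(line[start:pos].strip())
            start = pos + 1
    args.append(line[start:rparen].strip())
    args[i], args[j] = args[j], args[i]
    out = list(lines)
    out[row] = line[:lparen + 1] + ", ".join(args) + line[rparen:]
    return out

# Worked example from this row (2,111):
old = [""] * 128 + ["        coords = pop.projection(which, 2, **coords_kwargs)"]
new = apply_arg_swap("ArgSwap(idxs=0<->1 @(129,17)->(129,31))", old)
assert new[128] == "        coords = pop.projection(2, which, **coords_kwargs)"
```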

row: 2,112
repo: https://:@github.com/fabiommendes/kpop.git
hash: f7ed21864aab121b3cca7544545059b9c93008e5
diff:
@@ -11,7 +11,7 @@ def file_or_path(func):
def decorated(file, *args, **kwargs):
if isinstance(file, str):
with open(file) as F:
- result = func(file, *args, **kwargs)
+ result = func(F, *args, **kwargs)
return result
else:
return func(file, *args, **kwargs)
old_path: src/kpop/io/utils.py
rewrite: ReplaceText(target='F' @(14,30)->(14,34))
initial_state:
def file_or_path(func):
def decorated(file, *args, **kwargs):
if isinstance(file, str):
with open(file) as F:
result = func(file, *args, **kwargs)
return result
else:
return func(file, *args, **kwargs)
final_state:
def file_or_path(func):
def decorated(file, *args, **kwargs):
if isinstance(file, str):
with open(file) as F:
result = func(F, *args, **kwargs)
return result
else:
return func(file, *args, **kwargs)

row: 2,113
repo: https://:@github.com/ocean-perception/auv_nav.git
hash: b9d53bbb3836981f47891730e765f9ccbb726345
diff:
@@ -251,7 +251,7 @@ def interpolate_sensor_list(sensor_list,
sensor_list[i].eastings,
sensor_list[i].northings)
- if _centre_list[i].covariance is not None:
+ if _centre_list[j].covariance is not None:
sensor_list[i].covariance = interpolate_covariance(
sensor_list[i].epoch_timestamp,
_centre_list[j-1].epoch_timestamp,
old_path: auv_nav/tools/interpolate.py
rewrite: ReplaceText(target='j' @(254,28)->(254,29))
initial_state:
def interpolate_sensor_list(sensor_list,
sensor_list[i].eastings,
sensor_list[i].northings)
if _centre_list[i].covariance is not None:
sensor_list[i].covariance = interpolate_covariance(
sensor_list[i].epoch_timestamp,
_centre_list[j-1].epoch_timestamp,
final_state:
def interpolate_sensor_list(sensor_list,
sensor_list[i].eastings,
sensor_list[i].northings)
if _centre_list[j].covariance is not None:
sensor_list[i].covariance = interpolate_covariance(
sensor_list[i].epoch_timestamp,
_centre_list[j-1].epoch_timestamp,

row: 2,114
repo: https://:@github.com/ocean-perception/auv_nav.git
hash: 338d30cbe4b97a33d0e85c880ac3dd44bd1212b5
diff:
@@ -671,7 +671,7 @@ def plot_2d_deadreckoning(camera1_list,
[float(i.eastings) for i in pf_fusion_dvl_list],
[float(i.northings) for i in pf_fusion_dvl_list]],
i)
- if len(pf_timestamps_interval) > 1:
+ if len(pf_fusion_centre_list) > 1:
make_frame(frame,
['pf_dvl_distribution',
pf_timestamps_interval,
old_path: auv_nav/plot/plot_process_data.py
rewrite: ReplaceText(target='pf_fusion_centre_list' @(674,15)->(674,37))
initial_state:
def plot_2d_deadreckoning(camera1_list,
[float(i.eastings) for i in pf_fusion_dvl_list],
[float(i.northings) for i in pf_fusion_dvl_list]],
i)
if len(pf_timestamps_interval) > 1:
make_frame(frame,
['pf_dvl_distribution',
pf_timestamps_interval,
final_state:
def plot_2d_deadreckoning(camera1_list,
[float(i.eastings) for i in pf_fusion_dvl_list],
[float(i.northings) for i in pf_fusion_dvl_list]],
i)
if len(pf_fusion_centre_list) > 1:
make_frame(frame,
['pf_dvl_distribution',
pf_timestamps_interval,

row: 2,115
repo: https://:@github.com/ocean-perception/auv_nav.git
hash: 189387b6c5a95518839624237c2f95073c301a8b
diff:
@@ -49,7 +49,7 @@ def run_ransac(data, estimate, is_inlier, sample_size, goal_inliers, max_iterati
if ic > goal_inliers and stop_at_goal:
break
# estimate final model using all inliers
- m = fit_plane(inliers)
+ best_model = fit_plane(inliers)
return best_model, inliers, i
old_path: auv_cal/ransac.py
rewrite: ReplaceText(target='best_model' @(52,4)->(52,5))
initial_state:
def run_ransac(data, estimate, is_inlier, sample_size, goal_inliers, max_iterati
if ic > goal_inliers and stop_at_goal:
break
# estimate final model using all inliers
m = fit_plane(inliers)
return best_model, inliers, i
final_state:
def run_ransac(data, estimate, is_inlier, sample_size, goal_inliers, max_iterati
if ic > goal_inliers and stop_at_goal:
break
# estimate final model using all inliers
best_model = fit_plane(inliers)
return best_model, inliers, i

row: 2,116
repo: https://:@github.com/ocean-perception/auv_nav.git
hash: 8767eda5191ee84d65f3338f30382fbfd7815281
diff:
@@ -428,7 +428,7 @@ class LaserCalibrator():
point_cloud_local = random.sample(inliers_cloud_list, cloud_sample_size)
total_no_points = len(point_cloud_local)
p = Plane([1, 0, 0, 1.5])
- m = p.fit_non_robust(cloud)
+ m = p.fit_non_robust(point_cloud_local)
"""
m, _ = plane_fitting_ransac(
point_cloud_local,
old_path: auv_cal/laser_calibrator.py
rewrite: ReplaceText(target='point_cloud_local' @(431,33)->(431,38))
initial_state:
class LaserCalibrator():
point_cloud_local = random.sample(inliers_cloud_list, cloud_sample_size)
total_no_points = len(point_cloud_local)
p = Plane([1, 0, 0, 1.5])
m = p.fit_non_robust(cloud)
"""
m, _ = plane_fitting_ransac(
point_cloud_local,
final_state:
class LaserCalibrator():
point_cloud_local = random.sample(inliers_cloud_list, cloud_sample_size)
total_no_points = len(point_cloud_local)
p = Plane([1, 0, 0, 1.5])
m = p.fit_non_robust(point_cloud_local)
"""
m, _ = plane_fitting_ransac(
point_cloud_local,

row: 2,117
repo: https://:@github.com/DiffusionMapsAcademics/pyDiffMap.git
hash: 94705d0c523a9a87b7f6dd9b4074d895c805ce8d
diff:
@@ -196,7 +196,7 @@ class Kernel(object):
elif epsilon == 'bgh': # Berry, Giannakis Harlim method.
if (self.metric != 'euclidean'): # TODO : replace with call to scipy metrics.
warnings.warn('The BGH method for choosing epsilon assumes a euclidean metric. However, the metric being used is %s. Proceed at your own risk...' % self.metric)
- if self.scaled_dists is not None:
+ if self.scaled_dists is None:
self.scaled_dists = self._get_scaled_distance_mat(self.data, self.bandwidths)
self.epsilon_fitted, self.d = choose_optimal_epsilon_BGH(self.scaled_dists.data**2)
else:
old_path: src/pydiffmap/kernel.py
rewrite: ReplaceText(target=' is ' @(199,32)->(199,40))
initial_state:
class Kernel(object):
elif epsilon == 'bgh': # Berry, Giannakis Harlim method.
if (self.metric != 'euclidean'): # TODO : replace with call to scipy metrics.
warnings.warn('The BGH method for choosing epsilon assumes a euclidean metric. However, the metric being used is %s. Proceed at your own risk...' % self.metric)
if self.scaled_dists is not None:
self.scaled_dists = self._get_scaled_distance_mat(self.data, self.bandwidths)
self.epsilon_fitted, self.d = choose_optimal_epsilon_BGH(self.scaled_dists.data**2)
else:
final_state:
class Kernel(object):
elif epsilon == 'bgh': # Berry, Giannakis Harlim method.
if (self.metric != 'euclidean'): # TODO : replace with call to scipy metrics.
warnings.warn('The BGH method for choosing epsilon assumes a euclidean metric. However, the metric being used is %s. Proceed at your own risk...' % self.metric)
if self.scaled_dists is None:
self.scaled_dists = self._get_scaled_distance_mat(self.data, self.bandwidths)
self.epsilon_fitted, self.d = choose_optimal_epsilon_BGH(self.scaled_dists.data**2)
else:

row: 2,118
repo: https://:@github.com/grm/py-smart-gardena.git
hash: 1851380e4082d4440fdc12d0dc77e04eebfd3e2a
diff:
@@ -162,7 +162,7 @@ class SmartSystem:
devices_smart_system[real_id][device["type"]] = []
devices_smart_system[real_id][device["type"]].append(device)
for parsed_device in devices_smart_system.values():
- location.add_device(DeviceFactory.build(self, device))
+ location.add_device(DeviceFactory.build(self, parsed_device))
def start_ws(self):
url = f"{self.SMART_HOST}/v1/locations"
old_path: src/gardena/smart_system.py
rewrite: ReplaceText(target='parsed_device' @(165,62)->(165,68))
initial_state:
class SmartSystem:
devices_smart_system[real_id][device["type"]] = []
devices_smart_system[real_id][device["type"]].append(device)
for parsed_device in devices_smart_system.values():
location.add_device(DeviceFactory.build(self, device))
def start_ws(self):
url = f"{self.SMART_HOST}/v1/locations"
final_state:
class SmartSystem:
devices_smart_system[real_id][device["type"]] = []
devices_smart_system[real_id][device["type"]].append(device)
for parsed_device in devices_smart_system.values():
location.add_device(DeviceFactory.build(self, parsed_device))
def start_ws(self):
url = f"{self.SMART_HOST}/v1/locations"

row: 2,119
repo: https://:@github.com/Stupeflix/zfs_backup.git
hash: 95be33619441d81b9797e249625de88afcc764d5
diff:
@@ -81,7 +81,7 @@ class Snapshot(object):
def try_to_create(self):
limit = datetime.now() - timedelta(seconds=self.settings['SNAPSHOT_INTERVAL'])
- if 'date' in self.last_snapshot or self.last_snapshot['date'] <= limit:
+ if 'date' not in self.last_snapshot or self.last_snapshot['date'] <= limit:
self.create()
def remove_snapshot(self, snapshot):
old_path: zfs_backup/snapshot.py
rewrite: ReplaceText(target=' not in ' @(84,17)->(84,21))
initial_state:
class Snapshot(object):
def try_to_create(self):
limit = datetime.now() - timedelta(seconds=self.settings['SNAPSHOT_INTERVAL'])
if 'date' in self.last_snapshot or self.last_snapshot['date'] <= limit:
self.create()
def remove_snapshot(self, snapshot):
final_state:
class Snapshot(object):
def try_to_create(self):
limit = datetime.now() - timedelta(seconds=self.settings['SNAPSHOT_INTERVAL'])
if 'date' not in self.last_snapshot or self.last_snapshot['date'] <= limit:
self.create()
def remove_snapshot(self, snapshot):

row: 2,120
repo: https://:@github.com/ilblackdragon/studio.git
hash: c6a526f0966c0bd609904d5edc3d530bee4b21b7
diff:
@@ -14,7 +14,7 @@ class PubsubQueue(object):
self.logger.setLevel(verbose)
sub_name = sub_name if sub_name else queue_name + "_sub"
self.logger.info("Topic name = {}".format(queue_name))
- self.logger.info("Subscription name = {}".format(queue_name))
+ self.logger.info("Subscription name = {}".format(sub_name))
if queue_name not in [t.name for t in self.client.list_topics()]:
self.topic.create()
self.logger.info('topic {} created'.format(queue_name))
old_path: studio/pubsub_queue.py
rewrite: ReplaceText(target='sub_name' @(17,57)->(17,67))
initial_state:
class PubsubQueue(object):
self.logger.setLevel(verbose)
sub_name = sub_name if sub_name else queue_name + "_sub"
self.logger.info("Topic name = {}".format(queue_name))
self.logger.info("Subscription name = {}".format(queue_name))
if queue_name not in [t.name for t in self.client.list_topics()]:
self.topic.create()
self.logger.info('topic {} created'.format(queue_name))
final_state:
class PubsubQueue(object):
self.logger.setLevel(verbose)
sub_name = sub_name if sub_name else queue_name + "_sub"
self.logger.info("Topic name = {}".format(queue_name))
self.logger.info("Subscription name = {}".format(sub_name))
if queue_name not in [t.name for t in self.client.list_topics()]:
self.topic.create()
self.logger.info('topic {} created'.format(queue_name))

row: 2,121
repo: https://:@github.com/ilblackdragon/studio.git
hash: eb2c4eb5fc61d879b77062bec7c626815d20e30f
diff:
@@ -195,7 +195,7 @@ class RMQueue(object):
"""
self._logger.debug('declare queue ' + queue_name)
with self._rmq_lock:
- self._channel.queue_declare(self.on_queue_declareok, queue_name)
+ self._channel.queue_declare(queue_name, self.on_queue_declareok)
def on_queue_declareok(self, method_frame):
"""
old_path: studio/rabbit_queue.py
rewrite: ArgSwap(idxs=0<->1 @(198,12)->(198,39))
initial_state:
class RMQueue(object):
"""
self._logger.debug('declare queue ' + queue_name)
with self._rmq_lock:
self._channel.queue_declare(self.on_queue_declareok, queue_name)
def on_queue_declareok(self, method_frame):
"""
final_state:
class RMQueue(object):
"""
self._logger.debug('declare queue ' + queue_name)
with self._rmq_lock:
self._channel.queue_declare(queue_name, self.on_queue_declareok)
def on_queue_declareok(self, method_frame):
"""

row: 2,122
repo: https://:@github.com/exopy/exopy_hqc_legacy.git
hash: 50201c6854aaac9abac67ba1a2c53bf41b871e34
diff:
@@ -221,7 +221,7 @@ class SPADQ14(DllInstrument):
# If we are not averaging we wait for all records to be acquired.
if not n_records or (not average and
- n_records < retrieved_records):
+ n_records < records_per_capture):
time.sleep(1e-6)
continue
if not get_data(cu, id_, buffers_ptr,
old_path: exopy_hqc_legacy/instruments/drivers/dll/sp_adq14.py
rewrite: ReplaceText(target='records_per_capture' @(224,45)->(224,62))
initial_state:
class SPADQ14(DllInstrument):
# If we are not averaging we wait for all records to be acquired.
if not n_records or (not average and
n_records < retrieved_records):
time.sleep(1e-6)
continue
if not get_data(cu, id_, buffers_ptr,
final_state:
class SPADQ14(DllInstrument):
# If we are not averaging we wait for all records to be acquired.
if not n_records or (not average and
n_records < records_per_capture):
time.sleep(1e-6)
continue
if not get_data(cu, id_, buffers_ptr,

row: 2,123
repo: https://:@github.com/uw-loci/multiscale.git
hash: bf29e09937e4b119cac002762b170438fe00e13a
diff:
@@ -164,7 +164,7 @@ def process_orientation_alignment(ret_image_path, orient_image_path,
retardance, orientation = calculate_retardance_over_area(
ret_roi, orient_roi)
- alignment = calculate_alignment(orient_tile)
+ alignment = calculate_alignment(orient_roi)
sample = blk.get_core_file_name(output_path)
mouse, slide = sample.split('-')
old_path: mp_img_manip/polarimetry.py
rewrite: ReplaceText(target='orient_roi' @(167,56)->(167,67))
initial_state:
def process_orientation_alignment(ret_image_path, orient_image_path,
retardance, orientation = calculate_retardance_over_area(
ret_roi, orient_roi)
alignment = calculate_alignment(orient_tile)
sample = blk.get_core_file_name(output_path)
mouse, slide = sample.split('-')
final_state:
def process_orientation_alignment(ret_image_path, orient_image_path,
retardance, orientation = calculate_retardance_over_area(
ret_roi, orient_roi)
alignment = calculate_alignment(orient_roi)
sample = blk.get_core_file_name(output_path)
mouse, slide = sample.split('-')

row: 2,124
repo: https://:@github.com/uw-loci/multiscale.git
hash: 1b703cd8f176c289d52c195094a7f2035ebf0a15
diff:
@@ -78,7 +78,7 @@ def plot_overlay(fixed_image: sitk.Image, moving_image: sitk.Image, transform: s
if downsample:
fixed_shrunk = trans.resize_image(fixed_image, fixed_image.GetSpacing()[0], downsample_target)
- rotated_shrunk = trans.resize_image(rotated_image, moving_image.GetSpacing()[0], downsample_target)
+ rotated_shrunk = trans.resize_image(rotated_image, fixed_image.GetSpacing()[0], downsample_target)
spacing = fixed_shrunk.GetSpacing()
overlay_array = overlay_images(fixed_shrunk, rotated_shrunk)
old_path: multiscale/itk/itk_plotting.py
rewrite: ReplaceText(target='fixed_image' @(81,67)->(81,79))
initial_state:
def plot_overlay(fixed_image: sitk.Image, moving_image: sitk.Image, transform: s
if downsample:
fixed_shrunk = trans.resize_image(fixed_image, fixed_image.GetSpacing()[0], downsample_target)
rotated_shrunk = trans.resize_image(rotated_image, moving_image.GetSpacing()[0], downsample_target)
spacing = fixed_shrunk.GetSpacing()
overlay_array = overlay_images(fixed_shrunk, rotated_shrunk)
final_state:
def plot_overlay(fixed_image: sitk.Image, moving_image: sitk.Image, transform: s
if downsample:
fixed_shrunk = trans.resize_image(fixed_image, fixed_image.GetSpacing()[0], downsample_target)
rotated_shrunk = trans.resize_image(rotated_image, fixed_image.GetSpacing()[0], downsample_target)
spacing = fixed_shrunk.GetSpacing()
overlay_array = overlay_images(fixed_shrunk, rotated_shrunk)

row: 2,125
repo: https://:@github.com/uw-loci/multiscale.git
hash: 66a12265d2e2289686d445c9a9785581773bc31b
diff:
@@ -106,7 +106,7 @@ def process_orientation_alignment(ret_image_path, orient_image_path,
if roi_size is None:
with open(output_path, 'w', newline='') as csvfile:
print('\nWriting average retardance file for {} at tile size {}'.format(
- output_path.name, tile_size[0]))
+ ret_image_path.name, tile_size[0]))
writer = csv.writer(csvfile)
writer.writerow(['Mouse', 'Slide', 'Modality', 'Tile',
'Retardance', 'Orientation', 'Alignment'])
old_path: multiscale/polarimetry/polarimetry.py
rewrite: ReplaceText(target='ret_image_path' @(109,32)->(109,43))
initial_state:
def process_orientation_alignment(ret_image_path, orient_image_path,
if roi_size is None:
with open(output_path, 'w', newline='') as csvfile:
print('\nWriting average retardance file for {} at tile size {}'.format(
output_path.name, tile_size[0]))
writer = csv.writer(csvfile)
writer.writerow(['Mouse', 'Slide', 'Modality', 'Tile',
'Retardance', 'Orientation', 'Alignment'])
final_state:
def process_orientation_alignment(ret_image_path, orient_image_path,
if roi_size is None:
with open(output_path, 'w', newline='') as csvfile:
print('\nWriting average retardance file for {} at tile size {}'.format(
ret_image_path.name, tile_size[0]))
writer = csv.writer(csvfile)
writer.writerow(['Mouse', 'Slide', 'Modality', 'Tile',
'Retardance', 'Orientation', 'Alignment'])

row: 2,126
repo: https://:@github.com/l616769490/fc-utils.git
hash: 9ce6c047b524094d5dce68b8b39bd880f25a51b8
diff:
@@ -71,7 +71,7 @@ def fieldStrAndPer(d):
if v != None:
l1.append(k)
noAppend = True # 标记lper和l2还未赋值
- if isinstance(l2, str):
+ if isinstance(v, str):
if v.startswith('+') or v.startswith('-') or v.startswith('*') or v.startswith('/'):
vv = dataToFloat(v[1:])
if vv:
old_path: fcutils/sql_utils.py
rewrite: ReplaceText(target='v' @(74,26)->(74,28))
initial_state:
def fieldStrAndPer(d):
if v != None:
l1.append(k)
noAppend = True # 标记lper和l2还未赋值
if isinstance(l2, str):
if v.startswith('+') or v.startswith('-') or v.startswith('*') or v.startswith('/'):
vv = dataToFloat(v[1:])
if vv:
final_state:
def fieldStrAndPer(d):
if v != None:
l1.append(k)
noAppend = True # 标记lper和l2还未赋值
if isinstance(v, str):
if v.startswith('+') or v.startswith('-') or v.startswith('*') or v.startswith('/'):
vv = dataToFloat(v[1:])
if vv:

row: 2,127
repo: https://:@github.com/shlomikushchi/zipline-live2.git
hash: 102cfcbe5b21dd95e73616e78b9d1b0dac7af55b
diff:
@@ -139,7 +139,7 @@ def get_next_trading_dt(current, interval):
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading.environment.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
- if trading.environment.is_market_hours(next_dt):
+ if trading.environment.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(trading.environment.exchange_tz)
old_path: zipline/utils/factory.py
rewrite: ReplaceText(target='next_dt_utc' @(142,47)->(142,54))
initial_state:
def get_next_trading_dt(current, interval):
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading.environment.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if trading.environment.is_market_hours(next_dt):
break
next_dt = next_dt_utc.tz_convert(trading.environment.exchange_tz)
final_state:
def get_next_trading_dt(current, interval):
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading.environment.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if trading.environment.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(trading.environment.exchange_tz)

row: 2,128
repo: https://:@github.com/shlomikushchi/zipline-live2.git
hash: 0e4f3f957ad95d43e4890cc4b2ea3a10f9b3d4da
diff:
@@ -272,7 +272,7 @@ def create_test_df_source(sim_params=None, bars='daily'):
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
- raise ValueError('%s bars not understood.' % freq)
+ raise ValueError('%s bars not understood.' % bars)
if sim_params:
index = sim_params.trading_days
old_path: zipline/utils/factory.py
rewrite: ReplaceText(target='bars' @(275,53)->(275,57))
initial_state:
def create_test_df_source(sim_params=None, bars='daily'):
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % freq)
if sim_params:
index = sim_params.trading_days
final_state:
def create_test_df_source(sim_params=None, bars='daily'):
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % bars)
if sim_params:
index = sim_params.trading_days

row: 2,129
repo: https://:@github.com/shlomikushchi/zipline-live2.git
hash: 2104a35af8d43a398b4df4a947d79b642189c346
diff:
@@ -2215,7 +2215,7 @@ class TradingAlgorithm(object):
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
- self.restrictions = restrictions
+ self.restrictions |= restrictions
@api_method
def set_long_only(self, on_error='fail'):
old_path: zipline/algorithm.py
rewrite: ReplaceText(target='|=' @(2218,26)->(2218,27))
initial_state:
class TradingAlgorithm(object):
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
self.restrictions = restrictions
@api_method
def set_long_only(self, on_error='fail'):
final_state:
class TradingAlgorithm(object):
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
self.restrictions |= restrictions
@api_method
def set_long_only(self, on_error='fail'):

row: 2,130
repo: https://:@github.com/shlomikushchi/zipline-live2.git
hash: 03782fdfd3e49af0407b15e796adb89268997dd1
diff:
@@ -550,7 +550,7 @@ def assert_timestamp_and_datetime_equal(result,
)
result = pd.Timestamp(result)
- expected = pd.Timestamp(result)
+ expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
old_path: zipline/testing/predicates.py
rewrite: ReplaceText(target='expected' @(553,28)->(553,34))
initial_state:
def assert_timestamp_and_datetime_equal(result,
)
result = pd.Timestamp(result)
expected = pd.Timestamp(result)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
final_state:
def assert_timestamp_and_datetime_equal(result,
)
result = pd.Timestamp(result)
expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return

row: 2,131
repo: https://:@github.com/shlomikushchi/zipline-live2.git
hash: 6f5e353647162c074e0e88e16046cb1ec3be1563
diff:
@@ -64,7 +64,7 @@ class RealtimeClock(object):
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
- if (server_time == self.before_trading_start_minutes[0] and
+ if (server_time >= self.before_trading_start_minutes[0] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True
old_path: zipline/gens/realtimeclock.py
rewrite: ReplaceText(target='>=' @(67,28)->(67,30))
initial_state:
class RealtimeClock(object):
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
if (server_time == self.before_trading_start_minutes[0] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True
final_state:
class RealtimeClock(object):
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
if (server_time >= self.before_trading_start_minutes[0] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True

row: 2,132
repo: https://:@github.com/SiLab-Bonn/testbeam_analysis.git
hash: e5351795584850b633befa2cf63f7a372b58c9e5
diff:
@@ -313,7 +313,7 @@ def calculate_residuals(input_tracks_file, input_alignment_file, output_residual
name='XResidualsY_DUT%d' % (actual_dut),
title='Residual distribution in y direction as a function of the x position for DUT %d ' % (actual_dut),
atom=tb.Atom.from_dtype(hist_x_residual_y_hist.dtype),
- shape=hist_x_residual_x_hist.shape,
+ shape=hist_x_residual_y_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_x_res_y.attrs.xedges = hist_x_residual_y_xedges
out_x_res_y.attrs.yedges = hist_x_residual_y_yedges
old_path: testbeam_analysis/result_analysis.py
rewrite: ReplaceText(target='hist_x_residual_y_hist' @(316,61)->(316,83))
initial_state:
def calculate_residuals(input_tracks_file, input_alignment_file, output_residual
name='XResidualsY_DUT%d' % (actual_dut),
title='Residual distribution in y direction as a function of the x position for DUT %d ' % (actual_dut),
atom=tb.Atom.from_dtype(hist_x_residual_y_hist.dtype),
shape=hist_x_residual_x_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_x_res_y.attrs.xedges = hist_x_residual_y_xedges
out_x_res_y.attrs.yedges = hist_x_residual_y_yedges
final_state:
def calculate_residuals(input_tracks_file, input_alignment_file, output_residual
name='XResidualsY_DUT%d' % (actual_dut),
title='Residual distribution in y direction as a function of the x position for DUT %d ' % (actual_dut),
atom=tb.Atom.from_dtype(hist_x_residual_y_hist.dtype),
shape=hist_x_residual_y_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_x_res_y.attrs.xedges = hist_x_residual_y_xedges
out_x_res_y.attrs.yedges = hist_x_residual_y_yedges

row: 2,133
repo: https://:@github.com/SiLab-Bonn/testbeam_analysis.git
hash: c44b1a0a37f1897ef97caa7f510931d958b6291d
diff:
@@ -398,7 +398,7 @@ def calculate_residuals(input_tracks_file, input_alignment_file, output_residual
name='ColumnResidualsRow_DUT%d' % (actual_dut),
title='Residual distribution in row direction as a function of the column position for DUT %d ' % (actual_dut),
atom=tb.Atom.from_dtype(hist_col_residual_row_hist.dtype),
- shape=hist_col_residual_col_hist.shape,
+ shape=hist_col_residual_row_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_col_res_row.attrs.xedges = hist_col_residual_row_xedges
out_col_res_row.attrs.yedges = hist_col_residual_row_yedges
old_path: testbeam_analysis/result_analysis.py
rewrite: ReplaceText(target='hist_col_residual_row_hist' @(401,65)->(401,91))
initial_state:
def calculate_residuals(input_tracks_file, input_alignment_file, output_residual
name='ColumnResidualsRow_DUT%d' % (actual_dut),
title='Residual distribution in row direction as a function of the column position for DUT %d ' % (actual_dut),
atom=tb.Atom.from_dtype(hist_col_residual_row_hist.dtype),
shape=hist_col_residual_col_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_col_res_row.attrs.xedges = hist_col_residual_row_xedges
out_col_res_row.attrs.yedges = hist_col_residual_row_yedges
final_state:
def calculate_residuals(input_tracks_file, input_alignment_file, output_residual
name='ColumnResidualsRow_DUT%d' % (actual_dut),
title='Residual distribution in row direction as a function of the column position for DUT %d ' % (actual_dut),
atom=tb.Atom.from_dtype(hist_col_residual_row_hist.dtype),
shape=hist_col_residual_row_hist.shape,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_col_res_row.attrs.xedges = hist_col_residual_row_xedges
out_col_res_row.attrs.yedges = hist_col_residual_row_yedges

row: 2,134
repo: https://:@github.com/SiLab-Bonn/testbeam_analysis.git
hash: 7b1825cc07d59af6921e6b95c2e020e116d65c21
diff:
@@ -418,7 +418,7 @@ def fit_tracks(input_track_candidates_file, input_alignment_file, output_tracks_
# Split data and fit on all available cores
n_slices = cpu_count()
- slices = np.array_split(n_tracks, n_slices)
+ slices = np.array_split(track_hits, n_slices)
results = pool.map(_fit_tracks_loop, slices)
del track_hits
old_path: testbeam_analysis/track_analysis.py
rewrite: ReplaceText(target='track_hits' @(421,48)->(421,56))
initial_state:
def fit_tracks(input_track_candidates_file, input_alignment_file, output_tracks_
# Split data and fit on all available cores
n_slices = cpu_count()
slices = np.array_split(n_tracks, n_slices)
results = pool.map(_fit_tracks_loop, slices)
del track_hits
final_state:
def fit_tracks(input_track_candidates_file, input_alignment_file, output_tracks_
# Split data and fit on all available cores
n_slices = cpu_count()
slices = np.array_split(track_hits, n_slices)
results = pool.map(_fit_tracks_loop, slices)
del track_hits

row: 2,135
repo: https://:@github.com/SiLab-Bonn/testbeam_analysis.git
hash: 4f8bb30949aba54a1c83b0cc2e9dc03c633a6673
diff:
@@ -593,7 +593,7 @@ def _find_tracks_loop(tracklets, tr_column, tr_row, tr_z, tr_charge, column_sigm
# print '== ACTUAL DUT ==', dut_index
actual_column_sigma, actual_row_sigma = column_sigma[dut_index], row_sigma[dut_index]
- if not reference_hit_set and not np.isnan(tr_row[track_index][dut_index]): # Search for first DUT that registered a hit
+ if not reference_hit_set and not np.isnan(tr_column[track_index][dut_index]): # Search for first DUT that registered a hit
actual_track_column, actual_track_row = tr_column[track_index][dut_index], tr_row[track_index][dut_index]
reference_hit_set = True
tracklets[track_index]['track_quality'] |= (65793 << dut_index) # First track hit has best quality by definition
old_path: testbeam_analysis/track_analysis.py
rewrite: ReplaceText(target='tr_column' @(596,54)->(596,60))
initial_state:
def _find_tracks_loop(tracklets, tr_column, tr_row, tr_z, tr_charge, column_sigm
# print '== ACTUAL DUT ==', dut_index
actual_column_sigma, actual_row_sigma = column_sigma[dut_index], row_sigma[dut_index]
if not reference_hit_set and not np.isnan(tr_row[track_index][dut_index]): # Search for first DUT that registered a hit
actual_track_column, actual_track_row = tr_column[track_index][dut_index], tr_row[track_index][dut_index]
reference_hit_set = True
tracklets[track_index]['track_quality'] |= (65793 << dut_index) # First track hit has best quality by definition
final_state:
def _find_tracks_loop(tracklets, tr_column, tr_row, tr_z, tr_charge, column_sigm
# print '== ACTUAL DUT ==', dut_index
actual_column_sigma, actual_row_sigma = column_sigma[dut_index], row_sigma[dut_index]
if not reference_hit_set and not np.isnan(tr_column[track_index][dut_index]): # Search for first DUT that registered a hit
actual_track_column, actual_track_row = tr_column[track_index][dut_index], tr_row[track_index][dut_index]
reference_hit_set = True
tracklets[track_index]['track_quality'] |= (65793 << dut_index) # First track hit has best quality by definition

row: 2,136
repo: https://:@github.com/pfalcon/ScratchABlock.git
hash: 3d0617383d0937d2e743d3eeb37fc5389129e831
diff:
@@ -47,7 +47,7 @@ def loop_single_entry(cfg):
if not b.items:
landing_site = p
if not landing_site:
- farthest = max(back_jumps)
+ farthest = max(back_preds)
print("farthest", farthest)
newb = BBlock(farthest + "_1")
cfg.add_node(newb.addr, newb)
old_path: xform.py
rewrite: ReplaceText(target='back_preds' @(50,31)->(50,41))
initial_state:
def loop_single_entry(cfg):
if not b.items:
landing_site = p
if not landing_site:
farthest = max(back_jumps)
print("farthest", farthest)
newb = BBlock(farthest + "_1")
cfg.add_node(newb.addr, newb)
final_state:
def loop_single_entry(cfg):
if not b.items:
landing_site = p
if not landing_site:
farthest = max(back_preds)
print("farthest", farthest)
newb = BBlock(farthest + "_1")
cfg.add_node(newb.addr, newb)

row: 2,137
repo: https://:@github.com/theislab/trVAE.git
hash: 839a30f966f4bd7654d56910dee543f129efea94
diff:
@@ -493,7 +493,7 @@ class RCCVAE:
if self.train_with_fake_labels:
x_train = np.reshape(train_data.X, newshape=(-1, *self.x_dim))
x = [x_train, train_labels, pseudo_labels]
- y = [x_train, pseudo_labels]
+ y = [x_train, train_labels]
else:
x_train = np.reshape(train_data.X, newshape=(-1, *self.x_dim))
x = [x_train, train_labels, train_labels]
old_path: rcvae/models/_rccvae.py
rewrite: ReplaceText(target='train_labels' @(496,26)->(496,39))
initial_state:
class RCCVAE:
if self.train_with_fake_labels:
x_train = np.reshape(train_data.X, newshape=(-1, *self.x_dim))
x = [x_train, train_labels, pseudo_labels]
y = [x_train, pseudo_labels]
else:
x_train = np.reshape(train_data.X, newshape=(-1, *self.x_dim))
x = [x_train, train_labels, train_labels]
final_state:
class RCCVAE:
if self.train_with_fake_labels:
x_train = np.reshape(train_data.X, newshape=(-1, *self.x_dim))
x = [x_train, train_labels, pseudo_labels]
y = [x_train, train_labels]
else:
x_train = np.reshape(train_data.X, newshape=(-1, *self.x_dim))
x = [x_train, train_labels, train_labels]

row: 2,138
repo: https://:@github.com/theislab/trVAE.git
hash: 7eac79cf26380e6f665884ef1b1a330aecdab93a
diff:
@@ -139,7 +139,7 @@ def train_network(data_dict=None,
model_path=f"../models/{data_name}-{img_resize}-{preprocess}/{arch_style}-{z_dim}/",
dropout_rate=dropout_rate)
- print(train_data.shape, data_valid.shape)
+ print(train_data.shape, valid_data.shape)
network.train(train_data,
use_validation=True,
old_path: tests/test_rccvae.py
rewrite: ReplaceText(target='valid_data' @(142,28)->(142,38))
initial_state:
def train_network(data_dict=None,
model_path=f"../models/{data_name}-{img_resize}-{preprocess}/{arch_style}-{z_dim}/",
dropout_rate=dropout_rate)
print(train_data.shape, data_valid.shape)
network.train(train_data,
use_validation=True,
final_state:
def train_network(data_dict=None,
model_path=f"../models/{data_name}-{img_resize}-{preprocess}/{arch_style}-{z_dim}/",
dropout_rate=dropout_rate)
print(train_data.shape, valid_data.shape)
network.train(train_data,
use_validation=True,

row: 2,139
repo: https://:@github.com/theislab/trVAE.git
hash: 106ef16445334677462106453aa6e43ce1ece084
diff:
@@ -106,7 +106,7 @@ def reconstruct_whole_data(data_dict={}, z_dim=100):
cell_type_data = train[train.obs[cell_type_key] == cell_type]
cell_type_ctrl_data = train[((train.obs[cell_type_key] == cell_type) & (train.obs["condition"] == ctrl_key))]
- pred = network.predict(cell_type_data,
+ pred = network.predict(cell_type_ctrl_data,
encoder_labels=np.zeros((cell_type_ctrl_data.shape[0], 1)),
decoder_labels=np.ones((cell_type_ctrl_data.shape[0], 1)))
old_path: tests/test_rcvae.py
rewrite: ReplaceText(target='cell_type_ctrl_data' @(109,31)->(109,45))
initial_state:
def reconstruct_whole_data(data_dict={}, z_dim=100):
cell_type_data = train[train.obs[cell_type_key] == cell_type]
cell_type_ctrl_data = train[((train.obs[cell_type_key] == cell_type) & (train.obs["condition"] == ctrl_key))]
pred = network.predict(cell_type_data,
encoder_labels=np.zeros((cell_type_ctrl_data.shape[0], 1)),
decoder_labels=np.ones((cell_type_ctrl_data.shape[0], 1)))
final_state:
def reconstruct_whole_data(data_dict={}, z_dim=100):
cell_type_data = train[train.obs[cell_type_key] == cell_type]
cell_type_ctrl_data = train[((train.obs[cell_type_key] == cell_type) & (train.obs["condition"] == ctrl_key))]
pred = network.predict(cell_type_ctrl_data,
encoder_labels=np.zeros((cell_type_ctrl_data.shape[0], 1)),
decoder_labels=np.ones((cell_type_ctrl_data.shape[0], 1)))

row: 2,140
repo: https://:@github.com/theislab/trVAE.git
hash: 6a19b6aa1c89f14ac093ebf0f20a8c6cb902a632
diff:
@@ -222,7 +222,7 @@ def evaluate_network(data_dict=None, z_dim=100, n_files=5, k=5, arch_style=1, pr
if sparse.issparse(valid_data.X):
valid_data.X = valid_data.X.A
- source_images_train = train_data[valid_data.obs["condition"] == source_key].X
+ source_images_train = train_data[train_data.obs["condition"] == source_key].X
source_images_valid = valid_data[valid_data.obs["condition"] == source_key].X
source_images_train = np.reshape(source_images_train, (-1, img_width, img_height, n_channels))
old_path: tests/test_rccvae.py
rewrite: ReplaceText(target='train_data' @(225,41)->(225,51))
initial_state:
def evaluate_network(data_dict=None, z_dim=100, n_files=5, k=5, arch_style=1, pr
if sparse.issparse(valid_data.X):
valid_data.X = valid_data.X.A
source_images_train = train_data[valid_data.obs["condition"] == source_key].X
source_images_valid = valid_data[valid_data.obs["condition"] == source_key].X
source_images_train = np.reshape(source_images_train, (-1, img_width, img_height, n_channels))
final_state:
def evaluate_network(data_dict=None, z_dim=100, n_files=5, k=5, arch_style=1, pr
if sparse.issparse(valid_data.X):
valid_data.X = valid_data.X.A
source_images_train = train_data[train_data.obs["condition"] == source_key].X
source_images_valid = valid_data[valid_data.obs["condition"] == source_key].X
source_images_train = np.reshape(source_images_train, (-1, img_width, img_height, n_channels))

row: 2,141
repo: https://:@github.com/theislab/trVAE.git
hash: 99e9fa47a8024f4ea73c83f462ea2a5f5c78b87e
diff:
@@ -318,7 +318,7 @@ def visualize_trained_network_results(data_dict, z_dim=100, mmd_dimension=128, a
frameon=False)
for gene in top_100_genes[:3]:
- sc.pl.violin(cell_type_adata, keys=gene, groupby=condition_key,
+ sc.pl.violin(pred_adatas, keys=gene, groupby=condition_key,
save=f"_{data_name}_{cell_type}_{gene}.pdf",
show=False,
wspace=0.2,
old_path: tests/test_rcvae_multi.py
rewrite: ReplaceText(target='pred_adatas' @(321,25)->(321,40))
initial_state:
def visualize_trained_network_results(data_dict, z_dim=100, mmd_dimension=128, a
frameon=False)
for gene in top_100_genes[:3]:
sc.pl.violin(cell_type_adata, keys=gene, groupby=condition_key,
save=f"_{data_name}_{cell_type}_{gene}.pdf",
show=False,
wspace=0.2,
final_state:
def visualize_trained_network_results(data_dict, z_dim=100, mmd_dimension=128, a
frameon=False)
for gene in top_100_genes[:3]:
sc.pl.violin(pred_adatas, keys=gene, groupby=condition_key,
save=f"_{data_name}_{cell_type}_{gene}.pdf",
show=False,
wspace=0.2,

row: 2,142
repo: https://:@github.com/succhiello/hieratic.git
hash: 1a67208b4af1892081323d0f4ff4db36e5500ec8
diff:
@@ -108,7 +108,7 @@ class ItemResource(Resource):
persistence_converter = self.get_persistence_converter(self.engine_name)
if persistence_converter is not None:
updates = persistence_converter(updates)
- self.engine.update(patch, primary_index, context, updates)
+ self.engine.update(primary_index, patch, context, updates)
self.get_data()
def delete(self, context=None):
old_path: hieratic/item.py
rewrite: ArgSwap(idxs=0<->1 @(111,8)->(111,26))
initial_state:
class ItemResource(Resource):
persistence_converter = self.get_persistence_converter(self.engine_name)
if persistence_converter is not None:
updates = persistence_converter(updates)
self.engine.update(patch, primary_index, context, updates)
self.get_data()
def delete(self, context=None):
final_state:
class ItemResource(Resource):
persistence_converter = self.get_persistence_converter(self.engine_name)
if persistence_converter is not None:
updates = persistence_converter(updates)
self.engine.update(primary_index, patch, context, updates)
self.get_data()
def delete(self, context=None):

row: 2,143
repo: https://:@github.com/jeff-99/hashdex.git
hash: db7083cb6b71fdb7a28c04721b3551a146bfd6cb
diff:
@@ -14,7 +14,7 @@ class DirectoryScanner(object):
file_list.append(File(os.path.join(root, file), file))
for subdir in subdirs:
- self._fetch_files(subdir, files)
+ self._fetch_files(subdir, file_list)
return file_list
old_path: hashdex/files.py
rewrite: ReplaceText(target='file_list' @(17,42)->(17,47))
initial_state:
class DirectoryScanner(object):
file_list.append(File(os.path.join(root, file), file))
for subdir in subdirs:
self._fetch_files(subdir, files)
return file_list
final_state:
class DirectoryScanner(object):
file_list.append(File(os.path.join(root, file), file))
for subdir in subdirs:
self._fetch_files(subdir, file_list)
return file_list

row: 2,144
repo: https://:@github.com/Gab0/linkageMapper.git
hash: 2e302c068dc6e0512eb5ec185273c7e34eca32cd
diff:
@@ -186,7 +186,7 @@ def Execute(options):
outputFastaName = "LOCI_%s.fasta" % locus_name
outputFastaPath = os.path.join(options.outputPath, outputFastaName)
- if os.path.isfile(outputFastaName):
+ if os.path.isfile(outputFastaPath):
print("Skipping locus %s. Already exists..." % locus_name)
continue
old_path: linkageMapper/primerFinder.py
rewrite: ReplaceText(target='outputFastaPath' @(189,26)->(189,41))
initial_state:
def Execute(options):
outputFastaName = "LOCI_%s.fasta" % locus_name
outputFastaPath = os.path.join(options.outputPath, outputFastaName)
if os.path.isfile(outputFastaName):
print("Skipping locus %s. Already exists..." % locus_name)
continue
final_state:
def Execute(options):
outputFastaName = "LOCI_%s.fasta" % locus_name
outputFastaPath = os.path.join(options.outputPath, outputFastaName)
if os.path.isfile(outputFastaPath):
print("Skipping locus %s. Already exists..." % locus_name)
continue

row: 2,145
repo: https://:@github.com/enzoampil/psequant.git
hash: a393849078b354a869643126d16e33273cfecf59
diff:
@@ -155,7 +155,7 @@ def backtest(
else:
# Allow instance of BaseStrategy or from the predefined mapping
- if issubclass(strategy, bt.Strategy):
+ if issubclass(bt.Strategy, strategy):
strat_name = str(strategy)
else:
strat_name = strategy
old_path: python/fastquant/backtest/backtest.py
rewrite: ArgSwap(idxs=0<->1 @(158,11)->(158,21))
initial_state:
def backtest(
else:
# Allow instance of BaseStrategy or from the predefined mapping
if issubclass(strategy, bt.Strategy):
strat_name = str(strategy)
else:
strat_name = strategy
final_state:
def backtest(
else:
# Allow instance of BaseStrategy or from the predefined mapping
if issubclass(bt.Strategy, strategy):
strat_name = str(strategy)
else:
strat_name = strategy

row: 2,146
repo: https://:@github.com/infobyte/faraday_agent_dispatcher.git
hash: c833c82e068e54645c983c5c50f4535661f6c2b1
diff:
@@ -181,7 +181,7 @@ def process_repo_var_envs(executor_name, metadata: dict):
env_vars = metadata["environment_variables"]
for env_var in env_vars:
- def_value = config.instance[section].get(executor_name, None)
+ def_value = config.instance[section].get(env_var, None)
value = click.prompt(f"Environment variable {env_var} value", default=def_value)
config.instance.set(section, env_var, value)
old_path: faraday_agent_dispatcher/cli/utils/model_load.py
rewrite: ReplaceText(target='env_var' @(184,49)->(184,62))
initial_state:
def process_repo_var_envs(executor_name, metadata: dict):
env_vars = metadata["environment_variables"]
for env_var in env_vars:
def_value = config.instance[section].get(executor_name, None)
value = click.prompt(f"Environment variable {env_var} value", default=def_value)
config.instance.set(section, env_var, value)
final_state:
def process_repo_var_envs(executor_name, metadata: dict):
env_vars = metadata["environment_variables"]
for env_var in env_vars:
def_value = config.instance[section].get(env_var, None)
value = click.prompt(f"Environment variable {env_var} value", default=def_value)
config.instance.set(section, env_var, value)

row: 2,147
repo: https://:@github.com/leylabmpi/pyTecanFluent.git
hash: a5ca3f143da09723ffc3fed37fe5e9f0dcb243d4
diff:
@@ -680,7 +680,7 @@ def write_report(df_map, outFH, pcr_volume, mm_volume,
## total PCR
total_pcr_volume = pcr_volume * n_rxn
## total mastermix
- total_mm_volume = pcr_volume * n_rxn
+ total_mm_volume = mm_volume * n_rxn
## total primer
if fp_tube > 0 and fp_volume > 0:
total_fp_volume = fp_volume * n_rxn
old_path: pyTecanFluent/Map2Robot.py
rewrite: ReplaceText(target='mm_volume' @(683,22)->(683,32))
initial_state:
def write_report(df_map, outFH, pcr_volume, mm_volume,
## total PCR
total_pcr_volume = pcr_volume * n_rxn
## total mastermix
total_mm_volume = pcr_volume * n_rxn
## total primer
if fp_tube > 0 and fp_volume > 0:
total_fp_volume = fp_volume * n_rxn
final_state:
def write_report(df_map, outFH, pcr_volume, mm_volume,
## total PCR
total_pcr_volume = pcr_volume * n_rxn
## total mastermix
total_mm_volume = mm_volume * n_rxn
## total primer
if fp_tube > 0 and fp_volume > 0:
total_fp_volume = fp_volume * n_rxn

row: 2,148
repo: https://:@github.com/mindey/langsplit.git
hash: 11c289b80c6999eeeb17fc91212fb00a9d1b8354
diff:
@@ -87,7 +87,7 @@ def split(text, sep='.:', ends=['\n', ':'], min_key_length=2, max_key_length=2,
except Exception as e:
# Alternatively, we could assign it to None key.
- name[settings.UNKNOWN_LANGUAGE] += paragraph
+ result[settings.UNKNOWN_LANGUAGE] += paragraph
logger.info('Language not detected: {}'.format(paragraph))
if i < number_of_paragraphs - 1:
old_path: langsplit/splitter.py
rewrite: ReplaceText(target='result' @(90,24)->(90,28))
initial_state:
def split(text, sep='.:', ends=['\n', ':'], min_key_length=2, max_key_length=2,
except Exception as e:
# Alternatively, we could assign it to None key.
name[settings.UNKNOWN_LANGUAGE] += paragraph
logger.info('Language not detected: {}'.format(paragraph))
if i < number_of_paragraphs - 1:
final_state:
def split(text, sep='.:', ends=['\n', ':'], min_key_length=2, max_key_length=2,
except Exception as e:
# Alternatively, we could assign it to None key.
result[settings.UNKNOWN_LANGUAGE] += paragraph
logger.info('Language not detected: {}'.format(paragraph))
if i < number_of_paragraphs - 1:

row: 2,149
repo: https://:@github.com/tkf/factlog.git
hash: c032135fe5abcdedeb5a26c013e2f74c1c466a7d
diff:
@@ -170,7 +170,7 @@ def list_run(
from .filetitle import write_paths_and_titles
write_paths_and_titles(output, paths, showpaths, separator)
else:
- output.writelines(interleave(paths, itertools.repeat(separator)))
+ output.writelines(interleave(showpaths, itertools.repeat(separator)))
if output is not sys.stdout:
output.close()
old_path: factlog/record.py
rewrite: ReplaceText(target='showpaths' @(173,37)->(173,42))
initial_state:
def list_run(
from .filetitle import write_paths_and_titles
write_paths_and_titles(output, paths, showpaths, separator)
else:
output.writelines(interleave(paths, itertools.repeat(separator)))
if output is not sys.stdout:
output.close()
final_state:
def list_run(
from .filetitle import write_paths_and_titles
write_paths_and_titles(output, paths, showpaths, separator)
else:
output.writelines(interleave(showpaths, itertools.repeat(separator)))
if output is not sys.stdout:
output.close()

row: 2,150
repo: https://:@github.com/l0kix2/python-dehydrate.git
hash: 47212c6e5d2433bbe399dfbdb01ba65b48c5636b
diff:
@@ -151,7 +151,7 @@ class ComplexField(Field):
if self.is_iterable:
return map(dehydrator.dehydrate, target)
else:
- return dehydrator.dehydrate(obj)
+ return dehydrator.dehydrate(target)
@property
def target(self):
old_path: dehydrate/fields.py
rewrite: ReplaceText(target='target' @(154,40)->(154,43))
initial_state:
class ComplexField(Field):
if self.is_iterable:
return map(dehydrator.dehydrate, target)
else:
return dehydrator.dehydrate(obj)
@property
def target(self):
final_state:
class ComplexField(Field):
if self.is_iterable:
return map(dehydrator.dehydrate, target)
else:
return dehydrator.dehydrate(target)
@property
def target(self):

row: 2,151
repo: https://:@github.com/wf4ever/ro-manager.git
hash: cc9a10e9cd88e634ee8ff745e099696d173021de
diff:
@@ -74,7 +74,7 @@ class Minim_graph(object):
self._minimgr.add( (rule, MINIM.query, querynode) )
self._minimgr.add( (querynode, MINIM.sparql_query, rdflib.Literal(ForEach)) )
if ResultMod:
- self._minimgr.add( (querynode, MINIM.result_mod, rdflib.Literal(Exists)) )
+ self._minimgr.add( (querynode, MINIM.result_mod, rdflib.Literal(ResultMod)) )
if Exists:
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
old_path: src/checklist/minim_graph.py
rewrite: ReplaceText(target='ResultMod' @(77,80)->(77,86))
initial_state:
class Minim_graph(object):
self._minimgr.add( (rule, MINIM.query, querynode) )
self._minimgr.add( (querynode, MINIM.sparql_query, rdflib.Literal(ForEach)) )
if ResultMod:
self._minimgr.add( (querynode, MINIM.result_mod, rdflib.Literal(Exists)) )
if Exists:
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
final_state:
class Minim_graph(object):
self._minimgr.add( (rule, MINIM.query, querynode) )
self._minimgr.add( (querynode, MINIM.sparql_query, rdflib.Literal(ForEach)) )
if ResultMod:
self._minimgr.add( (querynode, MINIM.result_mod, rdflib.Literal(ResultMod)) )
if Exists:
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
|
2,152 |
https://:@github.com/frlender/qn.git
|
5b94be8b02089eb0d50c102c249c9ddd4a185091
|
@@ -253,7 +253,7 @@ def loadPkl(path):
def dumpPkl(obj,path):
with open(path,'wb') as pf:
- pickle.dump(obj,path)
+ pickle.dump(obj,pf)
def getBaseDir():
currentPath = os.getcwd()
|
py/qn.py
|
ReplaceText(target='pf' @(256,18)->(256,22))
|
def loadPkl(path):
def dumpPkl(obj,path):
with open(path,'wb') as pf:
pickle.dump(obj,path)
def getBaseDir():
currentPath = os.getcwd()
|
def loadPkl(path):
def dumpPkl(obj,path):
with open(path,'wb') as pf:
pickle.dump(obj,pf)
def getBaseDir():
currentPath = os.getcwd()
|
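
The fix in record 2,152 is an instance of a common bug class: `pickle.dump` expects a writable file object as its second argument, not a path string, so the unfixed code raised a `TypeError` at runtime ("file must have a 'write' attribute"). A minimal standalone sketch of the corrected pattern (the temp-file path is illustrative):

```python
import os
import pickle
import tempfile

obj = {"a": 1, "b": [2, 3]}
path = os.path.join(tempfile.gettempdir(), "demo.pkl")  # illustrative path

# Buggy form: pickle.dump(obj, path) -> TypeError, a str has no .write()
with open(path, "wb") as pf:
    pickle.dump(obj, pf)           # correct: pass the open file handle

with open(path, "rb") as pf:
    assert pickle.load(pf) == obj  # the object round-trips
```
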
2,153 |
https://:@bitbucket.org/pyKLIP/pyklip.git
|
e2bf632237365bd23a1b8287ba1bfd85d8cb7702
|
@@ -311,7 +311,7 @@ class Data(object):
self.filenums = filenums_collapsed
self.filenames = filenames_collapsed
- if additional_collapsed is not None:
+ if additional_params is not None:
for param_field, param_collapsed in zip(additional_params, additional_collapsed):
param_collapsed.shape = (Ncubes * collapse_channels, ) + param_collapsed.shape[2:]
setattr(self, param_field, param_collapsed)
|
pyklip/instruments/Instrument.py
|
ReplaceText(target='additional_params' @(314,11)->(314,31))
|
class Data(object):
self.filenums = filenums_collapsed
self.filenames = filenames_collapsed
if additional_collapsed is not None:
for param_field, param_collapsed in zip(additional_params, additional_collapsed):
param_collapsed.shape = (Ncubes * collapse_channels, ) + param_collapsed.shape[2:]
setattr(self, param_field, param_collapsed)
|
class Data(object):
self.filenums = filenums_collapsed
self.filenames = filenames_collapsed
if additional_params is not None:
for param_field, param_collapsed in zip(additional_params, additional_collapsed):
param_collapsed.shape = (Ncubes * collapse_channels, ) + param_collapsed.shape[2:]
setattr(self, param_field, param_collapsed)
|
2,154 |
https://:@bitbucket.org/pyKLIP/pyklip.git
|
dc2b513633ddd04ac1746a971723a38dafb9365d
|
@@ -200,7 +200,7 @@ class CHARISData(Data):
with fits.open(filepath, lazy_load_hdus=False) as hdulist:
cube = hdulist[1].data
prihdr = hdulist[0].header
- exthdr = hdulist[0].header
+ exthdr = hdulist[1].header
w = wcs.WCS(header=prihdr, naxis=[1,2])
astr_hdrs = [w.deepcopy() for _ in range(cube.shape[0])] #repeat astrom header for each wavelength slice
|
pyklip/instruments/CHARIS.py
|
ReplaceText(target='1' @(203,33)->(203,34))
|
class CHARISData(Data):
with fits.open(filepath, lazy_load_hdus=False) as hdulist:
cube = hdulist[1].data
prihdr = hdulist[0].header
exthdr = hdulist[0].header
w = wcs.WCS(header=prihdr, naxis=[1,2])
astr_hdrs = [w.deepcopy() for _ in range(cube.shape[0])] #repeat astrom header for each wavelength slice
|
class CHARISData(Data):
with fits.open(filepath, lazy_load_hdus=False) as hdulist:
cube = hdulist[1].data
prihdr = hdulist[0].header
exthdr = hdulist[1].header
w = wcs.WCS(header=prihdr, naxis=[1,2])
astr_hdrs = [w.deepcopy() for _ in range(cube.shape[0])] #repeat astrom header for each wavelength slice
|
2,155 |
https://:@bitbucket.org/pyKLIP/pyklip.git
|
34a9207f22f175f184748b87f9cef6416647890e
|
@@ -1173,7 +1173,7 @@ def klip_dataset(dataset, mode='ADI+SDI', outputdir=".", fileprefix="", annuli=5
if psf_library.dataset is dataset:
raise ValueError("The PSF Library is not prepared for this dataset. Run psf_library.prepare_library()")
if aligned_center is not None:
- if np.array_equal(aligned_center, psf_library.aligned_center):
+ if not np.array_equal(aligned_center, psf_library.aligned_center):
raise ValueError("The images need to be aligned to the same center as the RDI Library")
else:
|
pyklip/parallelized.py
|
ReplaceText(target='not ' @(1176,15)->(1176,15))
|
def klip_dataset(dataset, mode='ADI+SDI', outputdir=".", fileprefix="", annuli=5
if psf_library.dataset is dataset:
raise ValueError("The PSF Library is not prepared for this dataset. Run psf_library.prepare_library()")
if aligned_center is not None:
if np.array_equal(aligned_center, psf_library.aligned_center):
raise ValueError("The images need to be aligned to the same center as the RDI Library")
else:
|
def klip_dataset(dataset, mode='ADI+SDI', outputdir=".", fileprefix="", annuli=5
if psf_library.dataset is dataset:
raise ValueError("The PSF Library is not prepared for this dataset. Run psf_library.prepare_library()")
if aligned_center is not None:
if not np.array_equal(aligned_center, psf_library.aligned_center):
raise ValueError("The images need to be aligned to the same center as the RDI Library")
else:
|
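
Record 2,155 adds a missing negation: the guard should raise when the alignment centers *differ*, but the unfixed code raised exactly when they matched. A small sketch of the corrected check, assuming NumPy arrays for the centers:

```python
import numpy as np

def check_alignment(aligned_center, library_center):
    # Raise only when the centers differ; the pre-fix code raised on a match.
    if not np.array_equal(aligned_center, library_center):
        raise ValueError("images must be aligned to the RDI library center")

check_alignment(np.array([140.0, 140.0]), np.array([140.0, 140.0]))  # ok
# check_alignment(np.array([140.0, 140.0]), np.array([130.0, 140.0]))  # raises
```
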
2,156 |
https://:@bitbucket.org/pyKLIP/pyklip.git
|
4748ec86588a4b3b60dcb17d36e14485cdcd19ff
|
@@ -63,7 +63,7 @@ def point_source_detection(image, center,threshold,pix2as=None,mask_radius = 4,m
# Mask out a band of 10 pixels around the edges of the finite pixels of the image.
if maskout_edge is not None:
- IWA,OWA,inner_mask,outer_mask = get_occ(image, centroid = (center[0][0]+stamp_size//2,center[0][1]+stamp_size//2))
+ IWA,OWA,inner_mask,outer_mask = get_occ(image_cpy, centroid = (center[0][0]+stamp_size//2,center[0][1]+stamp_size//2))
conv_kernel = np.ones((maskout_edge,maskout_edge))
flat_cube_wider_mask = convolve2d(outer_mask,conv_kernel,mode="same")
image_cpy[np.where(np.isnan(flat_cube_wider_mask))] = np.nan
|
pyklip/kpp/detection/detection.py
|
ReplaceText(target='image_cpy' @(66,52)->(66,57))
|
def point_source_detection(image, center,threshold,pix2as=None,mask_radius = 4,m
# Mask out a band of 10 pixels around the edges of the finite pixels of the image.
if maskout_edge is not None:
IWA,OWA,inner_mask,outer_mask = get_occ(image, centroid = (center[0][0]+stamp_size//2,center[0][1]+stamp_size//2))
conv_kernel = np.ones((maskout_edge,maskout_edge))
flat_cube_wider_mask = convolve2d(outer_mask,conv_kernel,mode="same")
image_cpy[np.where(np.isnan(flat_cube_wider_mask))] = np.nan
|
def point_source_detection(image, center,threshold,pix2as=None,mask_radius = 4,m
# Mask out a band of 10 pixels around the edges of the finite pixels of the image.
if maskout_edge is not None:
IWA,OWA,inner_mask,outer_mask = get_occ(image_cpy, centroid = (center[0][0]+stamp_size//2,center[0][1]+stamp_size//2))
conv_kernel = np.ones((maskout_edge,maskout_edge))
flat_cube_wider_mask = convolve2d(outer_mask,conv_kernel,mode="same")
image_cpy[np.where(np.isnan(flat_cube_wider_mask))] = np.nan
|
2,157 |
https://:@github.com/kalaspuff/stockholm.git
|
08ee0890e1b7b22c8fc757ee2584e5cdc9ec459c
|
@@ -564,7 +564,7 @@ class MoneyModel(Generic[MoneyType]):
if self._currency and isinstance(self._currency, BaseCurrency):
min_decimals = self._currency.decimal_digits
min_decimals = DEFAULT_MIN_DECIMALS if min_decimals is None else min_decimals
- min_decimals = min(cast(min_decimals, int), max_decimals)
+ min_decimals = min(cast(int, min_decimals), max_decimals)
elif max_decimals is None:
max_decimals = max(min_decimals, DEFAULT_MAX_DECIMALS)
|
stockholm/money.py
|
ArgSwap(idxs=0<->1 @(567,31)->(567,35))
|
class MoneyModel(Generic[MoneyType]):
if self._currency and isinstance(self._currency, BaseCurrency):
min_decimals = self._currency.decimal_digits
min_decimals = DEFAULT_MIN_DECIMALS if min_decimals is None else min_decimals
min_decimals = min(cast(min_decimals, int), max_decimals)
elif max_decimals is None:
max_decimals = max(min_decimals, DEFAULT_MAX_DECIMALS)
|
class MoneyModel(Generic[MoneyType]):
if self._currency and isinstance(self._currency, BaseCurrency):
min_decimals = self._currency.decimal_digits
min_decimals = DEFAULT_MIN_DECIMALS if min_decimals is None else min_decimals
min_decimals = min(cast(int, min_decimals), max_decimals)
elif max_decimals is None:
max_decimals = max(min_decimals, DEFAULT_MAX_DECIMALS)
|
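
Record 2,157 swaps the arguments of `typing.cast`, whose signature is `cast(typ, val)` with the type first. Because `cast` performs no runtime conversion and simply returns its second argument, the swapped call handed the `int` class itself to `min()`, which then failed on the comparison. A quick demonstration:

```python
from typing import cast

min_decimals = 2
max_decimals = 4

# cast(typ, val) returns val unchanged; the type only guides static checkers.
assert cast(int, min_decimals) == 2
assert cast(str, min_decimals) == 2        # no conversion happens at runtime

# The swapped form returns the class object instead of the value:
assert cast(min_decimals, int) is int
# min(cast(min_decimals, int), max_decimals) would therefore raise
# TypeError: '<' not supported between instances of 'int' and 'type'

print(min(cast(int, min_decimals), max_decimals))  # 2, as intended
```
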
2,158 |
https://:@github.com/rbw/snowstorm.git
|
dcbeaa22bd05821a088861feb4f8f3ad76ff3db1
|
@@ -33,7 +33,7 @@ class Response:
body = await self.text()
content = ujson.loads(body).get("result")
- if "error" in body:
+ if "error" in content:
err = ErrorSchema().load(content["error"])
text = f"{err['message']} ({self.status}): {err['detail']}" if err["detail"] else err["message"]
raise ErrorResponse(text)
|
snow/request/core/base.py
|
ReplaceText(target='content' @(36,22)->(36,26))
|
class Response:
body = await self.text()
content = ujson.loads(body).get("result")
if "error" in body:
err = ErrorSchema().load(content["error"])
text = f"{err['message']} ({self.status}): {err['detail']}" if err["detail"] else err["message"]
raise ErrorResponse(text)
|
class Response:
body = await self.text()
content = ujson.loads(body).get("result")
if "error" in content:
err = ErrorSchema().load(content["error"])
text = f"{err['message']} ({self.status}): {err['detail']}" if err["detail"] else err["message"]
raise ErrorResponse(text)
|
2,159 |
https://:@github.com/mayjolux/udata.git
|
2a578ad9630779fced19b836b5252f62c7ed2caa
|
@@ -207,7 +207,7 @@ class DatasetBlueprintTest(FrontTestCase):
for i in range(1, len(feed.entries)):
published_date = feed.entries[i].published_parsed
prev_published_date = feed.entries[i - 1].published_parsed
- self.assertGreaterEqual(published_date, prev_published_date)
+ self.assertGreaterEqual(prev_published_date, published_date)
def test_recent_feed_owner(self):
owner = UserFactory()
|
udata/tests/frontend/test_dataset_frontend.py
|
ArgSwap(idxs=0<->1 @(210,12)->(210,35))
|
class DatasetBlueprintTest(FrontTestCase):
for i in range(1, len(feed.entries)):
published_date = feed.entries[i].published_parsed
prev_published_date = feed.entries[i - 1].published_parsed
self.assertGreaterEqual(published_date, prev_published_date)
def test_recent_feed_owner(self):
owner = UserFactory()
|
class DatasetBlueprintTest(FrontTestCase):
for i in range(1, len(feed.entries)):
published_date = feed.entries[i].published_parsed
prev_published_date = feed.entries[i - 1].published_parsed
self.assertGreaterEqual(prev_published_date, published_date)
def test_recent_feed_owner(self):
owner = UserFactory()
|
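
Record 2,159 is an argument-order bug in an assertion: `assertGreaterEqual(first, second)` checks `first >= second`, and in a newest-first feed it is the *previous* entry's timestamp that must be the greater one. A self-contained sketch using tuples as stand-ins for the parsed dates:

```python
import unittest

class FeedOrderTest(unittest.TestCase):
    def test_entries_are_newest_first(self):
        published = [(2024, 3, 2), (2024, 3, 1), (2024, 2, 28)]  # illustrative dates
        for i in range(1, len(published)):
            # assertGreaterEqual(a, b) asserts a >= b:
            # the earlier entry in the feed must be newer or equal.
            self.assertGreaterEqual(published[i - 1], published[i])

if __name__ == "__main__":
    unittest.main()
```
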
2,160 |
https://:@github.com/mayjolux/udata.git
|
e70455456fabb68859a47a6ef916bc4adb2ccddb
|
@@ -148,7 +148,7 @@ class DatasetBadgesAPI(API):
form.populate_obj(badge)
for existing_badge in dataset.badges:
if existing_badge.kind == badge.kind:
- return badge
+ return existing_badge
dataset.add_badge(badge)
return badge, 201
|
udata/core/dataset/api.py
|
ReplaceText(target='existing_badge' @(151,23)->(151,28))
|
class DatasetBadgesAPI(API):
form.populate_obj(badge)
for existing_badge in dataset.badges:
if existing_badge.kind == badge.kind:
return badge
dataset.add_badge(badge)
return badge, 201
|
class DatasetBadgesAPI(API):
form.populate_obj(badge)
for existing_badge in dataset.badges:
if existing_badge.kind == badge.kind:
return existing_badge
dataset.add_badge(badge)
return badge, 201
|
2,161 |
https://:@github.com/mayjolux/udata.git
|
6b1aaaff35c2f373873324561fea22752adf37e9
|
@@ -104,7 +104,7 @@ class ReuseDatasetsAPI(API):
except Dataset.DoesNotExist:
api.abort(404, 'Dataset {0} does not exists'.format(request.json['id']))
if dataset in reuse.datasets:
- return dataset
+ return reuse
reuse.datasets.append(dataset)
reuse.save()
return reuse, 201
|
udata/core/reuse/api.py
|
ReplaceText(target='reuse' @(107,19)->(107,26))
|
class ReuseDatasetsAPI(API):
except Dataset.DoesNotExist:
api.abort(404, 'Dataset {0} does not exists'.format(request.json['id']))
if dataset in reuse.datasets:
return dataset
reuse.datasets.append(dataset)
reuse.save()
return reuse, 201
|
class ReuseDatasetsAPI(API):
except Dataset.DoesNotExist:
api.abort(404, 'Dataset {0} does not exists'.format(request.json['id']))
if dataset in reuse.datasets:
return reuse
reuse.datasets.append(dataset)
reuse.save()
return reuse, 201
|
2,162 |
https://:@github.com/liushilive/LsBook.git
|
ef6afa367b34525354dee724607c2490bed605b6
|
@@ -83,6 +83,6 @@ def is_summary_exist(book: Book):
"""
book_summary = os.path.join(book.book_path, "summary.md")
if not os.path.isfile(book_summary):
- file_not_found_error(book)
+ file_not_found_error(book_summary)
else:
book.summary_path = book_summary
|
LsBook/parse/parse_summary.py
|
ReplaceText(target='book_summary' @(86,29)->(86,33))
|
def is_summary_exist(book: Book):
"""
book_summary = os.path.join(book.book_path, "summary.md")
if not os.path.isfile(book_summary):
file_not_found_error(book)
else:
book.summary_path = book_summary
|
def is_summary_exist(book: Book):
"""
book_summary = os.path.join(book.book_path, "summary.md")
if not os.path.isfile(book_summary):
file_not_found_error(book_summary)
else:
book.summary_path = book_summary
|
2,163 |
https://:@github.com/liushilive/LsBook.git
|
592651c6c34d89532a35cfffb597e42512146525
|
@@ -58,7 +58,7 @@ def _render_html(book_title, title, author, basePath, book_summary,
language, i18n, github_url, base_assets):
"""生产HTML,返回索引"""
# 解析页面
- base_assets_path = os.path.join(base_assets, basePath) if base_assets else basePath # 资源路径
+ base_assets_path = os.path.join(basePath, base_assets) if base_assets else basePath # 资源路径
book_page, toc_tree, tag_katex, tag_mermaid, tag_prism, tag_lightbox, assets_img = parse_file(
os.path.join(book_path, href),
|
LsBook/renderer/renderer_html.py
|
ArgSwap(idxs=0<->1 @(61,23)->(61,35))
|
def _render_html(book_title, title, author, basePath, book_summary,
language, i18n, github_url, base_assets):
"""生产HTML,返回索引"""
# 解析页面
base_assets_path = os.path.join(base_assets, basePath) if base_assets else basePath # 资源路径
book_page, toc_tree, tag_katex, tag_mermaid, tag_prism, tag_lightbox, assets_img = parse_file(
os.path.join(book_path, href),
|
def _render_html(book_title, title, author, basePath, book_summary,
language, i18n, github_url, base_assets):
"""生产HTML,返回索引"""
# 解析页面
base_assets_path = os.path.join(basePath, base_assets) if base_assets else basePath # 资源路径
book_page, toc_tree, tag_katex, tag_mermaid, tag_prism, tag_lightbox, assets_img = parse_file(
os.path.join(book_path, href),
|
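
Record 2,163 swaps the operands of `os.path.join`, which is order-sensitive: components are appended left to right, and an absolute right-hand component discards everything before it. The fix puts the base path first so the assets directory nests under it (POSIX separators shown):

```python
import os

basePath = "book/output"   # illustrative values
base_assets = "assets"

print(os.path.join(basePath, base_assets))   # book/output/assets  (intended)
print(os.path.join(base_assets, basePath))   # assets/book/output  (the bug)

# An absolute right-hand component silently resets the result:
print(os.path.join("book/output", "/assets"))  # /assets
```
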
2,164 |
https://:@github.com/eieste/MethodCache.git
|
672a3590ede2a2e6f7a347fd69c0583b2ec4bd9b
|
@@ -66,7 +66,7 @@ def add_to_cache(options={}, func=None, params=None):
if "ttl" not in options:
assert type(cleaned_options["store"].ttl) is int
- cleaned_options["ttl"] = options["store"].ttl
+ cleaned_options["ttl"] = cleaned_options["store"].ttl
else:
assert type(options["ttl"]) is int
|
methodcache/cache.py
|
ReplaceText(target='cleaned_options' @(69,33)->(69,40))
|
def add_to_cache(options={}, func=None, params=None):
if "ttl" not in options:
assert type(cleaned_options["store"].ttl) is int
cleaned_options["ttl"] = options["store"].ttl
else:
assert type(options["ttl"]) is int
|
def add_to_cache(options={}, func=None, params=None):
if "ttl" not in options:
assert type(cleaned_options["store"].ttl) is int
cleaned_options["ttl"] = cleaned_options["store"].ttl
else:
assert type(options["ttl"]) is int
|
2,165 |
https://:@github.com/alphardex/crawltools.git
|
89c92158c4c366af1e22a16d060fdb390683c4b5
|
@@ -17,7 +17,7 @@ def crawl(url):
data['url'] = item['url']
data['downloads'] = item['downloads_count']
data['votes'] = item['votes_count']
- data['comments'] = items['comments_count']
+ data['comments'] = item['comments_count']
pprint(data)
total.append(data)
|
looter/examples/sharecuts.py
|
ReplaceText(target='item' @(20,27)->(20,32))
|
def crawl(url):
data['url'] = item['url']
data['downloads'] = item['downloads_count']
data['votes'] = item['votes_count']
data['comments'] = items['comments_count']
pprint(data)
total.append(data)
|
def crawl(url):
data['url'] = item['url']
data['downloads'] = item['downloads_count']
data['votes'] = item['votes_count']
data['comments'] = item['comments_count']
pprint(data)
total.append(data)
|
2,166 |
https://:@github.com/andrewsanchez/genbankqc.git
|
2aa87a49e09fefbbe7fe2cba2e6074bba157322b
|
@@ -38,7 +38,7 @@ def cli(filter_level, max_unknowns, c_deviations, s_deviations, m_deviations,
print("Completed ", s.species)
print(s)
except Exception:
- print('Failed ', species.species)
+ print('Failed ', s.species)
traceback.print_exc()
else:
from genbankqc import Genbank
|
genbankqc/__main__.py
|
ReplaceText(target='s' @(41,29)->(41,36))
|
def cli(filter_level, max_unknowns, c_deviations, s_deviations, m_deviations,
print("Completed ", s.species)
print(s)
except Exception:
print('Failed ', species.species)
traceback.print_exc()
else:
from genbankqc import Genbank
|
def cli(filter_level, max_unknowns, c_deviations, s_deviations, m_deviations,
print("Completed ", s.species)
print(s)
except Exception:
print('Failed ', s.species)
traceback.print_exc()
else:
from genbankqc import Genbank
|
2,167 |
https://:@github.com/mattlong/fabric.git
|
7d2c14330a3382a86e51fe16a0151299cc4d23ba
|
@@ -207,4 +207,4 @@ Got:
def eq_contents(path, text):
with open(path) as fd:
- eq_(fd.read(), text)
+ eq_(text, fd.read())
|
tests/utils.py
|
ArgSwap(idxs=0<->1 @(210,8)->(210,11))
|
Got:
def eq_contents(path, text):
with open(path) as fd:
eq_(fd.read(), text)
|
Got:
def eq_contents(path, text):
with open(path) as fd:
eq_(text, fd.read())
|
2,168 |
https://:@github.com/rsanchezgarc/carbonCleaner.git
|
df470ae67588e8a1c7ba04ce14b517552e0ad788
|
@@ -41,7 +41,7 @@ def cleanOneMic(micFname, boxSize, deepLearningModel=DEFAULT_MODEL_PATH, inputCo
global MASK_PREDICTOR_HANDLER
with LOCK:
if MASK_PREDICTOR_HANDLER is None:
- MASK_PREDICTOR_HANDLER= MaskPredictor(deepLearningModel, boxSize, gpus)
+ MASK_PREDICTOR_HANDLER= MaskPredictor(boxSize, deepLearningModel, gpus)
maskPredictor= MASK_PREDICTOR_HANDLER
|
micrograph_cleaner_em/cleanOneMic.py
|
ArgSwap(idxs=0<->1 @(44,30)->(44,43))
|
def cleanOneMic(micFname, boxSize, deepLearningModel=DEFAULT_MODEL_PATH, inputCo
global MASK_PREDICTOR_HANDLER
with LOCK:
if MASK_PREDICTOR_HANDLER is None:
MASK_PREDICTOR_HANDLER= MaskPredictor(deepLearningModel, boxSize, gpus)
maskPredictor= MASK_PREDICTOR_HANDLER
|
def cleanOneMic(micFname, boxSize, deepLearningModel=DEFAULT_MODEL_PATH, inputCo
global MASK_PREDICTOR_HANDLER
with LOCK:
if MASK_PREDICTOR_HANDLER is None:
MASK_PREDICTOR_HANDLER= MaskPredictor(boxSize, deepLearningModel, gpus)
maskPredictor= MASK_PREDICTOR_HANDLER
|
2,169 |
https://:@github.com/Storj/storjkademlia.git
|
61631b21db3a4204bef38431799244fda90e7e4d
|
@@ -9,7 +9,7 @@ from kademlia.log import Logger
class KademliaProtocol(RPCProtocol):
def __init__(self, sourceNode, storage, ksize):
RPCProtocol.__init__(self)
- self.router = RoutingTable(self, sourceNode, ksize)
+ self.router = RoutingTable(self, ksize, sourceNode)
self.storage = storage
self.sourceID = sourceNode.id
self.log = Logger(system=self)
|
kademlia/protocol.py
|
ArgSwap(idxs=1<->2 @(12,22)->(12,34))
|
from kademlia.log import Logger
class KademliaProtocol(RPCProtocol):
def __init__(self, sourceNode, storage, ksize):
RPCProtocol.__init__(self)
self.router = RoutingTable(self, sourceNode, ksize)
self.storage = storage
self.sourceID = sourceNode.id
self.log = Logger(system=self)
|
from kademlia.log import Logger
class KademliaProtocol(RPCProtocol):
def __init__(self, sourceNode, storage, ksize):
RPCProtocol.__init__(self)
self.router = RoutingTable(self, ksize, sourceNode)
self.storage = storage
self.sourceID = sourceNode.id
self.log = Logger(system=self)
|
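
Record 2,169 swaps two positional constructor arguments, which nothing catches at runtime when the types happen to be compatible. Passing such parameters by keyword makes the call site self-documenting and order-proof. A hypothetical stand-in for the routing-table class (not the real kademlia API):

```python
class RoutingTable:
    def __init__(self, protocol, ksize, node):
        if not isinstance(ksize, int):          # fail fast on a swapped call
            raise TypeError("ksize must be an int")
        self.protocol, self.ksize, self.node = protocol, ksize, node

# Keyword arguments make the order irrelevant and the intent explicit:
table = RoutingTable(protocol=None, ksize=20, node="sourceNode")
print(table.ksize)  # 20
```
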
2,170 |
https://:@github.com/biwin/django-allauth-underground.git
|
e34cc91401970a2c2c265ca7e3f76c44b26dc1c9
|
@@ -27,7 +27,7 @@ def verified_email_required(function=None,
def _wrapped_view(request, *args, **kwargs):
if not EmailAddress.objects.filter(user=request.user,
verified=True).exists():
- send_email_confirmation(request.user, request)
+ send_email_confirmation(request, request.user)
return render(request,
'account/verified_email_required.html')
return view_func(request, *args, **kwargs)
|
allauth/account/decorators.py
|
ArgSwap(idxs=0<->1 @(30,16)->(30,39))
|
def verified_email_required(function=None,
def _wrapped_view(request, *args, **kwargs):
if not EmailAddress.objects.filter(user=request.user,
verified=True).exists():
send_email_confirmation(request.user, request)
return render(request,
'account/verified_email_required.html')
return view_func(request, *args, **kwargs)
|
def verified_email_required(function=None,
def _wrapped_view(request, *args, **kwargs):
if not EmailAddress.objects.filter(user=request.user,
verified=True).exists():
send_email_confirmation(request, request.user)
return render(request,
'account/verified_email_required.html')
return view_func(request, *args, **kwargs)
|
2,171 |
https://:@github.com/tuxdna/django-mako.git
|
066efc87e5324ae40f188150c9c59a4edf477513
|
@@ -27,7 +27,7 @@ def render_to_string(template_name, dictionary, context_instance=None):
context_dictionary.update(d)
# fetch and render template
template = middleware.lookup.get_template(template_name)
- return template.render(**dictionary)
+ return template.render(**context_dictionary)
def render_to_response(template_name, dictionary, context_instance=None, **kwargs):
"""
|
djangomako/shortcuts.py
|
ReplaceText(target='context_dictionary' @(30,29)->(30,39))
|
def render_to_string(template_name, dictionary, context_instance=None):
context_dictionary.update(d)
# fetch and render template
template = middleware.lookup.get_template(template_name)
return template.render(**dictionary)
def render_to_response(template_name, dictionary, context_instance=None, **kwargs):
"""
|
def render_to_string(template_name, dictionary, context_instance=None):
context_dictionary.update(d)
# fetch and render template
template = middleware.lookup.get_template(template_name)
return template.render(**context_dictionary)
def render_to_response(template_name, dictionary, context_instance=None, **kwargs):
"""
|
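
Record 2,171 renders the raw per-call `dictionary` instead of the merged `context_dictionary`, silently dropping everything contributed by the context instance. The general pattern is: copy the base context, layer the view's values on top, and expand the merged mapping into the template. A library-agnostic sketch using `string.Template` as a stand-in for a Mako template:

```python
from string import Template  # stand-in for a real template engine

base_context = {"site": "example", "user": "anonymous"}
dictionary = {"user": "alice"}              # per-view values take precedence

context_dictionary = dict(base_context)     # copy rather than mutate the base
context_dictionary.update(dictionary)

tmpl = Template("site=$site user=$user")
print(tmpl.substitute(**context_dictionary))  # site=example user=alice
```
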
2,172 |
https://:@github.com/brainiak/brainiak-cloud.git
|
43c7525c9a9be12d33426b24fac353dc4d92c35a
|
@@ -130,5 +130,5 @@ class FCMAExperiment(Experiment):
if result > -1:
tmp = result
result = -1
- return str(result)
+ return str(tmp)
return "Data received!"
|
rtcloud/experiments/FCMAExperiment.py
|
ReplaceText(target='tmp' @(133,23)->(133,29))
|
class FCMAExperiment(Experiment):
if result > -1:
tmp = result
result = -1
return str(result)
return "Data received!"
|
class FCMAExperiment(Experiment):
if result > -1:
tmp = result
result = -1
return str(tmp)
return "Data received!"
|
2,173 |
https://:@github.com/NCATS-Tangerine/ros.git
|
3d8810f69f4c417b8fe46464667d082882b99f81
|
@@ -29,6 +29,6 @@ def exec_operator(self, model, job_name):
op_node = wf.spec.get("workflow",{}).get(job_name,{})
if op_node:
router = Router (wf)
- result = router.route (wf, op_node, job_name, op_node['code'], op_node['args'])
+ result = router.route (wf, job_name, op_node, op_node['code'], op_node['args'])
wf.set_result (job_name, result)
return result
|
ros/dag/tasks.py
|
ArgSwap(idxs=1<->2 @(32,17)->(32,29))
|
def exec_operator(self, model, job_name):
op_node = wf.spec.get("workflow",{}).get(job_name,{})
if op_node:
router = Router (wf)
result = router.route (wf, op_node, job_name, op_node['code'], op_node['args'])
wf.set_result (job_name, result)
return result
|
def exec_operator(self, model, job_name):
op_node = wf.spec.get("workflow",{}).get(job_name,{})
if op_node:
router = Router (wf)
result = router.route (wf, job_name, op_node, op_node['code'], op_node['args'])
wf.set_result (job_name, result)
return result
|
2,174 |
https://:@github.com/ICIJ/solr2es.git
|
e4d9cad0605a2a3048b237167f8523b02a47fc22
|
@@ -158,7 +158,7 @@ def translate_doc(row, translation_names, default_values) -> dict:
translated_value = value[0] if type(value) is list else value
if '.' in translated_key:
- translated_value = reduce(lambda i, acc: (acc, i), reversed(translated_key.split('.')[1:] + [value]))
+ translated_value = reduce(lambda i, acc: (acc, i), reversed(translated_key.split('.')[1:] + [translated_value]))
translated_key = translated_key.split('.')[0]
elif translated_key == '_id':
return key, value
|
solr2es/__main__.py
|
ReplaceText(target='translated_value' @(161,105)->(161,110))
|
def translate_doc(row, translation_names, default_values) -> dict:
translated_value = value[0] if type(value) is list else value
if '.' in translated_key:
translated_value = reduce(lambda i, acc: (acc, i), reversed(translated_key.split('.')[1:] + [value]))
translated_key = translated_key.split('.')[0]
elif translated_key == '_id':
return key, value
|
def translate_doc(row, translation_names, default_values) -> dict:
translated_value = value[0] if type(value) is list else value
if '.' in translated_key:
translated_value = reduce(lambda i, acc: (acc, i), reversed(translated_key.split('.')[1:] + [translated_value]))
translated_key = translated_key.split('.')[0]
elif translated_key == '_id':
return key, value
|
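
Record 2,174 fixes the seed of a `reduce` that folds a dotted key path into nested tuples: the tail must end with the already-normalized `translated_value`, not the raw `value`. Note also that `reduce` calls its function as `f(accumulator, element)`, so in the original lambda the parameter named `i` is actually the accumulator. The folding step in isolation:

```python
from functools import reduce

translated_key = "a.b.c"
translated_value = "leaf"   # already unwrapped from a one-element list

tail = translated_key.split(".")[1:] + [translated_value]   # ['b', 'c', 'leaf']
nested = reduce(lambda i, acc: (acc, i), reversed(tail))
print(nested)   # ('b', ('c', 'leaf'))
```
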
2,175 |
https://:@github.com/guyfawcus/ArchMap.git
|
3dc6ce049e92b3997c00c443d3da1a42da586105
|
@@ -123,7 +123,7 @@ def make_gis(parsed_users, output_file_geojson, output_file_kml, send_to_geojson
message("Writing geojson to " + output_file_geojson)
output = open(output_file_geojson, 'w')
- output.write(geojson_str)
+ output.write(geojson_str_pretty)
output.close()
# Write 'kml' to 'output_file_kml' if wanted.
|
archmap.py
|
ReplaceText(target='geojson_str_pretty' @(126,21)->(126,32))
|
def make_gis(parsed_users, output_file_geojson, output_file_kml, send_to_geojson
message("Writing geojson to " + output_file_geojson)
output = open(output_file_geojson, 'w')
output.write(geojson_str)
output.close()
# Write 'kml' to 'output_file_kml' if wanted.
|
def make_gis(parsed_users, output_file_geojson, output_file_kml, send_to_geojson
message("Writing geojson to " + output_file_geojson)
output = open(output_file_geojson, 'w')
output.write(geojson_str_pretty)
output.close()
# Write 'kml' to 'output_file_kml' if wanted.
|
2,176 |
https://:@github.com/machinekoder/speed-friending-matcher.git
|
ea954a502879b22a25d54fd8f40bf35315aede13
|
@@ -45,7 +45,7 @@ class Application(object):
for output in raw_ouputs:
name, arguments = output.split(':')
outputs.append((name, arguments))
- if len(output) == 0:
+ if len(outputs) == 0:
raise ValueError()
except ValueError:
raise RuntimeError('Incorrect output plugin string')
|
application.py
|
ReplaceText(target='outputs' @(48,19)->(48,25))
|
class Application(object):
for output in raw_ouputs:
name, arguments = output.split(':')
outputs.append((name, arguments))
if len(output) == 0:
raise ValueError()
except ValueError:
raise RuntimeError('Incorrect output plugin string')
|
class Application(object):
for output in raw_ouputs:
name, arguments = output.split(':')
outputs.append((name, arguments))
if len(outputs) == 0:
raise ValueError()
except ValueError:
raise RuntimeError('Incorrect output plugin string')
|
2,177 |
https://:@github.com/ruipgil/TrackToTrip.git
|
a67fab1c7801d2eaf8086db367f04282dcadf822
|
@@ -70,7 +70,7 @@ def update_location_centroid(point, cluster, max_distance, min_samples):
biggest_centroid = centroid
if biggest_centroid is None:
- biggest_centroid = compute_centroid(cluster)
+ biggest_centroid = compute_centroid(points)
return biggest_centroid, cluster
|
tracktotrip/location.py
|
ReplaceText(target='points' @(73,44)->(73,51))
|
def update_location_centroid(point, cluster, max_distance, min_samples):
biggest_centroid = centroid
if biggest_centroid is None:
biggest_centroid = compute_centroid(cluster)
return biggest_centroid, cluster
|
def update_location_centroid(point, cluster, max_distance, min_samples):
biggest_centroid = centroid
if biggest_centroid is None:
biggest_centroid = compute_centroid(points)
return biggest_centroid, cluster
|
2,178 |
https://:@github.com/neurodata/primitives-interfaces.git
|
8df5f9ba565690a95034f2d41b00f38d5c70a592
|
@@ -106,7 +106,7 @@ class DatasetToGraphList(transformer.TransformerPrimitiveBase[Inputs, Outputs, H
graphs.append(temp_graph)
# get the task type from the task docs
- temp_path = location_base_uri.split('/')
+ temp_path = datasetDoc_uri.split('/')
problemDoc_uri = '/'.join(temp_path[:-2]) + '/' + '/'.join(temp_path[-2:]).replace('dataset', 'problem')
with open(problemDoc_uri) as json_file:
|
jhu_primitives/dataset_to_graph_list/dataset_to_graph_list.py
|
ReplaceText(target='datasetDoc_uri' @(109,20)->(109,37))
|
class DatasetToGraphList(transformer.TransformerPrimitiveBase[Inputs, Outputs, H
graphs.append(temp_graph)
# get the task type from the task docs
temp_path = location_base_uri.split('/')
problemDoc_uri = '/'.join(temp_path[:-2]) + '/' + '/'.join(temp_path[-2:]).replace('dataset', 'problem')
with open(problemDoc_uri) as json_file:
|
class DatasetToGraphList(transformer.TransformerPrimitiveBase[Inputs, Outputs, H
graphs.append(temp_graph)
# get the task type from the task docs
temp_path = datasetDoc_uri.split('/')
problemDoc_uri = '/'.join(temp_path[:-2]) + '/' + '/'.join(temp_path[-2:]).replace('dataset', 'problem')
with open(problemDoc_uri) as json_file:
|
2,179 |
https://:@github.com/neurodata/primitives-interfaces.git
|
67d23cf31c662487d6aac6bae63e2c54ad00066c
|
@@ -100,7 +100,7 @@ class LargestConnectedComponent(TransformerPrimitiveBase[Inputs, Outputs, Hyperp
# check if the component is largest
if len(connected_component) > len(G_largest):
# if it is largest - flag as such
- G_largest = i
+ G_largest = connected_component
# obtain indices associated with the node_ids in this component
temp_indices = [i for i, x in enumerate(nodeIDs)
if x in list(connected_component)]
|
jhu_primitives/lcc/lcc.py
|
ReplaceText(target='connected_component' @(103,28)->(103,29))
|
class LargestConnectedComponent(TransformerPrimitiveBase[Inputs, Outputs, Hyperp
# check if the component is largest
if len(connected_component) > len(G_largest):
# if it is largest - flag as such
G_largest = i
# obtain indices associated with the node_ids in this component
temp_indices = [i for i, x in enumerate(nodeIDs)
if x in list(connected_component)]
|
class LargestConnectedComponent(TransformerPrimitiveBase[Inputs, Outputs, Hyperp
# check if the component is largest
if len(connected_component) > len(G_largest):
# if it is largest - flag as such
G_largest = connected_component
# obtain indices associated with the node_ids in this component
temp_indices = [i for i, x in enumerate(nodeIDs)
if x in list(connected_component)]
|
2,180 |
https://:@github.com/agorinenko/power_dict.git
|
25aaa83e38e440e9f2d49c49f1620d071805410d
|
@@ -96,7 +96,7 @@ class SchemaValidator:
return DictUtils.get_required_value(context, name, **kwargs)
else:
- if required_error is not None:
+ if default_value is not None:
kwargs['default_value'] = default_value
return DictUtils.get_value(context, name, **kwargs)
|
power_dict/schema_validator.py
|
ReplaceText(target='default_value' @(99,19)->(99,33))
|
class SchemaValidator:
return DictUtils.get_required_value(context, name, **kwargs)
else:
if required_error is not None:
kwargs['default_value'] = default_value
return DictUtils.get_value(context, name, **kwargs)
|
class SchemaValidator:
return DictUtils.get_required_value(context, name, **kwargs)
else:
if default_value is not None:
kwargs['default_value'] = default_value
return DictUtils.get_value(context, name, **kwargs)
|
2,181 |
https://:@github.com/mwilliamson/nope.git
|
cd87dfe247ad7fb3a9f324a8369a3a741c56104f
|
@@ -164,7 +164,7 @@ def _create_type_rules():
primary_type.define(sub_signature | applied_type | type_ref)
explicit_type = (signature | type_) + finished >> (lambda result: result[0])
- type_definition = (type_ref + skip(equals) + type_ + skip(finished)) >> _make_type_definition
+ type_definition = (type_name + skip(equals) + type_ + skip(finished)) >> _make_type_definition
return explicit_type, type_definition
|
nope/parser/typing.py
|
ReplaceText(target='type_name' @(167,23)->(167,31))
|
def _create_type_rules():
primary_type.define(sub_signature | applied_type | type_ref)
explicit_type = (signature | type_) + finished >> (lambda result: result[0])
type_definition = (type_ref + skip(equals) + type_ + skip(finished)) >> _make_type_definition
return explicit_type, type_definition
|
def _create_type_rules():
primary_type.define(sub_signature | applied_type | type_ref)
explicit_type = (signature | type_) + finished >> (lambda result: result[0])
type_definition = (type_name + skip(equals) + type_ + skip(finished)) >> _make_type_definition
return explicit_type, type_definition
|
2,182 |
https://:@github.com/mwilliamson/nope.git
|
17ff327b5165e537fa683d94ad430564a974a9b3
|
@@ -168,7 +168,7 @@ def _create_type_rules():
type_definition = (type_name + skip(equals) + type_ + skip(finished)) >> _make_type_definition
- structural_type_attr = (attr_name + skip(colon) + type_) >> tuple
+ structural_type_attr = (attr_name + skip(colon) + explicit_type) >> tuple
structural_type_attrs = many(structural_type_attr)
structural_type_definition = (type_name + skip(colon) + structural_type_attrs + skip(finished)) >> _make_structural_type_definition
|
nope/parser/typing.py
|
ReplaceText(target='explicit_type' @(171,54)->(171,59))
|
def _create_type_rules():
type_definition = (type_name + skip(equals) + type_ + skip(finished)) >> _make_type_definition
structural_type_attr = (attr_name + skip(colon) + type_) >> tuple
structural_type_attrs = many(structural_type_attr)
structural_type_definition = (type_name + skip(colon) + structural_type_attrs + skip(finished)) >> _make_structural_type_definition
|
def _create_type_rules():
type_definition = (type_name + skip(equals) + type_ + skip(finished)) >> _make_type_definition
structural_type_attr = (attr_name + skip(colon) + explicit_type) >> tuple
structural_type_attrs = many(structural_type_attr)
structural_type_definition = (type_name + skip(colon) + structural_type_attrs + skip(finished)) >> _make_structural_type_definition
|
2,183 |
https://:@github.com/regexpressyourself/passman.git
|
0ed931e680654f9ca017a093115f1f7f0e833ca4
|
@@ -96,7 +96,7 @@ def generatePasswordPrompt():
siz = getUserInput("Password length")
- if not siz=='' and not siz.isdecimal():
+ if not siz=='' or not siz.isdecimal():
print("not a number")
return ""
|
menu.py
|
ReplaceText(target='or' @(99,19)->(99,22))
|
def generatePasswordPrompt():
siz = getUserInput("Password length")
if not siz=='' and not siz.isdecimal():
print("not a number")
return ""
|
def generatePasswordPrompt():
siz = getUserInput("Password length")
if not siz=='' or not siz.isdecimal():
print("not a number")
return ""
|
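
Record 2,183 flips `and` to `or` between two negated conditions, a classic De Morgan pitfall: `not a and not b` equals `not (a or b)`, while `not a or not b` equals `not (a and b)`, and the two guards reject very different inputs. A quick truth-table check over the three interesting cases:

```python
for siz in ("", "12", "abc"):
    is_empty = siz == ""
    is_num = siz.isdecimal()
    print(repr(siz).ljust(6),
          "and:", (not is_empty) and (not is_num),   # not (is_empty or is_num)
          "or:",  (not is_empty) or (not is_num))    # not (is_empty and is_num)
```

The `and` form fires only for non-empty, non-numeric input; the `or` form fires whenever either negation holds.
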
2,184 |
https://:@github.com/BFriedrichs/motorturbine.git
|
f42dc59b1228d645dce3e9c711385d094990727f
|
@@ -29,7 +29,7 @@ class ListField(base_field.BaseField):
raise errors.TypeMismatch(list, value)
for item in value:
- self.validate(value)
+ self.validate(item)
old_val = copy.deepcopy(self.value)
sync_val = {}
self.value.clear()
|
motorturbine/fields/list_field.py
|
ReplaceText(target='item' @(32,26)->(32,31))
|
class ListField(base_field.BaseField):
raise errors.TypeMismatch(list, value)
for item in value:
self.validate(value)
old_val = copy.deepcopy(self.value)
sync_val = {}
self.value.clear()
|
class ListField(base_field.BaseField):
raise errors.TypeMismatch(list, value)
for item in value:
self.validate(item)
old_val = copy.deepcopy(self.value)
sync_val = {}
self.value.clear()
|
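
Record 2,184 validates the container from inside its own element loop: `self.validate(value)` re-enters validation on the whole list (unbounded recursion for any non-empty list), whereas the fix validates each `item`. A minimal stand-in for the field class (not the motorturbine API):

```python
class ListField:
    def __init__(self, item_type):
        self.item_type = item_type

    def validate(self, value):
        if isinstance(value, list):
            for item in value:
                self.validate(item)   # recurse on the element, never on `value`
        elif not isinstance(value, self.item_type):
            raise TypeError(f"expected {self.item_type.__name__}, got {value!r}")

ListField(int).validate([1, 2, 3])    # ok
# ListField(int).validate([1, "x"])   # raises TypeError
```
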
2,185 |
https://:@github.com/commerceblock/pymainstay.git
|
8cd31dc74cc63ab966ab5f6585d29972b089de1a
|
@@ -127,7 +127,7 @@ def parse_msc_args(raw_args):
dest='txid',
help="Verify that the proof sequence is committed to the staychain containing TxID")
- parser_verify.add_argument("-l","--list", type=str,
+ verify_group.add_argument("-l","--list", type=str,
dest='list',
help="Verify the list of comma separated commitments against the sequence proof")
|
mst/args.py
|
ReplaceText(target='verify_group' @(130,4)->(130,17))
|
def parse_msc_args(raw_args):
dest='txid',
help="Verify that the proof sequence is committed to the staychain containing TxID")
parser_verify.add_argument("-l","--list", type=str,
dest='list',
help="Verify the list of comma separated commitments against the sequence proof")
|
def parse_msc_args(raw_args):
dest='txid',
help="Verify that the proof sequence is committed to the staychain containing TxID")
verify_group.add_argument("-l","--list", type=str,
dest='list',
help="Verify the list of comma separated commitments against the sequence proof")
|
2,186 |
https://:@github.com/lxml-cffi/lxml-cffi.git
|
7baf99b8232de27923d0a607ac10a2f77f6a66e2
|
@@ -275,7 +275,7 @@ class Cleaner(object):
for el in _find_styled_elements(doc):
old = el.get('style')
new = _css_javascript_re.sub('', old)
- new = _css_import_re.sub('', old)
+ new = _css_import_re.sub('', new)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
del el.attrib['style']
|
src/lxml/html/clean.py
|
ReplaceText(target='new' @(278,49)->(278,52))
|
class Cleaner(object):
for el in _find_styled_elements(doc):
old = el.get('style')
new = _css_javascript_re.sub('', old)
new = _css_import_re.sub('', old)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
del el.attrib['style']
|
class Cleaner(object):
for el in _find_styled_elements(doc):
old = el.get('style')
new = _css_javascript_re.sub('', old)
new = _css_import_re.sub('', new)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
del el.attrib['style']
|
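
Record 2,186 is a broken sanitizer chain: the second `sub` is applied to `old` again, discarding the first pass, so `javascript:` payloads survived the cleaning. Each step must consume the previous step's output. A sketch with illustrative patterns (not lxml's real ones):

```python
import re

_css_javascript_re = re.compile(r"javascript:[^;]*", re.I)  # illustrative
_css_import_re = re.compile(r"@import[^;]*", re.I)          # illustrative

old = "color:red; @import url(evil.css); background:javascript:alert(1)"

new = _css_javascript_re.sub("", old)
new = _css_import_re.sub("", new)   # build on `new`, not `old`
print(new)                          # both payloads stripped
```
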
2,187 |
https://:@github.com/lxml-cffi/lxml-cffi.git
|
ce0d5931b9e5293b8375457de2e40c9c197f4dda
|
@@ -334,7 +334,7 @@ class LXMLOutputChecker(OutputChecker):
attrs.append('-%s="%s"' % (name, self.format_text(value, False)))
else:
if name in want.attrib:
- text = self.collect_diff_text(value, want.attrib[name], False)
+ text = self.collect_diff_text(want.attrib[name], value, False)
else:
text = self.format_text(value, False)
attrs.append('%s="%s"' % (name, text))
|
src/lxml/doctestcompare.py
|
ArgSwap(idxs=0<->1 @(337,27)->(337,49))
|
class LXMLOutputChecker(OutputChecker):
attrs.append('-%s="%s"' % (name, self.format_text(value, False)))
else:
if name in want.attrib:
text = self.collect_diff_text(value, want.attrib[name], False)
else:
text = self.format_text(value, False)
attrs.append('%s="%s"' % (name, text))
|
class LXMLOutputChecker(OutputChecker):
attrs.append('-%s="%s"' % (name, self.format_text(value, False)))
else:
if name in want.attrib:
text = self.collect_diff_text(want.attrib[name], value, False)
else:
text = self.format_text(value, False)
attrs.append('%s="%s"' % (name, text))
|
2,188 |
https://:@github.com/abinit/abiflows.git
|
03b1d8ebd465d1153f35d7956d1937418b7be7ae
|
@@ -296,7 +296,7 @@ class RelaxFWWorkflowSRC(AbstractFWWorkflow):
initialization_info=initialization_info,
wf_task_index_prefix='ioncell',
deps={SRC_ion_fws['run_fw'].tasks[0].task_type: '@structure'})
- fws.extend(SRC_ion_fws['fws'])
+ fws.extend(SRC_ioncell_fws['fws'])
links_dict.update(SRC_ioncell_fws['links_dict'])
links_dict.update({SRC_ion_fws['check_fw']: SRC_ioncell_fws['setup_fw']})
|
abiflows/fireworks/workflows/abinit_workflows.py
|
ReplaceText(target='SRC_ioncell_fws' @(299,19)->(299,30))
|
class RelaxFWWorkflowSRC(AbstractFWWorkflow):
initialization_info=initialization_info,
wf_task_index_prefix='ioncell',
deps={SRC_ion_fws['run_fw'].tasks[0].task_type: '@structure'})
fws.extend(SRC_ion_fws['fws'])
links_dict.update(SRC_ioncell_fws['links_dict'])
links_dict.update({SRC_ion_fws['check_fw']: SRC_ioncell_fws['setup_fw']})
|
class RelaxFWWorkflowSRC(AbstractFWWorkflow):
initialization_info=initialization_info,
wf_task_index_prefix='ioncell',
deps={SRC_ion_fws['run_fw'].tasks[0].task_type: '@structure'})
fws.extend(SRC_ioncell_fws['fws'])
links_dict.update(SRC_ioncell_fws['links_dict'])
links_dict.update({SRC_ion_fws['check_fw']: SRC_ioncell_fws['setup_fw']})
|
2,189 |
https://:@github.com/abinit/abiflows.git
|
5e2edecbad2a8bef67436888703c9ba54eb80a18
|
@@ -807,7 +807,7 @@ class AbiFireTask(BasicTaskMixin, FireTaskBase):
optconf, qadapter_spec, qtk_qadapter = self.run_autoparal(self.abiinput, os.path.abspath('.'), self.ftm)
if self.use_SRC_scheme:
return FWAction(mod_spec={'_set': {'_queueadapter': qadapter_spec, 'mpi_ncpus': optconf['mpi_ncpus'],
- 'optconf': optconf, 'qtk_queueadapter': qadapter_spec.as_dict()}})
+ 'optconf': optconf, 'qtk_queueadapter': qtk_qadapter.as_dict()}})
self.history.log_autoparal(optconf)
self.abiinput.set_vars(optconf.vars)
|
abiflows/fireworks/tasks/abinit_tasks.py
|
ReplaceText(target='qtk_qadapter' @(810,87)->(810,100))
|
class AbiFireTask(BasicTaskMixin, FireTaskBase):
optconf, qadapter_spec, qtk_qadapter = self.run_autoparal(self.abiinput, os.path.abspath('.'), self.ftm)
if self.use_SRC_scheme:
return FWAction(mod_spec={'_set': {'_queueadapter': qadapter_spec, 'mpi_ncpus': optconf['mpi_ncpus'],
'optconf': optconf, 'qtk_queueadapter': qadapter_spec.as_dict()}})
self.history.log_autoparal(optconf)
self.abiinput.set_vars(optconf.vars)
|
class AbiFireTask(BasicTaskMixin, FireTaskBase):
optconf, qadapter_spec, qtk_qadapter = self.run_autoparal(self.abiinput, os.path.abspath('.'), self.ftm)
if self.use_SRC_scheme:
return FWAction(mod_spec={'_set': {'_queueadapter': qadapter_spec, 'mpi_ncpus': optconf['mpi_ncpus'],
'optconf': optconf, 'qtk_queueadapter': qtk_qadapter.as_dict()}})
self.history.log_autoparal(optconf)
self.abiinput.set_vars(optconf.vars)
|
2,190 |
https://:@github.com/abinit/abiflows.git
|
701df642b1ef138bbbae47cf771f1671d8a15537
|
@@ -695,7 +695,7 @@ class PiezoElasticFWWorkflowSRC(AbstractFWWorkflow):
ec_nostress_clamped = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='clamped_ion')
ec_nostress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion')
- ec_stress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion_stress_corrected')
+ ec_stress_relaxed = myfw_stress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion_stress_corrected')
ec_dicts = {'clamped_ion': ec_nostress_clamped.extended_dict(),
'relaxed_ion': ec_nostress_relaxed.extended_dict(),
|
abiflows/fireworks/workflows/abinit_workflows.py
|
ReplaceText(target='myfw_stress' @(698,28)->(698,41))
|
class PiezoElasticFWWorkflowSRC(AbstractFWWorkflow):
ec_nostress_clamped = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='clamped_ion')
ec_nostress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion')
ec_stress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion_stress_corrected')
ec_dicts = {'clamped_ion': ec_nostress_clamped.extended_dict(),
'relaxed_ion': ec_nostress_relaxed.extended_dict(),
|
class PiezoElasticFWWorkflowSRC(AbstractFWWorkflow):
ec_nostress_clamped = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='clamped_ion')
ec_nostress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion')
ec_stress_relaxed = myfw_stress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion_stress_corrected')
ec_dicts = {'clamped_ion': ec_nostress_clamped.extended_dict(),
'relaxed_ion': ec_nostress_relaxed.extended_dict(),
|
2,191 |
https://:@github.com/abinit/abiflows.git
|
0d2231a3586ae709e7be68977369c685e9818e7b
|
@@ -409,7 +409,7 @@ def createSRCFireworks(setup_task, run_task, handlers=None, validators=None, spe
else:
src_task_index = SRCTaskIndex.from_task(run_task)
setup_spec = copy.deepcopy(spec)
- setup_spec['SRC_task_index'] = task_index
+ setup_spec['SRC_task_index'] = src_task_index
pass
|
abiflows/fireworks/tasks/src_tasks_abc.py
|
ReplaceText(target='src_task_index' @(412,35)->(412,45))
|
def createSRCFireworks(setup_task, run_task, handlers=None, validators=None, spe
else:
src_task_index = SRCTaskIndex.from_task(run_task)
setup_spec = copy.deepcopy(spec)
setup_spec['SRC_task_index'] = task_index
pass
|
def createSRCFireworks(setup_task, run_task, handlers=None, validators=None, spe
else:
src_task_index = SRCTaskIndex.from_task(run_task)
setup_spec = copy.deepcopy(spec)
setup_spec['SRC_task_index'] = src_task_index
pass
|
2,192 |
https://:@github.com/abinit/abiflows.git
|
597da3f247e9fc96c786b140f2bba3e734eed034
|
@@ -353,7 +353,7 @@ def createSRCFireworks(setup_task, run_task, control_task, spec=None, initializa
control_fw = Firework(control_task, spec=run_spec, name=src_task_index.run_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
- run_fw.fw_id: [control_task.fw_id]}
+ run_fw.fw_id: [control_fw.fw_id]}
return {'setup_fw': setup_fw, 'run_fw': run_fw, 'control_fw': control_fw, 'links_dict': links_dict,
'fws': [setup_fw, run_fw, control_fw]}
|
abiflows/fireworks/tasks/src_tasks_abc.py
|
ReplaceText(target='control_fw' @(356,33)->(356,45))
|
def createSRCFireworks(setup_task, run_task, control_task, spec=None, initializa
control_fw = Firework(control_task, spec=run_spec, name=src_task_index.run_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
run_fw.fw_id: [control_task.fw_id]}
return {'setup_fw': setup_fw, 'run_fw': run_fw, 'control_fw': control_fw, 'links_dict': links_dict,
'fws': [setup_fw, run_fw, control_fw]}
|
def createSRCFireworks(setup_task, run_task, control_task, spec=None, initializa
control_fw = Firework(control_task, spec=run_spec, name=src_task_index.run_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
run_fw.fw_id: [control_fw.fw_id]}
return {'setup_fw': setup_fw, 'run_fw': run_fw, 'control_fw': control_fw, 'links_dict': links_dict,
'fws': [setup_fw, run_fw, control_fw]}
|
2,193 |
https://:@github.com/abinit/abiflows.git
|
5379371a8a68ecf3226958a55cbcee8e6d2664d8
|
@@ -425,7 +425,7 @@ def createSRCFireworks(setup_task, run_task, control_task, spec=None, initializa
control_spec = copy.deepcopy(spec)
control_spec = set_short_single_core_to_spec(control_spec)
control_spec['SRC_task_index'] = src_task_index
- control_fw = Firework(control_task, spec=run_spec, name=src_task_index.control_str)
+ control_fw = Firework(control_task, spec=control_spec, name=src_task_index.control_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
run_fw.fw_id: [control_fw.fw_id]}
|
abiflows/fireworks/tasks/src_tasks_abc.py
|
ReplaceText(target='control_spec' @(428,45)->(428,53))
|
def createSRCFireworks(setup_task, run_task, control_task, spec=None, initializa
control_spec = copy.deepcopy(spec)
control_spec = set_short_single_core_to_spec(control_spec)
control_spec['SRC_task_index'] = src_task_index
control_fw = Firework(control_task, spec=run_spec, name=src_task_index.control_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
run_fw.fw_id: [control_fw.fw_id]}
|
def createSRCFireworks(setup_task, run_task, control_task, spec=None, initializa
control_spec = copy.deepcopy(spec)
control_spec = set_short_single_core_to_spec(control_spec)
control_spec['SRC_task_index'] = src_task_index
control_fw = Firework(control_task, spec=control_spec, name=src_task_index.control_str)
links_dict = {setup_fw.fw_id: [run_fw.fw_id],
run_fw.fw_id: [control_fw.fw_id]}
|
2,194 |
https://:@github.com/abinit/abiflows.git
|
54110d5a1108a2b41d4b038d78b3362ecb89fae9
|
@@ -397,7 +397,7 @@ class ControlTask(SRCTaskMixin, FireTaskBase):
modified_objects[update['key']] = mod
else:
new_spec[update['key']] = target_object
- modified_objects[update['key']] = mod
+ modified_objects[update['key']] = target_object
elif update['target'] == 'setup_fw_spec':
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
|
abiflows/fireworks/tasks/src_tasks_abc.py
|
ReplaceText(target='target_object' @(400,58)->(400,61))
|
class ControlTask(SRCTaskMixin, FireTaskBase):
modified_objects[update['key']] = mod
else:
new_spec[update['key']] = target_object
modified_objects[update['key']] = mod
elif update['target'] == 'setup_fw_spec':
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
|
class ControlTask(SRCTaskMixin, FireTaskBase):
modified_objects[update['key']] = mod
else:
new_spec[update['key']] = target_object
modified_objects[update['key']] = target_object
elif update['target'] == 'setup_fw_spec':
if 'mod' in update:
mod = getattr(target_object, update['mod'])()
|
2,195 |
https://:@github.com/ecotrust/madrona.git
|
9d39eb4684bcb2a3b37879c4d7641e1d608f9b78
|
@@ -18,7 +18,7 @@ def simpleLoad(request):
user = loadform.cleaned_data['user']
name = loadform.cleaned_data['name']
mpas = Mpa.objects.filter(user=user, name=name)
- return mpaLoad(request, loadform, mpas)
+ return mpaLoad(request, mpas, loadform)
def simpleCommit(request):
'''
|
example_projects/simple/views.py
|
ArgSwap(idxs=1<->2 @(21,11)->(21,18))
|
def simpleLoad(request):
user = loadform.cleaned_data['user']
name = loadform.cleaned_data['name']
mpas = Mpa.objects.filter(user=user, name=name)
return mpaLoad(request, loadform, mpas)
def simpleCommit(request):
'''
|
def simpleLoad(request):
user = loadform.cleaned_data['user']
name = loadform.cleaned_data['name']
mpas = Mpa.objects.filter(user=user, name=name)
return mpaLoad(request, mpas, loadform)
def simpleCommit(request):
'''
|
2,196 |
https://:@github.com/ecotrust/madrona.git
|
25d8645a5a7a7db8bed2cbc25e21ac164b4a0f50
|
@@ -58,7 +58,7 @@ def show(request, map_name='default'):
# if any one fails, 403 or 404 will be raised
user = request.user
from lingcod.sharing.utils import get_viewable_object_or_respond
- for pk in mpaids:
+ for pk in mpas:
# Does it even exist?
try:
obj = mpa_class.objects.get(pk=pk)
|
lingcod/staticmap/views.py
|
ReplaceText(target='mpas' @(61,14)->(61,20))
|
def show(request, map_name='default'):
# if any one fails, 403 or 404 will be raised
user = request.user
from lingcod.sharing.utils import get_viewable_object_or_respond
for pk in mpaids:
# Does it even exist?
try:
obj = mpa_class.objects.get(pk=pk)
|
def show(request, map_name='default'):
# if any one fails, 403 or 404 will be raised
user = request.user
from lingcod.sharing.utils import get_viewable_object_or_respond
for pk in mpas:
# Does it even exist?
try:
obj = mpa_class.objects.get(pk=pk)
|
2,197 |
https://:@github.com/YugaByte/cassandra-python-driver.git
|
6d34a00cee5b033bf285994af739a09419e447a2
|
@@ -89,8 +89,8 @@ class Metadata(object):
if keyspace in cf_def_rows:
for table_row in cf_def_rows[keyspace]:
table_meta = self._build_table_metadata(
- keyspace_meta, table_row, col_def_rows[keyspace])
- keyspace.tables[table_meta.name] = table_meta
+ keyspace_meta, table_row, col_def_rows[keyspace])
+ keyspace_meta.tables[table_meta.name] = table_meta
def _build_keyspace_metadata(self, row):
name = row["keyspace_name"]
|
cassandra/metadata.py
|
ReplaceText(target='keyspace_meta' @(93,20)->(93,28))
|
class Metadata(object):
if keyspace in cf_def_rows:
for table_row in cf_def_rows[keyspace]:
table_meta = self._build_table_metadata(
keyspace_meta, table_row, col_def_rows[keyspace])
keyspace.tables[table_meta.name] = table_meta
def _build_keyspace_metadata(self, row):
name = row["keyspace_name"]
|
class Metadata(object):
if keyspace in cf_def_rows:
for table_row in cf_def_rows[keyspace]:
table_meta = self._build_table_metadata(
keyspace_meta, table_row, col_def_rows[keyspace])
keyspace_meta.tables[table_meta.name] = table_meta
def _build_keyspace_metadata(self, row):
name = row["keyspace_name"]
|
2,198 |
https://:@github.com/YugaByte/cassandra-python-driver.git
|
c7a77b8862551e73fd09b749316c422eee7a2308
|
@@ -26,7 +26,7 @@ class RoundRobinPolicy(LoadBalancingPolicy):
def populate(self, cluster, hosts):
self._live_hosts = set(hosts)
- if len(hosts) == 1:
+ if len(hosts) <= 1:
self._position = 0
else:
self._position = randint(0, len(hosts) - 1)
|
cassandra/policies.py
|
ReplaceText(target='<=' @(29,22)->(29,24))
|
class RoundRobinPolicy(LoadBalancingPolicy):
def populate(self, cluster, hosts):
self._live_hosts = set(hosts)
if len(hosts) == 1:
self._position = 0
else:
self._position = randint(0, len(hosts) - 1)
|
class RoundRobinPolicy(LoadBalancingPolicy):
def populate(self, cluster, hosts):
self._live_hosts = set(hosts)
if len(hosts) <= 1:
self._position = 0
else:
self._position = randint(0, len(hosts) - 1)
|
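
Record 2,198 widens a boundary check: the `else` branch calls `randint(0, len(hosts) - 1)`, which raises `ValueError` for an empty host list, so the guard must at least cover the empty case; the fix folds the single-host case into the same branch. In isolation:

```python
from random import randint

def initial_position(hosts):
    # randint(0, len(hosts) - 1) is an empty range for an empty list,
    # so the <= 1 branch covers both zero and one host.
    if len(hosts) <= 1:
        return 0
    return randint(0, len(hosts) - 1)

print(initial_position([]))             # 0 instead of ValueError
print(initial_position(["h1"]))         # 0
print(initial_position(["h1", "h2"]))   # 0 or 1
```
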
2,199 |
https://:@github.com/YugaByte/cassandra-python-driver.git
|
2984ba71634e5c3d4b23bb42a977401ca60ffc01
|
@@ -230,7 +230,7 @@ class BaseModel(object):
'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__)
)
- if not issubclass(klass, poly_base):
+ if not issubclass(klass, cls):
raise PolyMorphicModelException(
'{} is not a subclass of {}'.format(klass.__name__, poly_base.__name__)
)
|
cqlengine/models.py
|
ReplaceText(target='cls' @(233,37)->(233,46))
|
class BaseModel(object):
'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__)
)
if not issubclass(klass, poly_base):
raise PolyMorphicModelException(
'{} is not a subclass of {}'.format(klass.__name__, poly_base.__name__)
)
|
class BaseModel(object):
'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__)
)
if not issubclass(klass, cls):
raise PolyMorphicModelException(
'{} is not a subclass of {}'.format(klass.__name__, poly_base.__name__)
)
|