Dataset columns:
  content             string (length 85 to 101k)
  title               string (length 0 to 150)
  question            string (length 15 to 48k)
  answers             list
  answers_scores      list
  non_answers         list
  non_answers_scores  list
  tags                list
  name                string (length 35 to 137)
Q: clean_list() --> ValueError: Wrong number of items passed 3, placement implies 1 I inherited this code from previous employee, and I tried to run this code but I'm getting an error. def replaceitem(x): if x in ['ORION', 'ACTION', 'ICE', 'IRIS', 'FOCUS']: return 'CRM Application' else: return x def clean_list(row): new_list = sorted(set(row['APLN_NM']), key=lambda x: row['APLN_NM'].index(x)) for idx,i in enumerate(new_list): new_list[idx] = replaceitem(i) new_list = sorted(set(new_list), key=lambda x: new_list.index(x)) return new_list #********************************************************************************************************************************************* df_agg['APLN_NM_DISTINCT'] = df_agg.apply(clean_list, axis = 1) df_agg_single['APLN_NM_DISTINCT'] = df_agg_single.apply(clean_list, axis = 1) While running the code I got this error: --------------------------------------------------------------------------- KeyError Traceback (most recent call last) /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2890 try: -> 2891 return self._engine.get_loc(casted_key) 2892 except KeyError as err: pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() KeyError: 'APLN_NM_DISTINCT' The above exception was the direct cause of the following exception: KeyError Traceback (most recent call last) /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/generic.py in _set_item(self, key, value) 3570 try: -> 3571 loc = self._info_axis.get_loc(key) 3572 except KeyError: /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2892 except KeyError as err: -> 2893 raise KeyError(key) from err 2894 KeyError: 'APLN_NM_DISTINCT' During handling of the above exception, another exception occurred: ValueError Traceback (most recent call last) <ipython-input-71-e8b5e8d5b514> in <module> 431 #********************************************************************************************************************************************* 432 df_agg['APLN_NM_DISTINCT'] = df_agg.apply(clean_list, axis = 1) --> 433 df_agg_single['APLN_NM_DISTINCT'] = df_agg_single.apply(clean_list, axis = 1) 434 435 df_agg['TOTAL_HOLD_TIME'] = df_agg_single['TOTAL_HOLD_TIME'].astype(int) /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/frame.py in __setitem__(self, key, value) 3038 else: 3039 # set column -> 3040 self._set_item(key, value) 3041 3042 def _setitem_slice(self, key: slice, value): /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/frame.py in _set_item(self, key, value) 3115 self._ensure_valid_index(value) 3116 value = self._sanitize_column(key, value) -> 3117 NDFrame._set_item(self, key, value) 3118 3119 # check if we are modifying a copy /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/generic.py in _set_item(self, key, value) 3572 except KeyError: 3573 # This item wasn't present, just insert at end -> 3574 self._mgr.insert(len(self._info_axis), key, value) 3575 return 3576 /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/managers.py in insert(self, loc, item, value, allow_duplicates) 1187 
value = _safe_reshape(value, (1,) + value.shape) 1188 -> 1189 block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) 1190 1191 for blkno, count in _fast_count_smallints(self.blknos[loc:]): /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/blocks.py in make_block(values, placement, klass, ndim, dtype) 2717 values = DatetimeArray._simple_new(values, dtype=dtype) 2718 -> 2719 return klass(values, ndim=ndim, placement=placement) 2720 2721 /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/blocks.py in __init__(self, values, placement, ndim) 2373 values = np.array(values, dtype=object) 2374 -> 2375 super().__init__(values, ndim=ndim, placement=placement) 2376 2377 @property /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/blocks.py in __init__(self, values, placement, ndim) 128 if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values): 129 raise ValueError( --> 130 f"Wrong number of items passed {len(self.values)}, " 131 f"placement implies {len(self.mgr_locs)}" 132 ) ValueError: Wrong number of items passed 3, placement implies 1 df_agg and df_agg_single are dataframes with same column names. But the data is present only in df_agg data in df_agg dataframe looks like this data in df_agg_single dataframe looks like this so if the data frame is empty I am getting this type of error while applying clean_list method on the data frame. A: I identified the error is occurring only if the data frame is empty, so I tried if else to filter the empty data frame and it worked. if df_agg.empty: df_agg['APLN_NM_DISTINCT'] = '' else: df_agg['APLN_NM_DISTINCT'] = df_agg.apply(clean_list, axis = 1) if df_agg_single.empty: df_agg_single['APLN_NM_DISTINCT'] = '' else: df_agg_single['APLN_NM_DISTINCT'] = df_agg_single.apply(clean_list, axis = 1)
clean_list() --> ValueError: Wrong number of items passed 3, placement implies 1
I inherited this code from previous employee, and I tried to run this code but I'm getting an error. def replaceitem(x): if x in ['ORION', 'ACTION', 'ICE', 'IRIS', 'FOCUS']: return 'CRM Application' else: return x def clean_list(row): new_list = sorted(set(row['APLN_NM']), key=lambda x: row['APLN_NM'].index(x)) for idx,i in enumerate(new_list): new_list[idx] = replaceitem(i) new_list = sorted(set(new_list), key=lambda x: new_list.index(x)) return new_list #********************************************************************************************************************************************* df_agg['APLN_NM_DISTINCT'] = df_agg.apply(clean_list, axis = 1) df_agg_single['APLN_NM_DISTINCT'] = df_agg_single.apply(clean_list, axis = 1) While running the code I got this error: --------------------------------------------------------------------------- KeyError Traceback (most recent call last) /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2890 try: -> 2891 return self._engine.get_loc(casted_key) 2892 except KeyError as err: pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item() KeyError: 'APLN_NM_DISTINCT' The above exception was the direct cause of the following exception: KeyError Traceback (most recent call last) /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/generic.py in _set_item(self, key, value) 3570 try: -> 3571 loc = self._info_axis.get_loc(key) 3572 except KeyError: /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance) 2892 except KeyError as err: -> 2893 raise KeyError(key) from err 2894 KeyError: 'APLN_NM_DISTINCT' During handling of the above exception, another exception occurred: ValueError Traceback (most recent call last) <ipython-input-71-e8b5e8d5b514> in <module> 431 #********************************************************************************************************************************************* 432 df_agg['APLN_NM_DISTINCT'] = df_agg.apply(clean_list, axis = 1) --> 433 df_agg_single['APLN_NM_DISTINCT'] = df_agg_single.apply(clean_list, axis = 1) 434 435 df_agg['TOTAL_HOLD_TIME'] = df_agg_single['TOTAL_HOLD_TIME'].astype(int) /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/frame.py in __setitem__(self, key, value) 3038 else: 3039 # set column -> 3040 self._set_item(key, value) 3041 3042 def _setitem_slice(self, key: slice, value): /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/frame.py in _set_item(self, key, value) 3115 self._ensure_valid_index(value) 3116 value = self._sanitize_column(key, value) -> 3117 NDFrame._set_item(self, key, value) 3118 3119 # check if we are modifying a copy /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/generic.py in _set_item(self, key, value) 3572 except KeyError: 3573 # This item wasn't present, just insert at end -> 3574 self._mgr.insert(len(self._info_axis), key, value) 3575 return 3576 /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/managers.py in insert(self, loc, item, value, allow_duplicates) 1187 value = _safe_reshape(value, (1,) + value.shape) 1188 -> 1189 block = 
make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) 1190 1191 for blkno, count in _fast_count_smallints(self.blknos[loc:]): /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/blocks.py in make_block(values, placement, klass, ndim, dtype) 2717 values = DatetimeArray._simple_new(values, dtype=dtype) 2718 -> 2719 return klass(values, ndim=ndim, placement=placement) 2720 2721 /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/blocks.py in __init__(self, values, placement, ndim) 2373 values = np.array(values, dtype=object) 2374 -> 2375 super().__init__(values, ndim=ndim, placement=placement) 2376 2377 @property /opt/rh/rh-python36/root/usr/lib64/python3.6/site-packages/pandas/core/internals/blocks.py in __init__(self, values, placement, ndim) 128 if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values): 129 raise ValueError( --> 130 f"Wrong number of items passed {len(self.values)}, " 131 f"placement implies {len(self.mgr_locs)}" 132 ) ValueError: Wrong number of items passed 3, placement implies 1 df_agg and df_agg_single are dataframes with same column names. But the data is present only in df_agg data in df_agg dataframe looks like this data in df_agg_single dataframe looks like this so if the data frame is empty I am getting this type of error while applying clean_list method on the data frame.
[ "I identified the error is occurring only if the data frame is empty, so I tried if else to filter the empty data frame and it worked.\nif df_agg.empty:\n df_agg['APLN_NM_DISTINCT'] = ''\nelse:\n df_agg['APLN_NM_DISTINCT'] = df_agg.apply(clean_list, axis = 1)\n \n\nif df_agg_single.empty:\n df_agg_single['APLN_NM_DISTINCT'] = ''\nelse:\n df_agg_single['APLN_NM_DISTINCT'] = df_agg_single.apply(clean_list, axis = 1)\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "keyerror", "pandas", "python", "valueerror" ]
stackoverflow_0074554336_dataframe_keyerror_pandas_python_valueerror.txt
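A minimal sketch of the workaround above as a reusable helper (the helper name and column name are illustrative, not from the original code). On an empty frame, apply never manages to call the function, so pandas returns a copy of the empty frame with its original columns, and assigning that multi-column object to a single column is what raises the ValueError:

import pandas as pd

def add_distinct_column(df, clean_func, col='APLN_NM_DISTINCT'):
    # Guard the empty case: apply(..., axis=1) on an empty frame returns an
    # empty DataFrame, not a Series, which cannot be assigned to one column.
    if df.empty:
        df[col] = pd.Series(dtype=object)
    else:
        df[col] = df.apply(clean_func, axis=1)
    return df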
Q: How to not change value if an input is empty I have written this code for a form I was working on. <div class="col-md-6"> <label class="labels">Doğum Günü:</label> <input method="POST"name="birtdate" class="form-control" {% if student.birtdate%} type="text"value="{{student.birtdate}}" onfocus="(this.type='date')"onblur="(this.type='text')" {%else %}type="date" {% endif %}> </div> It should show the initial value when blur and turn into a date field when focused. The problem is when I click on it and then click away it shows an empty value. I need to somehow check that and make it not change the value if the input is empty. How can I do that? A: I'm not sure but if your input changes onblur then does it retain and translate the value from date to text. I think the value resets to default when you change the type. it would be better to write a function to parse the date to text and then set this.value to that instead of just simply using one line statements. date = this.value this.type = text this.value = date.toString()
How to not change value if an input is empty
I have written this code for a form I was working on. <div class="col-md-6"> <label class="labels">Doğum Günü:</label> <input method="POST"name="birtdate" class="form-control" {% if student.birtdate%} type="text"value="{{student.birtdate}}" onfocus="(this.type='date')"onblur="(this.type='text')" {%else %}type="date" {% endif %}> </div> It should show the initial value when blur and turn into a date field when focused. The problem is when I click on it and then click away it shows an empty value. I need to somehow check that and make it not change the value if the input is empty. How can I do that?
[ "I'm not sure but if your input changes onblur then does it retain and translate the value from date to text. I think the value resets to default when you change the type. it would be better to write a function to parse the date to text and then set this.value to that instead of just simply using one line statements.\ndate = this.value\nthis.type = text\nthis.value = date.toString()\n\n" ]
[ 0 ]
[]
[]
[ "django", "html", "javascript", "python" ]
stackoverflow_0070632417_django_html_javascript_python.txt
Q: Is it possible to use properties on module variables instead of instance attributes? What I wish: # main.py import config config.test = True # prints 'config: test is set to True' print(config.test) # True What I tried: # config.py _test = False @property def test(): return _test @test.setter def test(new_value): global _test _test = new_value logger.info(f'config: test is set to {new_value}') Avoiding using a conventional method like config.set_test(new_value) would be neat. Thanks :D A: Make it a class class Config: @property def test(self): return self._test @test.setter def test(self, value): self._test = value def __init__(self, **kwargs): self._test = None However, in a way this makes no sense, and the reason is because you have a getter AND setter so, really you are just going the long way around to set the var. You could just convert self._test to self.test and remove the getter/setter for the same results. The below version only has a getter which implies you have a var that you want to modify internally, but be "read-only" publically. class Config: @property def test(self): return self._test def __init__(self, **kwargs): self._test = None IMO, it only makes sense to have a setter if the guts of the setter are doing something non-trivial. For instance, if you were using tkinter and you wanted to convert the options to properties, it might make sense to have a setter because what you are getting and setting is actually TCL calls. In other words, it's not just some alias for a private var. Example: #mixin to convert root attributes and options to properties class Root_mi: @property def alpha(self) -> float: return self.tk.call('wm', 'attributes', self._w, '-alpha') @alpha.setter def alpha(self, f:float) -> None: self.tk.call('wm', 'attributes', self._w, '-alpha', f) @property def alwaysontop(self) -> bool: return self.tk.call('wm', 'attributes', self._w, '-topmost') @alwaysontop.setter def alwaysontop(self, b:bool) -> None: self.tk.call('wm', 'attributes', self._w, '-topmost', b) #more...
Is it possible to use properties on module variables instead of instance attributes?
What I wish: # main.py import config config.test = True # prints 'config: test is set to True' print(config.test) # True What I tried: # config.py _test = False @property def test(): return _test @test.setter def test(new_value): global _test _test = new_value logger.info(f'config: test is set to {new_value}') Avoiding using a conventional method like config.set_test(new_value) would be neat. Thanks :D
[ "Make it a class\nclass Config:\n @property\n def test(self):\n return self._test\n\n @test.setter\n def test(self, value):\n self._test = value\n\n def __init__(self, **kwargs):\n self._test = None\n\nHowever, in a way this makes no sense, and the reason is because you have a getter AND setter so, really you are just going the long way around to set the var. You could just convert self._test to self.test and remove the getter/setter for the same results.\nThe below version only has a getter which implies you have a var that you want to modify internally, but be \"read-only\" publically.\nclass Config:\n @property\n def test(self):\n return self._test\n\n def __init__(self, **kwargs):\n self._test = None\n\nIMO, it only makes sense to have a setter if the guts of the setter are doing something non-trivial. For instance, if you were using tkinter and you wanted to convert the options to properties, it might make sense to have a setter because what you are getting and setting is actually TCL calls. In other words, it's not just some alias for a private var.\nExample:\n#mixin to convert root attributes and options to properties\nclass Root_mi: \n @property\n def alpha(self) -> float:\n return self.tk.call('wm', 'attributes', self._w, '-alpha')\n \n @alpha.setter\n def alpha(self, f:float) -> None:\n self.tk.call('wm', 'attributes', self._w, '-alpha', f)\n \n @property\n def alwaysontop(self) -> bool:\n return self.tk.call('wm', 'attributes', self._w, '-topmost')\n \n @alwaysontop.setter\n def alwaysontop(self, b:bool) -> None:\n self.tk.call('wm', 'attributes', self._w, '-topmost', b)\n \n#more...\n\n" ]
[ 0 ]
[]
[]
[ "module", "properties", "python" ]
stackoverflow_0074576773_module_properties_python.txt
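Neither answer keeps the exact import config; config.test = True usage from the question. One known pattern for that (an assumption on my part, not taken from the answers above) is to swap the module object in sys.modules for an instance of a property-carrying class:

# config.py, sketched so that "import config; config.test = True" hits the setter
import logging
import sys

logger = logging.getLogger(__name__)

class _Config:
    def __init__(self):
        self._test = False

    @property
    def test(self):
        return self._test

    @test.setter
    def test(self, new_value):
        self._test = new_value
        logger.info(f'config: test is set to {new_value}')

# Replace the module with the instance; later imports receive the instance.
sys.modules[__name__] = _Config()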
Q: Why is scipy's probability density function not accepting size of my mean? I am using a function to calculate a likelihood density. I am running through two xs which are vectors of length 7. def lhd(x0, x1, dt): #Define a function to calculate the likelihood density given two values. d = len(x0) #Save the length of the inputs for the below pdf input. print(d) print(len(x1)) lh = multivariate_normal.pdf(x1, mean=(1-dt)*x0, cov=2*dt*np.identity(d)) #Take the pdf from a multivariate normal built from x0, given x1. return lh #Return this pdf value. The mean here is a vector of length 7, and the covariance is a (7,7) array. When I run this, I get the error ValueError: Array 'mean' must be a vector of length 49. but looking at the formula of the pdf I do not think this is correct. Any idea what is going wrong here? A: If dt is a (7,7) array, (1-dt) is also (7,7), the * operator in (1-dt)*x0 is the element-wise multiplication, if x0 is a vector of length 7 the result will be a (7,7) array. I guess you meant to use matrix multiplication, you can that this using the x0 - dt @ x0 (where @ denotes the matrix multiplication operator).
Why is scipy's probability density function not accepting size of my mean?
I am using a function to calculate a likelihood density. I am running through two xs which are vectors of length 7. def lhd(x0, x1, dt): #Define a function to calculate the likelihood density given two values. d = len(x0) #Save the length of the inputs for the below pdf input. print(d) print(len(x1)) lh = multivariate_normal.pdf(x1, mean=(1-dt)*x0, cov=2*dt*np.identity(d)) #Take the pdf from a multivariate normal built from x0, given x1. return lh #Return this pdf value. The mean here is a vector of length 7, and the covariance is a (7,7) array. When I run this, I get the error ValueError: Array 'mean' must be a vector of length 49. but looking at the formula of the pdf I do not think this is correct. Any idea what is going wrong here?
[ "If dt is a (7,7) array, (1-dt) is also (7,7), the * operator in (1-dt)*x0 is the element-wise multiplication, if x0 is a vector of length 7 the result will be a (7,7) array.\nI guess you meant to use matrix multiplication, you can that this using the x0 - dt @ x0 (where @ denotes the matrix multiplication operator).\n" ]
[ 0 ]
[]
[]
[ "python", "scipy", "scipy.stats" ]
stackoverflow_0074571987_python_scipy_scipy.stats.txt
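A small sketch of the shape point made in the answer, assuming dt really is a (7, 7) array; the cov argument is kept as a plain scaled identity here just for illustration:

import numpy as np
from scipy.stats import multivariate_normal

d = 7
rng = np.random.default_rng(0)
x0, x1 = rng.random(d), rng.random(d)
dt = 0.1 * np.eye(d)                 # assumed (7, 7), as in the answer

elementwise_mean = (1 - dt) * x0     # broadcasts to (7, 7), i.e. 49 values
matmul_mean = x0 - dt @ x0           # stays a length-7 vector
print(elementwise_mean.shape, matmul_mean.shape)   # (7, 7) (7,)

lh = multivariate_normal.pdf(x1, mean=matmul_mean, cov=0.2 * np.identity(d))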
Q: input to sklearn pipeline from previous step and from the fitted data I have a sklearn pipeline like the following: features = Pipeline([ ('feats_A', Function_transformer_A()) ('feats_B', Function_transformer_B()) ]) features.fit(X) The input to feats_A is the fitted data X. And, the input to feats_B is the output from feats_A. Instead, I want to be the input to feats_B the fitted data X and the output from feats_A, together. Given that, these two different data matrices could have different dimensions; Function_transformer_A applies aggregation to process the input data. Is it possible? A: You can try using FeatureUnion def blank(df): return df subpipe = FeatureUnion( [('prep_data', Function_transformer(blank)), ('feats_A', Function_transformer_A())]) features = Pipeline([ ('subpipe', subpipe) ('feats_B', Function_transformer_B()) ])
input to sklearn pipeline from previous step and from the fitted data
I have a sklearn pipeline like the following: features = Pipeline([ ('feats_A', Function_transformer_A()) ('feats_B', Function_transformer_B()) ]) features.fit(X) The input to feats_A is the fitted data X. And, the input to feats_B is the output from feats_A. Instead, I want to be the input to feats_B the fitted data X and the output from feats_A, together. Given that, these two different data matrices could have different dimensions; Function_transformer_A applies aggregation to process the input data. Is it possible?
[ "You can try using FeatureUnion\ndef blank(df):\n return df\n\nsubpipe = FeatureUnion(\n [('prep_data', Function_transformer(blank)),\n ('feats_A', Function_transformer_A())])\n \nfeatures = Pipeline([\n\n ('subpipe', subpipe)\n ('feats_B', Function_transformer_B())\n ])\n\n" ]
[ 0 ]
[]
[]
[ "pipeline", "python", "scikit_learn" ]
stackoverflow_0056969723_pipeline_python_scikit_learn.txt
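A runnable sketch of the FeatureUnion idea with sklearn's built-in FunctionTransformer standing in for the question's custom transformers (the lambdas are placeholders). Note that FeatureUnion stacks its branches column-wise, so each branch must keep the same number of rows even if feats_A changes the number of columns:

import numpy as np
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import FunctionTransformer

passthrough = FunctionTransformer(lambda X: X)                          # the "blank" step
feats_a = FunctionTransformer(lambda X: X.sum(axis=1, keepdims=True))   # stand-in for Function_transformer_A
feats_b = FunctionTransformer(lambda X: X * 2.0)                        # stand-in for Function_transformer_B

features = Pipeline([
    ('subpipe', FeatureUnion([('prep_data', passthrough), ('feats_A', feats_a)])),
    ('feats_B', feats_b),
])

X = np.arange(12.0).reshape(4, 3)
print(features.fit_transform(X).shape)   # (4, 4): the 3 raw columns plus A's aggregate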
Q: Is there any way to get username from USERID? Code I'm using to get userid: user = update.message.from_user userid = user['id'] Is there any way to turn userid into username? A: Since you have the full user available, you can just use user.username: python-telegram-bot docs, telegram docs. If you only have the user id and not the full User object, you can query info about the user using getChat: PTB docs, TG docs. Disclaimer: I'm currently the maintainer of python-telegram-bot
Is there any way to get username from USERID?
Code I'm using to get userid: user = update.message.from_user userid = user['id'] Is there any way to turn userid into username?
[ "Since you have the full user available, you can just use user.username: python-telegram-bot docs, telegram docs. If you only have the user id and not the full User object, you can query info about the user using getChat: PTB docs, TG docs.\n\nDisclaimer: I'm currently the maintainer of python-telegram-bot\n" ]
[ 0 ]
[]
[]
[ "python", "python_telegram_bot", "telegram" ]
stackoverflow_0074575159_python_python_telegram_bot_telegram.txt
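A short sketch for the python-telegram-bot v20+ async API; in the older synchronous versions the same call is bot.get_chat(...) without await. Here update and context are the usual handler arguments:

async def show_username(update, context):
    user = update.message.from_user
    print(user.username)                      # may be None if the user set no @username

    # With only the numeric id at hand, ask Telegram via getChat:
    chat = await context.bot.get_chat(user.id)
    print(chat.username)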
Q: get ip address using os.system in python Im new to python and Im trying to get the IP Address of my network card using the following: import sys import os ip_address = os.system('/sbin/ifconfig ens33 | grep "inet" |awk '/inet / { print $2 }' | cut -d":" -f2') However it returns the following error: ip_address = os.system('/sbin/ifconfig ens33 | grep "inet" |awk '/inet / { print $2 }' | cut -d":" -f2') ^ SyntaxError: invalid syntax If I just have in up to here it get some of the output: ip_address = os.system('/sbin/ifconfig ens33 | grep "inet" ') inet 192.168.130.130 netmask 255.255.255.0 broadcast 192.168.130.255 inet6 fe80::97b9:2816:c3a3:e02e prefixlen 64 scopeid 0x20 Is there a way to do this using os and sys ? A: Here is a way: import subprocess cmd = """/sbin/ifconfig eth0 | grep "inet" | awk '/inet / { print $2 }' | cut -d: -f2""" r = subprocess.run(cmd, shell=True, capture_output=True, universal_newlines=True) private_ip = r.stdout.strip() >>> obfuscate_ip(private_ip) # see footnote '55.3.93.202' (For your case: use ens33 instead of eth0). That said, you are better off using socket: import socket def get_private_ip(): with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as st: st.settimeout(0.0) try: st.connect(('8.8.8.8', 80)) ip = st.getsockname()[0] except socket.error: ip = '127.0.0.1' return ip private_ip2 = get_private_ip() >>> obfuscate_ip(private_ip2) '55.3.93.202' And: assert private_ip2 == private_ip Footnote: I don't like to reveal my actual IP address (even if just the private one). Thus: import numpy as np import contextlib @contextlib.contextmanager def seed_context(seed): state = np.random.get_state() np.random.seed(seed) try: yield finally: np.random.set_state(state) def obfuscate_ip(ip): with seed_context(id(obfuscate_ip) >> 16): mul = np.random.randint(1, 255, 4) off = np.random.randint(1, 255, 4) parts = (np.array([int(x) for x in ip.split('.')]) * mul + off) % 256 return '.'.join(map(str, parts))
get ip address using os.system in python
Im new to python and Im trying to get the IP Address of my network card using the following: import sys import os ip_address = os.system('/sbin/ifconfig ens33 | grep "inet" |awk '/inet / { print $2 }' | cut -d":" -f2') However it returns the following error: ip_address = os.system('/sbin/ifconfig ens33 | grep "inet" |awk '/inet / { print $2 }' | cut -d":" -f2') ^ SyntaxError: invalid syntax If I just have in up to here it get some of the output: ip_address = os.system('/sbin/ifconfig ens33 | grep "inet" ') inet 192.168.130.130 netmask 255.255.255.0 broadcast 192.168.130.255 inet6 fe80::97b9:2816:c3a3:e02e prefixlen 64 scopeid 0x20 Is there a way to do this using os and sys ?
[ "Here is a way:\nimport subprocess\n\ncmd = \"\"\"/sbin/ifconfig eth0 | grep \"inet\" | awk '/inet / { print $2 }' | cut -d: -f2\"\"\"\nr = subprocess.run(cmd, shell=True, capture_output=True, universal_newlines=True)\nprivate_ip = r.stdout.strip()\n\n>>> obfuscate_ip(private_ip) # see footnote\n'55.3.93.202'\n\n(For your case: use ens33 instead of eth0).\nThat said, you are better off using socket:\nimport socket\n\ndef get_private_ip():\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as st:\n st.settimeout(0.0)\n try:\n st.connect(('8.8.8.8', 80))\n ip = st.getsockname()[0]\n except socket.error:\n ip = '127.0.0.1'\n return ip\n\nprivate_ip2 = get_private_ip()\n\n>>> obfuscate_ip(private_ip2)\n'55.3.93.202'\n\nAnd:\nassert private_ip2 == private_ip\n\n\nFootnote: I don't like to reveal my actual IP address (even if just the private one). Thus:\nimport numpy as np\nimport contextlib\n\n@contextlib.contextmanager\ndef seed_context(seed):\n state = np.random.get_state()\n np.random.seed(seed)\n try:\n yield\n finally:\n np.random.set_state(state)\n\ndef obfuscate_ip(ip):\n with seed_context(id(obfuscate_ip) >> 16):\n mul = np.random.randint(1, 255, 4)\n off = np.random.randint(1, 255, 4)\n parts = (np.array([int(x) for x in ip.split('.')]) * mul + off) % 256\n return '.'.join(map(str, parts))\n\n" ]
[ 1 ]
[]
[]
[ "os.system", "python", "python_3.x" ]
stackoverflow_0074575573_os.system_python_python_3.x.txt
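For context, the SyntaxError in the question comes from the unescaped inner single quotes around the awk program, and os.system() only returns the command's exit status rather than its output. A close shell-based alternative (interface name ens33 taken from the question) would be:

import subprocess

cmd = "/sbin/ifconfig ens33 | grep 'inet ' | awk '{print $2}'"
ip_address = subprocess.check_output(cmd, shell=True, text=True).strip()
print(ip_address)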
Q: How to convert different formats of date timestamp to the format of timestamp in hive table I have a list of different formats of timestamp. How to change its for to the format accepted in hive tables. For eg. 20210811:12:55:56.563 to 2021-08-11 12:55:56.563 25/05/1999 02:35:05.532 to 1999-05-25 02:35:05.532 . How to do it in python. I have around 7-8 different formats. Does anyone have any ideas or approach around it. Your ideas are most welcome. A: You can use below py function to check the format. If below functions returns not none values, then its in expected format else no. Return value will be a date time in yyyy-MM-dd HH:MI:SS.SSSSS format. You can easily insert this into hive date time field. import datetime #formats to be checked fmts=['%d/%m/%Y %H:%M:%S.%f','%Y%m%d %H:%M:%S.%f','%d-%m-%Y %H:%M:%S.%f'] #func to check the formats and return proper date time if its in correct format. def try_strptime(s, fmts=fmts): for fmt in fmts: try: return datetime.strptime(s, fmt) except: continue return None
How to convert different formats of date timestamp to the format of timestamp in hive table
I have a list of different formats of timestamp. How to change its for to the format accepted in hive tables. For eg. 20210811:12:55:56.563 to 2021-08-11 12:55:56.563 25/05/1999 02:35:05.532 to 1999-05-25 02:35:05.532 . How to do it in python. I have around 7-8 different formats. Does anyone have any ideas or approach around it. Your ideas are most welcome.
[ "You can use below py function to check the format. If below functions returns not none values, then its in expected format else no. Return value will be a date time in yyyy-MM-dd HH:MI:SS.SSSSS format. You can easily insert this into hive date time field.\nimport datetime\n\n#formats to be checked\nfmts=['%d/%m/%Y %H:%M:%S.%f','%Y%m%d %H:%M:%S.%f','%d-%m-%Y %H:%M:%S.%f']\n\n#func to check the formats and return proper date time if its in correct format.\ndef try_strptime(s, fmts=fmts):\n for fmt in fmts:\n try:\n return datetime.strptime(s, fmt)\n except:\n continue\n\n return None \n \n\n" ]
[ 0 ]
[]
[]
[ "hive", "python" ]
stackoverflow_0074572248_hive_python.txt
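A self-contained variant of the answer's idea that also emits Hive's default timestamp layout; note the answer's snippet needs from datetime import datetime (with a plain import datetime the call would be datetime.datetime.strptime). The format list below covers only the two examples from the question and would need extending to all 7-8 formats:

from datetime import datetime

FMTS = ['%Y%m%d:%H:%M:%S.%f', '%d/%m/%Y %H:%M:%S.%f']

def to_hive_timestamp(s, fmts=FMTS):
    for fmt in fmts:
        try:
            # yyyy-MM-dd HH:mm:ss.SSS, trimmed from microseconds to milliseconds
            return datetime.strptime(s, fmt).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
        except ValueError:
            continue
    return None

print(to_hive_timestamp('20210811:12:55:56.563'))    # 2021-08-11 12:55:56.563
print(to_hive_timestamp('25/05/1999 02:35:05.532'))  # 1999-05-25 02:35:05.532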
Q: How can I use MyPy to overload the __init__ method to adjust a getter's return value? Let's say I have a class like this (pseudo-code, please ignore the odd db structure): class Blog(): title = StringProperty() comments = StringProperty(repeated=True) I want to type check StringProperty such that Blog().title returns a str type, and Blog().comments returns a List[str] type. MyPy mentions that something like this is possible by dynamically typing the __init__ method. Here's what I've tried: U = TypeVar('U', bound=StringProperty) V = TypeVar('V', bound=StringProperty) class StringProperty(Property[T]): @overload def __init__(self: StringProperty[U], repeated: Literal[False]=False, **kwargs) -> None: ... @overload def __init__(self: StringProperty[V], repeated: Literal[True]=True, **kwargs) -> None: ... @overload def __get__(self: StringProperty[U], instance, cls) -> str: ... @overload def __get__(self: StringProperty[V], instance, cls) -> List[str]: ... def __set__(self, instance, value: Optional[Union[str, List[str]]]) -> None: ... However, this throws an error that the second __get__ type signature will never be matched. How can I set MyPy to know the return value of the StringProperty.__get__ method dynamically by whether the repeated property is True or False? A: __init__ can be overloaded. self will become the given type. TypeVar needs to become some kind of real type during type analysis. It can't stay as T or U or V. It must be filled in with a type like str or Literal["foo"]. from __future__ import annotations from typing import TypeVar, overload, Literal, Generic _GetReturnT = TypeVar('_GetReturnT', str, list[str], str | list[str]) class StringProperty(Generic[_GetReturnT]): # Handles the default value case too. @overload def __init__(self: StringProperty[str], repeated: Literal[False]=False, **kwargs) -> None: ... @overload def __init__(self: StringProperty[list[str]], repeated: Literal[True], **kwargs) -> None: ... # Callers won't always pass a literal bool right at the call site. The bool # could come from somewhere far. Then we can't know what exactly get() # will return. @overload def __init__(self: StringProperty[str | list[str]], repeated: bool, **kwargs) -> None: ... def __init__(self, repeated: bool = False, **kwargs) -> None: self._repeated = repeated def get(self) -> _GetReturnT: if self._repeated: return ["Hello", "world!"] else: return "just one string" default = StringProperty() # StringProperty[str] default_get = default.get() # str false_literal = StringProperty(repeated=False) # StringProperty[str] false_literal_get = false_literal.get() # str true_literal = StringProperty(repeated=True) # StringProperty[list[str]] true_literal_get = true_literal.get() # list[str] import random some_bool = random.choice([True, False]) # bool unknown_bool = StringProperty(repeated=some_bool) # StringProperty[str | list[str]] unknown_bool_get = unknown_bool.get() # str | list[str] reveal_locals() # error: Value of type variable "_GetReturnT" of "StringProperty" cannot be "int" # # This error happens because we limited _GetReturnT's possible types in # TypeVar(). If we didn't limit the types, users could accidentally refer to a # type in an annotation that's impossible to instantiate. def some_user_function(prop: StringProperty[int]) -> None: prop.get() Note that setting and reading self._repeated does not aid in typing here in any way. StringProperty gets its type only from what types were passed to the constructor. 
If someone runs false_literal._repeated = True, then false_literal.get() would return ["Hello", "world!"], and the typing information is wrong. Using str or list[str] for StringProperty's type here was convenient. But the type can be less specific for weirder classes. Here we could've used Literal[True], Literal[False], and Literal[True] | Literal[False] to represent the quality of being repeated. Then get() would need overrides based on self to determine the return type. _T = TypeVar('_T', Literal["NOT_REPEATED"], Literal["REPEATED"], Literal[MyEnum.AMAZING], Literal[MyEnum.KINDA_OK_I_GUESS]) # For brevity I don't show Unions in this example, but you'd # need them for a class that works properly. class StringProperty(Generic[_T]): @overload def __init__(self: StringProperty[Literal["NOT_REPEATED"]], repeated: Literal[False]) -> None: ... @overload def __init__(self: StringProperty[Literal["REPEATED"]], repeated: Literal[True]) -> None: ... def __init__(self, repeated: bool) -> None: self._repeated = repeated @overload def get(self: StringProperty[Literal["NOT_REPEATED"]]) -> str: ... @overload def get(self: StringProperty[Literal["REPEATED"]]) -> list[str]: ... def get(self) -> str | list[str]: if self._repeated: return ["Hello", "world!"] else: return "just one string" A: I also had to overload get in @Slix first example: from __future__ import annotations from typing import TypeVar, overload, Literal, Generic _GetReturnT = TypeVar('_GetReturnT', str, list[str], str | list[str]) class StringProperty(Generic[_GetReturnT]): # Handles the default value case too. @overload def __init__(self: StringProperty[str], repeated: Literal[False]=False, **kwargs) -> None: ... @overload def __init__(self: StringProperty[list[str]], repeated: Literal[True], **kwargs) -> None: ... # Callers won't always pass a literal bool right at the call site. The bool # could come from somewhere far. Then we can't know what exactly get() # will return. @overload def __init__(self: StringProperty[str | list[str]], repeated: bool, **kwargs) -> None: ... def __init__(self, repeated: bool = False, **kwargs) -> None: self._repeated = repeated @overload def get(self: StringProperty[str]) -> str: ... @overload def get(self: StringProperty[list[str]]) -> list[str]: ... @overload def get(self: StringProperty[str | list[str]]) -> str | list[str]: ... def get(self) -> str | list[str]: if self._repeated: return ["Hello", "world!"] else: return "just one string" default = StringProperty() # StringProperty[str] default_get = default.get() # str false_literal = StringProperty(repeated=False) # StringProperty[str] false_literal_get = false_literal.get() # str true_literal = StringProperty(repeated=True) # StringProperty[list[str]] true_literal_get = true_literal.get() # list[str] import random some_bool = random.choice([True, False]) # bool unknown_bool = StringProperty(repeated=some_bool) # StringProperty[str | list[str]] unknown_bool_get = unknown_bool.get() # str | list[str] reveal_locals() # error: Value of type variable "_GetReturnT" of "StringProperty" cannot be "int" # # This error happens because we limited _GetReturnT's possible types in # TypeVar(). If we didn't limit the types, users could accidentally refer to a # type in an annotation that's impossible to instantiate. 
def some_user_function(prop: StringProperty[int]) -> None: prop.get() > venv/bin/mypy --version mypy 0.991 (compiled: yes) > venv/bin/mypy field.py field.py:57: note: Revealed local types are: field.py:57: note: default: field.StringProperty[builtins.str] field.py:57: note: default_get: builtins.str field.py:57: note: false_literal: field.StringProperty[builtins.str] field.py:57: note: false_literal_get: builtins.str field.py:57: note: some_bool: builtins.bool field.py:57: note: true_literal: field.StringProperty[builtins.list[builtins.str]] field.py:57: note: true_literal_get: builtins.list[builtins.str] field.py:57: note: unknown_bool: field.StringProperty[Union[builtins.str, builtins.list[builtins.str]]] field.py:57: note: unknown_bool_get: Union[builtins.str, builtins.list[builtins.str]] field.py:64: error: Value of type variable "_GetReturnT" of "StringProperty" cannot be "int" [type-var] field.py:65: error: Invalid self argument "StringProperty[int]" to attribute function "get" with type "Callable[[StringProperty[str]], str]" [misc] Found 2 errors in 1 file (checked 1 source file)
How can I use MyPy to overload the __init__ method to adjust a getter's return value?
Let's say I have a class like this (pseudo-code, please ignore the odd db structure): class Blog(): title = StringProperty() comments = StringProperty(repeated=True) I want to type check StringProperty such that Blog().title returns a str type, and Blog().comments returns a List[str] type. MyPy mentions that something like this is possible by dynamically typing the __init__ method. Here's what I've tried: U = TypeVar('U', bound=StringProperty) V = TypeVar('V', bound=StringProperty) class StringProperty(Property[T]): @overload def __init__(self: StringProperty[U], repeated: Literal[False]=False, **kwargs) -> None: ... @overload def __init__(self: StringProperty[V], repeated: Literal[True]=True, **kwargs) -> None: ... @overload def __get__(self: StringProperty[U], instance, cls) -> str: ... @overload def __get__(self: StringProperty[V], instance, cls) -> List[str]: ... def __set__(self, instance, value: Optional[Union[str, List[str]]]) -> None: ... However, this throws an error that the second __get__ type signature will never be matched. How can I set MyPy to know the return value of the StringProperty.__get__ method dynamically by whether the repeated property is True or False?
[ "__init__ can be overloaded. self will become the given type.\nTypeVar needs to become some kind of real type during type analysis. It can't stay as T or U or V. It must be filled in with a type like str or Literal[\"foo\"].\nfrom __future__ import annotations\nfrom typing import TypeVar, overload, Literal, Generic\n\n_GetReturnT = TypeVar('_GetReturnT', str, list[str], str | list[str])\n\nclass StringProperty(Generic[_GetReturnT]):\n \n # Handles the default value case too.\n @overload\n def __init__(self: StringProperty[str], repeated: Literal[False]=False, **kwargs) -> None: ...\n\n @overload\n def __init__(self: StringProperty[list[str]], repeated: Literal[True], **kwargs) -> None: ...\n \n # Callers won't always pass a literal bool right at the call site. The bool\n # could come from somewhere far. Then we can't know what exactly get()\n # will return.\n @overload\n def __init__(self: StringProperty[str | list[str]], repeated: bool, **kwargs) -> None: ...\n \n def __init__(self, repeated: bool = False, **kwargs) -> None:\n self._repeated = repeated\n\n def get(self) -> _GetReturnT:\n if self._repeated:\n return [\"Hello\", \"world!\"]\n else:\n return \"just one string\"\n\n\ndefault = StringProperty() # StringProperty[str]\ndefault_get = default.get() # str\n\nfalse_literal = StringProperty(repeated=False) # StringProperty[str]\nfalse_literal_get = false_literal.get() # str\n\ntrue_literal = StringProperty(repeated=True) # StringProperty[list[str]]\ntrue_literal_get = true_literal.get() # list[str]\n\nimport random\nsome_bool = random.choice([True, False]) # bool\nunknown_bool = StringProperty(repeated=some_bool) # StringProperty[str | list[str]]\nunknown_bool_get = unknown_bool.get() # str | list[str]\n\nreveal_locals()\n\n# error: Value of type variable \"_GetReturnT\" of \"StringProperty\" cannot be \"int\"\n#\n# This error happens because we limited _GetReturnT's possible types in\n# TypeVar(). If we didn't limit the types, users could accidentally refer to a\n# type in an annotation that's impossible to instantiate.\ndef some_user_function(prop: StringProperty[int]) -> None:\n prop.get()\n\nNote that setting and reading self._repeated does not aid in typing here in any way. StringProperty gets its type only from what types were passed to the constructor. If someone runs false_literal._repeated = True, then false_literal.get() would return [\"Hello\", \"world!\"], and the typing information is wrong.\nUsing str or list[str] for StringProperty's type here was convenient. But the type can be less specific for weirder classes. Here we could've used Literal[True], Literal[False], and Literal[True] | Literal[False] to represent the quality of being repeated. 
Then get() would need overrides based on self to determine the return type.\n_T = TypeVar('_T',\n Literal[\"NOT_REPEATED\"],\n Literal[\"REPEATED\"],\n Literal[MyEnum.AMAZING],\n Literal[MyEnum.KINDA_OK_I_GUESS])\n\n# For brevity I don't show Unions in this example, but you'd\n# need them for a class that works properly.\nclass StringProperty(Generic[_T]):\n @overload\n def __init__(self: StringProperty[Literal[\"NOT_REPEATED\"]],\n repeated: Literal[False]) -> None: ...\n\n @overload\n def __init__(self: StringProperty[Literal[\"REPEATED\"]],\n repeated: Literal[True]) -> None: ...\n\n def __init__(self, repeated: bool) -> None:\n self._repeated = repeated\n\n @overload\n def get(self: StringProperty[Literal[\"NOT_REPEATED\"]]) -> str: ...\n\n @overload\n def get(self: StringProperty[Literal[\"REPEATED\"]]) -> list[str]: ...\n\n def get(self) -> str | list[str]:\n if self._repeated:\n return [\"Hello\", \"world!\"]\n else:\n return \"just one string\"\n\n\n", "I also had to overload get in @Slix first example:\nfrom __future__ import annotations\nfrom typing import TypeVar, overload, Literal, Generic\n\n_GetReturnT = TypeVar('_GetReturnT', str, list[str], str | list[str])\n\nclass StringProperty(Generic[_GetReturnT]):\n \n # Handles the default value case too.\n @overload\n def __init__(self: StringProperty[str], repeated: Literal[False]=False, **kwargs) -> None: ...\n\n @overload\n def __init__(self: StringProperty[list[str]], repeated: Literal[True], **kwargs) -> None: ...\n \n # Callers won't always pass a literal bool right at the call site. The bool\n # could come from somewhere far. Then we can't know what exactly get()\n # will return.\n @overload\n def __init__(self: StringProperty[str | list[str]], repeated: bool, **kwargs) -> None: ...\n \n def __init__(self, repeated: bool = False, **kwargs) -> None:\n self._repeated = repeated\n\n @overload\n def get(self: StringProperty[str]) -> str:\n ...\n\n @overload\n def get(self: StringProperty[list[str]]) -> list[str]:\n ...\n\n @overload\n def get(self: StringProperty[str | list[str]]) -> str | list[str]:\n ...\n\n def get(self) -> str | list[str]:\n if self._repeated:\n return [\"Hello\", \"world!\"]\n else:\n return \"just one string\"\n\n\ndefault = StringProperty() # StringProperty[str]\ndefault_get = default.get() # str\n\nfalse_literal = StringProperty(repeated=False) # StringProperty[str]\nfalse_literal_get = false_literal.get() # str\n\ntrue_literal = StringProperty(repeated=True) # StringProperty[list[str]]\ntrue_literal_get = true_literal.get() # list[str]\n\nimport random\nsome_bool = random.choice([True, False]) # bool\nunknown_bool = StringProperty(repeated=some_bool) # StringProperty[str | list[str]]\nunknown_bool_get = unknown_bool.get() # str | list[str]\n\nreveal_locals()\n\n# error: Value of type variable \"_GetReturnT\" of \"StringProperty\" cannot be \"int\"\n#\n# This error happens because we limited _GetReturnT's possible types in\n# TypeVar(). 
If we didn't limit the types, users could accidentally refer to a\n# type in an annotation that's impossible to instantiate.\ndef some_user_function(prop: StringProperty[int]) -> None:\n prop.get()\n\n> venv/bin/mypy --version\nmypy 0.991 (compiled: yes)\n> venv/bin/mypy field.py\nfield.py:57: note: Revealed local types are:\nfield.py:57: note: default: field.StringProperty[builtins.str]\nfield.py:57: note: default_get: builtins.str\nfield.py:57: note: false_literal: field.StringProperty[builtins.str]\nfield.py:57: note: false_literal_get: builtins.str\nfield.py:57: note: some_bool: builtins.bool\nfield.py:57: note: true_literal: field.StringProperty[builtins.list[builtins.str]]\nfield.py:57: note: true_literal_get: builtins.list[builtins.str]\nfield.py:57: note: unknown_bool: field.StringProperty[Union[builtins.str, builtins.list[builtins.str]]]\nfield.py:57: note: unknown_bool_get: Union[builtins.str, builtins.list[builtins.str]]\nfield.py:64: error: Value of type variable \"_GetReturnT\" of \"StringProperty\" cannot be \"int\" [type-var]\nfield.py:65: error: Invalid self argument \"StringProperty[int]\" to attribute function \"get\" with type \"Callable[[StringProperty[str]], str]\" [misc]\nFound 2 errors in 1 file (checked 1 source file)\n\n" ]
[ 2, 0 ]
[]
[]
[ "mypy", "python" ]
stackoverflow_0064161037_mypy_python.txt
Q: Errors editing python with Vim When I edit a python file in Vim (using MacVim), and I press o to insert a new line, Vim throws the following errors: Error detected while processing function <SNR>20_CheckAlign..GetPythonIndent: line 30: E121: Undefined variable: dummy Press ENTER or type command to continue Error detected while processing function <SNR>20_CheckAlign..GetPythonIndent: line 30: E15: Invalid expression: line('.') < 7 ? dummy : synIDattr(synID(line('.'), col('.') , 1), 'name') =~ '\(Comment\|String\)$' How do I fix this? A: I figured out the problem. It was throwing an error whenever the file's tab settings were different from the editor's tab settings. For example, my test.py file was set to 2 spaces per tab, with tabs expanded into spaces, whereas my editor was set to 4 spaces per tab, no expand. So the solution workaround was to set Vim's tab settings to the settings of the python file being edited. A: Use the following modeline in your python files, that its tab settings are consistent. # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 Alternately, you have them set in your .vimrc file too. set tabstop=4 set shiftwidth=4 set softtabstop=4 set expandtab These are minimal set of things which you ensure consistency when working with python file. There are some great vimrc examples available which you can use as well. A: Changing the indentation settings did not work for me so I worked around this by modifying the python indentation file (/path/to/vim/indent/python.vim). In the GetPythonIndent function I simply replaced all instances of dummy with 0. This fixed the problem for me. Alternatively you could just set s:maxoff to something ridiculously high but this is somewhat less elegant. A: To resolve the problem it seems one must edit the function GetPythonIndent() in /path/to/vim/indent/python.vim. To do this, one must read in the help docs, user_30, indent, indentexpr, and indent-expression (also, C-indenting contains general info that will help explain what's going on). The simplest solution therefore is to switch indenting off when you are editing a python file, until such time as you have time to deal with this irritating bug: :set indentexpr="" You will see in /path/to/vim/indent/python.vim that expression is set to run the buggy function GetPythonIndent: setlocal indentexpr=GetPythonIndent(v:lnum) Set it to "" there to save the inconvenience of unsetting it every time you open a python file. Dani's answer above looks like it might be the better solution, since it would preserve some form of auto-indentation. But this will give you instant relief and save having to read and understand the buggy func.
Errors editing python with Vim
When I edit a python file in Vim (using MacVim), and I press o to insert a new line, Vim throws the following errors: Error detected while processing function <SNR>20_CheckAlign..GetPythonIndent: line 30: E121: Undefined variable: dummy Press ENTER or type command to continue Error detected while processing function <SNR>20_CheckAlign..GetPythonIndent: line 30: E15: Invalid expression: line('.') < 7 ? dummy : synIDattr(synID(line('.'), col('.') , 1), 'name') =~ '\(Comment\|String\)$' How do I fix this?
[ "I figured out the problem. It was throwing an error whenever the file's tab settings were different from the editor's tab settings. For example, my test.py file was set to 2 spaces per tab, with tabs expanded into spaces, whereas my editor was set to 4 spaces per tab, no expand.\nSo the solution workaround was to set Vim's tab settings to the settings of the python file being edited.\n", "Use the following modeline in your python files, that its tab settings are consistent.\n# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4\n\nAlternately, you have them set in your .vimrc file too.\nset tabstop=4\nset shiftwidth=4\nset softtabstop=4\nset expandtab\n\nThese are minimal set of things which you ensure consistency when working with python file.\nThere are some great vimrc examples available which you can use as well.\n", "Changing the indentation settings did not work for me so I worked around this by modifying the python indentation file (/path/to/vim/indent/python.vim).\nIn the GetPythonIndent function I simply replaced all instances of dummy with 0. This fixed the problem for me. \nAlternatively you could just set s:maxoff to something ridiculously high but this is somewhat less elegant.\n", "To resolve the problem it seems one must edit the function GetPythonIndent() in /path/to/vim/indent/python.vim.\nTo do this, one must read in the help docs, user_30, indent, indentexpr, and indent-expression (also, C-indenting contains general info that will help explain what's going on).\nThe simplest solution therefore is to switch indenting off when you are editing a python file, until such time as you have time to deal with this irritating bug:\n:set indentexpr=\"\"\n\nYou will see in /path/to/vim/indent/python.vim that expression is set to run the buggy function GetPythonIndent:\nsetlocal indentexpr=GetPythonIndent(v:lnum)\n\nSet it to \"\" there to save the inconvenience of unsetting it every time you open a python file.\nDani's answer above looks like it might be the better solution, since it would preserve some form of auto-indentation. But this will give you instant relief and save having to read and understand the buggy func.\n" ]
[ 2, 1, 1, 0 ]
[]
[]
[ "macvim", "python", "vim" ]
stackoverflow_0004840851_macvim_python_vim.txt
Q: How do I parse using for-loop? My aim is parse "funpay.com"'s offer page. It has to be easy, cause all offer names are inside the same class 'tc-item'. However I can't use bs4+requests, because this page loads only if you're logged in, which I'm doing via cookies (selenium+pickle). Idk how to make it at all, so I'll appreciate any hints. The code I tried: driver.get("https://funpay.com/orders/trade") soup = bs(driver.page_source, 'html.parser') try: paid = soup.find_all('a', class_='tc-item') for sold in paid: title = sold.find('div', class_='tc-order') # inside 'a', # prints the code of offer print(title) except Exception as ex: print(ex) A: Based on the rather thin starting point, I suspect it's an error that occurs during iterations, so here's what I would do. In order not to discard everything directly, check inside the loop whether the element you are looking for is available or not and let output the result accordingly. ... soup = bs(driver.page_source, 'html.parser') paid = soup.find_all('a', class_='tc-item') for sold in paid: title = sold.find('div', class_='tc-order').text if sold.find('div', class_='tc-order') else 'no element found' print(title)
How do I parse using for-loop?
My aim is parse "funpay.com"'s offer page. It has to be easy, cause all offer names are inside the same class 'tc-item'. However I can't use bs4+requests, because this page loads only if you're logged in, which I'm doing via cookies (selenium+pickle). Idk how to make it at all, so I'll appreciate any hints. The code I tried: driver.get("https://funpay.com/orders/trade") soup = bs(driver.page_source, 'html.parser') try: paid = soup.find_all('a', class_='tc-item') for sold in paid: title = sold.find('div', class_='tc-order') # inside 'a', # prints the code of offer print(title) except Exception as ex: print(ex)
[ "Based on the rather thin starting point, I suspect it's an error that occurs during iterations, so here's what I would do.\nIn order not to discard everything directly, check inside the loop whether the element you are looking for is available or not and let output the result accordingly.\n...\nsoup = bs(driver.page_source, 'html.parser')\npaid = soup.find_all('a', class_='tc-item')\n\nfor sold in paid:\n title = sold.find('div', class_='tc-order').text if sold.find('div', class_='tc-order') else 'no element found'\n print(title)\n\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "for_loop", "python", "selenium", "web_scraping" ]
stackoverflow_0074576726_beautifulsoup_for_loop_python_selenium_web_scraping.txt
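A compact version of the answer's loop with the duplicated find() call factored out; it assumes driver already holds the logged-in Selenium session from the question:

from bs4 import BeautifulSoup

soup = BeautifulSoup(driver.page_source, 'html.parser')
for sold in soup.find_all('a', class_='tc-item'):
    order = sold.find('div', class_='tc-order')
    print(order.get_text(strip=True) if order else 'no tc-order div in this offer')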
Q: How to display user profile picutre from sql database stored as blob data in python flask im creating a website in python flask where i want to display a photo for loggin users. My database looks like this: And i dont know how decode ProfilePicutre data and then display it in flask(diffrent for everyuser, like twitter profile picture) A: Looks like you're storing your ProfilePicture file in some sort of binary (displayed in hex) column. Since you haven't mentioned any tools used to query the database (SqlAlchemy or similar) I can't be specific in the solution but, try retrieving the data ans inspect it to figure out if it's returned in bytes, string or something else. After retrieving this data, try converting it into base64. For example purposes, consider that the ProfilePicture is retrieved in bytes and converted with the following line: base64_image = base64.encodebytes(row.ProfilePicture).hex() Having the image encoded in base64 you can render it in your HTML with the following line, if you're using server side rendered HTML (with jinja or some similar library): <img src="data:image/png;base64,{{ base64_image }}" alt="{{ row.Username }} Profile Picture" /> In case you have a REST API, return it and use your JS library to render the image tag just like shown above. Good luck.
How to display user profile picutre from sql database stored as blob data in python flask
im creating a website in python flask where i want to display a photo for loggin users. My database looks like this: And i dont know how decode ProfilePicutre data and then display it in flask(diffrent for everyuser, like twitter profile picture)
[ "Looks like you're storing your ProfilePicture file in some sort of binary (displayed in hex) column. Since you haven't mentioned any tools used to query the database (SqlAlchemy or similar) I can't be specific in the solution but,\ntry retrieving the data ans inspect it to figure out if it's returned in bytes, string or something else. After retrieving this data, try converting it into base64. For example purposes, consider that the ProfilePicture is retrieved in bytes and converted with the following line:\nbase64_image = base64.encodebytes(row.ProfilePicture).hex()\n\nHaving the image encoded in base64 you can render it in your HTML with the following line,\nif you're using server side rendered HTML (with jinja or some similar library):\n<img src=\"data:image/png;base64,{{ base64_image }}\" alt=\"{{ row.Username }} Profile Picture\" />\n\nIn case you have a REST API, return it and use your JS library to render the image tag just like shown above.\nGood luck.\n" ]
[ 0 ]
[]
[]
[ "database", "flask", "python" ]
stackoverflow_0074576495_database_flask_python.txt
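A minimal Flask sketch of the data-URI approach from the answer; get_user() is a hypothetical lookup returning a row with Username and ProfilePicture as raw bytes, and the image bytes are encoded with plain base64, which is what the data: URI scheme expects:

import base64
from flask import Flask, render_template_string

app = Flask(__name__)

@app.route('/profile/<int:user_id>')
def profile(user_id):
    row = get_user(user_id)   # hypothetical: fetch Username and ProfilePicture bytes
    b64_image = base64.b64encode(row.ProfilePicture).decode('ascii')
    return render_template_string(
        '<img src="data:image/png;base64,{{ pic }}" alt="{{ name }} profile picture">',
        pic=b64_image,
        name=row.Username,
    )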
Q: How to resample time series dataframe to show average hourly data? I am aware that pandas resample function has **hourly ** rule. However, it returns the average for every hour for the whole dataset. When using that method (df.Value.resample('H').mean()), I get the following output: Time&date Value 2021-01-01 00:00:00 23 2021-01-01 01:00:00 25 However, I would like hourly resample of data which shows the average values throughout the year for the whole dataset (not everyday hourly). What I want: Time&date Value 00:00:00 55 01:00:00 24 Thanks in advance. A: groupby can give you the result you want. Can you try this? dfx=df.groupby(df['date_column'].dt.hour).mean()
How to resample time series dataframe to show average hourly data?
I am aware that the pandas resample function has an hourly rule. However, it returns the average for every hour for the whole dataset. When using that method (df.Value.resample('H').mean()), I get the following output: Time&date Value 2021-01-01 00:00:00 23 2021-01-01 01:00:00 25 However, I would like an hourly resample of the data which shows the average values throughout the year for the whole dataset (not hourly for every day). What I want: Time&date Value 00:00:00 55 01:00:00 24 Thanks in advance.
[ "groupby can give you the result you want. Can you try this?\ndfx=df.groupby(df['date_column'].dt.hour).mean()\n\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "python", "series", "time" ]
stackoverflow_0074576827_dataframe_python_series_time.txt
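A small runnable sketch, building on the groupby answer above, that contrasts resample('H') (one average per calendar hour) with grouping by the index's hour-of-day (one average per hour across the whole period). The column name and index are illustrative assumptions.

import pandas as pd
import numpy as np

# Hourly timestamps over two days, with a value column.
idx = pd.date_range("2021-01-01", periods=48, freq="H")
df = pd.DataFrame({"Value": np.arange(48)}, index=idx)

per_hour_of_day = df.groupby(df.index.hour)["Value"].mean()   # 24 rows: one per hour of day
per_calendar_hour = df["Value"].resample("H").mean()          # 48 rows: one per timestamp

print(per_hour_of_day.head())
print(per_calendar_hour.head())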
Q: How to get the items in Queue without removing the items? get() removes and returns an item from Queue in Python. import queue q = queue.Queue() # Here q.put("Apple") q.put("Orange") q.put("Banana") print(q.get()) print(q.get()) print(q.get()) Output: Apple Orange Banana Now, I want to get the items in Queue without removing the items. Is it possible to do this? A: queue_object.queue will return a copy of your queue in a deque object which you can then use the slices of. It is, of course, not synchronized with the original queue, but will allow you to peek at the queue at the time of the copy. There's a good rationalization for why you wouldn't want to do this explained in detail in this thread comp.lang.python - Queue peek?. But if you're just trying to understand how Queue works, this is one simple way. import Queue q = Queue.Queue() q.put('foo') q.put('bar') d = q.queue print(d) deque(['foo', 'bar']) print(d[0]) 'foo' A: The Queue module implements multi-producer, multi-consumer queues. It is especially useful in threaded programming when information must be exchanged safely between multiple threads. As you can see, the Queue module was created specifically for use with threads, providing only FIFO, LIFO and priority queues, none of which provide this functionality. However, by examining the source code of the Queue module, you can see that it simply uses a collections.deque (double-ended queue) which can easily accomplish your task. You may index the first item ([0]) and .popleft() in constant time. A: It is NOT safe to simply access the underlying queue. The safe way to do it is to extend the Queue class. If you return the underlying deque object, you will NOT be getting a copy, you get the live object. The result of this is that it can change while you are iterating it - which will result in an exception if another thread inserts into the queue during your iteration. Knowing that Python uses the GIL, you can safely use list(q.queue), because list() will never cause a context switch. It's better to use the same lock the get() function uses, and not make assumptions about the GIL: import queue class SnapshotQueue(queue.Queue): def snapshot(self): with self.mutex: return list(self.queue) That class can be used safely instead of a regular queue, and it will return a snapshot of the queue state... within a mutex and without causing issues in the underlying queue operation. A: I found this question because I needed a way to access the top element in a PriorityQueue. I couldn't find a way to do that, so I switched to heapq instead. Although it's worth mentioning that heapq is not thread safe.
A: You can get the items in a queue without removing the items as shown below: import queue q = queue.Queue() q.put("Apple") q.put("Orange") q.put("Banana") print(q.queue[0]) # Here print(q.queue[1]) # Here print(q.queue[2]) # Here print(q.queue) # Here Output: Apple Orange Banana deque(['Apple', 'Orange', 'Banana']) And, you can also change the items in a queue as shown below: import queue q = queue.Queue() q.put("Apple") q.put("Orange") q.put("Banana") q.queue[0] = "Strawberry" # Here q.queue[1] = "Lemon" # Here q.queue[2] = "kiwi" # Here print(q.queue[0]) print(q.queue[1]) print(q.queue[2]) print(q.queue) Output: Strawberry Lemon kiwi deque(['Strawberry', 'Lemon', 'kiwi']) But, you cannot add items to a queue without put() as shown below: import queue q = queue.Queue() q.queue[0] = "Apple" # Cannot add q.queue[1] = "Orange" # Cannot add q.queue[2] = "Banana" # Cannot add print(q.queue[0]) print(q.queue[1]) print(q.queue[2]) print(q.queue) Then, the error below occurs: IndexError: deque index out of range
How to get the items in Queue without removing the items?
get() removes and returns an item from Queue in Python. import queue q = queue.Queue() # Here q.put("Apple") q.put("Orange") q.put("Banana") print(q.get()) print(q.get()) print(q.get()) Output: Apple Orange Banana Now, I want to get the items in Queue without removing the items. Is it possible to do this?
[ "queue_object.queue will return copy of your queue in a deque object which you can then use the slices of. It is of course, not syncronized with the original queue, but will allow you to peek at the queue at the time of the copy.\nThere's a good rationalization for why you wouldn't want to do this explained in detail in this thread comp.lang.python - Queue peek?. But if you're just trying to understand how Queue works, this is one simple way.\nimport Queue\nq = Queue.Queue()\nq.push(1)\nq.put('foo')\nq.put('bar')\nd = q.queue\nprint(d)\ndeque(['foo', 'bar'])\nprint(d[0])\n'foo'\n\n", "\nThe Queue module implements multi-producer, multi-consumer queues. It is especially useful in threaded programming when information must be exchanged safely between multiple threads. \n\nAs you can see, the Queue module was created specifically for use with threads, providing only FIFO, LIFO and priority queues, none of which provide this functionality. However by examining the source code of the Queue module, you can see that it simply uses a collections.deque (double ended queue) which can easily accomplish your task. You may index the first item ([0]) and .popleft() in constant time.\n", "It is NOT safe to simply access the underlying queue.\nThe safe way to do it is to extend the Queue class. If you return the underlying dequeue object, you will NOT be getting a copy, you get the live object.\nThe result of this is that it can change while you are iterating it - which will result in an exception if another thread inserts into the queue during your iteration.\nKnowing that python uses the GIL, you can safely use list(q.queue), because list() will never cause a context switch.\nIt's better to use the same lock the get() function uses, and not make assumptions about the GIL:\nimport queue\n \nclass SnapshotQueue(queue.Queue):\n def snapshot(self):\n with self.mutex:\n return list(self.queue)\n\nThat class can be used safely instead of a regular queue, and it will return a snapshot of the queue state... within a mutex and without causing issues in the underlying queue operation.\n", "I found this question, because I needed a way to access top element in a PriorityQueue.\nI couldn't find a way to do that, so I switched to heapq instead.\nAlthough it's worth mentioning that heapq is not thread safe.\n", "You can get the items in a queue without removing the items as shown below:\nimport queue\n\nq = queue.Queue()\n\nq.put(\"Apple\")\nq.put(\"Orange\")\nq.put(\"Banana\")\n\nprint(q.queue[0]) # Here\nprint(q.queue[1]) # Here\nprint(q.queue[2]) # Here\n\nprint(q.queue) # Here\n\nOutput:\nApple\nOrange\nBanana\ndeque(['Apple', 'Orange', 'Banana'])\n\nAnd, you can also change the items in a queue as shown below:\nimport queue\n\nq = queue.Queue()\n\nq.put(\"Apple\")\nq.put(\"Orange\")\nq.put(\"Banana\")\n\nq.queue[0] = \"Strawberry\" # Here\nq.queue[1] = \"Lemon\" # Here\nq.queue[2] = \"kiwi\" # Here\n\nprint(q.queue[0])\nprint(q.queue[1])\nprint(q.queue[2])\n\nprint(q.queue)\n\nOutput:\nStrawberry\nLemon\nkiwi\ndeque(['Strawberry', 'Lemon', 'kiwi'])\n\nBut, you cannot add items to a queue without put() as shown below:\nimport queue\n\nq = queue.Queue()\n\nq.queue[0] = \"Apple\" # Cannot add\nq.queue[1] = \"Orange\" # Cannot add\nq.queue[2] = \"Banana\" # Cannot add\n\nprint(q.queue[0])\nprint(q.queue[1])\nprint(q.queue[2])\n\nprint(q.queue)\n\nThen, the error below occurs:\n\nIndexError: deque index out of range\n\n" ]
[ 32, 12, 9, 0, 0 ]
[]
[]
[ "data_structures", "python", "queue" ]
stackoverflow_0016686292_data_structures_python_queue.txt
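A short usage sketch for the SnapshotQueue class quoted in the answers above, showing that snapshot() peeks without consuming items while get() still removes them; the item names are illustrative.

import queue

class SnapshotQueue(queue.Queue):
    def snapshot(self):
        with self.mutex:
            return list(self.queue)

q = SnapshotQueue()
for item in ("Apple", "Orange", "Banana"):
    q.put(item)

print(q.snapshot())   # ['Apple', 'Orange', 'Banana'] - items are still queued
print(q.qsize())      # 3
print(q.get())        # 'Apple' - only get() actually removes an item
print(q.snapshot())   # ['Orange', 'Banana']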
Q: is there someway I can turn these lists inside of a list into different keys and values So I have this list: [['chocolate', '10225.25', '9025.0', '9505.0', '8750.0'], ['cookie dough', '7901.25', '4267.0', '7056.5', '3550.25'], ['rocky road', '6700.1', '5012.45', '6011.0', '5225.15'], ['strawberry', '9285.15', '8276.1', '8705.0', '7655.1'], ['vanilla', '8580.0', '7201.25', '8900.0', '3500.25']] is there a way I can turn each list inside the list into a dictionary where it would look like this: {'chocolate' : ['10225.25', '9025.0', '9505.0', '8750.0'], 'cookie dough' : ['7901.25', '4267.0', '7056.5', '3550.25'], 'rocky road' : ['6700.1', '5012.45', '6011.0', '5225.15']} ... you get the idea I have tried lots of things but I can't seem to find the solution to my problem. anything would be helpful :) thx A: Let your list be: l=[['chocolate', '10225.25', '9025.0', '9505.0', '8750.0'], ['cookie dough', '7901.25', '4267.0', '7056.5', '3550.25'], ['rocky road', '6700.1', '5012.45', '6011.0', '5225.15'], ['strawberry', '9285.15', '8276.1', '8705.0', '7655.1'], ['vanilla', '8580.0', '7201.25', '8900.0', '3500.25']] You can use a simple comprehension to get the desired result: {x[0]: x[1:] for x in l} #output {'chocolate': ['10225.25', '9025.0', '9505.0', '8750.0'], 'cookie dough': ['7901.25', '4267.0', '7056.5', '3550.25'], 'rocky road': ['6700.1', '5012.45', '6011.0', '5225.15'], 'strawberry': ['9285.15', '8276.1', '8705.0', '7655.1'], 'vanilla': ['8580.0', '7201.25', '8900.0', '3500.25']} OR d={} for x in l: d[x[0]]=x[1:] #output {'chocolate': ['10225.25', '9025.0', '9505.0', '8750.0'], 'cookie dough': ['7901.25', '4267.0', '7056.5', '3550.25'], 'rocky road': ['6700.1', '5012.45', '6011.0', '5225.15'], 'strawberry': ['9285.15', '8276.1', '8705.0', '7655.1'], 'vanilla': ['8580.0', '7201.25', '8900.0', '3500.25']} A: Beginner-Friendly Solution: (Specific to this data) new_dict = {} for item in lst: new_dict[item[0]] = item[1:] Since the string you want to use as a key is always the first item in the list, you can set item[0] as key and use the rest of the items in the nested list as the values to that key. Output: {'chocolate': ['10225.25', '9025.0', '9505.0', '8750.0'], 'cookie dough': ['7901.25', '4267.0', '7056.5', '3550.25'], 'rocky road': ['6700.1', '5012.45', '6011.0', '5225.15'], 'strawberry': ['9285.15', '8276.1', '8705.0', '7655.1'], 'vanilla': ['8580.0', '7201.25', '8900.0', '3500.25']}
is there someway I can turn these lists inside of a list into different keys and values
So I have this list: [['chocolate', '10225.25', '9025.0', '9505.0', '8750.0'], ['cookie dough', '7901.25', '4267.0', '7056.5', '3550.25'], ['rocky road', '6700.1', '5012.45', '6011.0', '5225.15'], ['strawberry', '9285.15', '8276.1', '8705.0', '7655.1'], ['vanilla', '8580.0', '7201.25', '8900.0', '3500.25']] is there a way I can turn each list inside the list into a dictionary where it would look like this: {'chocolate' : ['10225.25', '9025.0', '9505.0', '8750.0'], 'cookie dough' : ['7901.25', '4267.0', '7056.5', '3550.25'], 'rocky road' : ['6700.1', '5012.45', '6011.0', '5225.15']} ... you get the idea I have tried lots of things but I can't seem to find the solution to my problem. anything would be helpful :) thx
[ "Let your list be:\nl=[['chocolate', '10225.25', '9025.0', '9505.0', '8750.0'], ['cookie dough', '7901.25', '4267.0', '7056.5', '3550.25'], ['rocky road', '6700.1', '5012.45', '6011.0', '5225.15'], ['strawberry', '9285.15', '8276.1', '8705.0', '7655.1'], ['vanilla', '8580.0', '7201.25', '8900.0', '3500.25']]\n\nYou can use a simple comprehension to get the desired result:\n{x[0]: x[1:] for x in l}\n\n#output\n{'chocolate': ['10225.25', '9025.0', '9505.0', '8750.0'],\n 'cookie dough': ['7901.25', '4267.0', '7056.5', '3550.25'],\n 'rocky road': ['6700.1', '5012.45', '6011.0', '5225.15'],\n 'strawberry': ['9285.15', '8276.1', '8705.0', '7655.1'],\n 'vanilla': ['8580.0', '7201.25', '8900.0', '3500.25']}\n\nOR\nd={}\nfor x in l:\n d[x[0]]=x[1:]\n\n\n#output\n {'chocolate': ['10225.25', '9025.0', '9505.0', '8750.0'],\n 'cookie dough': ['7901.25', '4267.0', '7056.5', '3550.25'],\n 'rocky road': ['6700.1', '5012.45', '6011.0', '5225.15'],\n 'strawberry': ['9285.15', '8276.1', '8705.0', '7655.1'],\n 'vanilla': ['8580.0', '7201.25', '8900.0', '3500.25']}\n\n", "Beginner-Friendly Solution: (Specific to this data)\n new_dict = {}\n for item in lst:\n new_dict[item[0]] = item[1:]\n\nSince the string you want to use as a key is always the first item in the list, you can set item[0] as key and use the rest of the items in the nested list as the values to that key.\nOutput:\n {'chocolate': ['10225.25', '9025.0', '9505.0', '8750.0'], 'cookie dough': ['7901.25', '4267.0', '7056.5', '3550.25'], 'rocky road': ['6700.1', '5012.45', '6011.0', '5225.15'], 'strawberry': ['9285.15', '8276.1', '8705.0', '7655.1'], 'vanilla': ['8580.0', '7201.25', '8900.0', '3500.25']}\n \n\n" ]
[ 1, 0 ]
[]
[]
[ "dictionary", "list", "python" ]
stackoverflow_0074470878_dictionary_list_python.txt
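A small variant of the comprehension shown above that also converts the numeric strings to floats, in case the values are needed for arithmetic; this goes beyond what the question asked and is only a sketch.

l = [['chocolate', '10225.25', '9025.0', '9505.0', '8750.0'],
     ['cookie dough', '7901.25', '4267.0', '7056.5', '3550.25'],
     ['rocky road', '6700.1', '5012.45', '6011.0', '5225.15']]

# First element is the key; the remaining strings become float values.
d = {row[0]: [float(v) for v in row[1:]] for row in l}
print(d['chocolate'])        # [10225.25, 9025.0, 9505.0, 8750.0]
print(sum(d['chocolate']))   # 37505.25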
Q: How to check in a list of divs if there is a span within with class 'new' In BeautifulSoup I receive a list of divs. Each of these divs has a span included: <div role="news_item" class="ni_nav_9tg"> <span class="nav_element_new_S5g">Germany vs. Japan</span> </div> ... <div role="news_item" class="ni_nav_9tg"> <span class="nav_element_new_S5g">Brasil vs. Serbia</span> </div> What I want is to check if in this list of divs a span exists whose class contains the string "new". Just true or false as a result. Of course I could iterate through each div item in the list, get the span item and then check if its class contains the string "new", but I am not sure if this is the right approach. A: You could select them directly like: soup.select('div[role="news_item"]:has(span[class*="new"])') to get True or False check the len() of the ResultSet: len(soup.select('div[role="news_item"]:has(span[class*="new"])')) > 0 Example from bs4 import BeautifulSoup html=''' <div role="news_item" class="ni_nav_9tg"> <span class="nav_element_new_S5g">Germany vs. Japan</span> </div> ... <div role="news_item" class="ni_nav_9tg"> <span class="nav_element_new_S5g">Brasil vs. Serbia</span> </div> ''' soup = BeautifulSoup(html) len(soup.select('div[role="news_item"]:has(span[class*="new"])')) > 0
How to check in a list of divs if there is a span within with class 'new'
In BeautifulSoup I receive a list of divs. Each of these divs has a span included: <div role="news_item" class="ni_nav_9tg"> <span class="nav_element_new_S5g">Germany vs. Japan</span> </div> ... <div role="news_item" class="ni_nav_9tg"> <span class="nav_element_new_S5g">Brasil vs. Serbia</span> </div> What I want is to check if in this list of divs a span exists whose class contains the string "new". Just true or false as a result. Of course I could iterate through each div item in the list, get the span item and then check if its class contains the string "new", but I am not sure if this is the right approach.
[ "You could select them directly like:\nsoup.select('div[role=\"news_item\"]:has(span[class*=\"new\"])')\n\nto get True or False check the len() of the ResultSet:\nlen(soup.select('div[role=\"news_item\"]:has(span[class*=\"new\"])')) > 0\n\nExample\nfrom bs4 import BeautifulSoup\nhtml='''\n<div role=\"news_item\" class=\"ni_nav_9tg\">\n <span class=\"nav_element_new_S5g\">Germany vs. Japan</span>\n</div>\n...\n<div role=\"news_item\" class=\"ni_nav_9tg\">\n <span class=\"nav_element_new_S5g\">Brasil vs. Serbia</span>\n</div>\n'''\n\nsoup = BeautifulSoup(html)\n\nlen(soup.select('div[role=\"news_item\"]:has(span[class*=\"new\"])')) > 0\n\n" ]
[ 1 ]
[]
[]
[ "beautifulsoup", "list", "python", "web_scraping" ]
stackoverflow_0074576984_beautifulsoup_list_python_web_scraping.txt
Q: Creating a dataframe from different lists I am new to python, so the question could be trivial. I have a pair of lists, containing solid names and the associated counts, of which I am providing a sample here below: volumes1 = ['Shield', 'Side', 'expHall', 'Funnel', 'gridpiece'] counts1= [3911, 1479, 553, 368, 342] and a second pair of lists volumes2 = ['Shield', 'leg', 'Funnel', 'gridpiece','wafer'] counts2= [291, 469, 73, 28, 32] Notice that not all the volumes are present in each list, and their position can be different. What I would like to obtain is a dataframe where the first column comprehends all the volumes in volume1 and volume2, the second columns is all the corresponding values in counts1, and the third column is all the corresponding values in counts2. If a volume in the first column of the dataframe is not present in volume1 the corresponding value in the second column is set to 0, and in the same way if a volume in the first column of the dataframe is not present in volume2 the corresponding value in the third column is set to 0, so that the final output for the values I provided would be: | volumes | counts1 | counts2 | | Shield | 3911 | 291 | | Side | 1479 | 0 | | expHall | 553 | 0 | | Funnel | 368 | 73 | | gridpiece | 342 | 28 | | leg | 0 | 469 | | wafer | 0 | 32 | I am not so experienced in python and I have been struggling a lot with no results, is there any way to obtain what I want in a quick and elegant way? Thanks A: quess not optimal, but one solution import pandas as pd volumes1 = ['Shield', 'Side', 'expHall', 'Funnel', 'gridpiece'] counts1= [3911, 1479, 553, 368, 342] volumes2 = ['Shield', 'leg', 'Funnel', 'gridpiece','wafer'] counts2= [291, 469, 73, 28, 32] volumes12=list(set(volumes1+volumes2)) counts1R=[0]*len(volumes12) counts2R=[0]*len(volumes12) for x in range(0,len(volumes1)): p=list(volumes12).index(volumes1[x]) counts1R[p]=counts1[x] for x in range(0,len(volumes2)): p=list(volumes12).index(volumes2[x]) counts2R[p]=counts2[x] d={ 'volumes':volumes12, 'counts1':counts1R, 'counts2':counts2R } df = pd.DataFrame(data=d) print(df) see https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html
Creating a dataframe from different lists
I am new to python, so the question could be trivial. I have a pair of lists, containing solid names and the associated counts, of which I am providing a sample here below: volumes1 = ['Shield', 'Side', 'expHall', 'Funnel', 'gridpiece'] counts1= [3911, 1479, 553, 368, 342] and a second pair of lists volumes2 = ['Shield', 'leg', 'Funnel', 'gridpiece','wafer'] counts2= [291, 469, 73, 28, 32] Notice that not all the volumes are present in each list, and their position can be different. What I would like to obtain is a dataframe where the first column comprehends all the volumes in volume1 and volume2, the second columns is all the corresponding values in counts1, and the third column is all the corresponding values in counts2. If a volume in the first column of the dataframe is not present in volume1 the corresponding value in the second column is set to 0, and in the same way if a volume in the first column of the dataframe is not present in volume2 the corresponding value in the third column is set to 0, so that the final output for the values I provided would be: | volumes | counts1 | counts2 | | Shield | 3911 | 291 | | Side | 1479 | 0 | | expHall | 553 | 0 | | Funnel | 368 | 73 | | gridpiece | 342 | 28 | | leg | 0 | 469 | | wafer | 0 | 32 | I am not so experienced in python and I have been struggling a lot with no results, is there any way to obtain what I want in a quick and elegant way? Thanks
[ "quess not optimal, but one solution\nimport pandas as pd\n\nvolumes1 = ['Shield', 'Side', 'expHall', 'Funnel', 'gridpiece']\ncounts1= [3911, 1479, 553, 368, 342]\nvolumes2 = ['Shield', 'leg', 'Funnel', 'gridpiece','wafer']\ncounts2= [291, 469, 73, 28, 32]\n\nvolumes12=list(set(volumes1+volumes2))\ncounts1R=[0]*len(volumes12)\ncounts2R=[0]*len(volumes12)\n\nfor x in range(0,len(volumes1)):\n p=list(volumes12).index(volumes1[x])\n counts1R[p]=counts1[x]\nfor x in range(0,len(volumes2)):\n p=list(volumes12).index(volumes2[x])\n counts2R[p]=counts2[x]\n\nd={ 'volumes':volumes12,\n 'counts1':counts1R,\n 'counts2':counts2R\n }\ndf = pd.DataFrame(data=d)\nprint(df)\n\n\n\nsee https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html\n" ]
[ 0 ]
[]
[]
[ "list", "pandas", "python" ]
stackoverflow_0074576892_list_pandas_python.txt
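The loop-based answer above works; as an alternative sketch (not from the original answer), two Series indexed by volume name can be aligned with pandas.concat, which fills the missing volumes automatically and avoids the manual index bookkeeping.

import pandas as pd

volumes1 = ['Shield', 'Side', 'expHall', 'Funnel', 'gridpiece']
counts1 = [3911, 1479, 553, 368, 342]
volumes2 = ['Shield', 'leg', 'Funnel', 'gridpiece', 'wafer']
counts2 = [291, 469, 73, 28, 32]

s1 = pd.Series(counts1, index=volumes1, name='counts1')
s2 = pd.Series(counts2, index=volumes2, name='counts2')

# Outer alignment on the volume names; absent volumes become NaN, then 0.
df = pd.concat([s1, s2], axis=1).fillna(0).astype(int)
df = df.rename_axis('volumes').reset_index()
print(df)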
Q: Browsing context has been discarded using GeckoDriver Firefox through Selenium I didn't make any changes to my Python Selenium program and it worked fine 3 days ago. Now when I try to use it I get: Browsing context has been discarded Failed to decode response from marionette Any idea what could have caused this outside the code? (since no changes were made) I'm using Firefox and GeckoDriver. After I got these errors I updated Firefox, GeckoDriver, and Selenium, but it didn't help. A: This error message... Browsing context has been discarded . Failed to decode response from marionette ...implies that the communication between GeckoDriver and Marionette was broken. Some more information regarding the binary versions in terms of: Selenium Server/Client GeckoDriver Firefox Additionally, your code block and the error stack trace would have given us some clues about what's going wrong. However this issue can happen due to multiple factors as follows: As per Hang when navigation request removes the current browsing context if you have used driver.navigate().back(); when Selenium's focus was within an <iframe> this error is observed. As per Crash during command execution results in "Internal Server Error: Failed to decode response from marionette" this issue can also occur due to ctypes checks for NULL pointer derefs. You can find the Selenium testcase here. Perhaps instead of panicking, it would have been better to handle this more gracefully by clearing any state and returning geckodriver to accept new connections again. As per Failed to decode response from marionette - Error to open Webdriver using python this issue can also occur if you are not using the compliant version of the binaries. GeckoDriver, Selenium and Firefox Browser compatibility chart Reference You can find a relevant detailed discussion in: “Failed to decode response from marionette” message in Python/Firefox headless scraping script A: I experienced the same error on a particular site, after performing a successful login and when I was redirected to the next page. While inspecting the source of the new page in my Firefox browser, I noticed some bad format/HTML quality details that went away after a manual refresh. (I suspect this is related to the lack of quality of that site in particular.) What I did in order to remediate this was to start every next step on a new page with a refresh on my driver: def my_next_step(driver): driver.refresh() time.sleep(10) element=driver.switch_to_frame('iframe') ....... This helped me overcome the site quality issues. A: On Ubuntu 22.10 using (apt, not snap) Firefox and Selenium in Python I also got this error after: driver.switch_to.alert.accept() The solution for me was to switch back to the context with: def upload_file(driver, filepath): """Uploads a Ublock Origin backup (template) .txt file into the Ublock Origin extension.""" driver.find_element("id", "restoreFilePicker").send_keys(filepath) time.sleep(1) driver.switch_to.alert.accept() time.sleep(1) # Switch back to first tab (and reload/restore it). new_window = driver.window_handles[0] driver.switch_to.window(new_window) This answer was given in this question. A: I removed the window size setting and it is working without this error
Browsing context has been discarded using GeckoDriver Firefox through Selenium
I didn't make any changes to my python selenium program and it worked fine 3 days ago. Now when i try to use it i get: Browsing context has been discarded Failed to decode response from marionette Any idea what could have caused this outside the code? (since no changes were made) I'm using firefox and geckodriver. After i got these errors i updated firefox, geckodriver, and selenium, but it didn't help.
[ "This error message...\nBrowsing context has been discarded\n.\nFailed to decode response from marionette\n\n...implies that the communication between GeckoDriver and Marionette was broken.\nSome more information regarding the binary version interms of:\n\nSelenium Server/Client\nGeckoDriver\nFirefox\n\nAdditionally, your code block and the error stack trace would have given us some clues about whats wrong happening. However this issue can happen due to multiple factors as follows:\n\nAs per Hang when navigation request removes the current browsing context if you have used driver.navigate().back(); when Selenium's focus was within an <iframe> this error is observed.\nAs per Crash during command execution results in \"Internal Server Error: Failed to decode response from marionette\" this issue can also occur due to ctypes checks for NULL pointer derefs.\n\n\nYou can find the Selenium testcase here. Perhaps instead of panicking, it would have been better to handle this more gracefully by clearing any state and returning geckodriver to accept new connections again.\n\nAs per Failed to decode response from marionette - Error to open Webdriver using python this issue can also occur if you are not using the complient version of the binaries.\n\nGeckoDriver, Selenium and Firefox Browser compatibility chart\n\n\nReference\nYou can find a relevant detailed discussion in:\n\n“Failed to decode response from marionette” message in Python/Firefox headless scraping script\n\n", "I experienced the same error on a particular site, after performing a successful login and when I was redirected to the next page.\nWhile Inspecting The Source of the new page code in my Firefox browser, I I noticed some bad format/HTML quality details that went away after a manual refresh. (I suspect related to lack of quality of that site in particular).\nWhat I did in order to remediate this was to start every next step on a new page with a refresh on my drive:\ndef my_next_step(driver):\n driver.refresh()\n time.sleep(10)\n element=driver.switch_to_frame('iframe')\n .......\n\nThis helped me overcome the site quality issues.\n", "On Ubuntu 22.10 using (apt, not snap) Firefox and Selenium in Python I also got this error after:\ndriver.switch_to.alert.accept()\n\nThe solution for me was to switch back to the context with:\ndef upload_file(driver, filepath):\n \"\"\"Uploads a Ublock Origin backup (template) .txt file into the Ublock\n Origin extension.\"\"\"\n driver.find_element(\"id\", \"restoreFilePicker\").send_keys(filepath)\n time.sleep(1)\n driver.switch_to.alert.accept()\n time.sleep(1)\n # Switch back to first tab (and reload/restore it).\n new_window = driver.window_handles[0]\n driver.switch_to.window(new_window)\n\nThis answer was given in this question.\n", "I removed size of window it is working without this error\n" ]
[ 3, 1, 1, 0 ]
[]
[]
[ "firefox", "geckodriver", "python", "selenium", "selenium_firefoxdriver" ]
stackoverflow_0054525301_firefox_geckodriver_python_selenium_selenium_firefoxdriver.txt
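One of the causes listed in the record above is navigating back while Selenium's focus is still inside an <iframe>. Below is a hedged sketch of switching back to the top-level browsing context first; the URL and the frame locator are placeholders, not taken from the question.

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
driver.get("https://example.com")  # placeholder URL

frame = driver.find_element(By.TAG_NAME, "iframe")
driver.switch_to.frame(frame)
# ... work inside the frame ...

# Return to the top-level document before navigating, to avoid acting on a
# browsing context that is about to be discarded.
driver.switch_to.default_content()
driver.back()
driver.quit()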
Q: Apply exec function to pandas DataFrame I have following pandas DataFrame to which I need to apply the exec function to all the rows. import pandas as pd var = 2.5 df = pd.DataFrame(["var*1", "var*3", "var*5"]) Expected result The expected result I need is: 0 0 2.5 1 7.5 2 12.5 Using exec function with apply However if I do the following: df.apply(exec) or even: df.apply(lambda x:exec(x)) I get the following error: TypeError: exec() arg 1 must be a string, bytes or code object Using map with lambda function Other option I tried was: >>> df[0].map(lambda x:exec(x)) 0 None 1 None 2 None Name: 0, dtype: object But it's not the result I expect. Defining a function I even tried to define a function and then apply it: def execute(x): exec("a = " + str(x)) return a df[0].apply(execute) But result is following error: NameError: name 'var' is not defined Question How can I then get the expected result? A: Take a look at pandas.eval. You can reference local variables using @var syntax. As for arbitrary code execution, you should explicitly pass local variables as dict and use eval to get a return value, e.g. eval("a + 2", {"a": 1}) As for your question: df[0].apply(eval, args=({}, {"var": var}))
Apply exec function to pandas DataFrame
I have following pandas DataFrame to which I need to apply the exec function to all the rows. import pandas as pd var = 2.5 df = pd.DataFrame(["var*1", "var*3", "var*5"]) Expected result The expected result I need is: 0 0 2.5 1 7.5 2 12.5 Using exec function with apply However if I do the following: df.apply(exec) or even: df.apply(lambda x:exec(x)) I get the following error: TypeError: exec() arg 1 must be a string, bytes or code object Using map with lambda function Other option I tried was: >>> df[0].map(lambda x:exec(x)) 0 None 1 None 2 None Name: 0, dtype: object But it's not the result I expect. Defining a function I even tried to define a function and then apply it: def execute(x): exec("a = " + str(x)) return a df[0].apply(execute) But result is following error: NameError: name 'var' is not defined Question How can I then get the expected result?
[ "Take a look at pandas.eval. You can reference local variables using @var syntax.\nAs for arbitrary code execution, you should explicitly pass local variables as dict and use eval to get a return value, e.g. eval(\"a + 2\", {\"a\": 1})\nAs for your question:\ndf[0].apply(eval, args=({}, {\"var\": var}))\n\n" ]
[ 1 ]
[]
[]
[ "apply", "dataframe", "exec", "pandas", "python" ]
stackoverflow_0074576920_apply_dataframe_exec_pandas_python.txt
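A self-contained sketch reproducing the accepted approach above on the question's data: per-row eval with an explicit globals/locals dict. The final line with pandas.eval and local_dict is an extra illustration beyond what the answer demonstrated.

import pandas as pd

var = 2.5
df = pd.DataFrame(["var*1", "var*3", "var*5"])

# Evaluate each expression string with an explicit namespace containing `var`.
result = df[0].apply(eval, args=({}, {"var": var}))
print(result)
# 0     2.5
# 1     7.5
# 2    12.5

# For a single expression, pandas.eval can take an explicit local dict.
print(pd.eval("var * 3", local_dict={"var": var}))  # 7.5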
Q: How to fix website picture problem on HTML {% extends 'base.html' %} {% block content %} <hi>Products</hi> <div class="row"> {% for products in products %} <div class="col"> <div class="card" style="width: 70rem;"> <img src="{{ products.image_url }}" class="card-img-top" alt="..."> <div class="card-body"> <h5 class="card-title">{{ products.name }}</h5> <p class="card-text">${{ products.price }}</p> <a href="#" class="btn btn-primary">Add to Cart</a> </div> </div> </div> {% endfor %} </div> {% endblock %} Is there a problem with this code I tried reformatting the code, by replacing the products.url into the alt brackets. But it was what i typed on in the brackets that showed up. A: I am not exactly sure what your problem is but I see problem in code, try replacing: {% for products in products %} to {% for single_product in products %} and then update this lines with new variable name single_product <img src="{{ single_product.image_url }}" class="card-img-top" alt="..."> <h5 class="card-title">{{ single_product.name }}</h5> <p class="card-text">${{ single_product.price }}</p> By using the same variable name you are overriding it
How to fix website picture problem on HTML
{% extends 'base.html' %} {% block content %} <hi>Products</hi> <div class="row"> {% for products in products %} <div class="col"> <div class="card" style="width: 70rem;"> <img src="{{ products.image_url }}" class="card-img-top" alt="..."> <div class="card-body"> <h5 class="card-title">{{ products.name }}</h5> <p class="card-text">${{ products.price }}</p> <a href="#" class="btn btn-primary">Add to Cart</a> </div> </div> </div> {% endfor %} </div> {% endblock %} Is there a problem with this code? I tried reformatting the code by putting products.url into the alt brackets, but all that showed up was the text I typed in the brackets.
[ "I am not exactly sure what your problem is but I see problem in code, try replacing:\n{% for products in products %}\n\nto\n{% for single_product in products %}\n\nand then update this lines with new variable name single_product\n<img src=\"{{ single_product.image_url }}\" class=\"card-img-top\" alt=\"...\">\n<h5 class=\"card-title\">{{ single_product.name }}</h5>\n<p class=\"card-text\">${{ single_product.price }}</p>\n\nBy using the same variable name you are overriding it\n" ]
[ 1 ]
[]
[]
[ "django", "django_templates", "python" ]
stackoverflow_0074577098_django_django_templates_python.txt
Q: 'IndexError: list index out of range'. What's wrong? response = requests.get('https://store.steampowered.com/genre/Free%20to%20Play/?tab=1') soup = BeautifulSoup(response.text, 'html.parser') product = random.choice(soup.find_all(class_='gamehover_GameTitle_mrkD1')) print('Рассмотрите эту игру: ' + product.text) I tried taking a different class. Then it returned an empty list in 'product'. A: When I checked it out, there were no elements with the class gamehover_GameTitle_mrkD1 on the website. This results in soup.find_all returning an empty list. Because random.choice then doesn't have any items to choose from, it will raise an indexerror. You can fix this error by choosing a class name that does actually exist on the web page. On the time I checked it, all game titles had animated_featured_capsule_Title_3vZJE as class, but this can change over time. If your code with a correct classname stops working, you'd best check whether the class name is still correct, and if not, change it.
'IndexError: list index out of range'. What's wrong?
response = requests.get('https://store.steampowered.com/genre/Free%20to%20Play/?tab=1') soup = BeautifulSoup(response.text, 'html.parser') product = random.choice(soup.find_all(class_='gamehover_GameTitle_mrkD1')) print('Рассмотрите эту игру: ' + product.text) I tried taking a different class. Then it returned an empty list in 'product'.
[ "When I checked it out, there were no elements with the class gamehover_GameTitle_mrkD1 on the website. This results in soup.find_all returning an empty list. Because random.choice then doesn't have any items to choose from, it will raise an indexerror.\nYou can fix this error by choosing a class name that does actually exist on the web page. On the time I checked it, all game titles had animated_featured_capsule_Title_3vZJE as class, but this can change over time. If your code with a correct classname stops working, you'd best check whether the class name is still correct, and if not, change it.\n" ]
[ 1 ]
[]
[]
[ "parsing", "python", "python_3.x" ]
stackoverflow_0074576913_parsing_python_python_3.x.txt
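Following the advice in the record above, a defensive sketch that avoids the IndexError by checking whether find_all returned anything before calling random.choice; the class name shown is the one the answer observed and will likely change again.

import random
import requests
from bs4 import BeautifulSoup

response = requests.get('https://store.steampowered.com/genre/Free%20to%20Play/?tab=1')
soup = BeautifulSoup(response.text, 'html.parser')

titles = soup.find_all(class_='animated_featured_capsule_Title_3vZJE')  # may be stale
if titles:
    product = random.choice(titles)
    print('Рассмотрите эту игру: ' + product.text)
else:
    print('No game titles found - the class name has probably changed.')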
Q: write specific columns to a csv file with csv module Hi, I am trying to write to a CSV file using the csv module (can't use pandas). The issue is I have keys like this: name_keys = ['DATASET ID', 'SOURCE NAME', 'NAME'] data = [ { "DATASET ID":112313, "SOURCE NAME":"source 1", "NAME":"0", "TYPE":1, "Random":1 }, { "DATASET ID":112315, "SOURCE NAME":"source 2", "NAME":"1", "TYPE":1, "Random":1 }] with open(file_path, 'w', encoding='UTF8', newline='') as f: writer = csv.DictWriter(f, fieldnames=name_keys) writer.writerow(name_keys) writer.writerows(data_) I just want the data for the required name keys, not all of the keys in data. How can I achieve this? With this code I am getting the error: ValueError: dict contains fields not in fieldnames: A: You need to set extrasaction='ignore' as an argument of DictWriter : If the dictionary passed to the writerow() method contains a key not found in fieldnames, the optional extrasaction parameter indicates what action to take. import csv with open("outputcsv.csv", 'w', encoding='UTF8', newline='') as f: writer = csv.DictWriter(f, fieldnames=name_keys, extrasaction='ignore') writer.writeheader() writer.writerows(data) # Output (.csv) :
write specific columns to a csv file with csv module
Hi, I am trying to write to a CSV file using the csv module (can't use pandas). The issue is I have keys like this: name_keys = ['DATASET ID', 'SOURCE NAME', 'NAME'] data = [ { "DATASET ID":112313, "SOURCE NAME":"source 1", "NAME":"0", "TYPE":1, "Random":1 }, { "DATASET ID":112315, "SOURCE NAME":"source 2", "NAME":"1", "TYPE":1, "Random":1 }] with open(file_path, 'w', encoding='UTF8', newline='') as f: writer = csv.DictWriter(f, fieldnames=name_keys) writer.writerow(name_keys) writer.writerows(data_) I just want the data for the required name keys, not all of the keys in data. How can I achieve this? With this code I am getting the error: ValueError: dict contains fields not in fieldnames:
[ "You need to set extrasaction='ignore' as an argument of DictWriter :\n\nIf the dictionary passed to the writerow() method\ncontains a key not found in fieldnames, the optional extrasaction\nparameter indicates what action to take.\n\nimport csv\n\nwith open(\"outputcsv.csv\", 'w', encoding='UTF8', newline='') as f:\n writer = csv.DictWriter(f, fieldnames=name_keys, extrasaction='ignore')\n writer.writeheader()\n writer.writerows(data)\n\n# Output (.csv) :\n\n" ]
[ 0 ]
[]
[]
[ "csv", "python", "python_3.x" ]
stackoverflow_0074576994_csv_python_python_3.x.txt
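Besides extrasaction='ignore', the same result can be had by trimming each dict down to the wanted keys before writing. A small sketch, writing to an in-memory buffer here only so it runs without touching disk:

import csv
import io

name_keys = ['DATASET ID', 'SOURCE NAME', 'NAME']
data = [
    {"DATASET ID": 112313, "SOURCE NAME": "source 1", "NAME": "0", "TYPE": 1, "Random": 1},
    {"DATASET ID": 112315, "SOURCE NAME": "source 2", "NAME": "1", "TYPE": 1, "Random": 1},
]

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=name_keys)
writer.writeheader()
# Keep only the required keys from each row.
writer.writerows({k: row[k] for k in name_keys} for row in data)
print(buf.getvalue())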
Q: Can't pickle Enum after reloading it : not the same object This is a follow up of my previous question : Enum comparison become False after reloading module Ultimately, I would like to be able to pickle my enum. Let's start from myenum.py again : # myenum.py import enum class MyEnum(enum.Enum): ONE = 1 TWO = 2 I again import this file in my script. I create a variable a an instance of MyEnum, pickles it and load it into a variable b. It works fine and both variables are equal. Now, I reload my file. I try to pickle a but the following error occurs : Traceback (most recent call last): File "f:/python_test/test.py", line 8, in <module> b = pickle.loads(pickle.dumps(a)) _pickle.PicklingError: Can't pickle <enum 'MyEnum'>: it's not the same object as myenum.MyEnum I believe this is because the IDs of the enum changed, so in pickle eyes, a is indeed not the same object. Note that it is not a solution for me to redefine each existing enum variable each time a file is reimported. Here is the code to reproduce the issue : # test.py import importlib, myenum, pickle if __name__=='__main__': a = myenum.MyEnum.ONE b = pickle.loads(pickle.dumps(a)) print(b == a) # is True importlib.reload(globals()["myenum"]) b = pickle.loads(pickle.dumps(a)) # Error print(b == a) A: Short answer: Stop using reload. It's a hack for use during active development, not for production use. If this is just for active development in an interactive session, move the definition of the enum somewhere aside from the module you're actively editing and reloading, so it doesn't get caught up in the reloads. Otherwise, you're stuck; pickle is accurately describing the error. After the reload, a (the old myenum.MyEnum.ONE) is entirely unrelated to myenum.MyEnum.ONE (for that matter, the class of a is unrelated to myenum.MyEnum). Sure, it may have been defined in logically the same way, but both the class and instance were redefined from scratch, and given a key selling point of enum (from the docs: "Within an enumeration, the members can be compared by identity"), the fact that they aren't the same object means they aren't equal. You're asking pickle to take an object of a class pickle can't find, and serialize it such that you can reproduce it, but that class is gone; pickle can't reproduce objects from it because it can't look the class up in any reliable place in your code (if it tried to import your module fresh, it could make the new ONE, but not your old ONE that's not equal to the current ONE). You can't work around that without invoking the most terrible and brittle of hacks. So don't do it; find a way to avoid reloading things that must be pickled. A: I was facing the same issue with pickling a class when the module was reloaded somewhere in between. Fortunately, it's quite easy to work around this using dill instead of pickle.
Can't pickle Enum after reloading it : not the same object
This is a follow up of my previous question : Enum comparison become False after reloading module Ultimately, I would like to be able to pickle my enum. Let's start from myenum.py again : # myenum.py import enum class MyEnum(enum.Enum): ONE = 1 TWO = 2 I again import this file in my script. I create a variable a an instance of MyEnum, pickles it and load it into a variable b. It works fine and both variables are equal. Now, I reload my file. I try to pickle a but the following error occurs : Traceback (most recent call last): File "f:/python_test/test.py", line 8, in <module> b = pickle.loads(pickle.dumps(a)) _pickle.PicklingError: Can't pickle <enum 'MyEnum'>: it's not the same object as myenum.MyEnum I believe this is because the IDs of the enum changed, so in pickle eyes, a is indeed not the same object. Note that it is not a solution for me to redefine each existing enum variable each time a file is reimported. Here is the code to reproduce the issue : # test.py import importlib, myenum, pickle if __name__=='__main__': a = myenum.MyEnum.ONE b = pickle.loads(pickle.dumps(a)) print(b == a) # is True importlib.reload(globals()["myenum"]) b = pickle.loads(pickle.dumps(a)) # Error print(b == a)
[ "Short answer: Stop using reload. It's a hack for use during active development, not for production use.\nIf this is just for active development in an interactive session, move the definition of the enum somewhere aside from the module you're actively editing and reloading, so it doesn't get caught up in the reloads. Otherwise, you're stuck; pickle is accurately describing the error. After the reload, a (the old myenum.MyEnum.ONE) is entirely unrelated to myenum.MyEnum.ONE (for that matter, the class of a is unrelated to myenum.MyEnum). Sure, it may have been defined in logically the same way, but both the class and instance were redefined from scratch, and given a key selling point of enum (from the docs: \"Within an enumeration, the members can be compared by identity\"), the fact that they aren't the same object means they aren't equal. You're asking pickle to take an object of a class pickle can't find, and serialize it such that you can reproduce it, but that class is gone; pickle can't reproduce objects from it because it can't look the class up in any reliable place in your code (if it tried to import your module fresh, it could make the new ONE, but not your old ONE that's not equal to the current ONE).\nYou can't work around that without invoking the most terrible and brittle of hacks. So don't do it; find a way to avoid reloading things that must be pickled.\n", "I was facing the same issue with pickling a class when the module was reloaded somewhere in between.\nFortunately, it's quite easy to work around this using dill instead of pickle.\n" ]
[ 2, 0 ]
[]
[]
[ "enums", "pickle", "python", "python_importlib" ]
stackoverflow_0066460582_enums_pickle_python_python_importlib.txt
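If a reload really cannot be avoided during development, stale members can be re-mapped onto the freshly loaded class by value before pickling. A sketch of that workaround, using the myenum module from the question; it assumes the member values did not change across the reload.

import importlib, pickle
import myenum

a = myenum.MyEnum.ONE
importlib.reload(myenum)

# `a` now belongs to the old, orphaned class; rebuild it from the reloaded one.
a = myenum.MyEnum(a.value)

b = pickle.loads(pickle.dumps(a))
print(b == a)  # True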
Q: How to Rearrange Some Strings in a List and Find the Average of It's Integers? For my class, I have to take a file and turn it into a list with lists inside of it separating each "contestant" and from there, rearrange the Strings in it to where if it were a name, the name John Doe would instead become Doe John. On top of this, I also have to take the integers in each list and calculate their average. We haven't done this in class which is why I'm so lost. Thank you for any help. I've been able to turn my file into a list by doing what I've put below, but after that, I'm completely stuck. my_file = open("sample-1.txt.txt") data3 = my_file.read() list1 = data3.split(" ") flist = list() len_flist = 10 for i in range(0, len(list1), len_flist): flist.append(list1[i:i+len_flist]) my_file.close() print(flist) Output: [['Ty', 'Cobb', '13099', '11434', '3053', '724', '295', '117', '1249', '9'], ['\nChipper', 'Jones', '10614', '8984', '1671', '549', '38', '468', '1512', '1'], ['\nJonny', 'Bench', '8674', '7658', '1254', '381', '24', '389', '891', '1'], ['\nHank', 'Aaron', '13941', '12364', '2294', '624', '98', '755', '1402', '3'], ['\nTony', 'Gwynn', '10232', '9288', '2378', '543', '85', '135', '434', '2'], ['\nJohn', 'Smoltz', '1167', '948', '118', '26', '2', '5', '79', '3'], ['\nAaron', 'Woods', '1122', '123', '324', '45', '88', '1561', '9', '18']] The output is how my teacher wants us to write it. But I'm not sure how to flip the names to be "Cobb, Ty," and then calculate the average of the numbers. The way she wants the output to be by the end is "[[Cobb, Ty, 3747.5], [...], [...]}" A: It is better if you use file.readlines() instead of file.read(), because it splits it into each line, separating each contestant. With that, you can do stuff with each contestant, like so: fin = open("sample-1.txt.txt") contestants = fin.readlines() final = [] for contestant_string in contestants: if contestant_string == "\n": continue #if there is an extra line at the end, with nothing in it contestant = contestant_string.strip().split(" ") #remove any annoying "\n"s data = [contestant[1], contestant[0]] # reverses name total = 0 # holds total score for score in contestant[2:]: # goes through each score (skips the name) total+=int(score) data.append(total/len(contestant[2:]) # the average final.append(data) fin.close() print(data)
How to Rearrange Some Strings in a List and Find the Average of Its Integers?
For my class, I have to take a file and turn it into a list with lists inside of it separating each "contestant" and from there, rearrange the Strings in it to where if it were a name, the name John Doe would instead become Doe John. On top of this, I also have to take the integers in each list and calculate their average. We haven't done this in class which is why I'm so lost. Thank you for any help. I've been able to turn my file into a list by doing what I've put below, but after that, I'm completely stuck. my_file = open("sample-1.txt.txt") data3 = my_file.read() list1 = data3.split(" ") flist = list() len_flist = 10 for i in range(0, len(list1), len_flist): flist.append(list1[i:i+len_flist]) my_file.close() print(flist) Output: [['Ty', 'Cobb', '13099', '11434', '3053', '724', '295', '117', '1249', '9'], ['\nChipper', 'Jones', '10614', '8984', '1671', '549', '38', '468', '1512', '1'], ['\nJonny', 'Bench', '8674', '7658', '1254', '381', '24', '389', '891', '1'], ['\nHank', 'Aaron', '13941', '12364', '2294', '624', '98', '755', '1402', '3'], ['\nTony', 'Gwynn', '10232', '9288', '2378', '543', '85', '135', '434', '2'], ['\nJohn', 'Smoltz', '1167', '948', '118', '26', '2', '5', '79', '3'], ['\nAaron', 'Woods', '1122', '123', '324', '45', '88', '1561', '9', '18']] The output is how my teacher wants us to write it. But I'm not sure how to flip the names to be "Cobb, Ty," and then calculate the average of the numbers. The way she wants the output to be by the end is "[[Cobb, Ty, 3747.5], [...], [...]}"
[ "It is better if you use file.readlines() instead of file.read(), because it splits it into each line, separating each contestant.\nWith that, you can do stuff with each contestant, like so:\nfin = open(\"sample-1.txt.txt\")\ncontestants = fin.readlines()\nfinal = []\nfor contestant_string in contestants:\n if contestant_string == \"\\n\":\n continue #if there is an extra line at the end, with nothing in it\n contestant = contestant_string.strip().split(\" \") #remove any annoying \"\\n\"s\n data = [contestant[1], contestant[0]] # reverses name\n total = 0 # holds total score\n for score in contestant[2:]: # goes through each score (skips the name)\n total+=int(score)\n data.append(total/len(contestant[2:]) # the average\n final.append(data)\nfin.close()\nprint(data)\n\n" ]
[ 0 ]
[]
[]
[ "integer", "list", "python", "string" ]
stackoverflow_0074577031_integer_list_python_string.txt
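A more compact variant of the answer above using statistics.mean and sequence unpacking; it assumes the same space-separated layout (first name, last name, then integer stats) and the same file name as the question.

import statistics

final = []
with open("sample-1.txt.txt") as fin:
    for line in fin:
        if not line.strip():
            continue  # skip blank lines
        first, last, *scores = line.split()
        final.append([last, first, statistics.mean(int(s) for s in scores)])
print(final)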
Q: Image classifier project python predict.py /path/to/image checkpoint What is the path to the image here? I need to give an image as an input; the image is in a folder 1, which is in folder test, which is in the folder flowers. So I have written it as /flowers/test/1/image.jpg, but I am getting "no file or directory". I have tried writing the statements in the image, as I said that's not working. I am doing a Udacity nanodegree project, AI Programming with Python. A: Unless the flowers/ directory is at the root of the file system, you shouldn't have a leading slash in front of the path. To reference the current directory, you should instead do python predict.py ./flowers/test/1/image.jpg checkpoint.
Image classifier project
python predict.py /path/to/image checkpoint What is the path to the image here? I need to give an image as an input; the image is in a folder 1, which is in folder test, which is in the folder flowers. So I have written it as /flowers/test/1/image.jpg, but I am getting "no file or directory". I have tried writing the statements in the image, as I said that's not working. I am doing a Udacity nanodegree project, AI Programming with Python.
[ "Unless the flower/ directory is at the root of the file system, you shouldn't have a leading slash in front of the path. To reference the current directory, you should instead do python predict.py ./flowers/test/1/image.jpg checkpoint.\n" ]
[ 2 ]
[]
[]
[ "python" ]
stackoverflow_0074577219_python.txt
Q: Problem with logging in Django using django.contrib.auth views When I try to login via LoginView, the process seems successful. I'm redirected to LOGIN_REDIRECT_URL. from django.contrib.auth import views as auth_views urlpatterns = [ ... path('login', auth_views.LoginView.as_view(), name='login'), ] But when I try to access a view which requires login, I am redirected to login page. When I output request.user.id in a view, this is also None. class MyView(viewsets.ViewSet): @method_decorator(login_required(login_url='/login')) def list(self, request, server_id): .... What am I missing? Thanks by now. Here's my login form. <form method="POST" action="{% url 'login' %}"> {% csrf_token %} <div class="form-group"> <label>Username</label> <input type="text" class="form-control" id="username" name="username" placeholder="Username"> </div> <div class="form-group"> <label>Password</label> <input type="password" name="password" id="password" class="form-control" placeholder="Password"> </div> <button type="submit" class="btn btn-primary">Submit</button> </form> A: Try something like this: from rest_framework.permissions import IsAuthenticated class MyView(viewsets.ViewSet): permission_classes = [IsAuthenticated] def list(self, request, server_id): .... ViewSet is djangoREST class, and sometimes there are some differences in usage between default django and djangoREST views/other components.
Problem with logging in Django using django.contrib.auth views
When I try to login via LoginView, the process seems successful. I'm redirected to LOGIN_REDIRECT_URL. from django.contrib.auth import views as auth_views urlpatterns = [ ... path('login', auth_views.LoginView.as_view(), name='login'), ] But when I try to access a view which requires login, I am redirected to login page. When I output request.user.id in a view, this is also None. class MyView(viewsets.ViewSet): @method_decorator(login_required(login_url='/login')) def list(self, request, server_id): .... What am I missing? Thanks by now. Here's my login form. <form method="POST" action="{% url 'login' %}"> {% csrf_token %} <div class="form-group"> <label>Username</label> <input type="text" class="form-control" id="username" name="username" placeholder="Username"> </div> <div class="form-group"> <label>Password</label> <input type="password" name="password" id="password" class="form-control" placeholder="Password"> </div> <button type="submit" class="btn btn-primary">Submit</button> </form>
[ "Try something like this:\nfrom rest_framework.permissions import IsAuthenticated\n\n\nclass MyView(viewsets.ViewSet):\n permission_classes = [IsAuthenticated]\n\n def list(self, request, server_id):\n .... \n\nViewSet is djangoREST class, and sometimes there are some differences in usage between default django and djangoREST views/other components.\n" ]
[ 0 ]
[]
[]
[ "django", "django_authentication", "python" ]
stackoverflow_0074576771_django_django_authentication_python.txt
Q: IPython Notebook and SQL: 'ImportError: No module named sql' when running '%load_ext sql' Just set up an IPython Notebook on Ubuntu 16.04 but I can't use %load_ext sql. I get: ImportError: No module named sql I've tried using pip and pip3 with and without sudo to install ipython-sql. All 4 times it installed without issue but nothing changes on the notebook. Thanks in advance! A: I know it's been a long time, but I faced the same issue, and Thomas' advice solved my problem. Just outlining what I did here. When I ran sys.executable in the notebook I saw /usr/bin/python2, while the pip I used to install the package was /usr/local/bin/pip (to find out what pip you are using, just do which pip or sudo which pip if you are installing packages system-wide). So I reinstalled ipython-sql using the following command, and everything worked out just fine. sudo -H /usr/bin/python2 -m pip install ipython-sql This is odd since I always install my packages using pip. I'm wondering maybe there's something special about the magic functions in Jupyter. A: If you're trying to connect the IBM database and came across this problem and the above solutions couldn't do it for you, you could give this a chance. (By the way, this error usually means one of your package installations doesn't meet the requirements or more probably: you're in the wrong kernel/virtual environment and the Jupyter instance can't run your command from the specified packages.) From JupyterLab or Jupyter Notebook go to Kernel>Change Kernel and change the kernel that you've installed the packages. Wait for it to establish a connection. Then use 0, 0 to restart kernel (or Kernel>Restart Kernel Go to any cell and run the below commands to install packages in the current kernel. !pip install sqlalchemy==1.3.9 !pip install ibm_db_sa !pip install ipython-sql Now try %load_ext sql A: I know this answer will be (very) late to contribute to the discussion but maybe it will help someone. I found out what worked for me by following Thomas, who commented above. However, with a bit of a caveat, that I was using pyenv to setup and manage python on my local machine. So when running sys.executable in a jupyter notebook cell I found out my python path was /usr/local/Cellar/jupyterlab/3.2.8/libexec/bin/python3.9, while I expected it to be somewhere along the lines of '/Users/<USER_NAME>/.pyenv/versions/3.9.2/bin/python'. This error was attributed to me having installed jupyter through command brew install jupyter instead of pyenv exec pip install jupyter. I proceeded to uninstall jupyter with brew and then executing the second command, which now got jupyter up and running! (note that you would first have to have pyenv setup properly). A: I doubt you're using different IPython Notebook kernel other than which you've installed ipython-sql in. IPython Notebook can have more than one kernel. If it is the case, make sure you're in the right place first. A: Wherever you installed jupyter, is where jupyter loads exts. You are using the jupyter installed outside of virtual environment, then activated virtual environment, then installed ipython-sql, then trying to load_ext from different level. Just deactivate the virtual environment, install ipython-sql, and then activate the environment. If you want to install ipython-sql only in the virtual environment, then at least you need to install jupyter inside the virtual environment too. Both puppies should be in the same level.
IPython Notebook and SQL: 'ImportError: No module named sql' when running '%load_ext sql'
Just set up an IPython Notebook on Ubuntu 16.04 but I can't use %load_ext sql. I get: ImportError: No module named sql I've tried using pip and pip3 with and without sudo to install ipython-sql. All 4 times it installed without issue but nothing changes on the notebook. Thanks in advance!
[ "I know it's been a long time, but I faced the same issue, and Thomas' advice solved my problem. Just outlining what I did here.\nWhen I ran sys.executable in the notebook I saw /usr/bin/python2, while the pip I used to install the package was /usr/local/bin/pip (to find out what pip you are using, just do which pip or sudo which pip if you are installing packages system-wide). So I reinstalled ipython-sql using the following command, and everything worked out just fine.\nsudo -H /usr/bin/python2 -m pip install ipython-sql\nThis is odd since I always install my packages using pip. I'm wondering maybe there's something special about the magic functions in Jupyter.\n", "If you're trying to connect the IBM database and came across this problem and the above solutions couldn't do it for you, you could give this a chance. (By the way, this error usually means one of your package installations doesn't meet the requirements or more probably: you're in the wrong kernel/virtual environment and the Jupyter instance can't run your command from the specified packages.)\nFrom JupyterLab or Jupyter Notebook go to Kernel>Change Kernel and change the kernel that you've installed the packages. Wait for it to establish a connection. Then use 0, 0 to restart kernel (or Kernel>Restart Kernel\nGo to any cell and run the below commands to install packages in the current kernel.\n!pip install sqlalchemy==1.3.9\n!pip install ibm_db_sa\n!pip install ipython-sql\n\nNow try\n%load_ext sql\n", "I know this answer will be (very) late to contribute to the discussion but maybe it will help someone. I found out what worked for me by following Thomas, who commented above. However, with a bit of a caveat, that I was using pyenv to setup and manage python on my local machine.\nSo when running sys.executable in a jupyter notebook cell I found out my python path was /usr/local/Cellar/jupyterlab/3.2.8/libexec/bin/python3.9, while I expected it to be somewhere along the lines of '/Users/<USER_NAME>/.pyenv/versions/3.9.2/bin/python'.\nThis error was attributed to me having installed jupyter through command brew install jupyter instead of pyenv exec pip install jupyter. I proceeded to uninstall jupyter with brew and then executing the second command, which now got jupyter up and running!\n(note that you would first have to have pyenv setup properly).\n", "I doubt you're using different IPython Notebook kernel other than which you've installed ipython-sql in.\nIPython Notebook can have more than one kernel. If it is the case, make sure you're in the right place first. \n", "Wherever you installed jupyter, is where jupyter loads exts.\nYou are using the jupyter installed outside of virtual environment,\nthen activated virtual environment,\nthen installed ipython-sql,\nthen trying to load_ext from different level.\nJust deactivate the virtual environment, install ipython-sql, and then activate the environment.\nIf you want to install ipython-sql only in the virtual environment, then at least you need to install jupyter inside the virtual environment too.\nBoth puppies should be in the same level.\n" ]
[ 5, 5, 1, 0, 0 ]
[]
[]
[ "ipython", "ipython_sql", "pip", "python" ]
stackoverflow_0037149748_ipython_ipython_sql_pip_python.txt
Q: Gdown is giving Permission error for particular file,although it is opening up fine manually I am not able to download file using gdown package.It is giving permission error. But when i am opening it manually.It is giving no such error and opening up fine. Here is the code i am using and link import gdown url='https://drive.google.com/uc?id=0B1lRQVLFjBRNR3Jqam1menVtZnc' output='letter.pdf' gdown.download(url, output, quiet=False) Error is Permission denied: https://drive.google.com/uc?id=0B1lRQVLFjBRNR3Jqam1menVtZnc Maybe you need to change permission over 'Anyone with the link'? A: In my case, I ran the following command and try using gdown, and problem was solved: pip install --upgrade --no-cache-dir gdown If you are using google-colab, try: !pip install --upgrade --no-cache-dir gdown then: !gdown --id [id of your file] A: If you're working with big files (in my case was a >1gb file), you can solve by copying the url from 'Download anyway' button in Google Drive. A: Create your downloadable zip folder and make it "Anyone with the link" and change "Viewer" to "Editor". Finally use: !gdown --id 'id of the file' A: instead of gdown <drive-id> type gdown "<drive-id>&confirm=t" for large files. A: pip install --upgrade --no-cache-dir gdown didn't work for me. You can try pre-released version of this : pip install -U --no-cache-dir gdown --pre This worked for me.
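As a rough sketch of the retry suggested in the answers to this question (upgrade gdown first with pip install --upgrade --no-cache-dir gdown, then re-run the original snippet), assuming the Drive file really is shared as 'Anyone with the link': import os import gdown url = 'https://drive.google.com/uc?id=0B1lRQVLFjBRNR3Jqam1menVtZnc' output = 'letter.pdf' # If this still raises Permission denied after the upgrade, the sharing setting on the Drive file (not the local code) is usually the culprit. gdown.download(url, output, quiet=False) print('downloaded:', os.path.exists(output))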
Gdown is giving a Permission error for a particular file, although it opens up fine manually
I am not able to download a file using the gdown package. It gives a permission error, but when I open the file manually it opens fine with no such error. Here is the code I am using and the link: import gdown url='https://drive.google.com/uc?id=0B1lRQVLFjBRNR3Jqam1menVtZnc' output='letter.pdf' gdown.download(url, output, quiet=False) The error is: Permission denied: https://drive.google.com/uc?id=0B1lRQVLFjBRNR3Jqam1menVtZnc Maybe you need to change permission over 'Anyone with the link'?
[ "In my case, I ran the following command and try using gdown, and problem was solved:\npip install --upgrade --no-cache-dir gdown\n\nIf you are using google-colab, try:\n!pip install --upgrade --no-cache-dir gdown\n\nthen:\n!gdown --id [id of your file]\n", "If you're working with big files (in my case was a >1gb file), you can solve by copying the url from 'Download anyway' button in Google Drive.\n", "Create your downloadable zip folder and make it \"Anyone with the link\" and change \"Viewer\" to \"Editor\".\nFinally use:\n!gdown --id 'id of the file'\n\n", "instead of\ngdown <drive-id>\ntype\ngdown \"<drive-id>&confirm=t\"\nfor large files.\n", "pip install --upgrade --no-cache-dir gdown\ndidn't work for me.\nYou can try pre-released version of this :\npip install -U --no-cache-dir gdown --pre\nThis worked for me.\n" ]
[ 36, 7, 2, 2, 1 ]
[]
[]
[ "google_drive_api", "python" ]
stackoverflow_0060739653_google_drive_api_python.txt
Q: Get features names from scikit pipelines I am working on ML regression problem where I defined a pipeline like below based on a tutorial online. My code looks like below pipe1 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.LinearRegression())]) pipe2 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.Lasso())]) pipe3 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.Ridge())]) pipe4 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.TweedieRegressor())]) models3 = {'OLS': pipe1, 'Lasso': GridSearchCV(pipe2, param_grid=lasso_params).fit(X_train,y_train).best_estimator_ , 'Ridge': GridSearchCV(pipe3, param_grid=ridge_params).fit(X_train,y_train).best_estimator_, 'Tweedie':GridSearchCV(pipe4, param_grid=tweedie_params).fit(X_train,y_train).best_estimator_} test(models3, df) While the above code worked fine and gave me the results, how can I get the list of polynomial features that were created? Or how can I view them in the dataframe? A: You can use the transform method to generate the polynomial feature matrix. To do so, you'll first have to access the corresponding step in the pipeline which, in this case, is at the 0th index. Here is how you can get the polynomial features array for pipe2: feature_matrix = model3['Lasso'][0].transform(X_train) Furthermore, if you wish to generate a DataFrame with the feature names, you can do so by using the get_feature_names_out method: feature_names = model['Lasso'][0].get_feature_names_out() feature_df = pd.DataFrame(feature_matrix, columns=feature_names)
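As a concrete sketch of the answer's approach, assuming the fitted pipeline stored under models3['Lasso'] and the same X_train from the question, the PolynomialFeatures step can also be pulled out of the pipeline by name rather than by position (get_feature_names_out needs scikit-learn >= 1.0): import pandas as pd best_lasso = models3['Lasso'] # a fitted Pipeline poly = best_lasso.named_steps['poly'] # the PolynomialFeatures step # Names of the generated polynomial terms. names = poly.get_feature_names_out() # The expanded feature matrix for the training data, as a DataFrame. X_poly = poly.transform(X_train) poly_df = pd.DataFrame(X_poly, columns=names) print(poly_df.head())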
Get features names from scikit pipelines
I am working on an ML regression problem where I defined a pipeline, based on an online tutorial, as shown below. My code looks like this: pipe1 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.LinearRegression())]) pipe2 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.Lasso())]) pipe3 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.Ridge())]) pipe4 = Pipeline([('poly', PolynomialFeatures()), ('fit', linear_model.TweedieRegressor())]) models3 = {'OLS': pipe1, 'Lasso': GridSearchCV(pipe2, param_grid=lasso_params).fit(X_train,y_train).best_estimator_ , 'Ridge': GridSearchCV(pipe3, param_grid=ridge_params).fit(X_train,y_train).best_estimator_, 'Tweedie':GridSearchCV(pipe4, param_grid=tweedie_params).fit(X_train,y_train).best_estimator_} test(models3, df) While the above code worked fine and gave me the results, how can I get the list of polynomial features that were created? Or how can I view them in the dataframe?
[ "You can use the transform method to generate the polynomial feature matrix.\nTo do so, you'll first have to access the corresponding step in the pipeline which, in this case, is at the 0th index. Here is how you can get the polynomial features array for pipe2:\nfeature_matrix = model3['Lasso'][0].transform(X_train)\n\nFurthermore, if you wish to generate a DataFrame with the feature names, you can do so by using the get_feature_names_out method:\nfeature_names = model['Lasso'][0].get_feature_names_out()\nfeature_df = pd.DataFrame(feature_matrix, columns=feature_names)\n\n" ]
[ 1 ]
[]
[]
[ "feature_extraction", "machine_learning", "pipeline", "python", "scikit_learn" ]
stackoverflow_0074570293_feature_extraction_machine_learning_pipeline_python_scikit_learn.txt
Q: Pandas How to use a column value as an index to another row I have the following line of code df["high_int"] = df.Slope * (df.index - df.max_idx) + df,loc['max_idx', 'High] max_idx contains the indexes of the highest highs over a period eg: 15 or 30. I have tried .loc, .iloc, .at, .iat .get, .shift(), as well as df['max_idx'].map(df['High']) Most errors seem to be related to using a series rather than an int (in the case of .iloc) or similar. It seems to me that this should a trivial task. Am I missing something obvious? Thanks in advance A: Last part doesn't really make sense, df.loc[index, columns] takes index filters, and column, or list of columns, not 2 columns. Another thing - assuming you wanted to write df[["max_id", "High"]] - it would also fail, since you cannot force 2 columns into one in this way. Consider the below as example of what you can and cannot do: >>> df =pd.DataFrame({"Slope": [1,3,2, -5, -23.3], "max_id": [1,1,1,2,2], "High": [3,4,4,4,3]}) >>> df["high_int"] = df.Slope * (df.index - df.max_id) >>> df Slope max_id High high_int 0 1.0 1 3 -1.0 1 3.0 1 4 0.0 2 2.0 1 4 2.0 3 -5.0 2 4 -5.0 4 -23.3 2 3 -46.6 >>> df["high_int"] = df.Slope * (df.index - df.max_id) + df[["max_id", "High"]] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/9skibi2/miniconda3/envs/airflow2/lib/python3.9/site-packages/pandas/core/frame.py", line 3967, in __setitem__ self._set_item_frame_value(key, value) File "/home/9skibi2/miniconda3/envs/airflow2/lib/python3.9/site-packages/pandas/core/frame.py", line 4097, in _set_item_frame_value raise ValueError("Columns must be same length as key") ValueError: Columns must be same length as key
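A small self-contained sketch of the lookup the question is after, fetching another row's High via the label stored in max_idx, assuming max_idx holds valid index labels of the same DataFrame: import pandas as pd df = pd.DataFrame({'Slope': [1.0, 2.0, -1.5], 'High': [10.0, 12.0, 9.0], 'max_idx': [1, 1, 0]}) # Map each row's max_idx label to the High value stored at that label. high_at_max = df['max_idx'].map(df['High']) df['high_int'] = df['Slope'] * (df.index - df['max_idx']) + high_at_max print(df)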
Pandas: How to use a column value as an index to another row
I have the following line of code df["high_int"] = df.Slope * (df.index - df.max_idx) + df,loc['max_idx', 'High] max_idx contains the indexes of the highest highs over a period, e.g. 15 or 30. I have tried .loc, .iloc, .at, .iat .get, .shift(), as well as df['max_idx'].map(df['High']) Most errors seem to be related to using a Series rather than an int (in the case of .iloc) or similar. It seems to me that this should be a trivial task. Am I missing something obvious? Thanks in advance
[ "Last part doesn't really make sense, df.loc[index, columns] takes index filters, and column, or list of columns, not 2 columns. Another thing - assuming you wanted to write df[[\"max_id\", \"High\"]] - it would also fail, since you cannot force 2 columns into one in this way.\nConsider the below as example of what you can and cannot do:\n>>> df =pd.DataFrame({\"Slope\": [1,3,2, -5, -23.3], \"max_id\": [1,1,1,2,2], \"High\": [3,4,4,4,3]})\n>>> df[\"high_int\"] = df.Slope * (df.index - df.max_id)\n>>> df\n Slope max_id High high_int\n0 1.0 1 3 -1.0\n1 3.0 1 4 0.0\n2 2.0 1 4 2.0\n3 -5.0 2 4 -5.0\n4 -23.3 2 3 -46.6\n\n>>> df[\"high_int\"] = df.Slope * (df.index - df.max_id) + df[[\"max_id\", \"High\"]]\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/home/9skibi2/miniconda3/envs/airflow2/lib/python3.9/site-packages/pandas/core/frame.py\", line 3967, in __setitem__\n self._set_item_frame_value(key, value)\n File \"/home/9skibi2/miniconda3/envs/airflow2/lib/python3.9/site-packages/pandas/core/frame.py\", line 4097, in _set_item_frame_value\n raise ValueError(\"Columns must be same length as key\")\nValueError: Columns must be same length as key\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074577173_pandas_python.txt
Q: How to mock mongodb for python unittests? I am using mock module for Python 2.7 to mock my other functions and using unittest for writing unit tests. I am wondering if mocking the MongoDB is different than using mock functionality (mock.patch a function that is being called?) Or I need to use another different package for that purpose? I do not think I want to have a test mongodb instance running. All I want is some tempo data and being able to call pymongo functionality. I am just a bit lost in thinking of is there a way to write a mock for a module (like pymongo), or anything is achievable by mock module. So appreciate if you could provide an example or tutorial on this. Code to Test from pymongo import MongoClient monog_url = 'mongodb://localhost:27017' client = MongoClient(monog_url) db = client.db class Dao(object): def __init__(self): pass def save(self, user): db_doc = { 'name': user.name, 'email': user.email } db.users.save(db_doc) def getbyname(self, user): db_doc = { 'name': user.name, } return db.users.find(db_doc) To test this, I do not really want a test mongodb up and running! But also, I think I do not want to mock db.userssave and db.users.find because I want to actually be able to retrieve the data that I saved and make sure it is in the db. I think I need to create some fixtures per models that are in my memory and work with them. Just do I need an external tool to do so? I am thinking of keeping some fake data like this, just do not know how to properly deal with it. users = { {'name' : 'Kelly', 'email' : 'kelly@gmail.com'}, {'name': 'Sam', 'email': 'sam@gmail.com'} } A: I recommend using mongomock for mocking mongodb. It's basically an in-memory mongodb with pymongo interface and made specifically for this purpose. https://github.com/mongomock/mongomock A: You can also do this if you're just doing something simple, and you don't really need to retrieve by field. @mock.patch("pymongo.collection.Collection.find") def test_name(self, mock_find): mock_find.return_value = {'name' : 'Kelly', 'email' : 'kelly@gmail.com'} # rest of test A: You can certainly mock PyMongo, but I recommend mocking the MongoDB server itself. I've written a pure-Python emulator for MongoDB that can control fully, which responds to MongoDB Wire Protocol messages however you choose: http://mockupdb.readthedocs.io/tutorial.html Here's an example of using MockupDB with a Python application: https://emptysqua.re/blog/test-mongodb-failures-mockupdb/ It requires intimate knowledge of the MongoDB wire protocol, but that's a useful skill to acquire anyway. 
A: Adding to @mirthbottle answer, if you want to access attribute of mongo object as a field you can do it as, class MongoDummybject: def __init__(self, _data): for _d in _data: setattr(self, _d, _data[_d]) return_data = {'name' : 'Nishant', 'email' : 'nishant@gmail.com'} @mock.patch("pymongo.collection.Collection.find") def test_name(self, mock_find): mock_find.return_value = MongoDummybject(return_data) A: For unit testing exception wrapped within custom exception, patch functions(e.g.bulk_write) within Collection of mongomock @mock.patch("mongomock.collection.Collection.bulk_write", side_effect=BulkWriteError({})) def test_bulk_wrt_err(self, blk_wrt_err): with self.assertRaises(SpecialBulkWriteExcep) as context: add_user() Sample code here A: Mongomock is recommended way for testing, here's a runnable example to get started: client.py from dataclasses import dataclass import pymongo monog_url = 'mongodb://localhost:27018' client = pymongo.MongoClient(monog_url) db = client.db @dataclass class User: name: str email: str class Dao: def save(self, db, user): db_doc = { 'name': user.name, 'email': user.email } return db.users.insert_one(db_doc).inserted_id def get_by_name(self, db, user): db_doc = { 'name': user.name, } return db.users.find(db_doc) test_client.py import mongomock import pytest from client import Dao, User class TestDao: @pytest.fixture def user(self): yield User(name='John', email='test@gmail.com') @pytest.fixture def dao(self): yield Dao() @pytest.fixture def client_mock(self): yield mongomock.MongoClient() @pytest.fixture def mock_db(self, client_mock): yield client_mock.db def test_save(self, mock_db, dao, user): id = dao.save(mock_db, user) users = list(mock_db.users.find()) assert [obj for obj in users if obj['_id'] == id] assert len(users) == 1 def test_get_by_name(self, mock_db, dao, user): dao.save(mock_db, user) found_user = next(dao.get_by_name(mock_db, user)) found_user.pop('_id') assert found_user == user.__dict__ unknown_user = User(name='Smith', email='john@gmail.com') found_user = next(dao.get_by_name(mock_db, unknown_user), None) assert found_user is None A: Using @funnydman's example. 
client.py from pymongo import MongoClient class MongoDB(object): def __init__(self) -> None: self.MONGO_URI ='mongodb://localhost:27018' self.client = MongoClient(self.MONGO_URI) self.default_db = self.client.db def ingest_one(self, document: any, collection_name: str, db_name: str = None): if document: db_name = self.client[db_name] if db_name else self.default_db return db_name[collection_name].insert_one(document).inserted_id def find(self, query: dict, collection_name: str, db_name: str = None): db_name = self.client[db_name] if db_name else self.default_db return db_name[collection_name].find(query) test_client.py import mongomock import pytest import pytz import datetime from dataclasses import dataclass from db.mongo_db import MongoDB localtz = pytz.timezone('your_time_zone') @dataclass class Client: document: dict class TestMongoDB: @pytest.fixture def client(self): yield Client(document={ "name": "Juan Roman", "requestDate": str(datetime.datetime.now(localtz)) }) @pytest.fixture def mongo_db(self): yield MongoDB() @pytest.fixture def client_mock(self): yield mongomock.MongoClient() @pytest.fixture def mock_db(self, client_mock): yield client_mock.db def test_ingest_one(self, mock_db, mongo_db, client): id_client = mongo_db.ingest_one(client.document, mock_db.collection.name, mock_db.name) stored_obj = mongo_db.find({'_id': id_client}, mock_db.collection.name, mock_db.name) assert [obj for obj in stored_obj if str(obj['_id']) == str(id_client)]
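One more compact variant of the mongomock approach, sketched under the assumption that the Dao code from the question lives in a module called dao (a hypothetical name) together with its module-level db handle: import unittest from unittest import mock from collections import namedtuple import mongomock import dao # hypothetical module containing Dao and the module-level `db` FakeUser = namedtuple('FakeUser', ['name', 'email']) class DaoTest(unittest.TestCase): def setUp(self): # Swap the real connection for an in-memory mongomock database. self.fake_db = mongomock.MongoClient().db self.patcher = mock.patch.object(dao, 'db', self.fake_db) self.patcher.start() def tearDown(self): self.patcher.stop() def test_getbyname_finds_seeded_user(self): self.fake_db.users.insert_one({'name': 'Kelly', 'email': 'kelly@gmail.com'}) found = list(dao.Dao().getbyname(FakeUser('Kelly', 'kelly@gmail.com'))) self.assertEqual(found[0]['email'], 'kelly@gmail.com')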
How to mock mongodb for python unittests?
I am using mock module for Python 2.7 to mock my other functions and using unittest for writing unit tests. I am wondering if mocking the MongoDB is different than using mock functionality (mock.patch a function that is being called?) Or I need to use another different package for that purpose? I do not think I want to have a test mongodb instance running. All I want is some tempo data and being able to call pymongo functionality. I am just a bit lost in thinking of is there a way to write a mock for a module (like pymongo), or anything is achievable by mock module. So appreciate if you could provide an example or tutorial on this. Code to Test from pymongo import MongoClient monog_url = 'mongodb://localhost:27017' client = MongoClient(monog_url) db = client.db class Dao(object): def __init__(self): pass def save(self, user): db_doc = { 'name': user.name, 'email': user.email } db.users.save(db_doc) def getbyname(self, user): db_doc = { 'name': user.name, } return db.users.find(db_doc) To test this, I do not really want a test mongodb up and running! But also, I think I do not want to mock db.userssave and db.users.find because I want to actually be able to retrieve the data that I saved and make sure it is in the db. I think I need to create some fixtures per models that are in my memory and work with them. Just do I need an external tool to do so? I am thinking of keeping some fake data like this, just do not know how to properly deal with it. users = { {'name' : 'Kelly', 'email' : 'kelly@gmail.com'}, {'name': 'Sam', 'email': 'sam@gmail.com'} }
[ "I recommend using mongomock for mocking mongodb. It's basically an in-memory mongodb with pymongo interface and made specifically for this purpose.\nhttps://github.com/mongomock/mongomock\n", "You can also do this if you're just doing something simple, and you don't really need to retrieve by field.\n@mock.patch(\"pymongo.collection.Collection.find\")\ndef test_name(self, mock_find):\n mock_find.return_value = {'name' : 'Kelly', 'email' : 'kelly@gmail.com'}\n # rest of test\n\n", "You can certainly mock PyMongo, but I recommend mocking the MongoDB server itself. I've written a pure-Python emulator for MongoDB that can control fully, which responds to MongoDB Wire Protocol messages however you choose:\nhttp://mockupdb.readthedocs.io/tutorial.html\nHere's an example of using MockupDB with a Python application:\nhttps://emptysqua.re/blog/test-mongodb-failures-mockupdb/\nIt requires intimate knowledge of the MongoDB wire protocol, but that's a useful skill to acquire anyway.\n", "Adding to @mirthbottle answer, if you want to access attribute of mongo object as a field you can do it as,\nclass MongoDummybject:\n def __init__(self, _data):\n for _d in _data:\n setattr(self, _d, _data[_d])\n\nreturn_data = {'name' : 'Nishant', 'email' : 'nishant@gmail.com'}\n\n@mock.patch(\"pymongo.collection.Collection.find\")\ndef test_name(self, mock_find):\n mock_find.return_value = MongoDummybject(return_data)\n\n", "For unit testing exception wrapped within custom exception, patch functions(e.g.bulk_write) within Collection of mongomock\n@mock.patch(\"mongomock.collection.Collection.bulk_write\", side_effect=BulkWriteError({}))\ndef test_bulk_wrt_err(self, blk_wrt_err):\n with self.assertRaises(SpecialBulkWriteExcep) as context:\n add_user()\n\nSample code here\n", "Mongomock is recommended way for testing, here's a runnable example to get started:\nclient.py\nfrom dataclasses import dataclass\n\nimport pymongo\n\nmonog_url = 'mongodb://localhost:27018'\nclient = pymongo.MongoClient(monog_url)\ndb = client.db\n\n\n@dataclass\nclass User:\n name: str\n email: str\n\n\nclass Dao:\n\n def save(self, db, user):\n db_doc = {\n 'name': user.name,\n 'email': user.email\n }\n return db.users.insert_one(db_doc).inserted_id\n\n def get_by_name(self, db, user):\n db_doc = {\n 'name': user.name,\n }\n return db.users.find(db_doc)\n\ntest_client.py\nimport mongomock\nimport pytest\n\nfrom client import Dao, User\n\n\nclass TestDao:\n\n @pytest.fixture\n def user(self):\n yield User(name='John', email='test@gmail.com')\n\n @pytest.fixture\n def dao(self):\n yield Dao()\n\n @pytest.fixture\n def client_mock(self):\n yield mongomock.MongoClient()\n\n @pytest.fixture\n def mock_db(self, client_mock):\n yield client_mock.db\n\n def test_save(self, mock_db, dao, user):\n id = dao.save(mock_db, user)\n\n users = list(mock_db.users.find())\n assert [obj for obj in users if obj['_id'] == id]\n assert len(users) == 1\n\n def test_get_by_name(self, mock_db, dao, user):\n dao.save(mock_db, user)\n found_user = next(dao.get_by_name(mock_db, user))\n found_user.pop('_id')\n assert found_user == user.__dict__\n\n unknown_user = User(name='Smith', email='john@gmail.com')\n found_user = next(dao.get_by_name(mock_db, unknown_user), None)\n assert found_user is None\n\n", "Using @funnydman's example.\nclient.py\nfrom pymongo import MongoClient\n\n\nclass MongoDB(object):\n def __init__(self) -> None:\n self.MONGO_URI ='mongodb://localhost:27018' \n self.client = MongoClient(self.MONGO_URI)\n self.default_db = self.client.db\n\n def 
ingest_one(self, document: any, collection_name: str, db_name: str = None):\n\n if document:\n db_name = self.client[db_name] if db_name else self.default_db\n return db_name[collection_name].insert_one(document).inserted_id\n\n def find(self, query: dict, collection_name: str, db_name: str = None):\n db_name = self.client[db_name] if db_name else self.default_db\n return db_name[collection_name].find(query)\n\ntest_client.py\nimport mongomock\nimport pytest\nimport pytz\nimport datetime\n\nfrom dataclasses import dataclass\n\nfrom db.mongo_db import MongoDB\n\n\nlocaltz = pytz.timezone('your_time_zone')\n\n\n@dataclass\nclass Client:\n document: dict\n\n\nclass TestMongoDB:\n\n @pytest.fixture\n def client(self):\n yield Client(document={\n \"name\": \"Juan Roman\",\n \"requestDate\": str(datetime.datetime.now(localtz))\n })\n\n @pytest.fixture\n def mongo_db(self):\n yield MongoDB()\n\n @pytest.fixture\n def client_mock(self):\n yield mongomock.MongoClient()\n\n @pytest.fixture\n def mock_db(self, client_mock):\n yield client_mock.db\n\n def test_ingest_one(self, mock_db, mongo_db, client):\n id_client = mongo_db.ingest_one(client.document, mock_db.collection.name, mock_db.name)\n stored_obj = mongo_db.find({'_id': id_client}, mock_db.collection.name, mock_db.name)\n\n assert [obj for obj in stored_obj if str(obj['_id']) == str(id_client)]\n\n" ]
[ 22, 8, 2, 2, 1, 0, 0 ]
[]
[]
[ "mocking", "mongodb", "pymongo", "python", "unit_testing" ]
stackoverflow_0042239241_mocking_mongodb_pymongo_python_unit_testing.txt
Q: Run multiple terminals from python script and execute commands (Ubuntu) What I have is a text file containing all items that need to be deleted from an online app. Every item that needs to be deleted has to be sent 1 at a time. To make deletion process faster, I divide the items in text file in multiple text files and run the script in multiple terminals (~130 for deletion time to be under 30 minutes for ~7000 items). This is the code of the deletion script: from fileinput import filename from WitApiClient import WitApiClient import os dirname = os.path.dirname(__file__) parent_dirname = os.path.dirname(dirname) token = input("Enter the token") file_name = os.path.join(parent_dirname, 'data/deletion_pair.txt') with open(file_name, encoding="utf-8") as file: templates = [line.strip() for line in file.readlines()] for template in templates: entity, keyword = template.split(", ") print(entity, keyword) resp = WitApiClient(token).delete_keyword(entity, keyword) print(resp) So, I divide the items in deletion_pair.txt and run this script multiple times in new terminals (~130 terminals). Is there a way to automate this process or do in more efficient manner? A: I used threading to run multiple functions simultaneously: from fileinput import filename from WitApiClient import WitApiClient import os from threading import Thread dirname = os.path.dirname(__file__) parent_dirname = os.path.dirname(dirname) token = input("Enter the token") file_name = os.path.join(parent_dirname, 'data/deletion_pair.txt') with open(file_name, encoding="utf-8") as file: templates = [line.strip() for line in file.readlines()] batch_size = 20 chunks = [templates[i: i + batch_size] for i in range(0, len(templates), batch_size)] def delete_function(templates, token): for template in templates: entity, keyword = template.split(", ") print(entity, keyword) resp = WitApiClient(token).delete_keyword(entity, keyword) print(resp) for chunk in chunks: thread = Thread(target=delete_function, args=(chunk, token)) thread.start() It worked! Any one has any other solution, please post or if the same code can be written more efficiently then please do tell. Thanks.
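The hand-rolled Thread loop in the answer can also be phrased with concurrent.futures, which caps the number of worker threads and waits for completion automatically; this sketch assumes the same templates list, token and WitApiClient from the question: from concurrent.futures import ThreadPoolExecutor def delete_one(template): entity, keyword = template.split(", ") return WitApiClient(token).delete_keyword(entity, keyword) # 20 workers gives roughly the same parallelism as batches of 20, without spawning one thread per chunk by hand. with ThreadPoolExecutor(max_workers=20) as pool: for resp in pool.map(delete_one, templates): print(resp)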
Run multiple terminals from python script and execute commands (Ubuntu)
What I have is a text file containing all items that need to be deleted from an online app. Every item that needs to be deleted has to be sent one at a time. To make the deletion process faster, I divide the items in the text file into multiple text files and run the script in multiple terminals (~130, so the deletion time stays under 30 minutes for ~7000 items). This is the code of the deletion script: from fileinput import filename from WitApiClient import WitApiClient import os dirname = os.path.dirname(__file__) parent_dirname = os.path.dirname(dirname) token = input("Enter the token") file_name = os.path.join(parent_dirname, 'data/deletion_pair.txt') with open(file_name, encoding="utf-8") as file: templates = [line.strip() for line in file.readlines()] for template in templates: entity, keyword = template.split(", ") print(entity, keyword) resp = WitApiClient(token).delete_keyword(entity, keyword) print(resp) So, I divide the items in deletion_pair.txt and run this script multiple times in new terminals (~130 terminals). Is there a way to automate this process or do it in a more efficient manner?
[ "I used threading to run multiple functions simultaneously:\nfrom fileinput import filename\nfrom WitApiClient import WitApiClient\nimport os\nfrom threading import Thread\n\ndirname = os.path.dirname(__file__)\nparent_dirname = os.path.dirname(dirname)\ntoken = input(\"Enter the token\")\nfile_name = os.path.join(parent_dirname, 'data/deletion_pair.txt')\n\nwith open(file_name, encoding=\"utf-8\") as file:\n templates = [line.strip() for line in file.readlines()]\n\nbatch_size = 20\nchunks = [templates[i: i + batch_size] for i in range(0, len(templates), batch_size)]\n\ndef delete_function(templates, token):\n for template in templates:\n entity, keyword = template.split(\", \")\n print(entity, keyword)\n resp = WitApiClient(token).delete_keyword(entity, keyword)\n print(resp)\n\nfor chunk in chunks:\n thread = Thread(target=delete_function, args=(chunk, token))\n thread.start()\n\nIt worked! Any one has any other solution, please post or if the same code can be written more efficiently then please do tell. Thanks.\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074553769_python_python_3.x.txt
Q: Python requests SSL error - certificate verify failed This code import requests requests.get("https://hcaidcs.phe.org.uk/WebPages/GeneralHomePage.aspx") is giving me this error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777) I know practically nothing about SSL, but I've tried downloading the site's certificate and pointing to that file using the verify option, but it hasn't worked. Am I missing something? A: As already pointed out in a comment: the site has a bad SSL implementation as can be seen from the SSLLabs report. The main part of this report regarding your problem is: This server's certificate chain is incomplete. Grade capped to B. This means that the server is not sending the full certificate chain as is needed to verify the certificate. This means you need to add the missing certificates yourself when validating. For this you need to include the PEM for the missing chain certificate C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert SHA2 High Assurance Server CA and also for the root CA C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert High Assurance EV Root CA info a file my_trust_store.pem and then you can call: requests.get("https://...", verify='my_trust_store.pem') ... but I've tried downloading the site's certificate and pointing to that file using the verify option This will not work with normal leaf certificates. Since the SSL stack of Python is based on OpenSSL and OpenSSL expects only trusted certificate authorities in the trust store (i.e. given with verify) and a server certificate is not CA certificate it will not help to add it to the trust store. A: cat institution-certificate.pem >> venv/lib/python3.9/site-packages/certifi/cacert.pem This should solve the problem if your network requires a CA A: using the certifi doesn't seem to be implied, so i'll show you what made my solution: import urllib, urllib2, ssl import certifi request = urllib2.Request(url=url) kw = dict() if url.startswith('https://'): certifi_context = ssl.create_default_context(cafile=certifi.where()) kw.update(context=certifi_context) urllib2.urlopen(request, **kw) i found this solution and more on RealPython, here
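A short sketch of the trust-store route from the top answer, assuming the missing DigiCert intermediate and root certificates have been saved in PEM form to a local file named my_trust_store.pem (the file name is just an example): import requests url = "https://hcaidcs.phe.org.uk/WebPages/GeneralHomePage.aspx" # Point requests at a CA bundle containing the chain certificates the server fails to send itself. resp = requests.get(url, verify="my_trust_store.pem") print(resp.status_code)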
Python requests SSL error - certificate verify failed
This code import requests requests.get("https://hcaidcs.phe.org.uk/WebPages/GeneralHomePage.aspx") is giving me this error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777) I know practically nothing about SSL, but I've tried downloading the site's certificate and pointing to that file using the verify option, but it hasn't worked. Am I missing something?
[ "As already pointed out in a comment: the site has a bad SSL implementation as can be seen from the SSLLabs report. The main part of this report regarding your problem is:\n\nThis server's certificate chain is incomplete. Grade capped to B.\n\nThis means that the server is not sending the full certificate chain as is needed to verify the certificate. This means you need to add the missing certificates yourself when validating. For this you need to include the PEM for the missing chain certificate C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert SHA2 High Assurance Server CA and also for the root CA C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert High Assurance EV Root CA info a file my_trust_store.pem and then you can call:\nrequests.get(\"https://...\", verify='my_trust_store.pem')\n\n\n... but I've tried downloading the site's certificate and pointing to that file using the verify option\n\nThis will not work with normal leaf certificates. Since the SSL stack of Python is based on OpenSSL and OpenSSL expects only trusted certificate authorities in the trust store (i.e. given with verify) and a server certificate is not CA certificate it will not help to add it to the trust store.\n", "cat institution-certificate.pem >> venv/lib/python3.9/site-packages/certifi/cacert.pem\n\nThis should solve the problem if your network requires a CA\n", "using the certifi doesn't seem to be implied, so i'll show you what made my solution:\nimport urllib, urllib2, ssl\nimport certifi\n\nrequest = urllib2.Request(url=url)\nkw = dict()\nif url.startswith('https://'):\n certifi_context = ssl.create_default_context(cafile=certifi.where())\n kw.update(context=certifi_context)\nurllib2.urlopen(request, **kw)\n\ni found this solution and more on RealPython, here\n" ]
[ 43, 2, 0 ]
[ "If you can avoid the certificate verification (not secure), set PYTHONHTTPSVERIFY environment variable to 0:\nexport PYTHONHTTPSVERIFY=0\n\nThis will skip the certificate verification.\n", "import requests\nhtml = requests.get(\"https://hcaidcs.phe.org.uk/WebPages/GeneralHomePage.aspx\",verify=False).text\n\nYou should write it like this, and I've verified it\n" ]
[ -1, -5 ]
[ "https", "python", "python_requests", "ssl", "ssl_certificate" ]
stackoverflow_0046604114_https_python_python_requests_ssl_ssl_certificate.txt
Q: Pull large amounts of data from a remote server, into a DataFrame To give as much context as I can / is needed, I'm trying to pull some data stored on a remote postgres server (heroku) into a pandas DataFrame, using psycopg2 to connect. I'm interested in two specific tables, users and events, and the connection works fine, because when pulling down the user data import pandas.io.sql as sql # [...] users = sql.read_sql("SELECT * FROM users", conn) after waiting a few seconds, the DataFrame is returned as expected. <class 'pandas.core.frame.DataFrame'> Int64Index: 67458 entries, 0 to 67457 Data columns (total 35 columns): [...] Yet when trying to pull the bigger, heavier events data straight from ipython, after a long time, it just crashes: In [11]: events = sql.read_sql("SELECT * FROM events", conn) vagrant@data-science-toolbox:~$ and when trying from an iPython notebook I get the Dead kernel error The kernel has died, would you like to restart it? If you do not restart the kernel, you will be able to save the notebook, but running code will not work until the notebook is reopened. Update #1: To get a better idea of the size of the events table I'm trying to pull in, here are the number of records and the number of attributes for each: In [11]: sql.read_sql("SELECT count(*) FROM events", conn) Out[11]: count 0 2711453 In [12]: len(sql.read_sql("SELECT * FROM events LIMIT 1", conn).columns) Out[12]: 18 Update #2: Memory is definitely a bottleneck for the current implementation of read_sql: when pulling down the events and trying to run another instance of iPython the result is vagrant@data-science-toolbox:~$ sudo ipython -bash: fork: Cannot allocate memory Update #3: I first tried with a read_sql_chunked implementation that would just return the array of partial DataFrames: def read_sql_chunked(query, conn, nrows, chunksize=1000): start = 0 dfs = [] while start < nrows: df = pd.read_sql("%s LIMIT %s OFFSET %s" % (query, chunksize, start), conn) start += chunksize dfs.append(df) print "Events added: %s to %s of %s" % (start-chunksize, start, nrows) # print "concatenating dfs" return dfs event_dfs = read_sql_chunked("SELECT * FROM events", conn, events_count, 100000) and that works well, but when trying to concatenate the DataFrames, the kernel dies again. And this is after giving the VM 2GB of RAM. Based on Andy's explanation of read_sql vs. read_csv difference in implementation and performance, the next thing I tried was to append the records into a CSV and then read them all into a DataFrame: event_dfs[0].to_csv(path+'new_events.csv', encoding='utf-8') for df in event_dfs[1:]: df.to_csv(path+'new_events.csv', mode='a', header=False, encoding='utf-8') Again, the writing to CSV completes successfully – a 657MB file – but reading from the CSV never completes. How can one approximate how much RAM would be sufficient to read say a 657MB CSV file, since 2GB seem not to be enough? Feels like I'm missing some fundamental understanding of either DataFrames or psycopg2, but I'm stuck, I can't even pinpoint the bottleneck or where to optimize. What's the proper strategy to pull larger amounts of data from a remote (postgres) server? A: I suspect there's a couple of (related) things at play here causing slowness: read_sql is written in python so it's a little slow (especially compared to read_csv, which is written in cython - and carefully implemented for speed!) and it relies on sqlalchemy rather than some (potentially much faster) C-DBAPI. 
The impetus to move to sqlalchmey was to make that move easier in the future (as well as cross-sql-platform support). You may be running out of memory as too many python objects are in memory (this is related to not using a C-DBAPI), but potentially could be addressed... I think the immediate solution is a chunk-based approach (and there is a feature request to have this work natively in pandas read_sql and read_sql_table). EDIT: As of Pandas v0.16.2 this chunk based approach is natively implemented in read_sql. Since you're using postgres you have access the the LIMIT and OFFSET queries, which makes chunking quite easy. (Am I right in thinking these aren't available in all sql languages?) First, get the number of rows (or an estimate) in your table: nrows = con.execute('SELECT count(*) FROM users').fetchone()[0] # also works with an sqlalchemy engine Use this to iterate through the table (for debugging you could add some print statements to confirm that it was working/not crashed!) and then combine the result: def read_sql_chunked(query, con, nrows, chunksize=1000): start = 1 dfs = [] # Note: could probably make this neater with a generator/for loop while start < nrows: df = pd.read_sql("%s LIMIT %s OFFSET %s" % (query, chunksize, start), con) dfs.append(df) return pd.concat(dfs, ignore_index=True) Note: this assumes that the database fits in memory! If it doesn't you'll need to work on each chunk (mapreduce style)... or invest in more memory! A: try to use pandas: mysql_cn = mysql.connector.connect(host='localhost', port=123, user='xyz', passwd='****', db='xy_db')** data= pd.read_sql('SELECT * FROM table;', con=mysql_cn) mysql_cn.close() It worked for me. A: Here is a basic cursor example that might be of help: import psycopg2 note that we have to import the Psycopg2 extras library! import psycopg2.extras import sys def main(): conn_string = "host='localhost' dbname='my_database' user='postgres' password='secret'" ### print the connection string we will use to connect conn = psycopg2.connect(conn_string) ### HERE IS THE IMPORTANT PART, by specifying a name for the cursor ### psycopg2 creates a server-side cursor, which prevents all of the ### records from being downloaded at once from the server. cursor = conn.cursor('cursor_unique_name', cursor_factory=psycopg2.extras.DictCursor) cursor.execute('SELECT * FROM my_table LIMIT 1000') ### Because cursor objects are iterable we can just call 'for - in' on ### the cursor object and the cursor will automatically advance itself ### each iteration. ### This loop should run 1000 times, assuming there are at least 1000 ### records in 'my_table' row_count = 0 for row in cursor: row_count += 1 print "row: %s %s\n" % (row_count, row) if name == "main": main() A: Using https://github.com/sfu-db/connector-x much higher speeds seem to be possible: From their readme: ConnectorX enables you to load data from databases into Python in the fastest and most memory efficient way. What you need is one line of code: import connectorx as cx cx.read_sql("postgresql://username:password@server:port/database", "SELECT * FROM lineitem") Optionally, you can accelerate the data loading using parallelism by specifying a partition column. import connectorx as cx cx.read_sql("postgresql://username:password@server:port/database", "SELECT * FROM lineitem", partition_on="l_orderkey", partition_num=10) The function will partition the query by evenly splitting the specified column to the amount of partitions. 
ConnectorX will assign one thread for each partition to load and write data in parallel. Note: I have not used it myself, but have seen huge improvements by using connector-x in a project of a friend. Not directly relevant to the question, but if the query is more complex, connector-x has some overhead, see the FAQ In that case Arrow as an intermediate destination can be faster. (Arrow can be installed via pip install pyarrow) table = cx.read_sql(db_uri, query, return_type="arrow") # or arrow2 https://github.com/jorgecarleitao/arrow2 df = table.to_pandas(split_blocks=False, date_as_object=False)
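Since pandas 0.16.2 the chunked reading described in the chunking answer is built into read_sql itself; a sketch reusing the question's connection, with chunksize so only one slice of events needs to be turned into a DataFrame at a time: import pandas as pd total_rows = 0 for chunk in pd.read_sql("SELECT * FROM events", conn, chunksize=100000): # Process each partial DataFrame here instead of concatenating everything into one huge frame. total_rows += len(chunk) print("rows processed:", total_rows) Note that depending on the driver, a server-side cursor may still be needed to keep the database adapter itself from buffering the full result set client-side.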
Pull large amounts of data from a remote server, into a DataFrame
To give as much context as I can / is needed, I'm trying to pull some data stored on a remote postgres server (heroku) into a pandas DataFrame, using psycopg2 to connect. I'm interested in two specific tables, users and events, and the connection works fine, because when pulling down the user data import pandas.io.sql as sql # [...] users = sql.read_sql("SELECT * FROM users", conn) after waiting a few seconds, the DataFrame is returned as expected. <class 'pandas.core.frame.DataFrame'> Int64Index: 67458 entries, 0 to 67457 Data columns (total 35 columns): [...] Yet when trying to pull the bigger, heavier events data straight from ipython, after a long time, it just crashes: In [11]: events = sql.read_sql("SELECT * FROM events", conn) vagrant@data-science-toolbox:~$ and when trying from an iPython notebook I get the Dead kernel error The kernel has died, would you like to restart it? If you do not restart the kernel, you will be able to save the notebook, but running code will not work until the notebook is reopened. Update #1: To get a better idea of the size of the events table I'm trying to pull in, here are the number of records and the number of attributes for each: In [11]: sql.read_sql("SELECT count(*) FROM events", conn) Out[11]: count 0 2711453 In [12]: len(sql.read_sql("SELECT * FROM events LIMIT 1", conn).columns) Out[12]: 18 Update #2: Memory is definitely a bottleneck for the current implementation of read_sql: when pulling down the events and trying to run another instance of iPython the result is vagrant@data-science-toolbox:~$ sudo ipython -bash: fork: Cannot allocate memory Update #3: I first tried with a read_sql_chunked implementation that would just return the array of partial DataFrames: def read_sql_chunked(query, conn, nrows, chunksize=1000): start = 0 dfs = [] while start < nrows: df = pd.read_sql("%s LIMIT %s OFFSET %s" % (query, chunksize, start), conn) start += chunksize dfs.append(df) print "Events added: %s to %s of %s" % (start-chunksize, start, nrows) # print "concatenating dfs" return dfs event_dfs = read_sql_chunked("SELECT * FROM events", conn, events_count, 100000) and that works well, but when trying to concatenate the DataFrames, the kernel dies again. And this is after giving the VM 2GB of RAM. Based on Andy's explanation of read_sql vs. read_csv difference in implementation and performance, the next thing I tried was to append the records into a CSV and then read them all into a DataFrame: event_dfs[0].to_csv(path+'new_events.csv', encoding='utf-8') for df in event_dfs[1:]: df.to_csv(path+'new_events.csv', mode='a', header=False, encoding='utf-8') Again, the writing to CSV completes successfully – a 657MB file – but reading from the CSV never completes. How can one approximate how much RAM would be sufficient to read say a 657MB CSV file, since 2GB seem not to be enough? Feels like I'm missing some fundamental understanding of either DataFrames or psycopg2, but I'm stuck, I can't even pinpoint the bottleneck or where to optimize. What's the proper strategy to pull larger amounts of data from a remote (postgres) server?
[ "I suspect there's a couple of (related) things at play here causing slowness:\n\nread_sql is written in python so it's a little slow (especially compared to read_csv, which is written in cython - and carefully implemented for speed!) and it relies on sqlalchemy rather than some (potentially much faster) C-DBAPI. The impetus to move to sqlalchmey was to make that move easier in the future (as well as cross-sql-platform support).\nYou may be running out of memory as too many python objects are in memory (this is related to not using a C-DBAPI), but potentially could be addressed...\n\nI think the immediate solution is a chunk-based approach (and there is a feature request to have this work natively in pandas read_sql and read_sql_table).\nEDIT: As of Pandas v0.16.2 this chunk based approach is natively implemented in read_sql.\n\nSince you're using postgres you have access the the LIMIT and OFFSET queries, which makes chunking quite easy. (Am I right in thinking these aren't available in all sql languages?)\nFirst, get the number of rows (or an estimate) in your table:\nnrows = con.execute('SELECT count(*) FROM users').fetchone()[0] # also works with an sqlalchemy engine\n\nUse this to iterate through the table (for debugging you could add some print statements to confirm that it was working/not crashed!) and then combine the result:\ndef read_sql_chunked(query, con, nrows, chunksize=1000):\n start = 1\n dfs = [] # Note: could probably make this neater with a generator/for loop\n while start < nrows:\n df = pd.read_sql(\"%s LIMIT %s OFFSET %s\" % (query, chunksize, start), con)\n dfs.append(df)\n return pd.concat(dfs, ignore_index=True)\n\nNote: this assumes that the database fits in memory! If it doesn't you'll need to work on each chunk (mapreduce style)... 
or invest in more memory!\n", "try to use pandas:\nmysql_cn = mysql.connector.connect(host='localhost', port=123, user='xyz', passwd='****', db='xy_db')**\n\ndata= pd.read_sql('SELECT * FROM table;', con=mysql_cn)\n\nmysql_cn.close()\n\nIt worked for me.\n", "Here is a basic cursor example that might be of help:\nimport psycopg2\nnote that we have to import the Psycopg2 extras library!\nimport psycopg2.extras\nimport sys\ndef main():\n conn_string = \"host='localhost' dbname='my_database' user='postgres' password='secret'\"\n ### print the connection string we will use to connect\nconn = psycopg2.connect(conn_string)\n\n### HERE IS THE IMPORTANT PART, by specifying a name for the cursor\n### psycopg2 creates a server-side cursor, which prevents all of the\n### records from being downloaded at once from the server.\ncursor = conn.cursor('cursor_unique_name', cursor_factory=psycopg2.extras.DictCursor)\ncursor.execute('SELECT * FROM my_table LIMIT 1000')\n\n### Because cursor objects are iterable we can just call 'for - in' on\n### the cursor object and the cursor will automatically advance itself\n### each iteration.\n### This loop should run 1000 times, assuming there are at least 1000\n### records in 'my_table'\nrow_count = 0\nfor row in cursor:\n row_count += 1\n print \"row: %s %s\\n\" % (row_count, row)\n\nif name == \"main\":\n main()\n", "Using https://github.com/sfu-db/connector-x much higher speeds seem to be possible:\nFrom their readme:\n\nConnectorX enables you to load data from databases into Python in the fastest and most memory efficient way.\nWhat you need is one line of code:\nimport connectorx as cx\n\ncx.read_sql(\"postgresql://username:password@server:port/database\", \"SELECT * FROM lineitem\")\n\nOptionally, you can accelerate the data loading using parallelism by specifying a partition column.\nimport connectorx as cx\n\ncx.read_sql(\"postgresql://username:password@server:port/database\", \"SELECT * FROM lineitem\", partition_on=\"l_orderkey\", partition_num=10)\n\nThe function will partition the query by evenly splitting the specified column to the amount of partitions.\nConnectorX will assign one thread for each partition to load and write data in parallel.\n\nNote: I have not used it myself, but have seen huge improvements by using connector-x in a project of a friend.\n\nNot directly relevant to the question, but if the query is more complex, connector-x has some overhead, see the FAQ\nIn that case Arrow as an intermediate destination can be faster.\n(Arrow can be installed via pip install pyarrow)\ntable = cx.read_sql(db_uri, query, return_type=\"arrow\") # or arrow2 https://github.com/jorgecarleitao/arrow2\ndf = table.to_pandas(split_blocks=False, date_as_object=False)\n\n" ]
[ 5, 0, 0, 0 ]
[]
[]
[ "pandas", "postgresql", "psycopg2", "python" ]
stackoverflow_0025633830_pandas_postgresql_psycopg2_python.txt
Q: Assign a variable to none if the resulting dataframe is empty after filtering columns I'm trying to get values in column z that contains null values or integers: df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': [2, 10, 13, 18], 'Z': [3, None, 5, None]}) a = df[(df.X == 1) & (df.Y == 2)].Z.item() print(a) #output: 3 b = df[(df.X == 7) & (df.Y == 18)].Z.item() print(b) #output: error It throws a value error: can only convert an array of size 1 to a Python scalar. Because the data frame resulting from filtering the X and Y columns is empty. I want to assign variable b to None if the data frame is empty. I tried the following, and it works: #checking the length of the dataframe b = df[(df.X == 1) & (df.Y == 2)].Z.item() if (len(df[(df.X == 7) & (df.Y == 18)]) == 1) else None print(b) # output: None Is there a better way to do it? A: One alternative is to use next(..., None), which returns None if the iterator is empty: b = next(iter(df[(df.X == 7) & (df.Y == 18)].Z), None) print(b) # None A: 2 things - your result is empty, hence error - .item() apparently throws an error on empty pd.Series. Secondly - the more canonical way of achieving what you're after would be: >>> b = df.loc[(df.X == 7) & (df.Y == 18), "Z"].values >>> if len(b) == 0: b=None ... Also prevents error .item() generates.
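Another compact variant, using the same df as the question: filter once into a Series and branch on emptiness, which avoids both .item() and next(): import pandas as pd df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': [2, 10, 13, 18], 'Z': [3, None, 5, None]}) s = df.loc[(df.X == 7) & (df.Y == 18), 'Z'] b = s.iloc[0] if not s.empty else None print(b) # None, because no row matches X == 7 and Y == 18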
Assign a variable to none if the resulting dataframe is empty after filtering columns
I'm trying to get values in column Z, which contains null values or integers: df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': [2, 10, 13, 18], 'Z': [3, None, 5, None]}) a = df[(df.X == 1) & (df.Y == 2)].Z.item() print(a) #output: 3 b = df[(df.X == 7) & (df.Y == 18)].Z.item() print(b) #output: error It throws a ValueError: can only convert an array of size 1 to a Python scalar, because the DataFrame resulting from filtering the X and Y columns is empty. I want to assign the variable b to None if the DataFrame is empty. I tried the following, and it works: #checking the length of the dataframe b = df[(df.X == 1) & (df.Y == 2)].Z.item() if (len(df[(df.X == 7) & (df.Y == 18)]) == 1) else None print(b) # output: None Is there a better way to do it?
[ "One alternative is to use next(..., None), which returns None if the iterator is empty:\nb = next(iter(df[(df.X == 7) & (df.Y == 18)].Z), None)\nprint(b)\n# None\n\n", "2 things - your result is empty, hence error - .item() apparently throws an error on empty pd.Series.\nSecondly - the more canonical way of achieving what you're after would be:\n>>> b = df.loc[(df.X == 7) & (df.Y == 18), \"Z\"].values\n>>> if len(b) == 0: b=None\n...\n\nAlso prevents error .item() generates.\n" ]
[ 1, 0 ]
[]
[]
[ "dataframe", "filtering", "pandas", "python" ]
stackoverflow_0074577374_dataframe_filtering_pandas_python.txt
Q: How to generate Sphynx docs when there are imports of folders as modules? I have the following project structure with my code and documentation: ├───docs │ ├───_build │ ├───_static │ ├───... │ ├───conf.py │ ├───index.srt │ ├───make.bat | └───Makefile ├───source │ ├───script1.py │ ├───script2.py | └───script3.py my conf.py: import os import sys sys.path.insert(0, os.path.abspath('../source')) ... extensions = ['sphinx.ext.autodoc'] I am running sphinx-apidoc -o . ../source inside the docs folder. It generates a new file .rst file for each of my scripts. And I am adding modules to the index.rst file After that I'm running make html to generate my documentation. But it's raising a warning and not generating docs for an specific file: WARNING: autodoc: failed to import module 'script3'; the following exception was raised: No module named 'source' I isolate the problem to an import statement made inside script3.py code: from source.scripts2 import MyClass So it seems that since the folder is not a module is raising the error. But I am not sure how to fix it. I have tried to create empty __init__.py file in the project and in the source folder, but the error persist How could I fix it? A: I had the same error with similar structure making output directory... done building [mo]: targets for 0 po files that are out of date building [html]: targets for 3 source files that are out of date updating environment: [new config] 3 added, 0 changed, 0 removed reading sources... [100%] source WARNING: autodoc: failed to import module 'my_python_file' from module 'source'; the following exception was raised: No module named 'source' ... WARNING: autodoc: failed to import module 'source'; the following exception was raised: No module named 'source' looking for now-outdated files... none found ... The solution was to change the sys.path.insert() at conf.py to point one directory up (project root) sys.path.insert(0, os.path.abspath('..')) And also don't forget to add __init__.py file to ./source/
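A minimal conf.py sketch matching the layout in the question (docs/ and source/ as siblings), pointing sys.path at the project root so the source package is importable during autodoc; only the path setup is shown, other settings are omitted: # docs/conf.py import os import sys # One level up from docs/ is the project root that contains source/. sys.path.insert(0, os.path.abspath('..')) extensions = ['sphinx.ext.autodoc'] An empty source/__init__.py is still needed so that source is treated as an importable package.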
How to generate Sphinx docs when there are imports of folders as modules?
I have the following project structure with my code and documentation: ├───docs │ ├───_build │ ├───_static │ ├───... │ ├───conf.py │ ├───index.rst │ ├───make.bat | └───Makefile ├───source │ ├───script1.py │ ├───script2.py | └───script3.py my conf.py: import os import sys sys.path.insert(0, os.path.abspath('../source')) ... extensions = ['sphinx.ext.autodoc'] I am running sphinx-apidoc -o . ../source inside the docs folder. It generates a new .rst file for each of my scripts, and I am adding the modules to the index.rst file. After that I'm running make html to generate my documentation, but it raises a warning and does not generate docs for a specific file: WARNING: autodoc: failed to import module 'script3'; the following exception was raised: No module named 'source' I isolated the problem to an import statement made inside the script3.py code: from source.scripts2 import MyClass So it seems that since the folder is not a module, it raises the error, but I am not sure how to fix it. I have tried creating an empty __init__.py file in the project and in the source folder, but the error persists. How could I fix it?
[ "I had the same error with similar structure\nmaking output directory... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 3 source files that are out of date\nupdating environment: [new config] 3 added, 0 changed, 0 removed\nreading sources... [100%] source \nWARNING: autodoc: failed to import module 'my_python_file' from module 'source'; the following exception was raised:\nNo module named 'source'\n...\nWARNING: autodoc: failed to import module 'source'; the following exception was raised:\nNo module named 'source'\nlooking for now-outdated files... none found\n...\n\nThe solution was to change the sys.path.insert() at conf.py to point one directory up (project root)\nsys.path.insert(0, os.path.abspath('..'))\n\nAnd also don't forget to add __init__.py file to ./source/\n" ]
[ 0 ]
[]
[]
[ "python", "python_sphinx" ]
stackoverflow_0064213870_python_python_sphinx.txt
Q: Pydantic nested model field throws value_error.missing Having following code running fine with Django and Ninja API framework. Schema for data validation: class OfferBase(Schema): """Base offer schema.""" id: int currency_to_sell_id: int currency_to_buy_id: int amount: float exchange_rate: float user_id: int added_time: datetime = None active_state: bool = True class DealBase(Schema): """Base deal schema.""" id: int seller_id: int buyer_id: int offer_id: int deal_time: datetime = None class UserExtraDataOut(UserBase): """Extended user schema with extra data response.""" offers: List[OfferBase] sold: List[DealBase] bought: List[DealBase] Endpoint with user object. Please, note, User model is not modified: @api.get("/users/{user_id}", response=UserExtraDataOut, tags=["User"]) def get_user_info(request, user_id): """Get user profile information with offers and deals.""" user = get_object_or_404(User, pk=user_id) return user Deal model: class Deal(models.Model): """Deal model.""" seller = models.ForeignKey( to=User, related_name="sold", on_delete=models.PROTECT, verbose_name="Seller" ) buyer = models.ForeignKey( to=User, related_name="bought", on_delete=models.PROTECT, verbose_name="Buyer" ) offer = models.ForeignKey( to="Offer", on_delete=models.PROTECT, verbose_name="Offer" ) deal_time = models.DateTimeField(auto_now=True, verbose_name="Time") It gives me this response: { "id": 0, "username": "string", "first_name": "string", "last_name": "string", "email": "string", "offers": [ { "id": 0, "currency_to_sell_id": 0, "currency_to_buy_id": 0, "amount": 0, "exchange_rate": 0, "user_id": 0, "added_time": "2022-11-22T18:37:47.573Z", "active_state": true } ], "sold": [ { "id": 0, "seller_id": 0, "buyer_id": 0, "offer_id": 0, "deal_time": "2022-11-22T18:37:47.573Z" } ], "bought": [ { "id": 0, "seller_id": 0, "buyer_id": 0, "offer_id": 0, "deal_time": "2022-11-22T18:37:47.573Z" } ] } But, I want sold and bought fields to be nested to deals. I do it with this code: class DealExtraDataOut(Schema): """Extended user schema with extra data response.""" sold: List[DealBase] bought: List[DealBase] class UserExtraDataOut(UserBase): """Extended user schema with extra data response.""" offers: List[OfferBase] deals: DealExtraDataOut # extend this from DealExtraDataOut The scheme in Swagger is correct: ... "deals": { "sold": [ { ... } ], "bought": [ { ... } ] } But execution throws me an error: pydantic.error_wrappers.ValidationError: 1 validation error for NinjaResponseSchema response -> deals field required (type=value_error.missing) What is wrong? A: This is what I was looking for: class OfferWithDealOut(OfferBase): """Offer schema for POST method.""" deal: List[DealBase] = Field(..., alias="deal_set") class UserBase(Schema): """Base user schema for GET method.""" id: int username: str first_name: str last_name: str email: str class UserExtraDataOut(UserBase): """Extended user schema with extra data response.""" offers: List[OfferWithDealOut]
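One hedged sketch of populating the nested deals field: django-ninja schemas support resolve_<field> static methods, so the grouping can be computed from the User instance itself (this assumes the related names sold and bought from the Deal model, and that the resolver feature is available in the installed django-ninja version): from typing import List from ninja import Schema class DealExtraDataOut(Schema): sold: List[DealBase] bought: List[DealBase] class UserExtraDataOut(UserBase): offers: List[OfferBase] deals: DealExtraDataOut @staticmethod def resolve_deals(obj): # obj is the User instance; build the nested structure explicitly, so validation does not look for a literal `deals` attribute. return {"sold": list(obj.sold.all()), "bought": list(obj.bought.all())}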
Pydantic nested model field throws value_error.missing
Having following code running fine with Django and Ninja API framework. Schema for data validation: class OfferBase(Schema): """Base offer schema.""" id: int currency_to_sell_id: int currency_to_buy_id: int amount: float exchange_rate: float user_id: int added_time: datetime = None active_state: bool = True class DealBase(Schema): """Base deal schema.""" id: int seller_id: int buyer_id: int offer_id: int deal_time: datetime = None class UserExtraDataOut(UserBase): """Extended user schema with extra data response.""" offers: List[OfferBase] sold: List[DealBase] bought: List[DealBase] Endpoint with user object. Please, note, User model is not modified: @api.get("/users/{user_id}", response=UserExtraDataOut, tags=["User"]) def get_user_info(request, user_id): """Get user profile information with offers and deals.""" user = get_object_or_404(User, pk=user_id) return user Deal model: class Deal(models.Model): """Deal model.""" seller = models.ForeignKey( to=User, related_name="sold", on_delete=models.PROTECT, verbose_name="Seller" ) buyer = models.ForeignKey( to=User, related_name="bought", on_delete=models.PROTECT, verbose_name="Buyer" ) offer = models.ForeignKey( to="Offer", on_delete=models.PROTECT, verbose_name="Offer" ) deal_time = models.DateTimeField(auto_now=True, verbose_name="Time") It gives me this response: { "id": 0, "username": "string", "first_name": "string", "last_name": "string", "email": "string", "offers": [ { "id": 0, "currency_to_sell_id": 0, "currency_to_buy_id": 0, "amount": 0, "exchange_rate": 0, "user_id": 0, "added_time": "2022-11-22T18:37:47.573Z", "active_state": true } ], "sold": [ { "id": 0, "seller_id": 0, "buyer_id": 0, "offer_id": 0, "deal_time": "2022-11-22T18:37:47.573Z" } ], "bought": [ { "id": 0, "seller_id": 0, "buyer_id": 0, "offer_id": 0, "deal_time": "2022-11-22T18:37:47.573Z" } ] } But, I want sold and bought fields to be nested to deals. I do it with this code: class DealExtraDataOut(Schema): """Extended user schema with extra data response.""" sold: List[DealBase] bought: List[DealBase] class UserExtraDataOut(UserBase): """Extended user schema with extra data response.""" offers: List[OfferBase] deals: DealExtraDataOut # extend this from DealExtraDataOut The scheme in Swagger is correct: ... "deals": { "sold": [ { ... } ], "bought": [ { ... } ] } But execution throws me an error: pydantic.error_wrappers.ValidationError: 1 validation error for NinjaResponseSchema response -> deals field required (type=value_error.missing) What is wrong?
[ "This is what I was looking for:\nclass OfferWithDealOut(OfferBase):\n \"\"\"Offer schema for POST method.\"\"\"\n\n deal: List[DealBase] = Field(..., alias=\"deal_set\")\n\nclass UserBase(Schema):\n \"\"\"Base user schema for GET method.\"\"\"\n\n id: int\n username: str\n first_name: str\n last_name: str\n email: str\n\n\nclass UserExtraDataOut(UserBase):\n \"\"\"Extended user schema with extra data response.\"\"\"\n\n offers: List[OfferWithDealOut]\n\n" ]
[ 0 ]
[]
[]
[ "django", "pydantic", "python" ]
stackoverflow_0074537704_django_pydantic_python.txt
Q: NaN when converting df to a series I have a dataframe with OHLC data. I need to get the close price into the pandas series, using the timestamp column as the index. I am reading from a sqlite db into my df: conn = sql.connect('allStockData.db') price = pd.read_sql_query("SELECT * from ohlc_minutes", conn) price['timestamp'] = pd.to_datetime(price['timestamp']) print(price) Which returns: timestamp open high low close volume trade_count vwap symbol volume_10_day 0 2022-09-16 08:00:00+00:00 3.19 3.570 3.19 3.350 66475 458 3.404240 AAOI NaN 1 2022-09-16 08:05:00+00:00 3.35 3.440 3.33 3.430 28925 298 3.381131 AAOI NaN 2 2022-09-16 08:10:00+00:00 3.44 3.520 3.35 3.400 62901 643 3.445096 AAOI NaN 3 2022-09-16 08:15:00+00:00 3.37 3.390 3.31 3.360 17943 184 3.339721 AAOI NaN 4 2022-09-16 08:20:00+00:00 3.36 3.410 3.34 3.400 29123 204 3.383370 AAOI NaN ... ... ... ... ... ... ... ... ... ... ... 8759 2022-09-08 23:35:00+00:00 1.35 1.360 1.35 1.355 3835 10 1.350613 RUBY 515994.5 8760 2022-09-08 23:40:00+00:00 1.36 1.360 1.35 1.350 2780 7 1.353687 RUBY 515994.5 8761 2022-09-08 23:45:00+00:00 1.35 1.355 1.35 1.355 7080 11 1.350424 RUBY 515994.5 8762 2022-09-08 23:50:00+00:00 1.35 1.360 1.33 1.360 11664 30 1.351104 RUBY 515994.5 8763 2022-09-08 23:55:00+00:00 1.36 1.360 1.33 1.340 21394 32 1.348223 RUBY 515994.5 [8764 rows x 10 columns] When I try to get the close into a series with the timestamp: price = pd.Series(price['close'], index=price['timestamp']) It returns a bunch of NaNs: 2022-09-16 08:00:00+00:00 NaN 2022-09-16 08:05:00+00:00 NaN 2022-09-16 08:10:00+00:00 NaN 2022-09-16 08:15:00+00:00 NaN 2022-09-16 08:20:00+00:00 NaN .. 2022-09-08 23:35:00+00:00 NaN 2022-09-08 23:40:00+00:00 NaN 2022-09-08 23:45:00+00:00 NaN 2022-09-08 23:50:00+00:00 NaN 2022-09-08 23:55:00+00:00 NaN Name: close, Length: 8764, dtype: float64 If I remove the index: price = pd.Series(price['close']) The close is returned normally: 0 3.350 1 3.430 2 3.400 3 3.360 4 3.400 ... 8759 1.355 8760 1.350 8761 1.355 8762 1.360 8763 1.340 Name: close, Length: 8764, dtype: float64 How can I return the close column as a pandas series, using my timestamp column as the index? A: It's because price['close'] has it's own index which is incompatible with timestamp. Try use .values instead: price = pd.Series(price['close'].values, index=price['timestamp']) A: I needed to set the timestamp to the index before getting the the close as a series: conn = sql.connect('allStockData.db') price = pd.read_sql_query("SELECT * from ohlc_minutes", conn) price['timestamp'] = pd.to_datetime(price['timestamp']) price = price.set_index('timestamp') print(price) price = pd.Series(price['close']) print(price) Gives: 2022-09-16 08:00:00+00:00 3.350 2022-09-16 08:05:00+00:00 3.430 2022-09-16 08:10:00+00:00 3.400 2022-09-16 08:15:00+00:00 3.360 2022-09-16 08:20:00+00:00 3.400 ... 2022-09-08 23:35:00+00:00 1.355 2022-09-08 23:40:00+00:00 1.350 2022-09-08 23:45:00+00:00 1.355 2022-09-08 23:50:00+00:00 1.360 2022-09-08 23:55:00+00:00 1.340 Name: close, Length: 8764, dtype: float64
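Both answers work because the NaNs come from index alignment: pd.Series(existing_series, index=...) does not reposition values, it reindexes them by label. A small self-contained illustration (the timestamps and prices here are made up):

import pandas as pd

price = pd.DataFrame({
    "timestamp": pd.to_datetime(["2022-09-16 08:00", "2022-09-16 08:05"]),
    "close": [3.35, 3.43],
})

# price["close"] already carries the RangeIndex 0..1. Passing it to pd.Series
# with a different index triggers label alignment, and since no timestamp
# labels exist in the original index, every value becomes NaN.
aligned = pd.Series(price["close"], index=price["timestamp"])
print(aligned)

# Either hand over the raw values so there is nothing to align against...
by_values = pd.Series(price["close"].values, index=price["timestamp"])
print(by_values)

# ...or make the timestamp the index first and then take the column.
by_set_index = price.set_index("timestamp")["close"]
print(by_set_index)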
NaN when converting df to a series
I have a dataframe with OHLC data. I need to get the close price into the pandas series, using the timestamp column as the index. I am reading from a sqlite db into my df: conn = sql.connect('allStockData.db') price = pd.read_sql_query("SELECT * from ohlc_minutes", conn) price['timestamp'] = pd.to_datetime(price['timestamp']) print(price) Which returns: timestamp open high low close volume trade_count vwap symbol volume_10_day 0 2022-09-16 08:00:00+00:00 3.19 3.570 3.19 3.350 66475 458 3.404240 AAOI NaN 1 2022-09-16 08:05:00+00:00 3.35 3.440 3.33 3.430 28925 298 3.381131 AAOI NaN 2 2022-09-16 08:10:00+00:00 3.44 3.520 3.35 3.400 62901 643 3.445096 AAOI NaN 3 2022-09-16 08:15:00+00:00 3.37 3.390 3.31 3.360 17943 184 3.339721 AAOI NaN 4 2022-09-16 08:20:00+00:00 3.36 3.410 3.34 3.400 29123 204 3.383370 AAOI NaN ... ... ... ... ... ... ... ... ... ... ... 8759 2022-09-08 23:35:00+00:00 1.35 1.360 1.35 1.355 3835 10 1.350613 RUBY 515994.5 8760 2022-09-08 23:40:00+00:00 1.36 1.360 1.35 1.350 2780 7 1.353687 RUBY 515994.5 8761 2022-09-08 23:45:00+00:00 1.35 1.355 1.35 1.355 7080 11 1.350424 RUBY 515994.5 8762 2022-09-08 23:50:00+00:00 1.35 1.360 1.33 1.360 11664 30 1.351104 RUBY 515994.5 8763 2022-09-08 23:55:00+00:00 1.36 1.360 1.33 1.340 21394 32 1.348223 RUBY 515994.5 [8764 rows x 10 columns] When I try to get the close into a series with the timestamp: price = pd.Series(price['close'], index=price['timestamp']) It returns a bunch of NaNs: 2022-09-16 08:00:00+00:00 NaN 2022-09-16 08:05:00+00:00 NaN 2022-09-16 08:10:00+00:00 NaN 2022-09-16 08:15:00+00:00 NaN 2022-09-16 08:20:00+00:00 NaN .. 2022-09-08 23:35:00+00:00 NaN 2022-09-08 23:40:00+00:00 NaN 2022-09-08 23:45:00+00:00 NaN 2022-09-08 23:50:00+00:00 NaN 2022-09-08 23:55:00+00:00 NaN Name: close, Length: 8764, dtype: float64 If I remove the index: price = pd.Series(price['close']) The close is returned normally: 0 3.350 1 3.430 2 3.400 3 3.360 4 3.400 ... 8759 1.355 8760 1.350 8761 1.355 8762 1.360 8763 1.340 Name: close, Length: 8764, dtype: float64 How can I return the close column as a pandas series, using my timestamp column as the index?
[ "It's because price['close'] has it's own index which is incompatible with timestamp. Try use .values instead:\nprice = pd.Series(price['close'].values, index=price['timestamp'])\n\n", "I needed to set the timestamp to the index before getting the the close as a series:\nconn = sql.connect('allStockData.db') \nprice = pd.read_sql_query(\"SELECT * from ohlc_minutes\", conn)\nprice['timestamp'] = pd.to_datetime(price['timestamp'])\nprice = price.set_index('timestamp')\nprint(price)\n\nprice = pd.Series(price['close'])\nprint(price)\n\nGives:\n2022-09-16 08:00:00+00:00 3.350\n2022-09-16 08:05:00+00:00 3.430\n2022-09-16 08:10:00+00:00 3.400\n2022-09-16 08:15:00+00:00 3.360\n2022-09-16 08:20:00+00:00 3.400\n ... \n2022-09-08 23:35:00+00:00 1.355\n2022-09-08 23:40:00+00:00 1.350\n2022-09-08 23:45:00+00:00 1.355\n2022-09-08 23:50:00+00:00 1.360\n2022-09-08 23:55:00+00:00 1.340\nName: close, Length: 8764, dtype: float64\n\n" ]
[ 1, 1 ]
[]
[]
[ "numpy", "pandas", "python" ]
stackoverflow_0074577408_numpy_pandas_python.txt
Q: Extracting polynomial coefficients from file in Python I have recently been working with text files that contain data like the following: A = a_0 + a_1*x + ... + a_l*x^l B = b_0 + b_1*x + ... + b_m*x^m . . . G = g_2*x^2 + g_n where l and m are not necessarily the same, and B might not be the longest equation. Is there a way to import the coefficients into a NumPy array, inserting 0s where the coefficients are missing, such that the above example would yield I was initially thinking of using numpy.genfromtxt where the x are filled with 1 (so that only the coefficient survives), but that has the problem that we lose which column and row the coefficient in. Any help is very much appreciated. A: This is very custom type of text you have to write a parser to do so: considering the txt file is read, example here s you probably have to read from file, with open('..') as fid ... s = """A = 2*x + 3*x^5 B = 3 + 2*x + 3*x^5 C = 8 + 20*x + 3*x^9""" like this: equations = s.split('\n') def parse_lines(s): def foo(val): coef, _power,*_ = *val.split('*'), '^' # parse a_0*x^n x, power, *_ = *_power.split('^'), '1' # parse x^n power = 0 if not power else power return int(coef) , int(power) _, eq = s.split('=') return [foo(item) for item in eq.split('+')] indexes = [(idx, *t) for idx, eq in enumerate(equations) for t in parse_lines(eq)] output: (row, val, col) [(0, 2, 1), (0, 3, 5), (1, 3, 0), (1, 2, 1), (1, 3, 5), (2, 8, 0), (2, 20, 1), (2, 3, 9)] you could parse to a format that could be reconstructed to an array. after this it is simple just build array: m, _, n = np.array(d).max(axis=0) m , n = m + 1, n + 1 arr = np.zeros((m,n)) for row, val, col in indexes: arr[row, col] = val output: array([[ 0., 2., 0., 0., 0., 3., 0., 0., 0., 0.], [ 3., 2., 0., 0., 0., 3., 0., 0., 0., 0.], [ 8., 20., 0., 0., 0., 0., 0., 0., 0., 3.]])
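One caveat with the snippet above: the array-building step refers to np.array(d), but d is never defined; it is presumably the indexes list. A corrected, self-contained version of the same idea (the sample equations are made up and integer coefficients are assumed):

import numpy as np

lines = [
    "A = 2*x + 3*x^5",
    "B = 3 + 2*x + 3*x^5",
    "C = 8 + 20*x + 3*x^9",
]

def parse_line(line):
    """Yield (coefficient, power) pairs for one 'NAME = term + term + ...' line."""
    _, eq = line.split("=")
    for term in eq.split("+"):
        term = term.strip()
        if "*" in term:                      # e.g. "3*x^5" or "2*x"
            coef, var = term.split("*")
            power = int(var.split("^")[1]) if "^" in var else 1
        else:                                # bare constant, e.g. "8"
            coef, power = term, 0
        yield int(coef), int(power)

# (row, coefficient, column) triples
indexes = [(row, coef, power)
           for row, line in enumerate(lines)
           for coef, power in parse_line(line)]

n_rows = max(row for row, _, _ in indexes) + 1
n_cols = max(power for _, _, power in indexes) + 1
arr = np.zeros((n_rows, n_cols))
for row, coef, power in indexes:
    arr[row, power] = coef
print(arr)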
Extracting polynomial coefficients from file in Python
I have recently been working with text files that contain data like the following: A = a_0 + a_1*x + ... + a_l*x^l B = b_0 + b_1*x + ... + b_m*x^m . . . G = g_2*x^2 + g_n where l and m are not necessarily the same, and B might not be the longest equation. Is there a way to import the coefficients into a NumPy array, inserting 0s where the coefficients are missing, such that the above example would yield I was initially thinking of using numpy.genfromtxt where the x are filled with 1 (so that only the coefficient survives), but that has the problem that we lose which column and row the coefficient in. Any help is very much appreciated.
[ "This is very custom type of text you have to write a parser to do so:\nconsidering the txt file is read, example here s\nyou probably have to read from file, with open('..') as fid ...\ns = \"\"\"A = 2*x + 3*x^5\nB = 3 + 2*x + 3*x^5\nC = 8 + 20*x + 3*x^9\"\"\"\n\nlike this:\nequations = s.split('\\n')\n\ndef parse_lines(s):\n def foo(val):\n coef, _power,*_ = *val.split('*'), '^' # parse a_0*x^n \n x, power, *_ = *_power.split('^'), '1' # parse x^n\n power = 0 if not power else power \n return int(coef) , int(power)\n \n _, eq = s.split('=')\n return [foo(item) for item in eq.split('+')]\n \n\nindexes = [(idx, *t) for idx, eq in enumerate(equations) for t in parse_lines(eq)]\n\noutput: (row, val, col)\n[(0, 2, 1),\n (0, 3, 5),\n (1, 3, 0),\n (1, 2, 1),\n (1, 3, 5),\n (2, 8, 0),\n (2, 20, 1),\n (2, 3, 9)]\n\nyou could parse to a format that could be reconstructed to an array.\nafter this it is simple just build array:\nm, _, n = np.array(d).max(axis=0)\nm , n = m + 1, n + 1\narr = np.zeros((m,n))\nfor row, val, col in indexes:\n arr[row, col] = val\n\noutput:\narray([[ 0., 2., 0., 0., 0., 3., 0., 0., 0., 0.],\n [ 3., 2., 0., 0., 0., 3., 0., 0., 0., 0.],\n [ 8., 20., 0., 0., 0., 0., 0., 0., 0., 3.]])\n\n" ]
[ 1 ]
[]
[]
[ "numpy", "numpy_ndarray", "python", "python_3.x" ]
stackoverflow_0074575553_numpy_numpy_ndarray_python_python_3.x.txt
Q: How can I implement a working return function in Python So basically, I have a main menu function. The user makes a choice in this menu which is tied to a variable. The main menu function then calls that function. The user chooses a product and its amount there. So it works until here. Then after the user enters everything, I want the user to stay in that main menu until they choose to return to the main menu which is represented by the choice variable. def main_menu(choice=0): while True: choice = int(input(“Make a choice: “)) if choice==1: dishes() if choice==2: desserts() if choice==3: break def dishes(choice=0): print(“1) Buy a dish”) print(“2) Go to main menu”) choice = int(input(“Make a choice: “)) if choice==1: # Select which dish to buy # Select product amount etc. if choice==2: return choice def desserts(choice=0): print(“1) Buy a dish”) print(“2) Go to main menu”) choice = int(input(“Make a choice: “)) if choice==1: # Select which dish to buy # Select product amount etc. if choice==2: return choice main_menu(choice) I don’t want to create a recursion because I cannot terminate the program when I do that. I want to first call a function from the main menu. Then the function should return a value to the main menu but should not call itself again. The main menu function should continue calling that function until the user decides not to. How do I make this work? A: I would refactor the submenus into a while loop: def dishes(): while True: print(“1) Buy a dish”) print(“2) Go to main menu”) choice = int(input(“Make a choice: “)) if choice==1: # Select which dish to buy # Select product amount etc. if choice==2: break return # return a value if that's part of the program Also, I notice that the quotes are "smart" quotes, which python doesn't recognize as regular quotes (not sure if that's just a copy/paste issue with Stack Overflow).
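Putting the pieces together, a runnable reduction of that structure with ASCII quotes (the smart quotes in the original are themselves a SyntaxError) and a made-up purchase step:

def dishes():
    """Sub-menu: loops until the user asks to return to the main menu."""
    while True:
        print("1) Buy a dish")
        print("2) Go to main menu")
        choice = int(input("Make a choice: "))
        if choice == 1:
            amount = int(input("How many? "))   # stand-in for the real purchase flow
            print(f"Bought {amount} dish(es)")
        elif choice == 2:
            break                               # back to main_menu's loop

def main_menu():
    while True:
        print("1) Dishes")
        print("2) Quit")
        choice = int(input("Make a choice: "))
        if choice == 1:
            dishes()        # returns here when the user picks "Go to main menu"
        elif choice == 2:
            break           # terminates the program without recursion

if __name__ == "__main__":
    main_menu()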
How can I implement a working return function in Python
So basically, I have a main menu function. The user makes a choice in this menu which is tied to a variable. The main menu function then calls that function. The user chooses a product and its amount there. So it works until here. Then after the user enters everything, I want the user to stay in that main menu until they choose to return to the main menu which is represented by the choice variable. def main_menu(choice=0): while True: choice = int(input(“Make a choice: “)) if choice==1: dishes() if choice==2: desserts() if choice==3: break def dishes(choice=0): print(“1) Buy a dish”) print(“2) Go to main menu”) choice = int(input(“Make a choice: “)) if choice==1: # Select which dish to buy # Select product amount etc. if choice==2: return choice def desserts(choice=0): print(“1) Buy a dish”) print(“2) Go to main menu”) choice = int(input(“Make a choice: “)) if choice==1: # Select which dish to buy # Select product amount etc. if choice==2: return choice main_menu(choice) I don’t want to create a recursion because I cannot terminate the program when I do that. I want to first call a function from the main menu. Then the function should return a value to the main menu but should not call itself again. The main menu function should continue calling that function until the user decides not to. How do I make this work?
[ "I would refactor the submenus into a while loop:\ndef dishes():\n while True:\n print(“1) Buy a dish”)\n print(“2) Go to main menu”)\n choice = int(input(“Make a choice: “))\n\n if choice==1:\n # Select which dish to buy\n # Select product amount etc.\n if choice==2:\n break\n\n return # return a value if that's part of the program\n\nAlso, I notice that the quotes are \"smart\" quotes, which python doesn't recognize as regular quotes (not sure if that's just a copy/paste issue with Stack Overflow).\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074577196_python_python_3.x.txt
Q: Python - Inherited methods break when overriding __init__ I have a geometric base class ExtrudedSurface and a child class Cylinder, which is a 'kind of' extruded surface. The base class has a method to translate itself (not in-place) by constructing a modified version of itself. I would like to re-use this method by the child class Cylinder, and have it return a new, translated Cylinder. However, as implemented now this does not work because Cylinder has a different __init__ signature which is called in translate. What is the best way to achieve this? How can Cylinder use the inherited method translate and return a new Cylinder object? EDIT 1: I think it has to do with LSP violation but I'm not sure. class ExtrudedSurface: def __init__(self, profile: Curve, direction: Vector, length: float): """ An extruded surface geometry created by sweeping a 3D 'profile' curve along a 'direction' for a given 'length'. """ self.profile = profile self.direction = direction.normalize() self.length = length def translate(self, translation: Vector) -> ExtrudedSurface: """Return a translated version of itself.""" return self.__class__( self.profile.translate(translation), self.length, self.direction ) class Cylinder(ExtrudedSurface): def __init__(self, point: Point, direction: Vector, radius: float, length: float): """Cylinder surface. Args: point: Center point of extruded circle. direction: Direction vector of the cylinder (axis). radius: Cylinder radius. length: Extrusion length in 'direction'. """ direction = direction.normalize() profile = Circle(point, direction, radius) super().__init__(profile, direction, length) A: Short story short: the by-the-book approach there is to override the translate() method, as well, and call the updated constructor from there. Now, you can refactor your class initialization and separate attribute setting from other needed actions, and then create a new class-method to clone an instance with all the attributes from a first instance, and just call this initialization, if needed. If no initialization is needed, just a "clone" method is needed. If you happen to call this method __copy__, then you can use the copy.copy call for that, which is almost as if it was an operator in Python, and it can, by itself, have value for your end-users. Moreover -- if your class requires no initialization besides setting plain attributes, and no calculations at all, copy.copy will just work out of the box with your instances - no extra __copy__ method needed: from copy import copy class ExtrudedSurface: def __init__(self, profile: Curve, direction: Vector, length: float): """ An extruded surface geometry created by sweeping a 3D 'profile' curve along a 'direction' for a given 'length'. """ self.profile = profile self.direction = direction.normalize() self.length = length def translate(self, translation: Vector) -> ExtrudedSurface: """Return a translated version of itself.""" new_profile = self.profile.translate(translation) new_instance = copy(self) new_instance.profile = new_profile return new_instance class Cylinder(ExtrudedSurface): def __init__(self, point: Point, direction: Vector, radius: float, length: float): ... Just attempt to the fact that copy will not recursively copy the attributes, so, if the self.direction vector is a mutable object in the framework you are using, this will keep both clones bound to the same vector, and if it changes in the original, that change will be reflected in the clone. 
By the nature of your code I am assuming everything is immutable there and all transforms create new instances: then this will work. Otherwise, you should also copy the original .direction attribute into the new instance. In time: yes, that is an LSP violation - but I always think that LSP is given more importance than it should when it comes to practical matters. The more generic code I described, should your initialization be more complex, would be: class Base: def __init__(self, base_args): #code to set initial attributes ... # call inner init with possible side effects: self.inner_init() def inner_init(self): # code with side effects (like, attaching instance to a container) # should be written in order to be repeat-proof - calls on the same instance have no effect ... def __copy__(self): new_instance = self.__class__.__new__(self.__class__) new_instance.__dict__.update(self.__dict__) new_instance.inner_init() return new_instance
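To make the copy-based translate concrete, here is a toy, framework-free version; Vector and Profile are minimal stand-ins invented for the example, not the real geometry types:

from __future__ import annotations
import copy

class Vector:
    def __init__(self, x: float, y: float, z: float):
        self.x, self.y, self.z = x, y, z

class Profile:
    """Toy stand-in for the Curve/Circle profile types."""
    def __init__(self, origin: Vector):
        self.origin = origin

    def translate(self, t: Vector) -> Profile:
        o = self.origin
        return Profile(Vector(o.x + t.x, o.y + t.y, o.z + t.z))

class ExtrudedSurface:
    def __init__(self, profile: Profile, direction: Vector, length: float):
        self.profile = profile
        self.direction = direction
        self.length = length

    def translate(self, t: Vector) -> ExtrudedSurface:
        new_instance = copy.copy(self)          # shallow copy keeps the subclass type
        new_instance.profile = self.profile.translate(t)
        return new_instance

class Cylinder(ExtrudedSurface):
    def __init__(self, point: Vector, direction: Vector, radius: float, length: float):
        self.radius = radius
        super().__init__(Profile(point), direction, length)

cyl = Cylinder(Vector(0.0, 0.0, 0.0), Vector(0.0, 0.0, 1.0), radius=2.0, length=5.0)
moved = cyl.translate(Vector(1.0, 0.0, 0.0))
print(type(moved).__name__, moved.radius, moved.profile.origin.x)   # Cylinder 2.0 1.0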
Python - Inherited methods break when overriding __init__
I have a geometric base class ExtrudedSurface and a child class Cylinder, which is a 'kind of' extruded surface. The base class has a method to translate itself (not in-place) by constructing a modified version of itself. I would like to re-use this method by the child class Cylinder, and have it return a new, translated Cylinder. However, as implemented now this does not work because Cylinder has a different __init__ signature which is called in translate. What is the best way to achieve this? How can Cylinder use the inherited method translate and return a new Cylinder object? EDIT 1: I think it has to do with LSP violation but I'm not sure. class ExtrudedSurface: def __init__(self, profile: Curve, direction: Vector, length: float): """ An extruded surface geometry created by sweeping a 3D 'profile' curve along a 'direction' for a given 'length'. """ self.profile = profile self.direction = direction.normalize() self.length = length def translate(self, translation: Vector) -> ExtrudedSurface: """Return a translated version of itself.""" return self.__class__( self.profile.translate(translation), self.length, self.direction ) class Cylinder(ExtrudedSurface): def __init__(self, point: Point, direction: Vector, radius: float, length: float): """Cylinder surface. Args: point: Center point of extruded circle. direction: Direction vector of the cylinder (axis). radius: Cylinder radius. length: Extrusion length in 'direction'. """ direction = direction.normalize() profile = Circle(point, direction, radius) super().__init__(profile, direction, length)
[ "Short story short: the by-the-book approach there is to override the translate() method, as well, and call the updated constructor from there.\nNow, you can refactor your class initialization and separate attribute setting from other needed actions, and then create a new class-method to clone an instance with all the attributes from a first instance, and just call this initialization, if needed.\nIf no initialization is needed, just a \"clone\" method is needed. If you happen to call this method __copy__, then you can use the copy.copy call for that, which is almost as if it was an operator in Python, and it can, by itself, have value for your end-users.\nMoreover -- if your class requires no initialization besides setting plain attributes, and no calculations at all, copy.copy will just work out of the box with your instances - no extra __copy__ method needed:\nfrom copy import copy\n\nclass ExtrudedSurface:\n def __init__(self, profile: Curve, direction: Vector, length: float):\n \"\"\"\n An extruded surface geometry created by sweeping a 3D 'profile' curve along a \n 'direction' for a given 'length'.\n \"\"\"\n\n self.profile = profile\n self.direction = direction.normalize()\n self.length = length\n\n def translate(self, translation: Vector) -> ExtrudedSurface:\n \"\"\"Return a translated version of itself.\"\"\"\n\n new_profile = self.profile.translate(translation)\n new_instance = copy(self)\n new_instance.profile = new_profile\n return new_instance\n \n\n\nclass Cylinder(ExtrudedSurface):\n\n def __init__(self, point: Point, direction: Vector, radius: float, length: float):\n ...\n \n\nJust attempt to the fact that copy will not recursively copy the attributes, so, if the self.direction vector is a mutable object in the framework you are using, this will keep both clones bound to the same vector, and if it changes in the original, that change will be reflected in the clone. By the nature of your code I am assuming everything is immutable there and all transforms create new instances: then this will work. Otherwise, you should also copy the original .direction attribute into the new instance.\nIn time: yes, that is an LSP violation - but I always think that LSP is given more importance than it should when it come to practical matters.\nThe more generic code I described, should your initialization be more complex would be:\n\nclass Base:\n def __init__(self, base_args):\n #code to set initial attributes\n ...\n # call inner init with possible sideeffects:\n self.inner_init()\n \n def inner_init(self):\n # code with side effects (like, attaching instance to a container)\n # should be written in order to be repeat-proof - calls on the same instance have no effect\n ...\n\n def __copy__(self):\n new_instance = self.__class__.new() \n new_instance.__dict__.update(self.__dict__)\n self.inner_init()\n\n" ]
[ 2 ]
[]
[]
[ "inheritance", "oop", "python" ]
stackoverflow_0074577383_inheritance_oop_python.txt
Q: Printing username on stdout with Django + Gunicorn Application Right now my Django + Gunicorn app is printing only this info: [03.10.2022 19:43:14] INFO [django.request:middleware] GET /analyse/v2/ping - 200 If request is authorized, I would like to show also user (username/email) behind the status code, something like: [03.10.2022 19:43:14] INFO [django.request:middleware] GET /analyse/v2/ping - 200 - useremail@outlook.com If the request is not authorized then write UNAUTHORIZED: [03.10.2022 19:43:14] INFO [django.request:middleware] GET /analyse/v2/ping - 200 - UNAUTHORIZED How can I achieve this with a combination of Django and Gunicorn? Thank you Solved by adding this part of code in settings: def add_username(record): try: username = record.request.user.username except AttributeError: username = "" record.username = username return True LOGGING = { "version": 1, "disable_existing_loggers": True, "root": {"level": "WARNING", "handlers": ["console"]}, "formatters": { "verbose": { "format": "[%(asctime)s] %(levelname)s [%(name)s:%(module)s] [%(username)s] %(message)s", "datefmt": "%d.%m.%Y %H:%M:%S", }, "simple": {"format": "%(levelname)s %(message)s"}, }, "filters": { "add_username": { "()": "django.utils.log.CallbackFilter", "callback": add_username, } }, "handlers": { "console": { "level": "DEBUG", "class": "logging.StreamHandler", "formatter": "verbose", "filters": ["add_username"], }, ... A: It's not clear where this log line comes from. As far as I can see, Django only logs 4xx and 5xx requests to django.request logger. This doesn't look like a gunicorn access log line either. And if you initiated this log line in your own code, you should be able to add the user easily. So, here are a few generic solutions. (Option 1) For a gunicorn access log line You don't have access to Django's request object and thus won't be able to retrieve the user from gunicorn. However, you can work it around by adding the user in the response headers. yourapp/middleware.py class UserHeaderMiddleware: def __init__(self, get_response): self.get_response = get_response def __call__(self, request): response = self.get_response(request) user = request.user response['X-User'] = user.email if user.is_authenticated() else 'UNAUTHORIZED' return response yourproject/settings.py MIDDLEWARE = [ ..., 'django.contrib.auth.middleware.AuthenticationMiddleware', ..., # Your custom middleware must be called after authentication 'yourapp.middleware.UserHeaderMiddleware', ..., ] Then change gunicorn's access_log_format setting to include this header. For instance: '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%({x-user}o)s"' (Option 2) For django.request logger If your log line is sent to django.request logger, there is a chance that it provided the request in the extra context. 
In that case, you can write a custom Formatter to include the user: yourapp/logging.py from logging import Formatter class RequestFormatter(Formatter): def format(self, record): request = getattr(record, 'request', None) if user := getattr(request, 'user', None): record.user = user.email if user.is_authenticated() else 'UNAUTHORIZED' else: record.user = '-' return super().format(record) yourapp/logging.py LOGGING = { ..., 'formatters': { ..., "django.request": { "()": "yourapp.logging.RequestFormatter", "format": "[{asctime}] {levelname} [{name}] {message} - {status_code} - {user}", "style": "{", }, }, 'loggers': { ..., "django.request": { "handlers": ..., "level": "INFO", "formatter": 'django.request', } ..., }, } (Option 3) Tell Django to log all requests in django.request Django only logs 4xx and 5xx requests in django.request. See source code But we can change this behavior by using a custom WSGI handler. In yourproject/wsgi.py you should have something like this: import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yourproject.settings') application = get_wsgi_application() You can change this to use a custom WSGI handler: import os import django from django.core.wsgi import WSGIHandler from django.conf import settings os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yourproject.settings') class CustomWSGIHandler(WSGIHandler): def get_response(self, request): # Note that this is only a copy of BaseHandler.get_response() # without the condition on log_response() set_urlconf(settings.ROOT_URLCONF) response = self._middleware_chain(request) response._resource_closers.append(request.close) log_response( "%s: %s", response.reason_phrase, request.path, response=response, request=request, ) return response django.setup(set_prefix=False) application = CustomWSGIHandler() Then, refer to Option 2 to include the user in the formatter. (Option 4) Create a middleware to add a new log line If don't have access to this log line to update it and don't have access to the request in the log formatter, you will have to add a new log line manually (and possibly silent the first one to avoid duplicates). yourapp/middleware.py import logging logger = logging.getLogger('django.request') class LoggingMiddleware: def __init__(self, get_response): self.get_response = get_response def __call__(self, request): response = self.get_response(request) user_email = request.user.email if request.user.is_authenticated() else 'UNAUTHORIZED' logger.info(f"{request.method} {request.path} - {request.status_code} - {user_email}") return response yourproject/settings.py MIDDLEWARE = [ ..., 'django.contrib.auth.middleware.AuthenticationMiddleware', ..., # Your custom middleware must be called after authentication 'yourapp.middleware.LoggingMiddleware', ..., ] A: A custom middleware is how you can achieve this easily. You can do something like the below. import logging from loguru import logger # optional if you are not using it already. 
from django.utils import timezone logger = logging.getLogger('django.request') class LogRequest: def __init__(self, get_response): self.get_response = get_response def __call__(self, request): response = self.get_response(request) user = ( request.user.email if request.user.is_authenticated else "UNAUTHORIZED" ) logger.info( f"[{timezone.now().strftime('%d.%m.%Y %H:%M:%S')}] INFO [myapp.custom_logger] {request.method} {request.path} - {response.status_code} - {user}" ) return response You can then configure your activate your new middleware by registering it. MIDDLEWARE = [ "django.middleware.gzip.GZipMiddleware", "django.middleware.security.SecurityMiddleware", ... "myapp.middleware.LogRequest", ] This produces an output like the below. [25.11.2022 15:57:37] INFO [myapp.custom_logger] GET /analyse/v2/ping - 200 - oppen@heimer.xyz
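The question's own CallbackFilter solution and Option 2 above rely on the same mechanism: something attaches a username attribute to the log record before the formatter reads it. A framework-free sketch of that mechanism using only the standard library (FakeRequest and FakeUser are stand-ins for Django's objects):

import logging

class UserFilter(logging.Filter):
    """Attach a 'username' attribute to every record so the formatter can use it."""
    def filter(self, record):
        request = getattr(record, "request", None)
        user = getattr(request, "user", None)
        if user is not None and getattr(user, "is_authenticated", False):
            record.username = user.email
        else:
            record.username = "UNAUTHORIZED"
        return True

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "[%(asctime)s] %(levelname)s [%(name)s] %(message)s - %(username)s",
    datefmt="%d.%m.%Y %H:%M:%S",
))
handler.addFilter(UserFilter())

logger = logging.getLogger("django.request")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

class FakeUser:
    is_authenticated = True
    email = "someone@example.com"

class FakeRequest:
    user = FakeUser()

logger.info("GET /analyse/v2/ping - 200", extra={"request": FakeRequest()})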
Printing username on stdout with Django + Gunicorn Application
Right now my Django + Gunicorn app is printing only this info: [03.10.2022 19:43:14] INFO [django.request:middleware] GET /analyse/v2/ping - 200 If request is authorized, I would like to show also user (username/email) behind the status code, something like: [03.10.2022 19:43:14] INFO [django.request:middleware] GET /analyse/v2/ping - 200 - useremail@outlook.com If the request is not authorized then write UNAUTHORIZED: [03.10.2022 19:43:14] INFO [django.request:middleware] GET /analyse/v2/ping - 200 - UNAUTHORIZED How can I achieve this with a combination of Django and Gunicorn? Thank you Solved by adding this part of code in settings: def add_username(record): try: username = record.request.user.username except AttributeError: username = "" record.username = username return True LOGGING = { "version": 1, "disable_existing_loggers": True, "root": {"level": "WARNING", "handlers": ["console"]}, "formatters": { "verbose": { "format": "[%(asctime)s] %(levelname)s [%(name)s:%(module)s] [%(username)s] %(message)s", "datefmt": "%d.%m.%Y %H:%M:%S", }, "simple": {"format": "%(levelname)s %(message)s"}, }, "filters": { "add_username": { "()": "django.utils.log.CallbackFilter", "callback": add_username, } }, "handlers": { "console": { "level": "DEBUG", "class": "logging.StreamHandler", "formatter": "verbose", "filters": ["add_username"], }, ...
[ "It's not clear where this log line comes from. As far as I can see, Django only logs 4xx and 5xx requests to django.request logger. This doesn't look like a gunicorn access log line either. And if you initiated this log line in your own code, you should be able to add the user easily.\nSo, here are a few generic solutions.\n\n(Option 1) For a gunicorn access log line\nYou don't have access to Django's request object and thus won't be able to retrieve the user from gunicorn. However, you can work it around by adding the user in the response headers.\nyourapp/middleware.py\nclass UserHeaderMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n user = request.user\n response['X-User'] = user.email if user.is_authenticated() else 'UNAUTHORIZED'\n return response\n\nyourproject/settings.py\nMIDDLEWARE = [\n ...,\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n ..., # Your custom middleware must be called after authentication\n 'yourapp.middleware.UserHeaderMiddleware',\n ...,\n]\n\nThen change gunicorn's access_log_format setting to include this header. For instance: '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%({x-user}o)s\"'\n\n(Option 2) For django.request logger\nIf your log line is sent to django.request logger, there is a chance that it provided the request in the extra context.\nIn that case, you can write a custom Formatter to include the user:\nyourapp/logging.py\nfrom logging import Formatter\n\nclass RequestFormatter(Formatter):\n\n def format(self, record):\n request = getattr(record, 'request', None)\n if user := getattr(request, 'user', None):\n record.user = user.email if user.is_authenticated() else 'UNAUTHORIZED'\n else:\n record.user = '-'\n return super().format(record)\n\nyourapp/logging.py\nLOGGING = {\n ...,\n 'formatters': {\n ...,\n \"django.request\": {\n \"()\": \"yourapp.logging.RequestFormatter\",\n \"format\": \"[{asctime}] {levelname} [{name}] {message} - {status_code} - {user}\",\n \"style\": \"{\",\n },\n },\n 'loggers': {\n ...,\n \"django.request\": {\n \"handlers\": ...,\n \"level\": \"INFO\",\n \"formatter\": 'django.request',\n }\n ...,\n },\n}\n\n\n(Option 3) Tell Django to log all requests in django.request\nDjango only logs 4xx and 5xx requests in django.request. 
See source code\nBut we can change this behavior by using a custom WSGI handler.\nIn yourproject/wsgi.py you should have something like this:\nimport os\nfrom django.core.wsgi import get_wsgi_application\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yourproject.settings')\napplication = get_wsgi_application()\n\nYou can change this to use a custom WSGI handler:\nimport os\nimport django\nfrom django.core.wsgi import WSGIHandler\nfrom django.conf import settings\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yourproject.settings')\n\n\nclass CustomWSGIHandler(WSGIHandler):\n def get_response(self, request):\n # Note that this is only a copy of BaseHandler.get_response()\n # without the condition on log_response()\n set_urlconf(settings.ROOT_URLCONF)\n response = self._middleware_chain(request)\n response._resource_closers.append(request.close)\n log_response(\n \"%s: %s\",\n response.reason_phrase,\n request.path,\n response=response,\n request=request,\n )\n return response\n\ndjango.setup(set_prefix=False)\napplication = CustomWSGIHandler()\n\nThen, refer to Option 2 to include the user in the formatter.\n\n(Option 4) Create a middleware to add a new log line\nIf don't have access to this log line to update it and don't have access to the request in the log formatter, you will have to add a new log line manually (and possibly silent the first one to avoid duplicates).\nyourapp/middleware.py\nimport logging\n\nlogger = logging.getLogger('django.request')\n\nclass LoggingMiddleware:\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n user_email = request.user.email if request.user.is_authenticated() else 'UNAUTHORIZED'\n logger.info(f\"{request.method} {request.path} - {request.status_code} - {user_email}\")\n return response\n\nyourproject/settings.py\nMIDDLEWARE = [\n ...,\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n ..., # Your custom middleware must be called after authentication\n 'yourapp.middleware.LoggingMiddleware',\n ...,\n]\n\n", "A custom middleware is how you can achieve this easily. You can do something like the below.\nimport logging\n\nfrom loguru import logger # optional if you are not using it already.\n\nfrom django.utils import timezone\n\nlogger = logging.getLogger('django.request')\n\n\nclass LogRequest:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n\n user = (\n request.user.email\n if request.user.is_authenticated\n else \"UNAUTHORIZED\"\n )\n\n logger.info(\n f\"[{timezone.now().strftime('%d.%m.%Y %H:%M:%S')}] INFO [myapp.custom_logger] {request.method} {request.path} - {response.status_code} - {user}\"\n )\n\n return response\n\n\nYou can then configure your activate your new middleware by registering it.\nMIDDLEWARE = [\n \"django.middleware.gzip.GZipMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n ...\n \"myapp.middleware.LogRequest\",\n]\n\nThis produces an output like the below.\n[25.11.2022 15:57:37] INFO [myapp.custom_logger] GET /analyse/v2/ping - 200 - oppen@heimer.xyz\n\n" ]
[ 2, 1 ]
[]
[]
[ "django", "gunicorn", "python", "python_3.x", "web" ]
stackoverflow_0074322307_django_gunicorn_python_python_3.x_web.txt
Q: AttributeError: 'NoneType' object has no attribute 'tostring' in application, but works fine in PC I'm trying to make apk file from using python. This code is using cv2.VideoCapture(0) to make phone camera application. Here's my code # Import kivy dependencies first from kivy.app import App from kivy.uix.boxlayout import BoxLayout # Import kivy UX components from kivy.uix.image import Image # Import other kivy stuff from kivy.clock import Clock from kivy.graphics.texture import Texture # Import other dependencies import cv2 class CamApp(App): def build(self): self.vid = cv2.VideoCapture(0) self.web_cam = Image() layout = BoxLayout() layout.add_widget(self.web_cam) #self.capture = cv2.VideoCapture(0) Clock.schedule_interval(self.update, 1.0 / 33.0) self.vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) self.vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) #w = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH) #h = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT) #print("너비 {} 높이 {}".format(w, h)) return layout def update(self, *args): ret, frame = self.vid.read() #frame = cv2.flip(frame, 0) # Flip horizontal and convert image to texture buf = cv2.flip(frame, 0).tostring() img_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') img_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte') self.web_cam.texture = img_texture if __name__ == '__main__': CamApp().run() It works fine on PC and I used buildozer to make application. Here's my part of my buildozer.spec # (list) Application requirements # comma separated e.g. requirements = sqlite3,kivy requirements = python3,kivy,opencv Anyway, when I played the application I made, it shuts off after kivy loading page. So I used adb and got an error message AttributeError: 'NoneType' object has no attribute 'tostring' I can't understand why this error messaged showed up because it worked well in my PC. Please help me... Thank you!! A: try to do this: def update(self, *args): ret, self.frame = self.vid.read() #frame = cv2.flip(frame, 0) # Flip horizontal and convert image to texture buf = cv2.flip(self.frame, 0).tobytes() img_texture = Texture.create(size=(self.frame.shape[1], self.frame.shape[0]), colorfmt='bgr') img_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte') self.web_cam.texture = img_texture
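Most likely cv2.VideoCapture(0).read() is returning (False, None) on the device (the prebuilt OpenCV used by python-for-android often cannot open the camera this way), so frame is None and has no .tostring(). A quick standalone check that also guards against that case; it runs anywhere, with or without a camera:

import cv2

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Camera could not be opened on this device")

ret, frame = cap.read()
if not ret or frame is None:
    # This is exactly the situation behind "'NoneType' object has no attribute
    # 'tostring'": there is no frame, so skip the update instead of converting None.
    print("No frame grabbed")
else:
    buf = cv2.flip(frame, 0).tobytes()   # tobytes() is the non-deprecated spelling
    print(f"Got a {frame.shape[1]}x{frame.shape[0]} frame, {len(buf)} bytes")

cap.release()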
AttributeError: 'NoneType' object has no attribute 'tostring' in the application, but works fine on PC
I'm trying to make apk file from using python. This code is using cv2.VideoCapture(0) to make phone camera application. Here's my code # Import kivy dependencies first from kivy.app import App from kivy.uix.boxlayout import BoxLayout # Import kivy UX components from kivy.uix.image import Image # Import other kivy stuff from kivy.clock import Clock from kivy.graphics.texture import Texture # Import other dependencies import cv2 class CamApp(App): def build(self): self.vid = cv2.VideoCapture(0) self.web_cam = Image() layout = BoxLayout() layout.add_widget(self.web_cam) #self.capture = cv2.VideoCapture(0) Clock.schedule_interval(self.update, 1.0 / 33.0) self.vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) self.vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) #w = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH) #h = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT) #print("너비 {} 높이 {}".format(w, h)) return layout def update(self, *args): ret, frame = self.vid.read() #frame = cv2.flip(frame, 0) # Flip horizontal and convert image to texture buf = cv2.flip(frame, 0).tostring() img_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') img_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte') self.web_cam.texture = img_texture if __name__ == '__main__': CamApp().run() It works fine on PC and I used buildozer to make application. Here's my part of my buildozer.spec # (list) Application requirements # comma separated e.g. requirements = sqlite3,kivy requirements = python3,kivy,opencv Anyway, when I played the application I made, it shuts off after kivy loading page. So I used adb and got an error message AttributeError: 'NoneType' object has no attribute 'tostring' I can't understand why this error messaged showed up because it worked well in my PC. Please help me... Thank you!!
[ "try to do this:\ndef update(self, *args):\n ret, self.frame = self.vid.read()\n\n #frame = cv2.flip(frame, 0)\n\n # Flip horizontal and convert image to texture\n buf = cv2.flip(self.frame, 0).tobytes()\n\n img_texture = Texture.create(size=(self.frame.shape[1], self.frame.shape[0]), colorfmt='bgr')\n img_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n self.web_cam.texture = img_texture\n\n" ]
[ 0 ]
[]
[]
[ "kivy", "opencv", "python" ]
stackoverflow_0072197918_kivy_opencv_python.txt
Q: How to use Desktop object to connect to app with changing window titles I am new to pywinauto but I have read all the documentation. I am trying to automate the Spotify app on Windows (i.e. the Microsoft Store option). I am using UIA backend. The only problem I am having is that because pywinauto's Desktop object is looking for a specific window title, it doesn't allow me to automate as Spotify's window title can change depending on what song is being played at any given moment (and there's no word "Spotify" in the window title). I experimented with trying to obtain the process ID, but it doesn't seem to find the exe name (Spotify.exe) or doesn't work with the Desktop object, which I need to be able to access child_window, etc. My code is below. Can someone assist with advising how to go about making the Desktop object work when the app's window title is not constant and can't be used to identify the app, and since it's a Microsoft Store app, it spawns multiple processes so using just the Application object is not an option either. import pywinauto import subprocess from pywinauto import Desktop, application from pywinauto.application import Application sp = subprocess.call("start shell:AppsFolder\SpotifyAB.SpotifyMusic_zpdnekdrzrea0!Spotify",shell=True) app = pywinauto.Application(backend='uia').connect(path="Spotify.exe") #this works only if there is NO song playing on Spotify; if any song is playing, #it fails to work since it can't find any app window with the title "Spotify" #and the name of the window is instead the current song playing #which I don't know what it is going to be at any given point dlg = Desktop(backend="uia").Spotify #does not work dlg = Desktop(backend="uia").window(process=app.process) dlg.child_window(title="Your Library", control_type="Hyperlink",found_index=0).wait("exists",30).invoke() dlg.child_window(title="Orchestra", control_type="Hyperlink",found_index=0).wait("exists",30).invoke() try: dlg.child_window(title="Play Orchestra", control_type="Button").wait("exists",5).invoke() except: pass # this also doesn't work most of the time dlg.wait("",10).close() A: From the How To's page in the docs How to specify a dialog of the application, you should be able to use the top_window() method to get the window with the highest Z-Order (although the docs say that it's fairly untested ... It will definitely be a top level window of the application - it just might not be the one highest in the Z-Order. However if there's only one window in the app that shouldn't be an issue. dlg = app.top_window()
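A condensed sketch of that approach, assuming Spotify is already running and that connecting by executable name works as it does in the question's script:

from pywinauto import Application

# Connect by process executable rather than by window title, then take the
# application's current top-level window, whatever the song title happens to be.
app = Application(backend="uia").connect(path="Spotify.exe")
dlg = app.top_window()

# From here on, child_window lookups work as in the original script.
dlg.child_window(title="Your Library", control_type="Hyperlink", found_index=0) \
   .wait("exists", 30).invoke()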
How to use Desktop object to connect to app with changing window titles
I am new to pywinauto but I have read all the documentation. I am trying to automate the Spotify app on Windows (i.e. the Microsoft Store option). I am using UIA backend. The only problem I am having is that because pywinauto's Desktop object is looking for a specific window title, it doesn't allow me to automate as Spotify's window title can change depending on what song is being played at any given moment (and there's no word "Spotify" in the window title). I experimented with trying to obtain the process ID, but it doesn't seem to find the exe name (Spotify.exe) or doesn't work with the Desktop object, which I need to be able to access child_window, etc. My code is below. Can someone assist with advising how to go about making the Desktop object work when the app's window title is not constant and can't be used to identify the app, and since it's a Microsoft Store app, it spawns multiple processes so using just the Application object is not an option either. import pywinauto import subprocess from pywinauto import Desktop, application from pywinauto.application import Application sp = subprocess.call("start shell:AppsFolder\SpotifyAB.SpotifyMusic_zpdnekdrzrea0!Spotify",shell=True) app = pywinauto.Application(backend='uia').connect(path="Spotify.exe") #this works only if there is NO song playing on Spotify; if any song is playing, #it fails to work since it can't find any app window with the title "Spotify" #and the name of the window is instead the current song playing #which I don't know what it is going to be at any given point dlg = Desktop(backend="uia").Spotify #does not work dlg = Desktop(backend="uia").window(process=app.process) dlg.child_window(title="Your Library", control_type="Hyperlink",found_index=0).wait("exists",30).invoke() dlg.child_window(title="Orchestra", control_type="Hyperlink",found_index=0).wait("exists",30).invoke() try: dlg.child_window(title="Play Orchestra", control_type="Button").wait("exists",5).invoke() except: pass # this also doesn't work most of the time dlg.wait("",10).close()
[ "From the How To's page in the docs How to specify a dialog of the application, you should be able to use the top_window() method to get the window with the highest Z-Order (although the docs say that it's\n\nfairly untested ... It will definitely be a top level window of the application - it just might not be the one highest in the Z-Order.\n\nHowever if there's only one window in the app that shouldn't be an issue.\ndlg = app.top_window()\n\n" ]
[ 1 ]
[]
[]
[ "python", "pywinauto" ]
stackoverflow_0074577497_python_pywinauto.txt
Q: Fill NaN base on several 'IFS' conditions This is going to be a rather long post to cover all the edge cases and with examples for clarity. A sample of my input data is as below: df = pd.DataFrame({"Set" : [100, 100, 110, 110, 130, 130, 130, 140, 140, 150, 150, 150, 160, 170, 170], "measure" : [np.nan, np.nan, 11, 10, np.nan, np.nan, np.nan, np.nan, np.nan, 10, 13, 8, np.nan, 12, 13], "width" : [0.19, 0.18, 0.2, 0.27, 0.18, 0.17, 0.21, 0.19, 0.16, 0.19, 0.24, 0.3, 0.15, 0.32, 0.28]}) df Set measure width 0 100 NaN 0.19 1 100 NaN 0.18 2 110 11.0 0.20 3 110 10.0 0.27 4 130 NaN 0.18 5 130 NaN 0.17 6 130 NaN 0.21 7 140 NaN 0.19 8 140 NaN 0.16 9 150 10.0 0.19 10 150 13.0 0.24 11 150 8.0 0.30 12 160 NaN 0.15 13 170 12.0 0.32 14 170 13.0 0.28 I want to be able to fill the NaN base on these conditions in this order of IF-ELSE statement ( I will include example explanation for each condition to be clearer): Find the absolute difference between the NaN "Set" and its closest neighbors and fill the NaN with the max value from that closest neighbor - e.g: for index 0 and 1, absolute difference between set 100 and set 110 is 10. Then fill index 0 and 1 NaN with the max value from set 110 which is "11.0" and include the "adjusted width" column which is the width 0.2 for the NaN row at index 0 and 1. If the above absolute difference between its closest neighbors is the same for two neighbors, fill NaN with the max value of the "measure" of that set. e.g: for index 4, 5, 6 that is on set "130", the absolute difference between set 110 and set 130 is 20; also the absolute difference between set 150 and set 130 is also 20. There tis has 2 closest neighbors with same difference. To fill the NaN, we will consider which of the two sets has the max value of "measure". In this case, neighbor 'set 150' has the max value of "measure" which is "13.0". Then fill index 4, 5 and 6 NaN with the max value from set 150 which is "13.0" and include the "adjusted width" column which is the width 0.24 for the NaN row at index 4, 5, 6. (Note, you cannot fill set 150 with that of set 140 eventhough it is closest because 140 is NaN in the first place and was initially not provided). If condition 1 and 2 above are the same, fill NaN with the max value of the "width" of that set. e.g for index 12, that is on set 160, its closest neighbors are set 150 and set 170 each with absolute difference of 10, meaning condition 1 is a tie, then proceeding to condition 2, the max value of "measure" for both set 150 and set 170 is "13.0". To fill the NaN, we will consider which of the two sets has the max value of "width". In this case, neighbor 'set 170' has the max value of "width" which is "0.28". Then fill index 12 NaN with the max value of "measure" corresponding to the row with the max value of "width" from set 170 which is "13.0" and include the "adjusted width" column which is the width 0.28 for the NaN row at index 14. Note Set 140 NaN satisfies condition 1 with set 150 being its closest neighbor and is filled with 13.0 from set 150 and adjusted width of 0.24 from set 150. 
My final output table will be as below: Set measure width Adjusted Width 0 100 11.0 0.19 0.2 1 100 11.0 0.18 0.2 2 110 11.0 0.20 0.2 3 110 10.0 0.27 0.27 4 130 13.0 0.18 0.24 5 130 13.0 0.17 0.24 6 130 13.0 0.21 0.24 7 140 13.0 0.19 0.24 8 140 13.0 0.16 0.24 9 150 10.0 0.19 0.19 10 150 13.0 0.24 0.24 11 150 8.0 0.30 0.30 12 160 13.0 0.15 0.28 13 170 12.0 0.32 0.32 14 170 13.0 0.28 0.28 I know this is long, I needed to cover all cases, please leave me a comment if you need further clarifications. Thanks in advance. A: There you go. Comments will go a long way helping you understand the flow, but more or less translated your written logic into code. Also, added an additional condition_met column to help you see which condition was met for the different cases. This can be optimised for sure, but it will certainly provide a solid starting point. The crux of it, if there is one, is to filter the original dataframe at every step according to your conditions and run checks against it. The print statements in the else blocks are just in case something goes wrong in real life - but they should not be executed. import pandas as pd import numpy as np df = pd.DataFrame({"set" : [100, 100, 110, 110, 130, 130, 130, 140, 140, 150, 150, 150, 160, 170, 170], "measure" : [np.nan, np.nan, 11, 10, np.nan, np.nan, np.nan, np.nan, np.nan, 10, 13, 8, np.nan, 12, 13], "width" : [0.19, 0.18, 0.2, 0.27, 0.18, 0.17, 0.21, 0.19, 0.16, 0.19, 0.24, 0.3, 0.15, 0.32, 0.28]}) # we will create a copy of the output df to write the results # otherwise when we write the original df, the conditions are change # e.g. when we get to checking set 140, set 130 is resolved, meaning that # set 140 will now meet condition 3, and not condition 1 as desired output_df = df.copy() output_df['adjusted_width'] = output_df['width'] # we will just loop over unique sets instead of the dataframe for some efficiency for set_ind in df['set'].unique(): # here we check that all measures are NaN but that might be conservative if all(np.isnan(df[df['set']==set_ind]['measure'])): # making sure that we remove nan values from measure when we check for the minimum or when we find the neighbours # though that can be extracted in a separate step distance_from_closest_neighbour = min(np.abs(df[(df['set']!=set_ind) & (~np.isnan(df['measure']))]['set']-set_ind)) only_closest_neighbours_df = df[(np.abs(df['set']-set_ind)==distance_from_closest_neighbour) & (~np.isnan(df['measure']))] # if there is only one closest neighbour, then we have met condition 1 if only_closest_neighbours_df['set'].unique().shape[0] == 1: # executing condition 1: get the max measure from the df, the width and set them on the output df max_measure_from_neighbour = max(only_closest_neighbours_df['measure']) width_of_max_measure_from_neighbour = only_closest_neighbours_df[only_closest_neighbours_df['measure']==max_measure_from_neighbour]['width'].iloc[0] output_df.loc[output_df['set'] == set_ind, 'measure'] = max_measure_from_neighbour output_df.loc[output_df['set'] == set_ind, 'adjusted_width'] = width_of_max_measure_from_neighbour output_df.loc[output_df['set'] == set_ind, 'condition_met'] = 'condition 1' # condition 1 not met - must be 2 closest neighbours elif only_closest_neighbours_df['set'].unique().shape[0] == 2: # get the max measure of all closest neighbours and check how many neighbours have that value in measure max_measure_from_all_neighbours = max(only_closest_neighbours_df['measure']) closest_neighbours_with_max_measure = 
only_closest_neighbours_df[only_closest_neighbours_df['measure']==max_measure_from_all_neighbours] # if only 1 of the closest neighbours has the max value of measure, then we have met condition 2 if closest_neighbours_with_max_measure['set'].unique().shape[0] == 1: # executing condition 2: set the max measure and the width on the output df width_of_max_measure_from_neighbour_for_cond_2 = closest_neighbours_with_max_measure[closest_neighbours_with_max_measure['measure']==max_measure_from_all_neighbours]['width'].iloc[0] output_df.loc[output_df['set'] == set_ind, 'measure'] = max_measure_from_all_neighbours output_df.loc[output_df['set'] == set_ind, 'adjusted_width'] = width_of_max_measure_from_neighbour_for_cond_2 output_df.loc[output_df['set'] == set_ind, 'condition_met'] = 'condition 2' # if both closest neighbours have the max value of measure, then we have met condition 3 elif closest_neighbours_with_max_measure['set'].unique().shape[0] == 2: # executing condition 3: set the max measure and the width of the output df closest_neighbours_with_max_measure_and_max_width = closest_neighbours_with_max_measure.sort_values('width', ascending=False).iloc[0] output_df.loc[output_df['set'] == set_ind, 'measure'] = closest_neighbours_with_max_measure_and_max_width['measure'] output_df.loc[output_df['set'] == set_ind, 'adjusted_width'] = closest_neighbours_with_max_measure_and_max_width['width'] output_df.loc[output_df['set'] == set_ind, 'condition_met'] = 'condition 3' else: print(f'Something went wrong - {set_ind}') else: print(f'Something went wrong 2 - {set_ind}') the output then is: set measure width adjusted_width condition_met 0 100 11.0 0.19 0.20 condition 1 1 100 11.0 0.18 0.20 condition 1 2 110 11.0 0.20 0.20 NaN 3 110 10.0 0.27 0.27 NaN 4 130 13.0 0.18 0.24 condition 2 5 130 13.0 0.17 0.24 condition 2 6 130 13.0 0.21 0.24 condition 2 7 140 13.0 0.19 0.24 condition 1 8 140 13.0 0.16 0.24 condition 1 9 150 10.0 0.19 0.19 NaN 10 150 13.0 0.24 0.24 NaN 11 150 8.0 0.30 0.30 NaN 12 160 13.0 0.15 0.28 condition 3 13 170 12.0 0.32 0.32 NaN 14 170 13.0 0.28 0.28 NaN
Fill NaN base on several 'IFS' conditions
This is going to be a rather long post to cover all the edge cases and with examples for clarity. A sample of my input data is as below: df = pd.DataFrame({"Set" : [100, 100, 110, 110, 130, 130, 130, 140, 140, 150, 150, 150, 160, 170, 170], "measure" : [np.nan, np.nan, 11, 10, np.nan, np.nan, np.nan, np.nan, np.nan, 10, 13, 8, np.nan, 12, 13], "width" : [0.19, 0.18, 0.2, 0.27, 0.18, 0.17, 0.21, 0.19, 0.16, 0.19, 0.24, 0.3, 0.15, 0.32, 0.28]}) df Set measure width 0 100 NaN 0.19 1 100 NaN 0.18 2 110 11.0 0.20 3 110 10.0 0.27 4 130 NaN 0.18 5 130 NaN 0.17 6 130 NaN 0.21 7 140 NaN 0.19 8 140 NaN 0.16 9 150 10.0 0.19 10 150 13.0 0.24 11 150 8.0 0.30 12 160 NaN 0.15 13 170 12.0 0.32 14 170 13.0 0.28 I want to be able to fill the NaN base on these conditions in this order of IF-ELSE statement ( I will include example explanation for each condition to be clearer): Find the absolute difference between the NaN "Set" and its closest neighbors and fill the NaN with the max value from that closest neighbor - e.g: for index 0 and 1, absolute difference between set 100 and set 110 is 10. Then fill index 0 and 1 NaN with the max value from set 110 which is "11.0" and include the "adjusted width" column which is the width 0.2 for the NaN row at index 0 and 1. If the above absolute difference between its closest neighbors is the same for two neighbors, fill NaN with the max value of the "measure" of that set. e.g: for index 4, 5, 6 that is on set "130", the absolute difference between set 110 and set 130 is 20; also the absolute difference between set 150 and set 130 is also 20. There tis has 2 closest neighbors with same difference. To fill the NaN, we will consider which of the two sets has the max value of "measure". In this case, neighbor 'set 150' has the max value of "measure" which is "13.0". Then fill index 4, 5 and 6 NaN with the max value from set 150 which is "13.0" and include the "adjusted width" column which is the width 0.24 for the NaN row at index 4, 5, 6. (Note, you cannot fill set 150 with that of set 140 eventhough it is closest because 140 is NaN in the first place and was initially not provided). If condition 1 and 2 above are the same, fill NaN with the max value of the "width" of that set. e.g for index 12, that is on set 160, its closest neighbors are set 150 and set 170 each with absolute difference of 10, meaning condition 1 is a tie, then proceeding to condition 2, the max value of "measure" for both set 150 and set 170 is "13.0". To fill the NaN, we will consider which of the two sets has the max value of "width". In this case, neighbor 'set 170' has the max value of "width" which is "0.28". Then fill index 12 NaN with the max value of "measure" corresponding to the row with the max value of "width" from set 170 which is "13.0" and include the "adjusted width" column which is the width 0.28 for the NaN row at index 14. Note Set 140 NaN satisfies condition 1 with set 150 being its closest neighbor and is filled with 13.0 from set 150 and adjusted width of 0.24 from set 150. My final output table will be as below: Set measure width Adjusted Width 0 100 11.0 0.19 0.2 1 100 11.0 0.18 0.2 2 110 11.0 0.20 0.2 3 110 10.0 0.27 0.27 4 130 13.0 0.18 0.24 5 130 13.0 0.17 0.24 6 130 13.0 0.21 0.24 7 140 13.0 0.19 0.24 8 140 13.0 0.16 0.24 9 150 10.0 0.19 0.19 10 150 13.0 0.24 0.24 11 150 8.0 0.30 0.30 12 160 13.0 0.15 0.28 13 170 12.0 0.32 0.32 14 170 13.0 0.28 0.28 I know this is long, I needed to cover all cases, please leave me a comment if you need further clarifications. 
Thanks in advance.
[ "There you go. Comments will go a long way helping you understand the flow, but more or less translated your written logic into code. Also, added an additional condition_met column to help you see which condition was met for the different cases. This can be optimised for sure, but it will certainly provide a solid starting point.\nThe crux of it, if there is one, is to filter the original dataframe at every step according to your conditions and run checks against it.\nThe print statements in the else blocks are just in case something goes wrong in real life - but they should not be executed.\nimport pandas as pd\nimport numpy as np\n\ndf = pd.DataFrame({\"set\" : [100, 100, 110, 110, 130, 130, 130, 140, 140, 150, 150, 150, 160, 170, 170],\n \"measure\" : [np.nan, np.nan, 11, 10, np.nan, np.nan, np.nan, np.nan, np.nan, 10, 13, 8, np.nan, 12, 13],\n \"width\" : [0.19, 0.18, 0.2, 0.27, 0.18, 0.17, 0.21, 0.19, 0.16, 0.19, 0.24, 0.3, 0.15, 0.32, 0.28]})\n\n# we will create a copy of the output df to write the results\n# otherwise when we write the original df, the conditions are change\n# e.g. when we get to checking set 140, set 130 is resolved, meaning that\n# set 140 will now meet condition 3, and not condition 1 as desired\noutput_df = df.copy()\noutput_df['adjusted_width'] = output_df['width']\n\n# we will just loop over unique sets instead of the dataframe for some efficiency\nfor set_ind in df['set'].unique():\n # here we check that all measures are NaN but that might be conservative\n if all(np.isnan(df[df['set']==set_ind]['measure'])):\n # making sure that we remove nan values from measure when we check for the minimum or when we find the neighbours\n # though that can be extracted in a separate step\n distance_from_closest_neighbour = min(np.abs(df[(df['set']!=set_ind) & (~np.isnan(df['measure']))]['set']-set_ind))\n only_closest_neighbours_df = df[(np.abs(df['set']-set_ind)==distance_from_closest_neighbour) & (~np.isnan(df['measure']))]\n \n # if there is only one closest neighbour, then we have met condition 1\n if only_closest_neighbours_df['set'].unique().shape[0] == 1:\n # executing condition 1: get the max measure from the df, the width and set them on the output df\n max_measure_from_neighbour = max(only_closest_neighbours_df['measure'])\n width_of_max_measure_from_neighbour = only_closest_neighbours_df[only_closest_neighbours_df['measure']==max_measure_from_neighbour]['width'].iloc[0]\n \n output_df.loc[output_df['set'] == set_ind, 'measure'] = max_measure_from_neighbour\n output_df.loc[output_df['set'] == set_ind, 'adjusted_width'] = width_of_max_measure_from_neighbour\n output_df.loc[output_df['set'] == set_ind, 'condition_met'] = 'condition 1'\n \n # condition 1 not met - must be 2 closest neighbours\n elif only_closest_neighbours_df['set'].unique().shape[0] == 2:\n # get the max measure of all closest neighbours and check how many neighbours have that value in measure\n max_measure_from_all_neighbours = max(only_closest_neighbours_df['measure'])\n closest_neighbours_with_max_measure = only_closest_neighbours_df[only_closest_neighbours_df['measure']==max_measure_from_all_neighbours]\n\n # if only 1 of the closest neighbours has the max value of measure, then we have met condition 2\n if closest_neighbours_with_max_measure['set'].unique().shape[0] == 1:\n # executing condition 2: set the max measure and the width on the output df\n width_of_max_measure_from_neighbour_for_cond_2 = 
closest_neighbours_with_max_measure[closest_neighbours_with_max_measure['measure']==max_measure_from_all_neighbours]['width'].iloc[0]\n output_df.loc[output_df['set'] == set_ind, 'measure'] = max_measure_from_all_neighbours\n output_df.loc[output_df['set'] == set_ind, 'adjusted_width'] = width_of_max_measure_from_neighbour_for_cond_2\n output_df.loc[output_df['set'] == set_ind, 'condition_met'] = 'condition 2'\n \n # if both closest neighbours have the max value of measure, then we have met condition 3\n elif closest_neighbours_with_max_measure['set'].unique().shape[0] == 2:\n # executing condition 3: set the max measure and the width of the output df\n closest_neighbours_with_max_measure_and_max_width = closest_neighbours_with_max_measure.sort_values('width', ascending=False).iloc[0]\n output_df.loc[output_df['set'] == set_ind, 'measure'] = closest_neighbours_with_max_measure_and_max_width['measure']\n output_df.loc[output_df['set'] == set_ind, 'adjusted_width'] = closest_neighbours_with_max_measure_and_max_width['width']\n output_df.loc[output_df['set'] == set_ind, 'condition_met'] = 'condition 3'\n else:\n print(f'Something went wrong - {set_ind}')\n else:\n print(f'Something went wrong 2 - {set_ind}')\n\nthe output then is:\n\n set measure width adjusted_width condition_met\n0 100 11.0 0.19 0.20 condition 1\n1 100 11.0 0.18 0.20 condition 1\n2 110 11.0 0.20 0.20 NaN\n3 110 10.0 0.27 0.27 NaN\n4 130 13.0 0.18 0.24 condition 2\n5 130 13.0 0.17 0.24 condition 2\n6 130 13.0 0.21 0.24 condition 2\n7 140 13.0 0.19 0.24 condition 1\n8 140 13.0 0.16 0.24 condition 1\n9 150 10.0 0.19 0.19 NaN\n10 150 13.0 0.24 0.24 NaN\n11 150 8.0 0.30 0.30 NaN\n12 160 13.0 0.15 0.28 condition 3\n13 170 12.0 0.32 0.32 NaN\n14 170 13.0 0.28 0.28 NaN\n\n" ]
[ 0 ]
[]
[]
[ "numpy", "pandas", "python" ]
stackoverflow_0074576974_numpy_pandas_python.txt
Q: Deleting a line in a txt file that contains a certain string I want to search a text file for the user input and delete the line that contains it.Below is the text file. course work.txt:- Eric/20/SL/merc/3433 John/30/AU/BMW/2324 Tony/24/US/ford/4532 Leo/32/JP/Toyota/1344 If the user input is 'Eric', I want the line containing 'Eric' to be deleted and then the text file to be saved as below Updated course work.txt:- John/30/AU/BMW/2324 Tony/24/US/ford/4532 Leo/32/JP/Toyota/1344 Here is the code I created for that with the help of the very very small knowledge I have and some websites. with open('course work.txt','r') as original: #get user input word = input('Search: ') # read all content of file content = original.read() # check if string present in file if word in content: print('User input exsists') confirmation = input('Press enter to delete') if confirmation == '': import os with open('course work.txt', 'r') as original: with open('temp.txt', "w") as temporary: for line in original: # if user input contain in a line then don't write it if word not in line.strip("\n"): temporary.write(line) os.replace('temp.txt', 'course work.txt') else: print('Driver doesn't exsist') What's happening here is, 1.open the course work.txt and read it 2.get the user input and search course work.txt for that user input 3.if that user input is found in the course work.txt, open a new file called temp.txt write the all lines except the line that contains the user input into temp.txt 5.over write temp.txt on course work.txt When I run the code it gives me a 'PermissionError: [WinError 5] ' error.The temp.txt file get created. It contains all the lines except the line i want to delete which is great, but it doesn't over write on the original file. Is there way to solve this or is there a more PYTHONIC way to do the exact same thing? A: You can write a function to take care of that and also by making good use of shutil to copy temp.txt after writing in order to update source-work.txt . import shutil def modify_original_file(): word = input('Search: ').strip().lower() track = 0 with open("course-work.txt", 'r') as original: with open("temp.txt", "w") as temporary: # read all lines of file content = original.readlines() # check if string present in file word_found = False for line in content: if word in line.lower(): word_found = True break if word_found == True: print('User input exist') confirmation = input('Press Enter to delete: ') if confirmation == '': for line in content: if word not in line.lower(): temporary.write(line) track += 1 else: print("Driver doesn't exist") if track > 0: # Update course-work.txt by copying temp.txt file shutil.copyfile("temp.txt", "course-work.txt") modify_original_file() Terminal: Enter Eric or eric for search. Search: eric Output: updated source-work.txt: John/30/AU/BMW/2324 Tony/24/US/ford/4532 Leo/32/JP/Toyota/1344
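For completeness, the PermissionError in the question is consistent with calling os.replace('temp.txt', 'course work.txt') while 'course work.txt' is still held open by the outer with block, which Windows refuses. A minimal sketch that avoids the temporary file altogether is shown below: it reads all lines first, lets the file close, and only then rewrites it. The file name and prompts mirror the question; everything else is illustrative.

def delete_lines_containing(path, word):
    # read everything up front so the handle is closed before the rewrite
    with open(path, "r") as f:
        lines = f.readlines()

    kept = [line for line in lines if word not in line]

    if len(kept) == len(lines):
        print("Driver doesn't exist")
        return

    if input("Press enter to delete") == "":
        with open(path, "w") as f:      # reopened for writing only after reading finished
            f.writelines(kept)

if __name__ == "__main__":
    delete_lines_containing("course work.txt", input("Search: "))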
Deleting a line in a txt file that contains a certain string
I want to search a text file for the user input and delete the line that contains it.Below is the text file. course work.txt:- Eric/20/SL/merc/3433 John/30/AU/BMW/2324 Tony/24/US/ford/4532 Leo/32/JP/Toyota/1344 If the user input is 'Eric', I want the line containing 'Eric' to be deleted and then the text file to be saved as below Updated course work.txt:- John/30/AU/BMW/2324 Tony/24/US/ford/4532 Leo/32/JP/Toyota/1344 Here is the code I created for that with the help of the very very small knowledge I have and some websites. with open('course work.txt','r') as original: #get user input word = input('Search: ') # read all content of file content = original.read() # check if string present in file if word in content: print('User input exsists') confirmation = input('Press enter to delete') if confirmation == '': import os with open('course work.txt', 'r') as original: with open('temp.txt', "w") as temporary: for line in original: # if user input contain in a line then don't write it if word not in line.strip("\n"): temporary.write(line) os.replace('temp.txt', 'course work.txt') else: print('Driver doesn't exsist') What's happening here is, 1.open the course work.txt and read it 2.get the user input and search course work.txt for that user input 3.if that user input is found in the course work.txt, open a new file called temp.txt write the all lines except the line that contains the user input into temp.txt 5.over write temp.txt on course work.txt When I run the code it gives me a 'PermissionError: [WinError 5] ' error.The temp.txt file get created. It contains all the lines except the line i want to delete which is great, but it doesn't over write on the original file. Is there way to solve this or is there a more PYTHONIC way to do the exact same thing?
[ "You can write a function to take care of that and also by making good use of shutil to copy temp.txt after writing in order to update source-work.txt .\nimport shutil\n\n\ndef modify_original_file():\n word = input('Search: ').strip().lower()\n\n track = 0\n with open(\"course-work.txt\", 'r') as original:\n with open(\"temp.txt\", \"w\") as temporary:\n\n # read all lines of file\n content = original.readlines()\n \n # check if string present in file\n word_found = False\n for line in content:\n if word in line.lower():\n word_found = True\n break\n\n if word_found == True:\n print('User input exist')\n confirmation = input('Press Enter to delete: ')\n if confirmation == '':\n for line in content:\n if word not in line.lower():\n temporary.write(line)\n track += 1\n else:\n print(\"Driver doesn't exist\")\n\n if track > 0:\n # Update course-work.txt by copying temp.txt file\n shutil.copyfile(\"temp.txt\", \"course-work.txt\")\n\nmodify_original_file()\n\nTerminal: Enter Eric or eric for search.\nSearch: eric\nOutput: updated source-work.txt:\nJohn/30/AU/BMW/2324\nTony/24/US/ford/4532\nLeo/32/JP/Toyota/1344\n\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074573502_python.txt
Q: Multiprocessing With kivy. A process in the process pool was terminated abruptly while the future was running or pending error I have "A process in the process pool was terminated abruptly while the future was running or pending." error and I cant figure it out how to solve it. Pls Help me. I have 2 program "proba.py" is the kivy program and "proba2.py" is the file_read program. first program # proba.py for kivy from kivy.uix.widget import Widget from kivy.app import App import proba2 class Main(Widget): def __init__(self, **kwargs): super(Main, self).__init__(**kwargs) def Button(self): print ("W") proba2.File_read().Start() class MainApp(App): def build(self): return Main() if __name__ == '__main__': from kivy.lang import Builder Builder.load_string("""<Main> Button: on_press: root.Button()""") MainApp().run() second program # proba2.py for File_read import concurrent.futures class File_read(): def __init__(self, **kwargs): super(File_read, self).__init__(**kwargs) def file_read (self, y, x): return y*x*self.Name #Read files and give back to data def for_loop(self, Name): self.Name=Name results=[] results.clear() for_loop_result= [] for_loop_result.clear() with concurrent.futures.ProcessPoolExecutor() as ex: for y in range (30): for x in range (30): results.append (ex.submit(self.file_read,y,x)) for f in concurrent.futures.as_completed(results): for_loop_result.append (f.result()) return for_loop_result def Start(self): for Name_change in range (100): self.for_loop(Name_change) print ("Done") "A process in the process pool was terminated abruptly while the future was running or pending." This error stop my program but not immediately. Sometimes can finish the hole program some time stop in the middle of the loop. I read about if name == 'main' but cant use it when I call the other program with Button. I mean I don't know how to use it. This program read 14k file and make a picture of them. When this program finish, I change the file name and want to start over the program for make more pictures. This program is a short version because the original is too long. But the problem is same. How can I make stable this program? I work on windows 10 with python 3.10 A: I find the solution. I take out the concurrent.futures from for cycle. # proba2.py for File_read import concurrent.futures class File_read(): def __init__(self, **kwargs): super(File_read, self).__init__(**kwargs) def file_read (self, y, x): return y*x*self.Name #Read files and give back to data def for_loop(self, Name): with concurrent.futures.ProcessPoolExecutor() as ex: for Name_change in range (100): self.Name=Name results=[] results.clear() for_loop_result= [] for_loop_result.clear() for y in range (30): for x in range (30): results.append (ex.submit(self.file_read,y,x)) for f in concurrent.futures.as_completed(results): for_loop_result.append (f.result()) return for_loop_result def Start(): Name_change() print ("Done")
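The fix above boils down to creating the ProcessPoolExecutor once and reusing it for every name change instead of rebuilding the pool inside the loop. A stripped-down, hypothetical sketch of that structure follows (the real file reading and picture building are replaced by a placeholder). On Windows the `if __name__ == '__main__':` guard belongs in proba.py, the script that is actually launched, so the worker processes do not re-import and re-run the Kivy app; a module-level function is also easier to hand to worker processes than a bound method.

# proba2.py - simplified, hypothetical sketch
import concurrent.futures

def file_read(name, y, x):
    # placeholder for the real file reading / picture data
    return name * y * x

def run_all(executor, name_changes=range(100)):
    all_results = []
    for name in name_changes:                      # the same executor is reused for every name
        futures = [executor.submit(file_read, name, y, x)
                   for y in range(30) for x in range(30)]
        all_results.append([f.result() for f in concurrent.futures.as_completed(futures)])
    return all_results

# proba.py would create the pool once, e.g. inside the Button handler:
#     with concurrent.futures.ProcessPoolExecutor() as ex:
#         proba2.run_all(ex)
# and keep its own `if __name__ == '__main__':` guard around MainApp().run().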
Multiprocessing With kivy. A process in the process pool was terminated abruptly while the future was running or pending error
I have "A process in the process pool was terminated abruptly while the future was running or pending." error and I cant figure it out how to solve it. Pls Help me. I have 2 program "proba.py" is the kivy program and "proba2.py" is the file_read program. first program # proba.py for kivy from kivy.uix.widget import Widget from kivy.app import App import proba2 class Main(Widget): def __init__(self, **kwargs): super(Main, self).__init__(**kwargs) def Button(self): print ("W") proba2.File_read().Start() class MainApp(App): def build(self): return Main() if __name__ == '__main__': from kivy.lang import Builder Builder.load_string("""<Main> Button: on_press: root.Button()""") MainApp().run() second program # proba2.py for File_read import concurrent.futures class File_read(): def __init__(self, **kwargs): super(File_read, self).__init__(**kwargs) def file_read (self, y, x): return y*x*self.Name #Read files and give back to data def for_loop(self, Name): self.Name=Name results=[] results.clear() for_loop_result= [] for_loop_result.clear() with concurrent.futures.ProcessPoolExecutor() as ex: for y in range (30): for x in range (30): results.append (ex.submit(self.file_read,y,x)) for f in concurrent.futures.as_completed(results): for_loop_result.append (f.result()) return for_loop_result def Start(self): for Name_change in range (100): self.for_loop(Name_change) print ("Done") "A process in the process pool was terminated abruptly while the future was running or pending." This error stop my program but not immediately. Sometimes can finish the hole program some time stop in the middle of the loop. I read about if name == 'main' but cant use it when I call the other program with Button. I mean I don't know how to use it. This program read 14k file and make a picture of them. When this program finish, I change the file name and want to start over the program for make more pictures. This program is a short version because the original is too long. But the problem is same. How can I make stable this program? I work on windows 10 with python 3.10
[ "I find the solution.\nI take out the concurrent.futures from for cycle.\n# proba2.py for File_read\nimport concurrent.futures\nclass File_read():\ndef __init__(self, **kwargs):\n super(File_read, self).__init__(**kwargs)\n\ndef file_read (self, y, x):\n return y*x*self.Name #Read files and give back to data\n\ndef for_loop(self, Name):\n with concurrent.futures.ProcessPoolExecutor() as ex:\n for Name_change in range (100):\n self.Name=Name\n results=[]\n results.clear()\n for_loop_result= []\n for_loop_result.clear()\n \n for y in range (30):\n for x in range (30):\n results.append (ex.submit(self.file_read,y,x))\n\n for f in concurrent.futures.as_completed(results):\n for_loop_result.append (f.result())\n return for_loop_result\n\ndef Start():\n Name_change()\n print (\"Done\")\n\n" ]
[ 0 ]
[]
[]
[ "concurrent.futures", "kivy", "multiprocessing", "python" ]
stackoverflow_0074356551_concurrent.futures_kivy_multiprocessing_python.txt
Q: Unresolved reference "cv2" inside cv2 (cv2.cv2) I've looked around and people seem to have similar problems but none described my case exactly, and solutions that worked for them didn't seem to work for me (or there was no answer to the question at all). Using pycharm, after having installed opencv-python and opencv-contrib-python I noticed that import cv2 works, but when I tried accessing cv2.imread() pycharm complained about not being able to find it. So I went to the cv2 init file, which looks like this: import importlib from .cv2 import * from .data import * # wildcard import above does not import "private" variables like __version__ # this makes them available globals().update(importlib.import_module('cv2.cv2').__dict__) Pycharm detects an unresolved reference on the from .cv2 import * line and I imagine the same problem happens on the last line - I tried doing the following in a python console: import cv2 print(__version__) But I got a NameError, which seems to confirm my suspicion. As I wrote, I have tried installing opencv-contrib-python but that didn't seem to do anything and frankly I'm already out of ideas. Notes: - I'm on Windows 10 x64. - I'm using Python 3.6 x64. - I have a virtual environment set up on my Pycharm project. A: I'm no expert but the following line worked for me: import cv2.cv2 as cv2 Everything seems to work afterwards. Autocompletion is also back A: Have you installed opencv via terminal? For example, like this. $ pip install opencv-python $ pip install opencv-contrib-python I also experienced the same problem. If you use pycharm, you should install opencv via pycharm. File -> Settings... -> Project interpreter -> + A: I got this same issue. You need to try a couple of things. You need to simply import cv2 instead of cv2.cv2. Simply write "import cv2" If you have installed any other library such as cv before, then uninstall it first. The library that we need to install is opencv-python You need to install it via IDE not with the terminal. Steps are as follows: File -> Settings -> (Click on your project) -> Project Interpreter -> + -> (Type opencv-python) -> Download and install it -> It should work now. A: From Python Interpreter download the version of opencv-python 4.5.4.60 and opencv-contrib-python 4.5.5.64.
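One small point worth separating from the PyCharm inspection issue: the NameError in the question comes from printing __version__ as a bare name, while it is actually an attribute of the cv2 module. A quick sanity check that the package itself is installed and usable, regardless of what the IDE highlights, is:

import cv2

print(cv2.__version__)          # the version string lives on the module
print(cv2.__file__)             # where the package was actually loaded from
print(hasattr(cv2, "imread"))   # True means imread exists even if PyCharm cannot resolve it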
Unresolved reference "cv2" inside cv2 (cv2.cv2)
I've looked around and people seem to have similar problems but none described my case exactly, and solutions that worked for them didn't seem to work for me (or there was no answer to the question at all). Using pycharm, after having installed opencv-python and opencv-contrib-python I noticed that import cv2 works, but when I tried accessing cv2.imread() pycharm complained about not being able to find it. So I went to the cv2 init file, which looks like this: import importlib from .cv2 import * from .data import * # wildcard import above does not import "private" variables like __version__ # this makes them available globals().update(importlib.import_module('cv2.cv2').__dict__) Pycharm detects an unresolved reference on the from .cv2 import * line and I imagine the same problem happens on the last line - I tried doing the following in a python console: import cv2 print(__version__) But I got a NameError, which seems to confirm my suspicion. As I wrote, I have tried installing opencv-contrib-python but that didn't seem to do anything and frankly I'm already out of ideas. Notes: - I'm on Windows 10 x64. - I'm using Python 3.6 x64. - I have a virtual environment set up on my Pycharm project.
[ "I'm no expert but the following line worked for me:\nimport cv2.cv2 as cv2\n\nEverything seems to work afterwards. Autocompletion is also back\n", "Have you installed opencv via terminal?\nFor example, like this.\n$ pip install opencv-python\n$ pip install opencv-contrib-python\n\nI also experienced the same problem. \nIf you use pycharm, you should install opencv via pycharm.\nFile -> Settings... -> Project interpreter -> + \n", "I got this same issue. You need to try a couple of things.\n\nYou need to simply import cv2 instead of cv2.cv2. Simply write \"import cv2\"\nIf you have installed any other library such as cv before, then uninstall it first.\nThe library that we need to install is opencv-python\nYou need to install it via IDE not with the terminal. Steps are as follows:\nFile -> Settings -> (Click on your project) -> Project Interpreter -> + -> (Type opencv-python) -> Download and install it -> It should work now.\n\n", "From Python Interpreter download the version of opencv-python 4.5.4.60 and opencv-contrib-python 4.5.5.64.\n" ]
[ 3, 0, 0, 0 ]
[]
[]
[ "cv2", "opencv", "package", "python", "python_import" ]
stackoverflow_0051233491_cv2_opencv_package_python_python_import.txt
Q: I can't figure out why the while loop in this MPI code doesn't break I'm doing a parallelization exercise using mpi4py where 2 dice are thrown a defined number of times (divided by the processes, i.e, npp) and the dots are counted. The results are stored in a dictionary, the mean deviation is calculated and until the condition of mean_dev being less than 0.001 the number of throws is doubled. All of this works as expected, the problem is that the code doesn't quit. The condition is met, there's no more outputs, but the code hangs. from ctypes.wintypes import SIZE from dice import * #This is just a class that creates the dictionaries from random import randint import matplotlib.pyplot as plt import numpy as np from mpi4py import MPI from math import sqrt def simulation(f_events, f_sides, f_n_dice): f_X = dice(sides, n_dice).myDice() #Nested dictionary composed of dices (last dict stores the sum) for j in range(f_events): #for loop to handle all the dice throwings aka events n = [] #List to store index respective to number on each dice for i in range(1, f_n_dice+1): #for cycle for each dice k = randint(1, f_sides) #Random number n.append(k) f_X[i][k] += 1 #The index (k) related to each throw is increased for the dice (i) sum_throw = sum(n) #Sum of the last throw f_X[f_n_dice+1][sum_throw] += 1 #Sum dictionary "increases" the index respective to the sum of the last throw return f_X npp = int(4)//4 #Number of events divided by the number of processes sides = 6 #Number of sides per dice n_dice = 2 #Number of dices comm = MPI.COMM_WORLD #Communicator to handle point-to-point communication rank = comm.Get_rank() #Hierarchy of processes size = comm.Get_size() #Number of processes #-------------------- Parallelization portion of the code --------------------# seq = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) AUX = dict.fromkeys(seq, 0) mean_dev = 1 while True: msg = comm.bcast(npp, root = 0) print("---> msg: ", msg, " for rank ", rank) print("The mean dev for %d" %rank + " is: ", mean_dev) D = simulation(npp, sides, n_dice) Dp = comm.gather(D, root = 0) print("This is Dp: ", Dp) summ = 0 prob = [1/36, 2/36, 3/36, 4/36, 5/36, 6/36, 5/36, 4/36, 3/36, 2/36, 1/36] if rank==0: for p in range(0, size): for n in range(dice().min, dice().max+1): #Range from minimum sum possible to the maximum sum possible depending on the number of dices used AUX[n] += Dp[p][n_dice+1][n] #Adds the new data to the final sum dictionary #of the previously initiated nested dictionary print(Dp[p][n_dice+1]) print("The final dictionary is: ", AUX, sum(AUX[j] for j in AUX)) for i in range(dice().min, dice().max+1): exp = (prob[i-2])*(sum(AUX[j] for j in AUX)) x = (AUX[i]-exp)/exp summ = summ + pow(x, 2) mean_dev = (1/11)*sqrt(summ) print("The deviation for {} is {}.".format(sum(AUX[j] for j in AUX), mean_dev)) if mean_dev > 0.001: npp = 2*npp # new_msg = comm.bcast(npp, root = 0) # print("---> new_msg: ", new_msg, " for rank ", rank) else: break I'm stumped on this one. Thanks in advance for any input! 
The new code with the solution proposed by @victor-eijkhout: from ctypes.wintypes import SIZE from dice import * from random import randint import matplotlib.pyplot as plt import numpy as np from mpi4py import MPI from math import sqrt def simulation(f_events, f_sides, f_n_dice): f_X = dice(sides, n_dice).myDice() #Nested dictionary composed of dices (last dict stores the sum) for j in range(f_events): #for loop to handle all the dice throwings aka events n = [] #List to store index respective to number on each dice for i in range(1, f_n_dice+1): #for cycle for each dice k = randint(1, f_sides) #Random number n.append(k) f_X[i][k] += 1 #The index (k) related to each throw is increased for the dice (i) sum_throw = sum(n) #Sum of the last throw f_X[f_n_dice+1][sum_throw] += 1 #Sum dictionary "increases" the index respective to the sum of the last throw return f_X npp = int(4)//4 #Number of events divided by the number of processes sides = 6 #Number of sides per dice n_dice = 2 #Number of dices comm = MPI.COMM_WORLD #Communicator to handle point-to-point communication rank = comm.Get_rank() #Hierarchy of processes size = comm.Get_size() #Number of processes #-------------------- Parallelization portion of the code --------------------# seq = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) AUX = dict.fromkeys(seq, 0) mean_dev = 1 while True: msg = comm.bcast(npp, root = 0) #print("---> msg: ", msg, " for rank ", rank) D = simulation(npp, sides, n_dice) Dp = comm.gather(D, root = 0) #if Dp != None: print("This is Dp: ", Dp) #print("The mean dev for %d" %rank + " is: ", mean_dev) if rank==0: summ = 0 prob = [1/36, 2/36, 3/36, 4/36, 5/36, 6/36, 5/36, 4/36, 3/36, 2/36, 1/36] for p in range(0, size): for n in range(dice().min, dice().max+1): #Range from minimum sum possible to the maximum sum possible depending on the number of dices used AUX[n] += Dp[p][n_dice+1][n] #Adds the new data to the final sum dictionary #of the previously initiated nested dictionary print(Dp[p][n_dice+1]) print("The final dictionary is: ", AUX, sum(AUX[j] for j in AUX)) for i in range(dice().min, dice().max+1): exp = (prob[i-2])*(sum(AUX[j] for j in AUX)) x = (AUX[i]-exp)/exp summ = summ + pow(x, 2) mean_dev = (1/11)*sqrt(summ) print("The deviation for {} is {}.".format(sum(AUX[j] for j in AUX), mean_dev)) #new_mean_dev = comm.gather(mean_dev, root = 0) new_mean_dev = comm.bcast(mean_dev, root = 0) print("---> msg2: ", new_mean_dev, " for rank ", rank) if new_mean_dev < 0.001: break # new_msg = comm.bcast(npp, root = 0) # print("---> new_msg: ", new_msg, " for rank ", rank) else: npp = 2*npp print("The new npp is: ", npp) A: You are computing the mean deviation only on process zero, so that process will exit. However, the other processes do not get the value and so they never quit. You should broadcast the value after you compute it.
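The answer's point, reduced to a minimal runnable sketch: rank 0 is the only process that can compute the stopping criterion, so the decision has to be broadcast before anyone breaks, otherwise the other ranks keep waiting inside the collective calls. The work and the deviation below are stand-ins (a fake value that shrinks as npp grows), not the dice simulation.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

def do_work(npp):
    # placeholder for the per-rank simulation
    return 1.0 / npp

npp = 1
while True:
    npp = comm.bcast(npp, root=0)                  # every rank uses the same npp
    gathered = comm.gather(do_work(npp), root=0)

    mean_dev = None
    if rank == 0:
        mean_dev = sum(gathered) / len(gathered)   # placeholder reduction
    mean_dev = comm.bcast(mean_dev, root=0)        # now every rank knows the result

    if mean_dev < 0.001:
        break                                      # all ranks leave the loop together
    if rank == 0:
        npp *= 2

Run it with, for example, mpiexec -n 4 python sketch.py.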
I can't figure out why the while loop in this MPI code doesn't break
I'm doing a parallelization exercise using mpi4py where 2 dice are thrown a defined number of times (divided by the processes, i.e, npp) and the dots are counted. The results are stored in a dictionary, the mean deviation is calculated and until the condition of mean_dev being less than 0.001 the number of throws is doubled. All of this works as expected, the problem is that the code doesn't quit. The condition is met, there's no more outputs, but the code hangs. from ctypes.wintypes import SIZE from dice import * #This is just a class that creates the dictionaries from random import randint import matplotlib.pyplot as plt import numpy as np from mpi4py import MPI from math import sqrt def simulation(f_events, f_sides, f_n_dice): f_X = dice(sides, n_dice).myDice() #Nested dictionary composed of dices (last dict stores the sum) for j in range(f_events): #for loop to handle all the dice throwings aka events n = [] #List to store index respective to number on each dice for i in range(1, f_n_dice+1): #for cycle for each dice k = randint(1, f_sides) #Random number n.append(k) f_X[i][k] += 1 #The index (k) related to each throw is increased for the dice (i) sum_throw = sum(n) #Sum of the last throw f_X[f_n_dice+1][sum_throw] += 1 #Sum dictionary "increases" the index respective to the sum of the last throw return f_X npp = int(4)//4 #Number of events divided by the number of processes sides = 6 #Number of sides per dice n_dice = 2 #Number of dices comm = MPI.COMM_WORLD #Communicator to handle point-to-point communication rank = comm.Get_rank() #Hierarchy of processes size = comm.Get_size() #Number of processes #-------------------- Parallelization portion of the code --------------------# seq = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) AUX = dict.fromkeys(seq, 0) mean_dev = 1 while True: msg = comm.bcast(npp, root = 0) print("---> msg: ", msg, " for rank ", rank) print("The mean dev for %d" %rank + " is: ", mean_dev) D = simulation(npp, sides, n_dice) Dp = comm.gather(D, root = 0) print("This is Dp: ", Dp) summ = 0 prob = [1/36, 2/36, 3/36, 4/36, 5/36, 6/36, 5/36, 4/36, 3/36, 2/36, 1/36] if rank==0: for p in range(0, size): for n in range(dice().min, dice().max+1): #Range from minimum sum possible to the maximum sum possible depending on the number of dices used AUX[n] += Dp[p][n_dice+1][n] #Adds the new data to the final sum dictionary #of the previously initiated nested dictionary print(Dp[p][n_dice+1]) print("The final dictionary is: ", AUX, sum(AUX[j] for j in AUX)) for i in range(dice().min, dice().max+1): exp = (prob[i-2])*(sum(AUX[j] for j in AUX)) x = (AUX[i]-exp)/exp summ = summ + pow(x, 2) mean_dev = (1/11)*sqrt(summ) print("The deviation for {} is {}.".format(sum(AUX[j] for j in AUX), mean_dev)) if mean_dev > 0.001: npp = 2*npp # new_msg = comm.bcast(npp, root = 0) # print("---> new_msg: ", new_msg, " for rank ", rank) else: break I'm stumped on this one. Thanks in advance for any input! 
The new code with the solution proposed by @victor-eijkhout: from ctypes.wintypes import SIZE from dice import * from random import randint import matplotlib.pyplot as plt import numpy as np from mpi4py import MPI from math import sqrt def simulation(f_events, f_sides, f_n_dice): f_X = dice(sides, n_dice).myDice() #Nested dictionary composed of dices (last dict stores the sum) for j in range(f_events): #for loop to handle all the dice throwings aka events n = [] #List to store index respective to number on each dice for i in range(1, f_n_dice+1): #for cycle for each dice k = randint(1, f_sides) #Random number n.append(k) f_X[i][k] += 1 #The index (k) related to each throw is increased for the dice (i) sum_throw = sum(n) #Sum of the last throw f_X[f_n_dice+1][sum_throw] += 1 #Sum dictionary "increases" the index respective to the sum of the last throw return f_X npp = int(4)//4 #Number of events divided by the number of processes sides = 6 #Number of sides per dice n_dice = 2 #Number of dices comm = MPI.COMM_WORLD #Communicator to handle point-to-point communication rank = comm.Get_rank() #Hierarchy of processes size = comm.Get_size() #Number of processes #-------------------- Parallelization portion of the code --------------------# seq = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) AUX = dict.fromkeys(seq, 0) mean_dev = 1 while True: msg = comm.bcast(npp, root = 0) #print("---> msg: ", msg, " for rank ", rank) D = simulation(npp, sides, n_dice) Dp = comm.gather(D, root = 0) #if Dp != None: print("This is Dp: ", Dp) #print("The mean dev for %d" %rank + " is: ", mean_dev) if rank==0: summ = 0 prob = [1/36, 2/36, 3/36, 4/36, 5/36, 6/36, 5/36, 4/36, 3/36, 2/36, 1/36] for p in range(0, size): for n in range(dice().min, dice().max+1): #Range from minimum sum possible to the maximum sum possible depending on the number of dices used AUX[n] += Dp[p][n_dice+1][n] #Adds the new data to the final sum dictionary #of the previously initiated nested dictionary print(Dp[p][n_dice+1]) print("The final dictionary is: ", AUX, sum(AUX[j] for j in AUX)) for i in range(dice().min, dice().max+1): exp = (prob[i-2])*(sum(AUX[j] for j in AUX)) x = (AUX[i]-exp)/exp summ = summ + pow(x, 2) mean_dev = (1/11)*sqrt(summ) print("The deviation for {} is {}.".format(sum(AUX[j] for j in AUX), mean_dev)) #new_mean_dev = comm.gather(mean_dev, root = 0) new_mean_dev = comm.bcast(mean_dev, root = 0) print("---> msg2: ", new_mean_dev, " for rank ", rank) if new_mean_dev < 0.001: break # new_msg = comm.bcast(npp, root = 0) # print("---> new_msg: ", new_msg, " for rank ", rank) else: npp = 2*npp print("The new npp is: ", npp)
[ "You are computing the mean deviation only on process zero, so that process will exit. However, the other processes do not get the value and so they never quit. You should broadcast the value after you compute it.\n" ]
[ 1 ]
[ "You are breaking out of your if statement. Just replace while True: with while mean_dev > 0.001: and you should be good. You can also just do an assignment at the end rather than wrapping it in the if.\nIf that doesn’t work it simply means mean_dev is always greater than 0.001. You calculate mean_dev as (1/11)*sqrt(sum …). Not under the whole algorithm, if the minimum sum of 2 dice is 2, then mean_dev will not drop below 0.14 or so. Try putting in a print statement and print out mean_dev each time through the loop and see if it’s working as expected. Should you be dividing mean_dev by npp each time or something like that?\nAs a general rule, these kinds of problems where one is iterating to find a closer approximation generally terminate when the change in the estimate becomes very small. Should you be stopping when the change in mean_dev is less than 0.001? You would need to do something like abs(last_mean_dev-mean_dev)<0.001 .\n" ]
[ -1 ]
[ "mpi", "mpi4py", "python" ]
stackoverflow_0074576200_mpi_mpi4py_python.txt
Q: How to encrypt and decrypt .csv file to .csv.pgp using python script I am trying to encrypt a file using pgpy. I am able to encrypt the content of files but unable to save it. I am trying to get output as .csv.pgp Getting this error: encrypted_file.write(encrypted_f_t_e) TypeError: a bytes-like object is required, not 'PGPMessage' import pgpy from pgpy import PGPKey, PGPMessage PUBLIC_KEY_FILE = 'myPublicKey.asc' pub_key, _ = pgpy.PGPKey.from_file(str(PUBLIC_KEY_FILE)) FILE_TO_ENCRYPT = 'data.csv' f_t_e = pgpy.PGPMessage.new(str(FILE_TO_ENCRYPT),file=True) print(f_t_e.is_encrypted) encrypted_f_t_e = pub_key.encrypt(f_t_e) print(encrypted_f_t_e) with open('data.csv.pgp', 'wb') as encrypted_file: encrypted_file.write(encrypted_f_t_e) A: You need to either write bytes or use w file mode (not wb): Bytes option with open('data.csv.pgp', 'wb') as encrypted_file: encrypted_file.write(bytes(encrypted_f_t_e)) Text option with open('data.csv.pgp', 'w') as encrypted_file: encrypted_file.write(str(encrypted_f_t_e))
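The title also mentions decrypting, so for symmetry here is a hedged sketch of the reverse direction with pgpy. It assumes the matching private key is stored in myPrivateKey.asc and protected with a passphrase (drop the unlock step if it is not); the file names and passphrase are placeholders following the question, not values from it.

import pgpy

priv_key, _ = pgpy.PGPKey.from_file('myPrivateKey.asc')      # assumed private-key file
encrypted_message = pgpy.PGPMessage.from_file('data.csv.pgp')

with priv_key.unlock('my-passphrase'):                        # assumed passphrase
    decrypted = priv_key.decrypt(encrypted_message)

plaintext = decrypted.message                                 # bytes or str depending on the original message
mode = 'wb' if isinstance(plaintext, (bytes, bytearray)) else 'w'
with open('data_decrypted.csv', mode) as f:
    f.write(plaintext)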
How to encrypt and decrypt .csv file to .csv.pgp using python script
I am trying to encrypt a file using pgpy. I am able to encrypt the content of files but unable to save it. I am trying to get output as .csv.pgp Getting this error: encrypted_file.write(encrypted_f_t_e) TypeError: a bytes-like object is required, not 'PGPMessage' import pgpy from pgpy import PGPKey, PGPMessage PUBLIC_KEY_FILE = 'myPublicKey.asc' pub_key, _ = pgpy.PGPKey.from_file(str(PUBLIC_KEY_FILE)) FILE_TO_ENCRYPT = 'data.csv' f_t_e = pgpy.PGPMessage.new(str(FILE_TO_ENCRYPT),file=True) print(f_t_e.is_encrypted) encrypted_f_t_e = pub_key.encrypt(f_t_e) print(encrypted_f_t_e) with open('data.csv.pgp', 'wb') as encrypted_file: encrypted_file.write(encrypted_f_t_e)
[ "You need to either write bytes or use w file mode (not wb):\n\nBytes option\n\nwith open('data.csv.pgp', 'wb') as encrypted_file:\n encrypted_file.write(bytes(encrypted_f_t_e))\n\n\nText option\n\nwith open('data.csv.pgp', 'w') as encrypted_file:\n encrypted_file.write(str(encrypted_f_t_e))\n\n" ]
[ 0 ]
[]
[]
[ "cryptography", "encryption", "openpgp", "pgp", "python" ]
stackoverflow_0073970565_cryptography_encryption_openpgp_pgp_python.txt
Q: Adding different colors for markers in plotly I have a graph that looks like this: I want to sort the color combinations for the dots on this, to achieve something like one color for all the versions that start with 17, different one for 18 and lastly the 20. I don't know if I can do this in plotly since it is very specific and found no information on this. Is it possible to also add different colors for the sub versions, like for 17 we have different categories like 17.2.3, 17.2.2 and so on. here is my data: Days Difference commitDate Year-Month 18538 1291 2021-01-25 11:15:48 2020-01 18539 1135 2020-11-30 05:11:41 2020-11 18540 1100 2020-08-17 07:22:54 2020-08 18541 900 2020-08-17 07:12:05 2020-01 18542 340 2020-01-09 06:21:03 2020-01 18543 203 2019-11-20 06:03:28 2019-11 18544 120 2019-11-15 02:50:28 2019-11 This is the code I have written so far: data1= final_api.query("info_title=='Avi CertificateManagementProfile Object API'") data1['commitDate'] = pd.to_datetime(final_api['commitDate']) import plotly.graph_objects as go fig = go.Figure() fig.add_trace(go.Scatter(mode='lines', x=data1["commitDate"], y=data1["Days_difference"], line_color='black', line_width=1, line_shape='vh', showlegend=False ) ) fig.add_trace(go.Scatter(mode='markers', x=data1["commitDate"], y=data1["Days_difference"], marker=dict(color=data1['day'], colorscale='plasma', size=10), showlegend=False ) ) for _,row in data1.iterrows(): fig.add_annotation( go.layout.Annotation( x=row["commitDate"], y=row["Days_difference"], text=row['info_version'], showarrow=False, align='center', yanchor='bottom', yshift=10, textangle=-90) ) fig.update_layout(template='plotly_white',title_text=' Version Change in Avi CertificateManagementProfile Object API over its Age',title_x=0.5, xaxis_title='Year-Month', yaxis_title='Age of the API (in days)', xaxis_tickformat = '%d %B (%a)<br>%Y', height=700, width=1300) fig.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True) fig.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True) fig.show() Any help or guidance will be appreciated. A: Currently you are assigning marker color based on the 'day' column in your argument marker=dict(color=data1['day'], colorscale='plasma', size=10), but it sounds like you want to assign the color based on the major version. You can extract the major version from the info_version column, and store it in a new column called "major_version": data1['major_version'] = data1['info_version'].str.split('.').str.get(0) Then I would suggest using px.scatter which makes it easier to pass a categorical column as a color (see the documentation here). Then you would restructure your code to plot the markers first, and then the lines connecting the markers: import plotly.express as px fig = px.scatter(data1, x='commitDate', y='Days_difference', color='major_version') fig.add_trace(go.Scatter(mode='lines', x=data1["commitDate"], y=data1["Days_difference"], line_color='black', line_width=1, line_shape='vh', showlegend=False ) ) fig.update_layout(showlegend=False) And keep the rest of your code the same.
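If fixed colours per major version are wanted (one colour for every 17.x, another for 18.x, another for 20.x) rather than letting Plotly pick them, the answer's major_version idea can be combined with an explicit colour map. The snippet below is a self-contained sketch with made-up rows; the hex colours and the tiny DataFrame are placeholders, not values from the question.

import pandas as pd
import plotly.express as px

data1 = pd.DataFrame({
    "commitDate": pd.to_datetime(["2019-11-15", "2019-11-20", "2020-01-09", "2020-08-17"]),
    "Days_difference": [120, 203, 340, 900],
    "info_version": ["17.2.2", "17.2.3", "18.1.0", "20.1.1"],
})

data1["major_version"] = data1["info_version"].str.split(".").str.get(0)

color_map = {"17": "#1f77b4", "18": "#ff7f0e", "20": "#2ca02c"}   # one fixed colour per major version

fig = px.scatter(data1, x="commitDate", y="Days_difference",
                 color="major_version", color_discrete_map=color_map)
fig.update_layout(template="plotly_white")
fig.show()

Passing color='info_version' instead would give each sub-version (17.2.2, 17.2.3, ...) its own colour.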
Adding different colors for markers in plotly
I have a graph that looks like this: I want to sort the color combinations for the dots on this, to achieve something like one color for all the versions that start with 17, different one for 18 and lastly the 20. I don't know if I can do this in plotly since it is very specific and found no information on this. Is it possible to also add different colors for the sub versions, like for 17 we have different categories like 17.2.3, 17.2.2 and so on. here is my data: Days Difference commitDate Year-Month 18538 1291 2021-01-25 11:15:48 2020-01 18539 1135 2020-11-30 05:11:41 2020-11 18540 1100 2020-08-17 07:22:54 2020-08 18541 900 2020-08-17 07:12:05 2020-01 18542 340 2020-01-09 06:21:03 2020-01 18543 203 2019-11-20 06:03:28 2019-11 18544 120 2019-11-15 02:50:28 2019-11 This is the code I have written so far: data1= final_api.query("info_title=='Avi CertificateManagementProfile Object API'") data1['commitDate'] = pd.to_datetime(final_api['commitDate']) import plotly.graph_objects as go fig = go.Figure() fig.add_trace(go.Scatter(mode='lines', x=data1["commitDate"], y=data1["Days_difference"], line_color='black', line_width=1, line_shape='vh', showlegend=False ) ) fig.add_trace(go.Scatter(mode='markers', x=data1["commitDate"], y=data1["Days_difference"], marker=dict(color=data1['day'], colorscale='plasma', size=10), showlegend=False ) ) for _,row in data1.iterrows(): fig.add_annotation( go.layout.Annotation( x=row["commitDate"], y=row["Days_difference"], text=row['info_version'], showarrow=False, align='center', yanchor='bottom', yshift=10, textangle=-90) ) fig.update_layout(template='plotly_white',title_text=' Version Change in Avi CertificateManagementProfile Object API over its Age',title_x=0.5, xaxis_title='Year-Month', yaxis_title='Age of the API (in days)', xaxis_tickformat = '%d %B (%a)<br>%Y', height=700, width=1300) fig.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True) fig.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True) fig.show() Any help or guidance will be appreciated.
[ "Currently you are assigning marker color based on the 'day' column in your argument marker=dict(color=data1['day'], colorscale='plasma', size=10), but it sounds like you want to assign the color based on the major version.\nYou can extract the major version from the info_version column, and store it in a new column called \"major_version\":\ndata1['major_version'] = data1['info_version'].str.split('.').str.get(0)\n\nThen I would suggest using px.scatter which makes it easier to pass a categorical column as a color (see the documentation here).\nThen you would restructure your code to plot the markers first, and then the lines connecting the markers:\nimport plotly.express as px\n\nfig = px.scatter(data1, x='commitDate', y='Days_difference', color='major_version')\n\nfig.add_trace(go.Scatter(mode='lines',\n x=data1[\"commitDate\"],\n y=data1[\"Days_difference\"],\n line_color='black',\n line_width=1,\n line_shape='vh',\n showlegend=False\n )\n )\n\nfig.update_layout(showlegend=False)\n\nAnd keep the rest of your code the same.\n" ]
[ 1 ]
[]
[]
[ "pandas", "plotly", "python" ]
stackoverflow_0074575385_pandas_plotly_python.txt
Q: Pytest cross suite websocket session I'm designing an automated test suite to simulate a client which logs in to the backend via api rest and then opens up a websocket communication. I have to test different features over REST and Websocket. Currently I'm performing each websocket test like this: -The client logs in -The ws communication starts -It sends a ws message and awaits a response -It checks the respose schema and asserts the result -The ws connection is closed -The test end My problem: When I run multiple websocket tests as I described above, they open and close the websocket communication several times and my test client ends up being "Blacklisted" because of the irregular behaveour, and from there is not able to reconnect via ws for a considerable time period. My question: How do I open a websocket connection and keep it open and active across all my tests? I'm using pytest with "requests" module for api calls and "websockets" module for ws communication. I've tried to split the python process into two sub-processes with "multiprosessig" module but I'm quite lost here because I'm not able yet to commuicate the pytest process with the websocket process to send the messages and retrive the responses My websocket connection logic is the following code: async def websocket_connection(device: Device, cmd_list: list[WebsocketMsg] = None): init_cmd = WsInitCommand(device) cmd_list.insert(0, init_cmd) async def wait_for_correct_response(ws_connection, msj_id: str) -> dict: response_received = False ws_response: dict = {} while not response_received: ws_response = json.loads(await ws_connection.recv()) if 'id' in ws_response and ws_response['id'] == msj_id: response_received = True return ws_response async with websockets.connect(init_cmd.url, subprotocols=init_cmd.sub_protocols) as websocket: for cmd in cmd_list: await websocket.send(str(cmd.message)) msg_response: dict = await wait_for_correct_response(websocket, cmd.msg_id) return True A: Use pytest fixture on a session scope to share singleton websocket connection across tests https://docs.pytest.org/en/stable/reference/fixtures.html#higher-scoped-fixtures-are-executed-first. Omit multiprocesses and splitting into two processes as it would bring additional complexity and will be tricky to implement. UPD: answering the comment with this example import pytest class WS: pass @pytest.fixture(scope="session") def websocket_conn(): ws = WS() # establish connection print("This is the same object for each test", id(ws)) yield ws # do some cleanup, close connection del ws def test_something(websocket_conn): print("Receiving data with", websocket_conn) websocket_conn.state = "modified" print("Still the same", id(websocket_conn)) def test_something_else(websocket_conn): print("Sending data with", websocket_conn) print("State preserved:", websocket_conn.state) print("Still the same", id(websocket_conn)) tests/test_ws.py::test_something This is the same object for each test 4498209520 Receiving data with <tests.test_ws.WS object at 0x10c1d3af0> Still the same 4498209520 PASSED tests/test_ws.py::test_something_else Sending data with <tests.test_ws.WS object at 0x10c1d3af0> State preserved: modified Still the same 4498209520 PASSED
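To connect the fixture idea to the websockets package used in the question (rather than the dummy WS class), one possible shape is a session-scoped event loop plus a session-scoped connection, with each test driving the coroutine calls through run_until_complete. This is only a sketch: the URL, the payloads and the assumption that the tests themselves are synchronous are all placeholders, and the fixture deliberately avoids the name event_loop so it does not clash with pytest-asyncio if that plugin is installed.

# conftest.py (sketch)
import asyncio
import pytest
import websockets

WS_URL = "wss://example.test/socket"       # placeholder URL

@pytest.fixture(scope="session")
def session_loop():
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()

@pytest.fixture(scope="session")
def ws_connection(session_loop):
    conn = session_loop.run_until_complete(websockets.connect(WS_URL))
    yield conn
    session_loop.run_until_complete(conn.close())

# test_example.py (sketch)
def test_send_and_receive(ws_connection, session_loop):
    session_loop.run_until_complete(ws_connection.send('{"id": "1"}'))
    reply = session_loop.run_until_complete(ws_connection.recv())
    assert reply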
Pytest cross suite websocket session
I'm designing an automated test suite to simulate a client which logs in to the backend via api rest and then opens up a websocket communication. I have to test different features over REST and Websocket. Currently I'm performing each websocket test like this: -The client logs in -The ws communication starts -It sends a ws message and awaits a response -It checks the respose schema and asserts the result -The ws connection is closed -The test end My problem: When I run multiple websocket tests as I described above, they open and close the websocket communication several times and my test client ends up being "Blacklisted" because of the irregular behaveour, and from there is not able to reconnect via ws for a considerable time period. My question: How do I open a websocket connection and keep it open and active across all my tests? I'm using pytest with "requests" module for api calls and "websockets" module for ws communication. I've tried to split the python process into two sub-processes with "multiprosessig" module but I'm quite lost here because I'm not able yet to commuicate the pytest process with the websocket process to send the messages and retrive the responses My websocket connection logic is the following code: async def websocket_connection(device: Device, cmd_list: list[WebsocketMsg] = None): init_cmd = WsInitCommand(device) cmd_list.insert(0, init_cmd) async def wait_for_correct_response(ws_connection, msj_id: str) -> dict: response_received = False ws_response: dict = {} while not response_received: ws_response = json.loads(await ws_connection.recv()) if 'id' in ws_response and ws_response['id'] == msj_id: response_received = True return ws_response async with websockets.connect(init_cmd.url, subprotocols=init_cmd.sub_protocols) as websocket: for cmd in cmd_list: await websocket.send(str(cmd.message)) msg_response: dict = await wait_for_correct_response(websocket, cmd.msg_id) return True
[ "Use pytest fixture on a session scope to share singleton websocket connection across tests https://docs.pytest.org/en/stable/reference/fixtures.html#higher-scoped-fixtures-are-executed-first.\nOmit multiprocesses and splitting into two processes as it would bring additional complexity and will be tricky to implement.\nUPD: answering the comment with this example\nimport pytest\n\nclass WS:\n pass\n\n@pytest.fixture(scope=\"session\")\ndef websocket_conn():\n ws = WS()\n # establish connection\n print(\"This is the same object for each test\", id(ws))\n yield ws\n # do some cleanup, close connection\n del ws\n\ndef test_something(websocket_conn):\n print(\"Receiving data with\", websocket_conn)\n websocket_conn.state = \"modified\"\n print(\"Still the same\", id(websocket_conn))\n\ndef test_something_else(websocket_conn):\n print(\"Sending data with\", websocket_conn)\n print(\"State preserved:\", websocket_conn.state)\n print(\"Still the same\", id(websocket_conn))\n\ntests/test_ws.py::test_something This is the same object for each test 4498209520\nReceiving data with <tests.test_ws.WS object at 0x10c1d3af0>\nStill the same 4498209520\nPASSED\ntests/test_ws.py::test_something_else Sending data with <tests.test_ws.WS object at 0x10c1d3af0>\nState preserved: modified\nStill the same 4498209520\nPASSED\n\n" ]
[ 1 ]
[]
[]
[ "automation", "pytest", "python", "testing", "websocket" ]
stackoverflow_0074577526_automation_pytest_python_testing_websocket.txt
Q: Returning reverse of a string using stack data structure My program does return the reverse, but as a deque rather than a string; I want to convert it to a string type. def func(str_input): s1 = deque(str_input) s2 = deque() for i in range(len(str_input)): s2.append(s1[-1]) s1.pop() return s2 func("hello") #returns deque(['o', 'l', 'l', 'e', 'h']) Also, would someone be able to explain why this solution "https://github.com/codebasics/data-structures-algorithms-python/blob/master/data_structures/5_Stack/Exercise/reverse_string.py" is considered ideal and optimal for this question? It seems to need a lot more code, but it is the popular solution and I'm not sure why. Using str() also just keeps the "deque([list])" wrapper in the string; I want only the [list] part so I can turn it into a plain string. EDIT________________________ OK, I made some changes and it now returns a string, but my second question about the linked solution still stands. Thank you in advance! Here is my new solution def somefuc(s): stack = deque(s) reverse = '' while len(stack) != 0: reverse += stack.pop() return reverse somefuc("hello") A: If you don't want a deque result, just use variable[::-1]; it returns the given string reversed.
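To make the comparison concrete, here are the three variants side by side; the slice is the idiomatic one, and the explicit stack is mainly useful as a data-structure exercise, which is presumably why the linked solution spells it out.

from collections import deque

s = "hello"

print(s[::-1])                  # 'olleh' - slicing with a negative step
print("".join(reversed(s)))     # 'olleh' - reversed iterator joined back into a str

stack = deque(s)                # explicit stack, same result
out = ""
while stack:
    out += stack.pop()
print(out)                      # 'olleh'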
Returning reverse of a string using stack data structure
My program does return the reverse but in stack form. I want to convert it to a string type def func(str_input): s1 = deque(str_input) s2 = deque() for i in range(len(str_input)): s2.append(s1[-1]) s1.pop() return s2 func("hello") #returns deque(['o', 'l', 'l', 'e', 'h']) Also, would someone be able to explain why this solution "https://github.com/codebasics/data-structures-algorithms-python/blob/master/data_structures/5_Stack/Exercise/reverse_string.py" is ideal and optimal for this question? There seems to be a lot more code needed, but it is the popular solution,not sure why. using str() also just keeps the "deque([list])" as a string. I just want the [list] part out so I can make it a string. EDIT________________________ Ok I made some changes and it returns as a string, however my second question about the linked solution still stands. Thank you in advance! Here is my new solution def somefuc(s): stack = deque(s) reverse = '' while len(stack) != 0: reverse +=stack.pop() return reverse somefuc("hello")
[ "you don't want it to be a list? if so just use variable[::-1] it will return the given str but reversed\n" ]
[ 0 ]
[]
[]
[ "data_structures", "python", "stack" ]
stackoverflow_0074577709_data_structures_python_stack.txt
Q: list operation using function in python Ask the user for which team member to assign task and assign it, display the output as team member name and task assigned Is there any solution using without For and While loop?. Please let me know if there is any solution. Member = ["Gahininath", "Vighnesh", "Bhargav", "Amit", "Rahul"] def myfunction(Member): User_Input_Member = input('enter member name') if User_Input_Member in Member: for i in Member: if i==User_Input_Member: y = input('Enter Task what you want tparticularpmember memeber') res = i + ' ' + y x = ':' print(res + x + 'Task assign successfully to the member') count=input('How many member you wnat to select to assign task today') y=int(count) i=1 final_list=[] while y >= i: myfunction(Member) i=i+1 print('Here is your final memeber list with their task ') print(final_list) Output: ['Amit will solve 1st problem', 'Rahul will solve 2nd problem','Rohit will solve 3rd problem'] A: You could achieve the same basic outcome without a for or while loop using recursion - example below. However so long as you want the process to repeat a certain number of times, you're looping - the only difference between my approach below and a for or while loop is the manner in which the same process is repeated. from collections import defaultdict # Define a list of team members members = ["Gahininath", "Vighnesh", "Bhargav", "Amit", "Rahul"] # Create a function that accepts a list of team members, and optionally # a pre-populated "final_list", and then asks the user what tasks to assign, # and whether they are finished or not. def myfunction(members, final_list = None): if final_list is None: final_list = defaultdict(lambda: []) member = input('Pick a team member: ') if member not in members: print ('That is not a valid team member.') else: task = input('What would you like them to do? ') if task: final_list[member].append(task) else: print ('No task entered.') another = input('Would you like to assign another task? (y/n)') if another.lower() == 'y': return myfunction(members, final_list) else: print (f'Here is your final list of members and their tasks: {list(final_list.items())}') return final_list # call the above function, saving the final_list to a new variable. final_list = myfunction(members) Some example runs: With a single member and task: Pick a team member: Amit What would you like them to do? draw Would you like to assign another task? (y/n)n Here is your final list of members and their tasks: [('Amit', ['draw'])] With two members and tasks: Pick a team member: Amit What would you like them to do? draw Would you like to assign another task? (y/n)y Pick a team member: Vighnesh What would you like them to do? fly Would you like to assign another task? (y/n)n Here is your final list of members and their tasks: [('Amit', ['draw']), ('Vighnesh', ['fly'])] With three members and tasks, but the second name entered is not in the list of members: Pick a team member: Amit What would you like them to do? draw Would you like to assign another task? (y/n)y Pick a team member: notaname That is not a valid team member. Would you like to assign another task? (y/n)y Pick a team member: Vighnesh What would you like them to do? fly Would you like to assign another task? (y/n)n Here is your final list of members and their tasks: [('Amit', ['draw']), ('Vighnesh', ['fly'])] With one team member, but no task entered: Pick a team member: Amit What would you like them to do? No task entered. Would you like to assign another task? 
(y/n) Here is your final list of members and their tasks: [] Finally with three team members, but two of them are the same: Pick a team member: Amit What would you like them to do? draw Would you like to assign another task? (y/n)y Pick a team member: Amit What would you like them to do? fly Would you like to assign another task? (y/n)y Pick a team member: Vighnesh What would you like them to do? cycle Would you like to assign another task? (y/n)n Here is your final list of members and their tasks: [('Amit', ['draw', 'fly']), ('Vighnesh', ['cycle'])] So this seems to do what you want, without a for or while loop in sight - but computationally there's no difference between using a while or for loop and doing what you're doing here - in fact the above approach may be more locally memory intensive if you have a very long lists of tasks.
list operation using function in python
Ask the user for which team member to assign task and assign it, display the output as team member name and task assigned Is there any solution using without For and While loop?. Please let me know if there is any solution. Member = ["Gahininath", "Vighnesh", "Bhargav", "Amit", "Rahul"] def myfunction(Member): User_Input_Member = input('enter member name') if User_Input_Member in Member: for i in Member: if i==User_Input_Member: y = input('Enter Task what you want tparticularpmember memeber') res = i + ' ' + y x = ':' print(res + x + 'Task assign successfully to the member') count=input('How many member you wnat to select to assign task today') y=int(count) i=1 final_list=[] while y >= i: myfunction(Member) i=i+1 print('Here is your final memeber list with their task ') print(final_list) Output: ['Amit will solve 1st problem', 'Rahul will solve 2nd problem','Rohit will solve 3rd problem']
[ "You could achieve the same basic outcome without a for or while loop using recursion - example below. However so long as you want the process to repeat a certain number of times, you're looping - the only difference between my approach below and a for or while loop is the manner in which the same process is repeated.\nfrom collections import defaultdict\n\n# Define a list of team members\nmembers = [\"Gahininath\", \"Vighnesh\", \"Bhargav\", \"Amit\", \"Rahul\"]\n\n# Create a function that accepts a list of team members, and optionally\n# a pre-populated \"final_list\", and then asks the user what tasks to assign, \n# and whether they are finished or not. \n\ndef myfunction(members, final_list = None):\n if final_list is None:\n final_list = defaultdict(lambda: [])\n member = input('Pick a team member: ')\n if member not in members:\n print ('That is not a valid team member.')\n else:\n task = input('What would you like them to do? ')\n if task:\n final_list[member].append(task)\n else:\n print ('No task entered.')\n another = input('Would you like to assign another task? (y/n)')\n if another.lower() == 'y':\n return myfunction(members, final_list)\n else:\n print (f'Here is your final list of members and their tasks: {list(final_list.items())}')\n return final_list\n\n# call the above function, saving the final_list to a new variable. \nfinal_list = myfunction(members)\n\nSome example runs:\nWith a single member and task:\nPick a team member: Amit\n\nWhat would you like them to do? draw\n\nWould you like to assign another task? (y/n)n\nHere is your final list of members and their tasks: [('Amit', ['draw'])]\n\nWith two members and tasks:\nPick a team member: Amit\n\nWhat would you like them to do? draw\n\nWould you like to assign another task? (y/n)y\n\nPick a team member: Vighnesh\n\nWhat would you like them to do? fly\n\nWould you like to assign another task? (y/n)n\nHere is your final list of members and their tasks: [('Amit', ['draw']), ('Vighnesh', ['fly'])]\n\nWith three members and tasks, but the second name entered is not in the list of members:\nPick a team member: Amit\n\nWhat would you like them to do? draw\n\nWould you like to assign another task? (y/n)y\n\nPick a team member: notaname\nThat is not a valid team member.\n\nWould you like to assign another task? (y/n)y\n\nPick a team member: Vighnesh\n\nWhat would you like them to do? fly\n\nWould you like to assign another task? (y/n)n\nHere is your final list of members and their tasks: [('Amit', ['draw']), ('Vighnesh', ['fly'])]\n\nWith one team member, but no task entered:\nPick a team member: Amit\n\nWhat would you like them to do? \nNo task entered.\n\nWould you like to assign another task? (y/n)\nHere is your final list of members and their tasks: []\n\nFinally with three team members, but two of them are the same:\nPick a team member: Amit\n\nWhat would you like them to do? draw\n\nWould you like to assign another task? (y/n)y\n\nPick a team member: Amit\n\nWhat would you like them to do? fly\n\nWould you like to assign another task? (y/n)y\n\nPick a team member: Vighnesh\n\nWhat would you like them to do? cycle\n\nWould you like to assign another task? 
(y/n)n\nHere is your final list of members and their tasks: [('Amit', ['draw', 'fly']), ('Vighnesh', ['cycle'])]\n\nSo this seems to do what you want, without a for or while loop in sight - but computationally there's no difference between using a while or for loop and doing what you're doing here - in fact the above approach may be more locally memory intensive if you have a very long lists of tasks.\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074574184_python.txt
Q: maximizing loglikelihood function with 3 parameters I would like to find the a, b, and c values that maximize this function (shown in the attached image), subject to the constraint (also shown in an attached image). The W values are collected from a column of a dataframe, and a, b, c are the parameters that I need to find. Help please? Currently I am using Python and trying to "guess" a, b, c pairs over about a thousand simulations, then plotting the a, b, c values that give the maximum function value, hoping that they will converge to a certain value. However, this doesn't happen. A: This is a linear programming problem. In Python, there is a package PuLP, you can try it: https://pypi.org/project/PuLP/ And you can find more information about Linear Programming: Optimization With Python here: https://realpython.com/linear-programming-python/
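A hedged sketch of an alternative, in case the objective is not actually linear in a, b, and c (a log-likelihood usually is not): a general-purpose optimizer such as scipy.optimize.minimize can replace the guess-and-plot loop. Everything below is illustrative — neg_log_likelihood is a placeholder to be swapped for the real formula from the image, w is synthetic stand-in data, and the positivity bounds are an assumed constraint.

import numpy as np
from scipy.optimize import minimize

# Synthetic stand-in for the dataframe column W mentioned in the question.
w = np.random.default_rng(0).uniform(0.1, 1.0, size=200)

def neg_log_likelihood(params):
    a, b, c = params
    # Placeholder objective with a well-defined maximum (a Poisson-style
    # log-likelihood with unit counts); replace it with the actual formula.
    # SciPy minimizes, so we return the negative of the quantity to maximize.
    lam = a * w + b * w**2 + c
    return -np.sum(np.log(lam) - lam)

# Assumed constraint: keep the parameters positive so the log() stays defined.
result = minimize(neg_log_likelihood, x0=[1.0, 1.0, 1.0],
                  bounds=[(1e-6, None)] * 3, method="L-BFGS-B")
print(result.x)     # estimated (a, b, c)
print(-result.fun)  # maximized (placeholder) log-likelihood value

If the objective and constraint really are linear, the PuLP route from the answer is the better fit; the point of the sketch is only that the parameters should come from an optimizer rather than from plotting random guesses.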
maximizing loglikelihood function with 3 parameters
I would like to find the a, b, and c values that maximize this function (shown in the attached image), subject to the constraint (also shown in an attached image). The W values are collected from a column of a dataframe, and a, b, c are the parameters that I need to find. Help please? Currently I am using Python and trying to "guess" a, b, c pairs over about a thousand simulations, then plotting the a, b, c values that give the maximum function value, hoping that they will converge to a certain value. However, this doesn't happen.
[ "This is a linear programming problem. In Python, there is a package PuLP, you can try it:\nhttps://pypi.org/project/PuLP/\nAnd you can find more information about Linear Programming: Optimization With Python here:\nhttps://realpython.com/linear-programming-python/\n" ]
[ 0 ]
[]
[]
[ "log_likelihood", "python", "statistics" ]
stackoverflow_0074569095_log_likelihood_python_statistics.txt
Q: Which forecast method with only twenty datapoints (yearly)? I am faced with the challenge of forecasting a specific market. For this purpose, I have market figures for the last 20 years. However, these are only available on an annual basis, so there is no possibility of obtaining the data on a quarterly or monthly basis. Are there any suggestions for this? The goal is to forecast the next 2-3 years from these data. A: If I am not wrong with assuming the following in your case: There is only 20 years of data on annual basis without seasonality details, Forecasting is to be made on annual basis for the next 2-3 years, No more data is either available or can be found, Then, you may try one of the techniques mentioned here: https://docs.oracle.com/cd/E57185_01/CBREG/ch06s02s02s01.html The accuracy and performance of the model may not be significant due to the constraints.
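To make the suggestion concrete, here is a minimal sketch of one classic small-sample technique (Holt's linear trend, i.e. double exponential smoothing) applied to 20 annual points. The series below is made up, and statsmodels' Holt is only one reasonable choice among the methods listed at the link.

import numpy as np
from statsmodels.tsa.holtwinters import Holt

# Hypothetical stand-in for 20 years of annual market figures.
rng = np.random.default_rng(1)
values = 100 + 3 * np.arange(20) + rng.normal(0, 5, size=20)

# Holt's linear trend handles a short annual series with no seasonal component.
fit = Holt(values, initialization_method="estimated").fit()
print(fit.forecast(3))  # point forecasts for the next 3 years

With only 20 observations the forecast uncertainty will be large, so it is worth comparing this against an even simpler baseline such as a straight-line (linear regression) extrapolation.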
Which forecast method with only twenty datapoints (yearly)?
I am faced with the challenge of forecasting a specific market. For this purpose, I have market figures for the last 20 years. However, these are only available on an annual basis, so there is no possibility of obtaining the data on a quarterly or monthly basis. Are there any suggestions for this? The goal is to forecast the next 2-3 years from these data.
[ "If I am not wrong with assuming the following in your case:\n\nThere is only 20 years of data on annual basis without seasonality details,\nForecasting is to be made on annual basis for the next 2-3 years,\nNo more data is either available or can be found,\n\nThen, you may try one of the techniques mentioned here:\nhttps://docs.oracle.com/cd/E57185_01/CBREG/ch06s02s02s01.html\nThe accuracy and performance of the model may not be significant due to the constraints.\n" ]
[ 1 ]
[]
[]
[ "machine_learning", "python", "regression", "statistics", "time_series" ]
stackoverflow_0074559179_machine_learning_python_regression_statistics_time_series.txt
Q: Finding the index of an element in a list when there are duplicates in python So for context, there is a popular problem called the "Fibbonaci Clock." Essentially, you have a list of colors, for example ["white","blue","red","green","white"]. The first item in the list holds a value, of 1, then the second holds again a value of 1, the third holds a value of 2, the fourth holds a value of 3, and the 5th holds a value of 5. [1,1,2,3,5]. To find the time of ["white","blue","red","green","white"], you would add the values of Red and Blue to get the hour, and do 5*(Green + Blue) for the minutes. In this case, the blue color is in the second box, meaning it holds a value of 1, and the red value is in the third box, meaning it holds a value of 2. so 1 + 2 = 3, so the hour is 3. The minute is 5*(G + B), green is in the 4th slot, holding a value of 3, and blue is in the second spot, holding a value of 1. 5(3 + 1) = 5(4) = 20. So the time is 3:20. So I'm trying to write a program for this, but I have a problem. There can be repeats of Red, Green, and Blue. For example, ["Red","Red","Blue","Green","White]. In this case, when adding Red and Blue, you would have to add both values of Red, and Blue. This is where I'm confused on how to code it. This is my code: x = [1,1,2,3,5] y = [] r = [] for t in range(1,6,1): print("give me a color") s = input() y.append(s) if "r" in y: if "b" in y: if "g" in y: r_index = y.index("r") r_index2 = y.index("b") r_index3 = y.index("g") r.append(r_index) r.append(r_index2) if r_index == 0: r_index = 1 if r_index == 4: r_index = 5 if r_index2 == 0: r_index2 = 1 if r_index2 == 4: r_index2 = 5 hour = int(r_index) + int(r_index2) minute = 5*(r_index2 + r_index3) print("The final time is",hour,":",minute) If there are ever repeats of Red, Green, Or Blue, my code only adds the smallest value, resulting in the wrong time. I would appreciate an answer on how to fix this, and a fixed code A: fib = [ 1, 1, 2, 3, 5 ] colors = [ "red", "red", "blue", "green", "white" ] def get_sum_for(color): return sum(f for f, c in zip(fib, colors) if c == color) hours = get_sum_for("red") + get_sum_for("blue") minutes = 5 * (get_sum_for("green") + get_sum_for("blue")) print(f"{hours}:{minutes}") A: you could just put in a dic with indices: from collections import defaultdict def get_hour(hour): fib_val = [1,1,2,3,5] fib = defaultdict(list) for idx, h in enumerate(hour): fib[h.lower()].append(fib_val[idx]) h = sum(fib['red'] + fib['blue']) m = 5 * sum(fib['green'] + fib['blue']) return f"{h:02d}:{m:02d}" output: get_hour(["Red","Red","Blue","Green","White"]) '04:25'
Finding the index of an element in a list when there are duplicates in python
So for context, there is a popular problem called the "Fibbonaci Clock." Essentially, you have a list of colors, for example ["white","blue","red","green","white"]. The first item in the list holds a value, of 1, then the second holds again a value of 1, the third holds a value of 2, the fourth holds a value of 3, and the 5th holds a value of 5. [1,1,2,3,5]. To find the time of ["white","blue","red","green","white"], you would add the values of Red and Blue to get the hour, and do 5*(Green + Blue) for the minutes. In this case, the blue color is in the second box, meaning it holds a value of 1, and the red value is in the third box, meaning it holds a value of 2. so 1 + 2 = 3, so the hour is 3. The minute is 5*(G + B), green is in the 4th slot, holding a value of 3, and blue is in the second spot, holding a value of 1. 5(3 + 1) = 5(4) = 20. So the time is 3:20. So I'm trying to write a program for this, but I have a problem. There can be repeats of Red, Green, and Blue. For example, ["Red","Red","Blue","Green","White]. In this case, when adding Red and Blue, you would have to add both values of Red, and Blue. This is where I'm confused on how to code it. This is my code: x = [1,1,2,3,5] y = [] r = [] for t in range(1,6,1): print("give me a color") s = input() y.append(s) if "r" in y: if "b" in y: if "g" in y: r_index = y.index("r") r_index2 = y.index("b") r_index3 = y.index("g") r.append(r_index) r.append(r_index2) if r_index == 0: r_index = 1 if r_index == 4: r_index = 5 if r_index2 == 0: r_index2 = 1 if r_index2 == 4: r_index2 = 5 hour = int(r_index) + int(r_index2) minute = 5*(r_index2 + r_index3) print("The final time is",hour,":",minute) If there are ever repeats of Red, Green, Or Blue, my code only adds the smallest value, resulting in the wrong time. I would appreciate an answer on how to fix this, and a fixed code
[ "fib = [\n 1,\n 1,\n 2,\n 3,\n 5\n]\n\ncolors = [\n \"red\",\n \"red\",\n \"blue\",\n \"green\",\n \"white\"\n]\n\ndef get_sum_for(color):\n return sum(f for f, c in zip(fib, colors) if c == color)\n\nhours = get_sum_for(\"red\") + get_sum_for(\"blue\")\nminutes = 5 * (get_sum_for(\"green\") + get_sum_for(\"blue\"))\n\nprint(f\"{hours}:{minutes}\")\n\n", "you could just put in a dic with indices:\nfrom collections import defaultdict\n\ndef get_hour(hour):\n fib_val = [1,1,2,3,5]\n fib = defaultdict(list)\n for idx, h in enumerate(hour):\n fib[h.lower()].append(fib_val[idx])\n \n h = sum(fib['red'] + fib['blue'])\n m = 5 * sum(fib['green'] + fib['blue'])\n return f\"{h:02d}:{m:02d}\"\n\noutput:\nget_hour([\"Red\",\"Red\",\"Blue\",\"Green\",\"White\"])\n\n'04:25'\n\n" ]
[ 0, 0 ]
[]
[]
[ "duplicates", "indexing", "list", "python" ]
stackoverflow_0074577795_duplicates_indexing_list_python.txt
Q: Finding the lowest value per key in a dictionary with multiple values per key I have a dictionary with multiple keys, and multiple values per key (sometimes). The dictionary is zipped from two lists which I've pulled from an excel sheet using pandas. I've converted the values to integers. My dictionary looks like this: dictionary = {'A223':[1,4,5],'B224':[7,8,9],'A323':[4,5],'B456':[3,3,4,5] } What I need now is to modify the dictionary so that each Key only shows the min value. So desired output would look like this: dictionary = {'A223':1,'B224':7,'A323':4,'B456':3} I can return the key with the lowest value, however this doesn't help me. Here is my code thus far: df = pd.read_excel(PT, sheet_name= "Permit Tracker") permit_list = ['1.Planning', '2.Survey Complete', '3.Design Complete', '4.Permit Submitted', '5.Permit Approved','6.IFC', '7.As-Built', '8.On-Hold', '9.Cancelled'] #original values column, to be converted to int. dicto = { '1.Planning': 1, '2.Survey Complete': 2, '3.Design Complete': 3, '4.Permit Submitted': 4, '5.Permit Approved': 5, '6.IFC': 6, '7.As-Built': 7, '8.On-Hold': 8, '9.Cancelled': 9 } new_int = [dicto[k] for k in permit_list] dfint = df['Permit Status'].dropna().tolist() dfkeys = df['RPATS#'] #this is the keys column in my excel sheet new_conversion = [dicto[k] for k in dfint] dictionary = {} for i, j in zip(dfkeys,new_conversion): dictionary.setdefault(i, []).append(j) print(dictionary) My steps thus far: 1 - read excel into df with the two columns I need 2 - Convert string values in values column into int. 3 - Create a list for values column, dropping na 4 - zipping together keys and values, customizing a dictionary to accept multiple values per key. I'm new, and really at a loss here. Any help would be very much appreciated! I have tried something like: dictionary = {'A223':[1,4,5],'B224':[7,8,9,],'A323':[4,5],'B456':[3,3,4,5] } min(dictionary, key=dictionary.get) Although this only, and obviously, returns the key with the lowest value. A: To keep it in the spirit of python one-liners: >>> dictionary {'A223': [1, 4, 5], 'B224': [7, 8, 9], 'A323': [4, 5], 'B456': [3, 3, 4, 5]} >>> dict(map(lambda x: (x[0], min(x[1])), dictionary.items())) {'A223': 1, 'B224': 7, 'A323': 4, 'B456': 3} A: Alternatively comprehension could be used: newdic = {key: min(value) for key, value in dictionary.items()}
Finding the lowest value per key in a dictionary with multiple values per key
I have a dictionary with multiple keys, and multiple values per key (sometimes). The dictionary is zipped from two lists which I've pulled from an excel sheet using pandas. I've converted the values to integers. My dictionary looks like this: dictionary = {'A223':[1,4,5],'B224':[7,8,9],'A323':[4,5],'B456':[3,3,4,5] } What I need now is to modify the dictionary so that each Key only shows the min value. So desired output would look like this: dictionary = {'A223':1,'B224':7,'A323':4,'B456':3} I can return the key with the lowest value, however this doesn't help me. Here is my code thus far: df = pd.read_excel(PT, sheet_name= "Permit Tracker") permit_list = ['1.Planning', '2.Survey Complete', '3.Design Complete', '4.Permit Submitted', '5.Permit Approved','6.IFC', '7.As-Built', '8.On-Hold', '9.Cancelled'] #original values column, to be converted to int. dicto = { '1.Planning': 1, '2.Survey Complete': 2, '3.Design Complete': 3, '4.Permit Submitted': 4, '5.Permit Approved': 5, '6.IFC': 6, '7.As-Built': 7, '8.On-Hold': 8, '9.Cancelled': 9 } new_int = [dicto[k] for k in permit_list] dfint = df['Permit Status'].dropna().tolist() dfkeys = df['RPATS#'] #this is the keys column in my excel sheet new_conversion = [dicto[k] for k in dfint] dictionary = {} for i, j in zip(dfkeys,new_conversion): dictionary.setdefault(i, []).append(j) print(dictionary) My steps thus far: 1 - read excel into df with the two columns I need 2 - Convert string values in values column into int. 3 - Create a list for values column, dropping na 4 - zipping together keys and values, customizing a dictionary to accept multiple values per key. I'm new, and really at a loss here. Any help would be very much appreciated! I have tried something like: dictionary = {'A223':[1,4,5],'B224':[7,8,9,],'A323':[4,5],'B456':[3,3,4,5] } min(dictionary, key=dictionary.get) Although this only, and obviously, returns the key with the lowest value.
[ "To keep it in the spirit of python one-liners:\n>>> dictionary\n{'A223': [1, 4, 5], 'B224': [7, 8, 9], 'A323': [4, 5], 'B456': [3, 3, 4, 5]}\n>>> dict(map(lambda x: (x[0], min(x[1])), dictionary.items()))\n{'A223': 1, 'B224': 7, 'A323': 4, 'B456': 3}\n\n", "Alternatively comprehension could be used:\nnewdic = {key: min(value) for key, value in dictionary.items()}\n\n" ]
[ 3, 1 ]
[]
[]
[ "dictionary", "pandas", "python" ]
stackoverflow_0074577766_dictionary_pandas_python.txt
Q: remove one charachter from a speficic column in df pandas I have a df with a column that some values are having ... and some .. and some are without dots. Type range Mike 10..13 Ni 3..4 NANA 2...3 Gi 2 desired output should look like this Type range Mike 10 Mike 11 Mike 12 MIke 13 Ni 3 Ni 4 NANA 2 NANA 3 Gi 2 So dots represnt the range of between to number ( inclusive the end number). How am I suppsoed to do it in pandas? A: Parse str as list first and then explode: import re def str_to_list(s): if not s: return [] nums = re.split('\.{2,3}', s) if len(nums) == 1: return nums return list(range(int(nums[0]), int(nums[1]) + 1)) df['range'] = df['range'].astype(str).map(str_to_list) df.explode('range') Type range 0 Mike 10 0 Mike 11 0 Mike 12 0 Mike 13 1 Ni 3 1 Ni 4 2 NANA 2 2 NANA 3 3 Gi 2 A: An approach using numpy.arange with pandas.DataFrame.explode : out = ( df .assign(range= df["range"] .str.replace("\.+", "-", regex=True) .str.split("-") .apply(lambda x: np.arange(list(map(int, x))[0], list(map(int, x))[-1]+1, 1) if len(x)>1 else x)) .explode("range", ignore_index=True) ) # Output : print(out) Type range 0 Mike 10 1 Mike 11 2 Mike 12 3 Mike 13 4 Ni 3 5 Ni 4 6 NANA 2 7 NANA 3 8 Gi 2
remove one character from a specific column in df pandas
I have a df with a column where some values have ... and some have .. and some are without dots. Type range Mike 10..13 Ni 3..4 NANA 2...3 Gi 2 The desired output should look like this: Type range Mike 10 Mike 11 Mike 12 Mike 13 Ni 3 Ni 4 NANA 2 NANA 3 Gi 2 So the dots represent the range between two numbers (inclusive of the end number). How am I supposed to do it in pandas?
[ "Parse str as list first and then explode:\nimport re\ndef str_to_list(s):\n if not s: return []\n nums = re.split('\\.{2,3}', s)\n if len(nums) == 1:\n return nums\n return list(range(int(nums[0]), int(nums[1]) + 1))\n\ndf['range'] = df['range'].astype(str).map(str_to_list)\ndf.explode('range')\n\n Type range\n0 Mike 10\n0 Mike 11\n0 Mike 12\n0 Mike 13\n1 Ni 3\n1 Ni 4\n2 NANA 2\n2 NANA 3\n3 Gi 2\n\n", "An approach using numpy.arange with pandas.DataFrame.explode :\nout = (\n df\n .assign(range=\n df[\"range\"]\n .str.replace(\"\\.+\", \"-\", regex=True)\n .str.split(\"-\")\n .apply(lambda x: np.arange(list(map(int, x))[0], list(map(int, x))[-1]+1, 1) if len(x)>1 else x))\n .explode(\"range\", ignore_index=True)\n )\n\n# Output :\nprint(out)\n\n Type range\n0 Mike 10\n1 Mike 11\n2 Mike 12\n3 Mike 13\n4 Ni 3\n5 Ni 4\n6 NANA 2\n7 NANA 3\n8 Gi 2\n\n" ]
[ 3, 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074577761_pandas_python.txt
Q: College assignment: Logistic regression in python I have an assignment to do at university in Quantitative Methods course. We have been given a popular and well known paper where my job is to replicate some of the results from that paper. The paper is about labour market discrimination and claims to have evidence that people with white sounding names have a higher chance of getting a callback for job interview than people with African American sounding names. I am about to do some logistic regression. My outcome variable (Y) is callbacks and my predictor variable (x) can be many things like race, number of previous job, gender, years of job experience and so on. I have already done the regression for white sounding names and callbacks and i have done all the work on dummy variables, binary variable and so on. Now I need to run the model again with a new variable together with race, which is years of job experience. My first regression run (which is fine and i understand the results): log_odds_call = smf.logit("call ~ race_w", data = df2_cat).fit() I just dont understand this line of code that my teacher told us to use when adding more x variables: log_ods_call_yearsexp_interaction = smf.logit('call ~ race_w + yearsexp + race_w:yearsexp', data = df2_cat).fit() What does that colon between race_w and yearsexp mean and why should i use it? I mean i have already added yearsexp - why do that colon thing at the end? My outcome is this: outcome A: Your teacher suggests adding an interaction term in your model by multiplying those two columns. That idea is based on the thought that increasing the first variable may also increase the impact of the second variable, leading to a higher slope for the latter. In other words, when two variables get into the model, a synergy occurs, which is referred to as "interaction effect". Probably, your teacher wanted to consider such a synergy between those two variables.
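A small illustration of what the colon does, using synthetic data since the real df2_cat is not available here: race_w:yearsexp adds the product race_w * yearsexp as an extra regressor, so the effect of experience is allowed to differ between the two name groups.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Synthetic stand-in for df2_cat, purely to show the interaction term.
rng = np.random.default_rng(0)
df = pd.DataFrame({"race_w": rng.integers(0, 2, 500),
                   "yearsexp": rng.integers(0, 20, 500)})
xb = -1.5 + 0.4 * df.race_w + 0.05 * df.yearsexp + 0.03 * df.race_w * df.yearsexp
df["call"] = rng.binomial(1, 1 / (1 + np.exp(-xb.to_numpy())))

# 'race_w:yearsexp' is the product term; the slope on yearsexp becomes
# b_yearsexp + b_interaction * race_w, i.e. it depends on the applicant group.
m = smf.logit("call ~ race_w + yearsexp + race_w:yearsexp", data=df).fit(disp=0)
print(m.params)

In this parameterization, a positive interaction coefficient means each extra year of experience raises the callback odds by more for the race_w = 1 group than for the other group, and a negative one means the opposite.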
College assignment: Logistic regression in python
I have an assignment to do at university in Quantitative Methods course. We have been given a popular and well known paper where my job is to replicate some of the results from that paper. The paper is about labour market discrimination and claims to have evidence that people with white sounding names have a higher chance of getting a callback for job interview than people with African American sounding names. I am about to do some logistic regression. My outcome variable (Y) is callbacks and my predictor variable (x) can be many things like race, number of previous job, gender, years of job experience and so on. I have already done the regression for white sounding names and callbacks and i have done all the work on dummy variables, binary variable and so on. Now I need to run the model again with a new variable together with race, which is years of job experience. My first regression run (which is fine and i understand the results): log_odds_call = smf.logit("call ~ race_w", data = df2_cat).fit() I just dont understand this line of code that my teacher told us to use when adding more x variables: log_ods_call_yearsexp_interaction = smf.logit('call ~ race_w + yearsexp + race_w:yearsexp', data = df2_cat).fit() What does that colon between race_w and yearsexp mean and why should i use it? I mean i have already added yearsexp - why do that colon thing at the end? My outcome is this: outcome
[ "Your teacher suggests adding an interaction term in your model by multiplying those two columns. That idea is based on the thought that increasing the first variable may also increase the impact of the second variable, leading to a higher slope for the latter. In other words, when two variables get into the model, a synergy occurs, which is referred to as \"interaction effect\". Probably, your teacher wanted to consider such a synergy between those two variables.\n" ]
[ 0 ]
[]
[]
[ "binary", "dummy_variable", "logistic_regression", "python", "statistics" ]
stackoverflow_0074525290_binary_dummy_variable_logistic_regression_python_statistics.txt
Q: Outputting the line number of a text in a .txt-file, which doesn´t include spaces with list-comprehension I have a question reagarding my code, if anybody has some clues how to solve it. I need to write only one line of code, which outputs the line numbers of those lines that don´t include spaces between the words. My attempt was the following: [line for line in range(len(open('test.txt').readlines())) if ' ' not in open('test.txt').readlines(line)] I tried to use enumerate. But it didn`t work out as I intended. I would appreciate any clue on how to change my code, if anything of my code is correct. A: The first step is to write your loop as a regular loop, then compress it into a list comprehension. What you have is: lines = [] for line in range(len(open('test.txt').readlines()): if ' ' not in open('test.txt').readlines(line): lines.append(line) Opening the file twice and using readlines() is rarely the right way to solve a problem. Instead, it's usually better to iterate over each line, one at a time. The wonderful enumerate() function is a great way to get the index of elements as you iterate over them. It returns a tuple of (index, value) for each input of a list, or other iterable. With it, your loop can become something like this: for num, line in enumerate(open('test.txt')): if ' ' not in line: lines.append(num) Converting the loop into a list comprehension is left as an exercise for you. A: This does it: print([i for i,line in (enumerate(open("test.txt").readlines())) if " " not in line]) Content of example file: apricot a p p l e mango banana che rry Output: [0, 2, 3]
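For completeness, the list-comprehension version that the first answer leaves as an exercise might look like the short sketch below; it assumes a test.txt in the working directory and checks only for literal space characters.

with open("test.txt") as f:
    # use enumerate(f, start=1) if line numbers should start at 1 instead of 0
    no_space_lines = [num for num, line in enumerate(f) if " " not in line]
print(no_space_lines)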
Outputting the line number of a text in a .txt-file, which doesn't include spaces with list-comprehension
I have a question regarding my code, if anybody has some clues how to solve it. I need to write only one line of code, which outputs the line numbers of those lines that don't include spaces between the words. My attempt was the following: [line for line in range(len(open('test.txt').readlines())) if ' ' not in open('test.txt').readlines(line)] I tried to use enumerate, but it didn't work out as I intended. I would appreciate any clue on how to change my code, if anything of my code is correct.
[ "The first step is to write your loop as a regular loop, then compress it into a list comprehension. What you have is:\nlines = []\n\nfor line in range(len(open('test.txt').readlines()):\n if ' ' not in open('test.txt').readlines(line):\n lines.append(line)\n\nOpening the file twice and using readlines() is rarely the right way to solve a problem. Instead, it's usually better to iterate over each line, one at a time.\nThe wonderful enumerate() function is a great way to get the index of elements as you iterate over them. It returns a tuple of (index, value) for each input of a list, or other iterable.\nWith it, your loop can become something like this:\nfor num, line in enumerate(open('test.txt')):\n if ' ' not in line:\n lines.append(num)\n\nConverting the loop into a list comprehension is left as an exercise for you.\n", "This does it:\nprint([i for i,line in (enumerate(open(\"test.txt\").readlines())) if \" \" not in line])\n\nContent of example file:\napricot\na p p l e\nmango\nbanana\nche rry\n\nOutput:\n[0, 2, 3]\n\n" ]
[ 2, 1 ]
[]
[]
[ "enumerate", "list_comprehension", "python" ]
stackoverflow_0074577765_enumerate_list_comprehension_python.txt
Q: python z3 printing a sorted model In this question here: How to print z3 solver results print(s.model()) in order? the first answer points out the problem of 10 coming after 1, and it only being sorted by the first digit, however he says with more processing it could be fixed, what would this processing be? A: Post-processing simply means you just use your Python programming skills to manipulate the output; at this point the problem has nothing to do with z3. For the specific example you're referring to, you can modify it to: from z3 import * v = [Real('v_%s' % (i+1)) for i in range(10)] s = Solver() for i in range(10): s.add(v[i] == i) if s.check() == sat: m = s.model() print (sorted ([(d, m[d]) for d in m], key = lambda x: int(str(x[0])[2:]))) which prints: [(v_1, 0), (v_2, 1), (v_3, 2), (v_4, 3), (v_5, 4), (v_6, 5), (v_7, 6), (v_8, 7), (v_9, 8), (v_10, 9)] sorting the keys numerically. But again, this isn't really a z3 question: You can take this output and just perform regular Python programming on it as you wish.
python z3 printing a sorted model
In this question here: How to print z3 solver results print(s.model()) in order? the first answer points out the problem of 10 coming after 1, and it only being sorted by the first digit, however he says with more processing it could be fixed, what would this processing be?
[ "Post-processing simply means you just use your Python programming skills to manipulate the output; at this point the problem has nothing to do with z3. For the specific example you're referring to, you can modify it to:\nfrom z3 import *\n\nv = [Real('v_%s' % (i+1)) for i in range(10)]\n\ns = Solver()\nfor i in range(10):\n s.add(v[i] == i)\nif s.check() == sat:\n m = s.model()\n print (sorted ([(d, m[d]) for d in m], key = lambda x: int(str(x[0])[2:])))\n\nwhich prints:\n[(v_1, 0), (v_2, 1), (v_3, 2), (v_4, 3), (v_5, 4), (v_6, 5), (v_7, 6), (v_8, 7), (v_9, 8), (v_10, 9)]\n\nsorting the keys numerically. But again, this isn't really a z3 question: You can take this output and just perform regular Python programming on it as you wish.\n" ]
[ 0 ]
[]
[]
[ "list", "list_comprehension", "python", "z3", "z3py" ]
stackoverflow_0074568155_list_list_comprehension_python_z3_z3py.txt
Q: Pandas - How to sort values in one column ascending and another column descending? I have a dataframe with 2 columns. I'm trying to sort one column ('values') by descending order, and when two values are the same, sort another column by ascending order. Currently, my code is: br_df = br_imgfeatures_df.mean().reset_index(name='value').sort_values(by='value', ascending=False) Which is producing this output: As you can see, the values for 'Palm trees' and 'Flowers' are the same, but I'm trying to reorder them with 'Flowers' on top of 'Palm trees'. A: Since pandas.DataFrame.sort_values accepts lists for the by parameter, you can use the code below and replace Column_X by the name of the first/other column : br_df = ( br_imgfeatures_df.mean() .reset_index(name='value') .sort_values(by=['value', 'Column_X'], ascending=[False, True]) ) # Output : Column_X value 0 Nighttime 0.031496 1 Flowers 0.023622 2 Palm trees 0.023622 3 Cliff 0.020997 4 Bridge 0.018373
Pandas - How to sort values in one column ascending and another column descending?
I have a dataframe with 2 columns. I'm trying to sort one column ('values') by descending order, and when two values are the same, sort another column by ascending order. Currently, my code is: br_df = br_imgfeatures_df.mean().reset_index(name='value').sort_values(by='value', ascending=False) Which is producing this output: As you can see, the values for 'Palm trees' and 'Flowers' are the same, but I'm trying to reorder them with 'Flowers' on top of 'Palm trees'.
[ "Since pandas.DataFrame.sort_values accepts lists for the by parameter, you can use the code below and replace Column_X by the name of the first/other column :\nbr_df = (\n br_imgfeatures_df.mean()\n .reset_index(name='value')\n .sort_values(by=['value', 'Column_X'],\n ascending=[False, True])\n )\n\n# Output :\n Column_X value\n0 Nighttime 0.031496\n1 Flowers 0.023622\n2 Palm trees 0.023622\n3 Cliff 0.020997\n4 Bridge 0.018373\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python", "python_3.x" ]
stackoverflow_0074577980_dataframe_pandas_python_python_3.x.txt
Q: I wish to print a pandas dataframe name Please be patient I am new to Python and Pandas. I have a lot of pandas dataframe, but some are duplicates. So I wrote a function that check if 2 dataframes are equal, if they are 1 will be deleted: def check_eq(df1, df2): if df1.equals(df2): del[df2] print( "Deleted %s" (df_name) ) The function works, but I wish to know how to have the variable "df_name" as string with the name of the dataframe. I don't understand, the parameters df1 and df2 are dataframe objects how I can get their name at run-time if I wish to print it? Thanks in advance. A: What you are trying to use is an f-string. def check_eq(df1, df2): if df1.equals(df2): del[df2] print(f"Deleted {df2.name}") I'm not certain whether you can call this print method, though. Since you deleted the dataframe right before you call its name attribute. So df2 is unbound. Instead try this: def check_eq(df1, df2): if df1.equals(df2): print(f"Deleted {df2.name}") del df2 Now, do note that your usage of 'del' is also not correct. I assume you want to delete the second dataframe in your code. However, you only delete it inside the scope of the check_eq method. You should familiarize yourself with the scope concept first. https://www.w3schools.com/python/python_scope.asp The code I used: d = {'col1': [1, 2], 'col2': [3, 4]} df1 = pd.DataFrame(data=d) df2 = pd.DataFrame(data=d) df1.name='dataframe1' df2.name='dataframe2' def check_eq(df1, df2): if df1.equals(df2): print(f"Deleted {df2.name}")
I wish to print a pandas dataframe name
Please be patient, I am new to Python and Pandas. I have a lot of pandas dataframes, but some are duplicates. So I wrote a function that checks if 2 dataframes are equal; if they are, one will be deleted: def check_eq(df1, df2): if df1.equals(df2): del[df2] print( "Deleted %s" (df_name) ) The function works, but I wish to know how to have the variable "df_name" as a string with the name of the dataframe. I don't understand: the parameters df1 and df2 are dataframe objects, so how can I get their names at run-time if I wish to print them? Thanks in advance.
[ "What you are trying to use is an f-string.\ndef check_eq(df1, df2):\n if df1.equals(df2):\n del[df2]\n print(f\"Deleted {df2.name}\")\n\nI'm not certain whether you can call this print method, though. Since you deleted the dataframe right before you call its name attribute. So df2 is unbound.\nInstead try this:\ndef check_eq(df1, df2):\n if df1.equals(df2):\n print(f\"Deleted {df2.name}\")\n del df2\n \n\nNow, do note that your usage of 'del' is also not correct. I assume you want to delete the second dataframe in your code. However, you only delete it inside the scope of the check_eq method. You should familiarize yourself with the scope concept first. https://www.w3schools.com/python/python_scope.asp\nThe code I used:\nd = {'col1': [1, 2], 'col2': [3, 4]}\ndf1 = pd.DataFrame(data=d)\ndf2 = pd.DataFrame(data=d)\n\ndf1.name='dataframe1'\ndf2.name='dataframe2'\n\ndef check_eq(df1, df2):\n if df1.equals(df2):\n print(f\"Deleted {df2.name}\")\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "function", "pandas", "printing", "python" ]
stackoverflow_0074577994_dataframe_function_pandas_printing_python.txt
Q: Python - How to take multiple inputs, and repeat code for each one? I'm very new to this. I just started programming last week. I need some basic help. My assignment is to input five numbers and get the output to print out "odd" or "even" for each one. This is how I have started: num = int(input()) if (num % 2) == 0: print('even') else: print('odd') How can I have five numbers in the input? I don't want to make a hardcoded list; the program has to work with different numbers each time. I hope you understand my question. Thank you for helping out. EDIT: I am not supposed to import anything, so I can't use import random. I'm supposed to input 5 numbers. For example: input 3 5 2 1 33 Output Odd Odd Even Odd Odd So I have made some progress but it's still wrong. for _ in range(5): num = int(input()) if (num % 2) == 0: print('even') else: print('odd') I now get the output (odd or even) before I have written all numbers in the input. I don't know how to change that. I want to write the five numbers and then get the output. I hope I have explained this well. Sorry for the confusion. A: Hi Hope you are doing well! If I understood your question correctly, you are trying to achieve something similar to this: import random # you can define your own limits # or you can use numpy to generate random numbers from the different distributions number = random.randint(0, 999) print(f"Current number: {number}.") print("Even!") if number % 2 == 0 else print("Odd!") A: Try this: nums = input('Enter your numbers: ').split() for num in nums: if int(num) % 2 == 0: print('even') else: print('odd') A: Please just learn something new about the list comprehension: ls = [(print(num, "Even") if (num % 2) == 0 else print(num, "Odd")) for num in range(1,6)] Output: 1 Odd 2 Even 3 Odd 4 Even 5 Odd A: A bit on the elaborative side but since you are learning to program, you should be gather various problem specifications and then design you solution import random experiment_to_run = int(input()) # how many times you want to run the experiment, let us say default is 5, but that can be user input as well lower_bound_of_numbers = int(input()) # lower bound of the integer range upper_bound_of_numbers = int(input()) # upper bound of the integer range def print_even_or_odd(experiment_to_run = 5, lower_bound_of_numbers = 1, upper_bound_of_numbers = 10): for current_experiment_run in range(experiment_to_run): current_number = random.randint(lower_bound_of_numbers, upper_bound_of_numbers) # this generates random integer between lower_bound_of_numbers (inclusive) and upper_bound_of_numbers (exclusive) if current_number % 2 == 0: print ('even') else: print ('odd') # dry run print_even_or_odd(experiment_to_run, lower_bound_of_numbers, upper_bound_of_numbers) # feel to add current_number also to the log if needed for debugging purpose A: Since you have edited your question, it appears you don't actually want random numbers, but instead you want the user to enter five numbers in a row. You seem to want the user to enter all five numbers first, and then all the output to be displayed. In order to accomplish this, first run the input function five times using a for loop, and add each input to a list, then use another for loop to output whether each one is odd or even. 
Here is the code: # Create a list to store the five numbers nums = [] # Fill the list with five inputs from the user for _ in range(5): nums.append(int(input())) # Explanation of above line: # - The `append` function adds a new item to a list # - So this line gets input from the user, # turns it into an integer, # and adds it to the list # Finally, output whether each number is odd or even # Iterate over the `nums` list using a for loop for num in nums: # The code here will run one time for each item in the `nums` list # Each time the code runs, the variable `num` will store the current item # So we can do a test on `num` to see if each item is odd or even if (num % 2) == 0: print(num, "is even") else: print(num, "is odd") Above is one way to do your task, however it does not follow best practices for the following reasons: The code is not split up into functions so it is not as clear what each part accomplishes The main code should be under an if __name__ == "__main__:" so that the code only runs when it is supposed to The code will throw an error if the user enters anything which is not an integer. Instead of ignoring this, we should catch this error and tell the user to try again if they enter something invalid Below is the code I would probably write if I were given this task myself. It is much longer and perhaps overkill for such a simple task, but you should try to get into good coding habits right from the start. #!/usr/bin/env python3 """This script gets five integers from the user and outputs whether each is odd or even""" def main(): nums = [get_user_num() for _ in range(5)] for num in nums: if is_even(num): print(num, "is even") else: print(num, "is odd") def get_user_num(): num = input("Enter an integer: ") while True: # Repeat until the user enters a valid integer try: return int(num) except ValueError: num = input("That was not an integer - please try again: ") def is_even(num): return (num % 2) == 0 if __name__ == "__main__": main() Explanations of some of the things in the improved code: What does the comment right at the top of the code do? How do functions work in Python? List comprehensions in Python if __name__ == "__main__":: This line only runs the code if the file is being run directly as the main code file. If the code is being imported by another Python file (running indirectly), then this line prevents the code from running. This prevents people from running the code by accident and getting an unexpected result. You can find out more about importing Python files here, and more about how this line works here.
Python - How to take multiple inputs, and repeat code for each one?
I'm very new to this. I just started programming last week. I need some basic help. My assignment is to input five numbers and get the output to print out "odd" or "even" for each one. This is how I have started: num = int(input()) if (num % 2) == 0: print('even') else: print('odd') How can I have five numbers in the input? I don't want to make a hardcoded list; the program has to work with different numbers each time. I hope you understand my question. Thank you for helping out. EDIT: I am not supposed to import anything, so I can't use import random. I'm supposed to input 5 numbers. For example: input 3 5 2 1 33 Output Odd Odd Even Odd Odd So I have made some progress but it's still wrong. for _ in range(5): num = int(input()) if (num % 2) == 0: print('even') else: print('odd') I now get the output (odd or even) before I have written all numbers in the input. I don't know how to change that. I want to write the five numbers and then get the output. I hope I have explained this well. Sorry for the confusion.
[ "Hi Hope you are doing well!\nIf I understood your question correctly, you are trying to achieve something similar to this:\nimport random\n\n# you can define your own limits\n# or you can use numpy to generate random numbers from the different distributions\nnumber = random.randint(0, 999)\nprint(f\"Current number: {number}.\")\n\nprint(\"Even!\") if number % 2 == 0 else print(\"Odd!\")\n\n", "Try this:\nnums = input('Enter your numbers: ').split()\nfor num in nums:\n if int(num) % 2 == 0:\n print('even')\n else:\n print('odd')\n\n", "Please just learn something new about the list comprehension:\nls = [(print(num, \"Even\") if (num % 2) == 0 else print(num, \"Odd\")) for num in range(1,6)]\n\nOutput:\n1 Odd\n2 Even\n3 Odd\n4 Even\n5 Odd\n\n", "A bit on the elaborative side but since you are learning to program, you should be gather various problem specifications and then design you solution\nimport random\n\nexperiment_to_run = int(input()) # how many times you want to run the experiment, let us say default is 5, but that can be user input as well\nlower_bound_of_numbers = int(input()) # lower bound of the integer range\nupper_bound_of_numbers = int(input()) # upper bound of the integer range\n\ndef print_even_or_odd(experiment_to_run = 5, lower_bound_of_numbers = 1, upper_bound_of_numbers = 10):\n for current_experiment_run in range(experiment_to_run):\n current_number = random.randint(lower_bound_of_numbers, upper_bound_of_numbers) # this generates random integer between lower_bound_of_numbers (inclusive) and upper_bound_of_numbers (exclusive)\n \n if current_number % 2 == 0:\n print ('even')\n else:\n print ('odd')\n\n# dry run \nprint_even_or_odd(experiment_to_run, lower_bound_of_numbers, upper_bound_of_numbers) # feel to add current_number also to the log if needed for debugging purpose\n\n", "Since you have edited your question, it appears you don't actually want random numbers, but instead you want the user to enter five numbers in a row.\nYou seem to want the user to enter all five numbers first, and then all the output to be displayed.\nIn order to accomplish this, first run the input function five times using a for loop, and add each input to a list, then use another for loop to output whether each one is odd or even.\nHere is the code:\n# Create a list to store the five numbers\nnums = []\n\n# Fill the list with five inputs from the user\nfor _ in range(5):\n nums.append(int(input()))\n # Explanation of above line:\n # - The `append` function adds a new item to a list\n # - So this line gets input from the user,\n # turns it into an integer,\n # and adds it to the list\n\n# Finally, output whether each number is odd or even\n# Iterate over the `nums` list using a for loop\nfor num in nums:\n # The code here will run one time for each item in the `nums` list\n # Each time the code runs, the variable `num` will store the current item\n # So we can do a test on `num` to see if each item is odd or even\n if (num % 2) == 0:\n print(num, \"is even\")\n else:\n print(num, \"is odd\")\n\nAbove is one way to do your task, however it does not follow best practices for the following reasons:\n\nThe code is not split up into functions so it is not as clear what each part accomplishes\nThe main code should be under an if __name__ == \"__main__:\" so that the code only runs when it is supposed to\nThe code will throw an error if the user enters anything which is not an integer. 
Instead of ignoring this, we should catch this error and tell the user to try again if they enter something invalid\n\nBelow is the code I would probably write if I were given this task myself. It is much longer and perhaps overkill for such a simple task, but you should try to get into good coding habits right from the start.\n#!/usr/bin/env python3\n\"\"\"This script gets five integers from the user and outputs whether each is odd or even\"\"\"\n\ndef main():\n nums = [get_user_num() for _ in range(5)]\n for num in nums:\n if is_even(num):\n print(num, \"is even\")\n else:\n print(num, \"is odd\")\n\n\ndef get_user_num():\n num = input(\"Enter an integer: \")\n while True:\n # Repeat until the user enters a valid integer\n try:\n return int(num)\n except ValueError:\n num = input(\"That was not an integer - please try again: \")\n\n\ndef is_even(num):\n return (num % 2) == 0\n\n\nif __name__ == \"__main__\":\n main()\n\nExplanations of some of the things in the improved code:\n\nWhat does the comment right at the top of the code do?\nHow do functions work in Python?\nList comprehensions in Python\nif __name__ == \"__main__\"::\nThis line only runs the code if the file is being run directly as the main code file. If the code is being imported by another Python file (running indirectly), then this line prevents the code from running. This prevents people from running the code by accident and getting an unexpected result. You can find out more about importing Python files here, and more about how this line works here.\n\n" ]
[ 1, 1, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074576804_python.txt
Q: Insert blank slide with python-pptx in existing presentation I'm working with the python-pptx library and I'm trying to insert a blank slide at a specific place (slide with the same dimensions). I know how to delete a slide : def delete_slides(presentation, index): xml_slides = presentation.slides._sldIdLst slides = list(xml_slides) xml_slides.remove(slides[index]) But how to insert a blank slide? A: Someone will probably come along with a better solution but you could just create a blank slide and then move it to the location you want. from pptx import Presentation def move_slide(old_index, new_index): xml_slides = presentation.slides._sldIdLst slides = list(xml_slides) xml_slides.remove(slides[old_index]) xml_slides.insert(new_index, slides[old_index]) presentation = Presentation("test1.pptx") layout = presentation.slide_masters[0].slide_layouts[6] slide = presentation.slides.add_slide(layout) slide_id = presentation.slides.index(slide) move_slide(slide_id, 5) presentation.save("test1.pptx")
Insert blank slide with python-pptx in existing presentation
I'm working with the python-pptx library and I'm trying to insert a blank slide at a specific place (slide with the same dimensions). I know how to delete a slide : def delete_slides(presentation, index): xml_slides = presentation.slides._sldIdLst slides = list(xml_slides) xml_slides.remove(slides[index]) But how to insert a blank slide?
[ "Someone will probably come along with a better solution but you could just create a blank slide and then move it to the location you want.\nfrom pptx import Presentation\n\ndef move_slide(old_index, new_index):\n xml_slides = presentation.slides._sldIdLst\n slides = list(xml_slides)\n xml_slides.remove(slides[old_index])\n xml_slides.insert(new_index, slides[old_index])\n\npresentation = Presentation(\"test1.pptx\")\nlayout = presentation.slide_masters[0].slide_layouts[6]\nslide = presentation.slides.add_slide(layout)\nslide_id = presentation.slides.index(slide)\nmove_slide(slide_id, 5)\npresentation.save(\"test1.pptx\")\n\n" ]
[ 0 ]
[]
[]
[ "python", "python_pptx" ]
stackoverflow_0074562537_python_python_pptx.txt
Q: For loop with if conditional statement I am trying to understand why first code run only once vs second code is running until it checks all the items in the list. 1. def get_word_over_10_char(list_of_words): for word in list_of_words: if len(word) > 10: return word else: return "" for word in list_of_words: if len(word) > 10: return word return '' word_list = ['soup', 'parameter', 'intuition', 'house-maker', 'fabrication'] Trying to return a word if length is more than 10, and return empty string if less than equal to 10. A: How are the items in your list ordered? Because the when the return statement in a function is called, the function returns the argument and stops. In the first piece of code, either the return in the if clause or the else is called after the first item in list_of_words, so the function stops there. In the second piece of code, the return word statement is only called once a word longer than 10 characters is found. From how you describe the problem, I reckon that the last item in your list_of_words is longer than 10. So with the second piece of code, you notice that that word is not returned, while you do not notice the empty strings not being returned. A: As Timo explained in his answer, function stops when you use return. In order to get all the elements with more than 10 characters, you need to create a list with these words before using return : def get_word_over_10_char(list_of_words): more_than_10 = [] for word in list_of_words: if len(word) > 10: more_than_10.append(word) if len(more_than_10)==0: return "Nothing longer than 10" return more_than_10 word_list = ['soup', 'parameter', 'intuition', 'house-maker', 'fabrication'] print(get_word_over_10_char(word_list)) A more pythonic second version using list comprehension to create more_than_10: def get_word_over_10_char_v2(list_of_words): more_than_10 = [word for word in list_of_words if len(word) > 10] if len(more_than_10)==0: return "Nothing longer than 10" return more_than_10 word_list = ['soup', 'parameter', 'intuition', 'house-maker', 'fabrication'] print(get_word_over_10_char_v2(word_list))
For loop with if conditional statement
I am trying to understand why the first piece of code runs only once, while the second keeps running until it has checked all the items in the list. 1. def get_word_over_10_char(list_of_words): for word in list_of_words: if len(word) > 10: return word else: return "" for word in list_of_words: if len(word) > 10: return word return '' word_list = ['soup', 'parameter', 'intuition', 'house-maker', 'fabrication'] I am trying to return a word if its length is more than 10, and return an empty string if it is less than or equal to 10.
[ "How are the items in your list ordered? Because the when the return statement in a function is called, the function returns the argument and stops. In the first piece of code, either the return in the if clause or the else is called after the first item in list_of_words, so the function stops there. In the second piece of code, the return word statement is only called once a word longer than 10 characters is found. From how you describe the problem, I reckon that the last item in your list_of_words is longer than 10. So with the second piece of code, you notice that that word is not returned, while you do not notice the empty strings not being returned.\n", "As Timo explained in his answer, function stops when you use return. In order to get all the elements with more than 10 characters, you need to create a list with these words before using return :\ndef get_word_over_10_char(list_of_words):\n more_than_10 = []\n for word in list_of_words:\n if len(word) > 10:\n more_than_10.append(word)\n if len(more_than_10)==0:\n return \"Nothing longer than 10\"\n return more_than_10\n \nword_list = ['soup', 'parameter', 'intuition', 'house-maker', 'fabrication']\n\nprint(get_word_over_10_char(word_list))\n\nA more pythonic second version using list comprehension to create more_than_10:\ndef get_word_over_10_char_v2(list_of_words):\n more_than_10 = [word for word in list_of_words if len(word) > 10]\n if len(more_than_10)==0:\n return \"Nothing longer than 10\"\n return more_than_10\n \nword_list = ['soup', 'parameter', 'intuition', 'house-maker', 'fabrication']\n\nprint(get_word_over_10_char_v2(word_list))\n\n\n" ]
[ 1, 0 ]
[]
[]
[ "for_loop", "if_statement", "python" ]
stackoverflow_0074577699_for_loop_if_statement_python.txt
Q: reading file by pickle module good afternoon! saving list(dict(),dict(),dict()) struct with pickle module, but when reading I get: <class 'function'>, and <function lesson at 0x00000278BA3A0D30> what am I doing wrong? def lesson(user, date): with open(user+"_"+date+".data", 'wb') as file: pickle.dump(lesson, file) file.close() def read(user, date): with open(user+"_"+date+".data", 'rb') as file: lesson = pickle.load(file) file.close() return(lesson) I am using python 3.10.7 A: "saving list(dict(),dict(),dict()) struct with pickle module". No, you're not. You're saving the lesson function. See line 3 of your code.
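A sketch of the corrected version, with the save function renamed so the data argument no longer shares a name with the function being pickled; file names and example data below are made up, and the with block makes the explicit file.close() unnecessary.

import pickle

def save_lesson(lesson, user, date):
    # Dump the data that was passed in, not the enclosing function object.
    with open(user + "_" + date + ".data", "wb") as file:
        pickle.dump(lesson, file)

def read_lesson(user, date):
    with open(user + "_" + date + ".data", "rb") as file:
        return pickle.load(file)

lessons = [{"topic": "algebra"}, {"topic": "geometry"}]
save_lesson(lessons, "user1", "2022-11-25")
print(read_lesson("user1", "2022-11-25"))  # [{'topic': 'algebra'}, {'topic': 'geometry'}]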
reading file by pickle module
good afternoon! saving list(dict(),dict(),dict()) struct with pickle module, but when reading I get: <class 'function'>, and <function lesson at 0x00000278BA3A0D30> what am I doing wrong? def lesson(user, date): with open(user+"_"+date+".data", 'wb') as file: pickle.dump(lesson, file) file.close() def read(user, date): with open(user+"_"+date+".data", 'rb') as file: lesson = pickle.load(file) file.close() return(lesson) I am using python 3.10.7
[ "\"saving list(dict(),dict(),dict()) struct with pickle module\". No, you're not. You're saving the lesson function. See line 3 of your code.\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074578037_python.txt
Q: Which of these variables is the best predictor for winning a match given this logit model My goal is to find the best predictor variable for winning a match. I have a slight knowledge of basic statistics, so I decided to use logistic regression because result of match is a binary variable. logit_model=sm.Logit(y,X) result=logit_model.fit() result.summary() This comes out with following result: ======================================================================== Model: Logit Pseudo R-squared: 0.515 Dependent Variable: win AIC: 92784.8133 Date: 2022-11-25 20:30 BIC: 92932.3349 No. Observations: 137967 Log-Likelihood: -46377. Df Model: 14 LL-Null: -95631. Df Residuals: 137952 LLR p-value: 0.0000 Converged: 1.0000 Scale: 1.0000 No. Iterations: 7.0000 ------------------------------------------------------------------------ Coef. Std.Err. z P>|z| [0.025 0.975] ------------------------------------------------------------------------ kills 0.2116 0.0050 42.7261 0.0000 0.2019 0.2213 assists 0.2439 0.0025 98.3537 0.0000 0.2391 0.2488 deaths -0.4083 0.0039 -103.6498 0.0000 -0.4160 -0.4005 baronKills 0.7598 0.0338 22.4612 0.0000 0.6935 0.8261 dragonKills 0.3566 0.0157 22.6557 0.0000 0.3257 0.3874 timeCCingOthers -0.0096 0.0006 -17.2654 0.0000 -0.0107 -0.0085 wardsPlaced 0.0051 0.0012 4.1346 0.0000 0.0027 0.0076 goldEarned -0.0003 0.0000 -45.5422 0.0000 -0.0003 -0.0003 inhibitorTakedowns 2.1111 0.0212 99.5492 0.0000 2.0696 2.1527 largestKillingSpree -0.0504 0.0070 -7.2300 0.0000 -0.0641 -0.0367 largestMultiKill 0.4043 0.0159 25.5014 0.0000 0.3732 0.4354 totalMinionsKilled 0.0043 0.0002 21.6630 0.0000 0.0039 0.0047 consumablesPurchased -0.0395 0.0032 -12.3773 0.0000 -0.0458 -0.0333 damageDealtToBuildings 0.0002 0.0000 25.9200 0.0000 0.0001 0.0002 turretKills 0.3140 0.0131 23.8875 0.0000 0.2882 0.3397 ======================================================================== What would be the best predictor for match win given these results? My initial thinking was I can't use the coefficient, because all variables come from different distributions. Is it valid thinking to use the z-score, since it standardizes values to the same distribution? Can variable assists and inhibitorTakedowns considered to be the best predictor for winning a match, since it has the highest z-score, or is this thinking flawed? A: The first thing that you should pay attention to is if a variable is significant or not given the level of threshold. It seems from your results that all are significant. Z-scores are used to see the level of significance. The second thing is to look at the coefficients to see which one has more impact on the label. The larger the absolute value of the coefficient, the more effect it will have. This effect may be positive or negative. The caveat here is, to standardize all predictor variables before putting them into the model. As you mentioned, they all come from different distributions. So, standardizing them will fix that problem.
Which of these variables is the best predictor for winning a match given this logit model
My goal is to find the best predictor variable for winning a match. I have a slight knowledge of basic statistics, so I decided to use logistic regression because result of match is a binary variable. logit_model=sm.Logit(y,X) result=logit_model.fit() result.summary() This comes out with following result: ======================================================================== Model: Logit Pseudo R-squared: 0.515 Dependent Variable: win AIC: 92784.8133 Date: 2022-11-25 20:30 BIC: 92932.3349 No. Observations: 137967 Log-Likelihood: -46377. Df Model: 14 LL-Null: -95631. Df Residuals: 137952 LLR p-value: 0.0000 Converged: 1.0000 Scale: 1.0000 No. Iterations: 7.0000 ------------------------------------------------------------------------ Coef. Std.Err. z P>|z| [0.025 0.975] ------------------------------------------------------------------------ kills 0.2116 0.0050 42.7261 0.0000 0.2019 0.2213 assists 0.2439 0.0025 98.3537 0.0000 0.2391 0.2488 deaths -0.4083 0.0039 -103.6498 0.0000 -0.4160 -0.4005 baronKills 0.7598 0.0338 22.4612 0.0000 0.6935 0.8261 dragonKills 0.3566 0.0157 22.6557 0.0000 0.3257 0.3874 timeCCingOthers -0.0096 0.0006 -17.2654 0.0000 -0.0107 -0.0085 wardsPlaced 0.0051 0.0012 4.1346 0.0000 0.0027 0.0076 goldEarned -0.0003 0.0000 -45.5422 0.0000 -0.0003 -0.0003 inhibitorTakedowns 2.1111 0.0212 99.5492 0.0000 2.0696 2.1527 largestKillingSpree -0.0504 0.0070 -7.2300 0.0000 -0.0641 -0.0367 largestMultiKill 0.4043 0.0159 25.5014 0.0000 0.3732 0.4354 totalMinionsKilled 0.0043 0.0002 21.6630 0.0000 0.0039 0.0047 consumablesPurchased -0.0395 0.0032 -12.3773 0.0000 -0.0458 -0.0333 damageDealtToBuildings 0.0002 0.0000 25.9200 0.0000 0.0001 0.0002 turretKills 0.3140 0.0131 23.8875 0.0000 0.2882 0.3397 ======================================================================== What would be the best predictor for match win given these results? My initial thinking was I can't use the coefficient, because all variables come from different distributions. Is it valid thinking to use the z-score, since it standardizes values to the same distribution? Can variable assists and inhibitorTakedowns considered to be the best predictor for winning a match, since it has the highest z-score, or is this thinking flawed?
[ "The first thing that you should pay attention to is if a variable is significant or not given the level of threshold. It seems from your results that all are significant. Z-scores are used to see the level of significance. The second thing is to look at the coefficients to see which one has more impact on the label. The larger the absolute value of the coefficient, the more effect it will have. This effect may be positive or negative. The caveat here is, to standardize all predictor variables before putting them into the model. As you mentioned, they all come from different distributions. So, standardizing them will fix that problem.\n" ]
[ 1 ]
[]
[]
[ "data_science", "logistic_regression", "machine_learning", "python", "statistics" ]
stackoverflow_0074577402_data_science_logistic_regression_machine_learning_python_statistics.txt
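The answer above recommends standardizing the predictors before comparing coefficient sizes. A sketch of that step, assuming X is a pandas DataFrame holding the predictor columns from the question and y is the win column; adding an intercept and ranking by absolute standardized coefficient are choices made here, not part of the original post:

import statsmodels.api as sm

# z-score each predictor so all coefficients live on a comparable scale
X_std = (X - X.mean()) / X.std()
X_std = sm.add_constant(X_std)  # intercept term

result = sm.Logit(y, X_std).fit()

# with standardized inputs, the absolute coefficient size tells you how much
# one standard deviation of each predictor shifts the log-odds of winning
print(result.params.drop("const").abs().sort_values(ascending=False))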
Q: Using ProccessPoolExecutor for functions with I/O Lately I have been using ProcessPoolExecutor for accelerating the processing of some functions I wrote. I have a question regarding one function I would like to accelerate. This function def thefunction(input_file, output_file, somepar) Involves opening and reading the input file, processing it and writing the results in a output file. Right now I am doing lista=glob.glob(os.path.join(args.thefolders,'path/this.json')) for filen in lista: print("Processing ",filen) thefunction(filen,None,args.somepar) I would like to do some multiprocess mapping like with ProcessPoolExecutor() as process_pool: work_done=list(process_pool.map(partial(thefunction,somepar=args.somepar),lista)) But I am a bit worried since the function involves I/O Provided that the files accessed are different for every member of the list, is it safe to do the above use? A: If the files are different, IO operations from different processes at once are completely reasonable. If the files are the same, such an operation is unsafe and would require to use a synchronization primitive such as a lock, which would render the multiprocessing inefficient.
Using ProccessPoolExecutor for functions with I/O
Lately I have been using ProcessPoolExecutor for accelerating the processing of some functions I wrote. I have a question regarding one function I would like to accelerate. This function def thefunction(input_file, output_file, somepar) Involves opening and reading the input file, processing it and writing the results in a output file. Right now I am doing lista=glob.glob(os.path.join(args.thefolders,'path/this.json')) for filen in lista: print("Processing ",filen) thefunction(filen,None,args.somepar) I would like to do some multiprocess mapping like with ProcessPoolExecutor() as process_pool: work_done=list(process_pool.map(partial(thefunction,somepar=args.somepar),lista)) But I am a bit worried since the function involves I/O Provided that the files accessed are different for every member of the list, is it safe to do the above use?
[ "If the files are different, IO operations from different processes at once are completely reasonable.\nIf the files are the same, such an operation is unsafe and would require to use a synchronization primitive such as a lock, which would render the multiprocessing inefficient.\n" ]
[ 0 ]
[]
[]
[ "process_pool", "python" ]
stackoverflow_0074568519_process_pool_python.txt
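The answer above boils down to: one process per input file, each writing its own output file, is safe. A self-contained sketch of that pattern, where the body of thefunction is only a stand-in and the folder, glob pattern and somepar value are illustrative:

import glob
import os
from functools import partial
from concurrent.futures import ProcessPoolExecutor

def thefunction(input_file, output_file, somepar):
    # stand-in body: every call reads its own input file and writes its own
    # output file, so no two worker processes ever touch the same path
    if output_file is None:
        output_file = input_file + ".out"
    with open(input_file) as src, open(output_file, "w") as dst:
        dst.write(src.read().upper())
    return output_file

if __name__ == "__main__":  # guard required for process pools on Windows/macOS
    lista = glob.glob(os.path.join("thefolders", "path", "*.json"))
    with ProcessPoolExecutor() as process_pool:
        work_done = list(process_pool.map(partial(thefunction, output_file=None, somepar=1), lista))
    print(work_done)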
Q: How do I print lines from a file in python after and before a match? I would like to print some specific lines from a file, only those lines that come after a certain word appears on a line ('Ingredients:') and before another word appears ('Instructions:'). The file is a list of recipes and I want to be able to print out only the ingredients. example of the text: RECIPE : CACIO E PEPE #PASTA Ingredients: spaghetti: 200-g butter: 25-gr black pepper: as needed pecorino: 50-gr Instructions: I tried this way and many others but nothing seems to work: def find_line_after(target): with open('recipes.txt', 'r') as f: line = f.readline().strip() while line: if line == target: line = f.readline().strip() return f.readline() A: def get_all_ingredients(): flag = False with open('recipes.txt', 'r') as f: for line in f: if 'Instructions' in line: flag = False if flag: print(line.rstrip()) if 'Ingredients' in line: flag = True get_all_ingredients() Tested input: RECIPE : CACIO E PEPE #PASTA Ingredients: spaghetti: 200-g butter: 25-gr black pepper: as needed pecorino: 50-gr Instructions: asd RECIPE : CACIO E PEPE #PASTA Ingredients: salt: 200-g chicken: 25-gr Instructions: qwe qwe RECIPE : CACIO E PEPE #PASTA Ingredients: carrot: 200-g rabbit: 25-gr Instructions: qwe qwe Output got: spaghetti: 200-g butter: 25-gr black pepper: as needed pecorino: 50-gr salt: 200-g chicken: 25-gr carrot: 200-g rabbit: 25-gr A: This may help you: start, stop = 0, 0 with open('recipes.txt', 'r') as f: lines = f.readlines() for n in range(len(lines)): if 'Ingredients' in lines[n]: start = n + 1 elif 'Instructions' in lines[n]: stop = n ingredients = list(filter(('\n').__ne__, lines[start:stop])) for i in ingredients: print(i, end='') Basically looks for the start and stop indexes, fetches the lines from that range and removes the empty lines (and extra new-line chars). Output: spaghetti: 200-g butter: 25-gr black pepper: as needed pecorino: 50-gr A: My approach involves setting a variable is_ingredient to True or False, depending on the line and print if it is True. is_ingredient = False with open("recipes.txt", "r", encoding="utf-8") as stream: for line in stream: if line.startswith("Ingredients:"): is_ingredient = True elif line.startswith("Instructions:"): is_ingredient = False print("---") elif is_ingredient: print(line, end="") The only addition I did was to print the --- as a separator between recipes.
How do I print lines from a file in python after and before a match?
I would like to print some specific lines from a file, only those lines that come after a certain word appears on a line ('Ingredients:') and before another word appears ('Instructions:'). The file is a list of recipes and I want to be able to print out only the ingredients. example of the text: RECIPE : CACIO E PEPE #PASTA Ingredients: spaghetti: 200-g butter: 25-gr black pepper: as needed pecorino: 50-gr Instructions: I tried this way and many others but nothing seems to work: def find_line_after(target): with open('recipes.txt', 'r') as f: line = f.readline().strip() while line: if line == target: line = f.readline().strip() return f.readline()
[ "def get_all_ingredients():\n flag = False\n with open('recipes.txt', 'r') as f:\n for line in f:\n if 'Instructions' in line:\n flag = False\n\n if flag:\n print(line.rstrip())\n\n if 'Ingredients' in line:\n flag = True\n\n\nget_all_ingredients()\n\nTested input:\nRECIPE : CACIO E PEPE #PASTA\nIngredients:\nspaghetti: 200-g\nbutter: 25-gr\nblack pepper: as needed\npecorino: 50-gr\nInstructions:\nasd\nRECIPE : CACIO E PEPE #PASTA\nIngredients:\nsalt: 200-g\nchicken: 25-gr\nInstructions:\nqwe\nqwe\nRECIPE : CACIO E PEPE #PASTA\nIngredients:\ncarrot: 200-g\nrabbit: 25-gr\nInstructions:\nqwe\nqwe\n\nOutput got:\nspaghetti: 200-g\nbutter: 25-gr\nblack pepper: as needed\npecorino: 50-gr\nsalt: 200-g\nchicken: 25-gr\ncarrot: 200-g\nrabbit: 25-gr\n\n", "This may help you:\nstart, stop = 0, 0\nwith open('recipes.txt', 'r') as f:\n lines = f.readlines()\nfor n in range(len(lines)):\n if 'Ingredients' in lines[n]:\n start = n + 1\n elif 'Instructions' in lines[n]:\n stop = n\ningredients = list(filter(('\\n').__ne__, lines[start:stop]))\nfor i in ingredients:\n print(i, end='')\n\nBasically looks for the start and stop indexes, fetches the lines from that range and removes the empty lines (and extra new-line chars).\nOutput:\nspaghetti: 200-g\nbutter: 25-gr\nblack pepper: as needed\npecorino: 50-gr\n\n", "My approach involves setting a variable is_ingredient to True or False, depending on the line and print if it is True.\nis_ingredient = False\nwith open(\"recipes.txt\", \"r\", encoding=\"utf-8\") as stream:\n for line in stream:\n if line.startswith(\"Ingredients:\"):\n is_ingredient = True\n elif line.startswith(\"Instructions:\"):\n is_ingredient = False\n print(\"---\")\n elif is_ingredient:\n print(line, end=\"\")\n\nThe only addition I did was to print the --- as a separator between recipes.\n" ]
[ 1, 0, 0 ]
[]
[]
[ "loops", "python", "readfile" ]
stackoverflow_0074577567_loops_python_readfile.txt
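All three answers above scan the file line by line; as a further option, a regular expression can pull every ingredient block out in one pass. A sketch assuming the recipes.txt layout shown in the question:

import re

with open("recipes.txt", "r", encoding="utf-8") as f:
    text = f.read()

# capture everything between "Ingredients:" and the next "Instructions:" (non-greedy)
blocks = re.findall(r"Ingredients:\s*(.*?)\s*Instructions:", text, flags=re.DOTALL)

for block in blocks:
    for line in block.splitlines():
        if line.strip():          # skip blank lines inside the block
            print(line.strip())
    print("---")                  # separator between recipes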
Q: How to detect circle defects? Is there any way to tell if a circle has such defects? Roundness does not work. Or is there a way to eliminate them? perimeter = cv2.arcLength(cnts[0],True) area = cv2.contourArea(cnts[0]) roundness = 4*pi*area/(perimeter*perimeter) print("Roundness:", roundness) A: The "roundness" measure is sensitive to a precise estimate of the perimeter. What cv2.arcLength() does is add the lengths of each of the polygon edges, which severely overestimates the length of outlines. I think this is the main reason that this measure hasn't worked for you. With a better perimeter estimator you would get useful results. An alternative measure that might be more useful is "circularity", defined as the coefficient of variation of the radius. In short, you compute the distance of each polygon vertex (i.e. outline point) to the centroid, then determine the coefficient of variation of these distances (== std / mean). I wrote a quick Python script to compute this starting from an OpenCV contour: import cv2 import numpy as np # read in OP's example image, making sure we ignore the red arrow img = cv2.imread('jGssp.png')[:, :, 1] _, img = cv2.threshold(img, 127, 255, 0) # get the contour of the shape contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) contour = contours[0][:, 0, :] # add the first point as the last, to close it contour = np.concatenate((contour, contour[0, None, :])) # compute centroid def cross_product(v1, v2): """2D cross product.""" return v1[0] * v2[1] - v1[1] * v2[0] sum = 0.0 xsum = 0.0 ysum = 0.0 for ii in range(1, contour.shape[0]): v = cross_product(contour[ii - 1, :], contour[ii, :]) sum += v xsum += (contour[ii - 1, 0] + contour[ii, 0]) * v ysum += (contour[ii - 1, 1] + contour[ii, 1]) * v centroid = np.array([ xsum, ysum ]) / (3 * sum) # Compute coefficient of variation of distances to centroid (==circularity) d = np.sqrt(np.sum((contour - centroid) ** 2, axis=1)) circularity = np.std(d) / np.mean(d) A: This make me think of a similar problem that I had. You could compute the signature of the shape. The signature can be defined as, for each pixel of the border of the shape, the distance between this pixel and the center of the shape. For a perfect circle, the distance from the border to the center should be constant (in an ideal continuous world). When defects are visible on the edge of the circle (either dents or excesses), the ideal constant line changes to a wiggly curve, with huge variation when on the defects. It's fairly easy to detect those variation with FFT for example, which allows to quantify the defect significance. You can expand this solution to any given shape. If your ideal shape is a square, you can compute the signature, which will give you some kind of sinusoidal curve. Defects will appear in a same way on the curve, and would be detectable with the same logic as with a circle. I can't give you an code example, as the project was for a company project, but the idea is still here. A: Here is one way to do that in Python/OpenCV. Read the input Threshold on white (to remove the red arrow) Apply Hough Circle Draw the circle on the thresholded image for comparison Draw a white filled circle on black background from the circle parameters. 
Get the difference between the thresholded image and the drawn circle image Apply morphology open to remove the ring from the irregular boundary of the original circle Count the number of white pixels in the previous image as the amount off defect Input: import cv2 import numpy as np # Read image img = cv2.imread('circle_defect.png') hh, ww = img.shape[:2] # threshold on white to remove red arrow lower = (255,255,255) upper = (255,255,255) thresh = cv2.inRange(img, lower, upper) # get Hough circles min_dist = int(ww/5) circles = cv2.HoughCircles(thresh, cv2.HOUGH_GRADIENT, 1, minDist=min_dist, param1=150, param2=15, minRadius=0, maxRadius=0) print(circles) # draw circles on input thresh (without red arrow) circle_img = thresh.copy() circle_img = cv2.merge([circle_img,circle_img,circle_img]) for circle in circles[0]: # draw the circle in the output image, then draw a rectangle # corresponding to the center of the circle (x,y,r) = circle x = int(x) y = int(y) r = int(r) cv2.circle(circle_img, (x, y), r, (0, 0, 255), 1) # draw filled circle on black background circle_filled = np.zeros_like(thresh) cv2.circle(circle_filled, (x,y), r, 255, -1) # get difference between the thresh image and the circle_filled image diff = cv2.absdiff(thresh, circle_filled) # apply morphology to remove ring kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5)) result = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel) # count non-zero pixels defect_count = np.count_nonzero(result) print("defect count:", defect_count) # save results cv2.imwrite('circle_defect_thresh.jpg', thresh) cv2.imwrite('circle_defect_circle.jpg', circle_img) cv2.imwrite('circle_defect_circle_diff.jpg', diff) cv2.imwrite('circle_defect_detected.png', result) # show images cv2.imshow('thresh', thresh) cv2.imshow('circle_filled', circle_filled) cv2.imshow('diff', diff) cv2.imshow('result', result) cv2.waitKey(0) cv2.destroyAllWindows() Input without Red Arrow: Red Circle Drawn on Input: Circle from HoughCircle: Difference: Difference Cleaned Up: Textual Result: defect count: 500
How to detect circle defects?
Is there any way to tell if a circle has such defects? Roundness does not work. Or is there a way to eliminate them? perimeter = cv2.arcLength(cnts[0],True) area = cv2.contourArea(cnts[0]) roundness = 4*pi*area/(perimeter*perimeter) print("Roundness:", roundness)
[ "The \"roundness\" measure is sensitive to a precise estimate of the perimeter. What cv2.arcLength() does is add the lengths of each of the polygon edges, which severely overestimates the length of outlines. I think this is the main reason that this measure hasn't worked for you. With a better perimeter estimator you would get useful results.\nAn alternative measure that might be more useful is \"circularity\", defined as the coefficient of variation of the radius. In short, you compute the distance of each polygon vertex (i.e. outline point) to the centroid, then determine the coefficient of variation of these distances (== std / mean).\nI wrote a quick Python script to compute this starting from an OpenCV contour:\nimport cv2\nimport numpy as np\n\n# read in OP's example image, making sure we ignore the red arrow\nimg = cv2.imread('jGssp.png')[:, :, 1]\n_, img = cv2.threshold(img, 127, 255, 0)\n\n# get the contour of the shape\ncontours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\ncontour = contours[0][:, 0, :]\n\n# add the first point as the last, to close it\ncontour = np.concatenate((contour, contour[0, None, :]))\n\n# compute centroid\ndef cross_product(v1, v2):\n \"\"\"2D cross product.\"\"\"\n return v1[0] * v2[1] - v1[1] * v2[0]\n\nsum = 0.0\nxsum = 0.0\nysum = 0.0\nfor ii in range(1, contour.shape[0]):\n v = cross_product(contour[ii - 1, :], contour[ii, :])\n sum += v\n xsum += (contour[ii - 1, 0] + contour[ii, 0]) * v\n ysum += (contour[ii - 1, 1] + contour[ii, 1]) * v\n\ncentroid = np.array([ xsum, ysum ]) / (3 * sum)\n\n# Compute coefficient of variation of distances to centroid (==circularity)\nd = np.sqrt(np.sum((contour - centroid) ** 2, axis=1))\ncircularity = np.std(d) / np.mean(d)\n\n", "This make me think of a similar problem that I had. You could compute the signature of the shape. The signature can be defined as, for each pixel of the border of the shape, the distance between this pixel and the center of the shape.\nFor a perfect circle, the distance from the border to the center should be constant (in an ideal continuous world). When defects are visible on the edge of the circle (either dents or excesses), the ideal constant line changes to a wiggly curve, with huge variation when on the defects.\nIt's fairly easy to detect those variation with FFT for example, which allows to quantify the defect significance.\nYou can expand this solution to any given shape. If your ideal shape is a square, you can compute the signature, which will give you some kind of sinusoidal curve. 
Defects will appear in a same way on the curve, and would be detectable with the same logic as with a circle.\n\nI can't give you an code example, as the project was for a company project, but the idea is still here.\n", "Here is one way to do that in Python/OpenCV.\n\nRead the input\nThreshold on white (to remove the red arrow)\nApply Hough Circle\nDraw the circle on the thresholded image for comparison\nDraw a white filled circle on black background from the circle parameters.\nGet the difference between the thresholded image and the drawn circle image\nApply morphology open to remove the ring from the irregular boundary of the original circle\nCount the number of white pixels in the previous image as the amount off defect\n\n\nInput:\n\nimport cv2\nimport numpy as np\n\n# Read image\nimg = cv2.imread('circle_defect.png')\nhh, ww = img.shape[:2]\n\n# threshold on white to remove red arrow\nlower = (255,255,255)\nupper = (255,255,255)\nthresh = cv2.inRange(img, lower, upper)\n\n# get Hough circles\nmin_dist = int(ww/5)\ncircles = cv2.HoughCircles(thresh, cv2.HOUGH_GRADIENT, 1, minDist=min_dist, param1=150, param2=15, minRadius=0, maxRadius=0)\nprint(circles)\n\n# draw circles on input thresh (without red arrow)\ncircle_img = thresh.copy()\ncircle_img = cv2.merge([circle_img,circle_img,circle_img])\nfor circle in circles[0]:\n # draw the circle in the output image, then draw a rectangle\n # corresponding to the center of the circle\n (x,y,r) = circle\n x = int(x)\n y = int(y)\n r = int(r)\n cv2.circle(circle_img, (x, y), r, (0, 0, 255), 1)\n\n# draw filled circle on black background\ncircle_filled = np.zeros_like(thresh)\ncv2.circle(circle_filled, (x,y), r, 255, -1)\n\n# get difference between the thresh image and the circle_filled image\ndiff = cv2.absdiff(thresh, circle_filled)\n\n# apply morphology to remove ring\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))\nresult = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel)\n\n# count non-zero pixels\ndefect_count = np.count_nonzero(result)\nprint(\"defect count:\", defect_count)\n\n# save results\ncv2.imwrite('circle_defect_thresh.jpg', thresh)\ncv2.imwrite('circle_defect_circle.jpg', circle_img)\ncv2.imwrite('circle_defect_circle_diff.jpg', diff)\ncv2.imwrite('circle_defect_detected.png', result)\n\n# show images\ncv2.imshow('thresh', thresh)\ncv2.imshow('circle_filled', circle_filled)\ncv2.imshow('diff', diff)\ncv2.imshow('result', result)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\nInput without Red Arrow:\n\nRed Circle Drawn on Input:\n\nCircle from HoughCircle:\n\nDifference:\n\nDifference Cleaned Up:\n\nTextual Result:\ndefect count: 500\n\n" ]
[ 4, 0, 0 ]
[]
[]
[ "image_processing", "opencv", "python" ]
stackoverflow_0074523496_image_processing_opencv_python.txt
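The second answer above describes the radial signature idea without code. A minimal sketch of one way it could look, reusing the centroid-distance array d from the circularity snippet in the first answer; the normalization and the reading of the spectrum are heuristics, not a fixed recipe:

import numpy as np

# d = distance of every contour point to the centroid (computed above)
signature = d - d.mean()                   # drop the constant-radius component
spectrum = np.abs(np.fft.rfft(signature))  # magnitude of each frequency

# an ideal circle leaves almost no energy outside the zero frequency;
# dents and bumps on the edge show up as energy in the higher bins
defect_energy = spectrum[1:].sum() / (d.mean() * len(d))
print("relative defect energy:", defect_energy)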
Q: How to get values of a dataframe that have characters I have a dataframe: df = pd.DataFrame({'col' : [1,2, 10, np.nan, 'a'], 'col2': ['a', 10, 30, 'c',50], 'col3': [1,2,3,4,5.0]}) How do I obtain, from the column col2, a new dataframe with the values that have characters? In this case df_final = ['a', 'c']. I tried to verify whether each value is not a number, but this doesn't work for me. A: You could use pandas.Series.str.contains in this case by using a regex that does not match numbers. It should be noted that we need to set na argument to False because as per documentation Specifying na to be False instead of NaN replaces NaN values with False. If Series or Index does not contain NaN values the resultant dtype will be bool, otherwise, an object dtype. df.loc[df['col2'].str.contains(r'[^0-9]', na=False, regex=True)] col col2 col3 0 1 a 1.0 3 NaN c 4.0
How to get values of a dataframe that have characters
I have a dataframe: df = pd.DataFrame({'col' : [1,2, 10, np.nan, 'a'], 'col2': ['a', 10, 30, 'c',50], 'col3': [1,2,3,4,5.0]}) How do I obtain, from the column col2, a new dataframe with the values that have characters? In this case df_final = ['a', 'c']. I tried to verify whether each value is not a number, but this doesn't work for me.
[ "You could use pandas.Series.str.contains in this case by using a regex that does not match numbers. It should be noted that we need to set na argument to False because as per documentation\n\nSpecifying na to be False instead of NaN replaces NaN values with\nFalse. If Series or Index does not contain NaN values the resultant\ndtype will be bool, otherwise, an object dtype.\n\ndf.loc[df['col2'].str.contains(r'[^0-9]', na=False, regex=True)]\n\n col col2 col3\n0 1 a 1.0\n3 NaN c 4.0\n\n" ]
[ 2 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074578035_dataframe_pandas_python.txt
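A second common way to flag the non-numeric entries, sketched here with pandas.to_numeric, which avoids the regex and also works when the column mixes real numbers with strings:

import numpy as np
import pandas as pd

df = pd.DataFrame({'col': [1, 2, 10, np.nan, 'a'],
                   'col2': ['a', 10, 30, 'c', 50],
                   'col3': [1, 2, 3, 4, 5.0]})

# to_numeric turns anything that is not a number into NaN,
# so rows where the result is NaN are the "character" rows
mask = pd.to_numeric(df['col2'], errors='coerce').isna()
print(df.loc[mask])                    # the matching rows of the dataframe
print(df.loc[mask, 'col2'].tolist())   # ['a', 'c'], as in df_final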
Q: Why when web scraping does the valueError: Not enough values to unpack in BeautifulSoup happen and what does it mean So I am scraping a [website][1] and I want to Retrieve the webpages based on these URLs and convert each into a beautifulsoup object Retrieve Car Manufacturing Year, Engine, Price, Dealer information (if it is available), and the URL (href) to access the detailed car information. When I run the code I get the error "ValueError: not enough values to unpack (expected 4, got 3)" when I remove one value instead of having make, model, year, and price I change it to make, model, and price and it comes up with another error "too many values to unpack (expected 3)" import requests import pandas as pd from bs4 import BeautifulSoup url = "https://jammer.ie/used-cars?page={}&per-page=12" all_data = [] for page in range(1, 3): # <-- increase number of pages here soup = BeautifulSoup(requests.get(url.format(page)).text, "html.parser") for car in soup.select(".car"): info = car.select_one(".top-info").get_text(strip=True, separator="|") make, model, year, price = info.split("|") dealer_name = car.select_one(".dealer-name h6").get_text( strip=True, separator=" " ) address = car.select_one(".address").get_text(strip=True) features = {} for feature in car.select(".car--features li"): k = feature.img["src"].split("/")[-1].split(".")[0] v = feature.span.text features[f"feature_{k}"] = v all_data.append( { "make": make, "model": model, "year": year, "price": price, "dealer_name": dealer_name, "address": address, "url": "https://jammer.ie" + car.select_one("a[href*=vehicle]")["href"], **features, } ) df = pd.DataFrame(all_data) # prints sample data to screen: print(df.tail().to_markdown(index=False)) # saves all data to CSV df.to_csv('data.csv', index=False) A: You can make a check if the car contains model or not: import requests import pandas as pd from bs4 import BeautifulSoup url = "https://jammer.ie/used-cars?page={}&per-page=12" all_data = [] for page in range(1, 3): # <-- increase number of pages here soup = BeautifulSoup(requests.get(url.format(page)).text, "html.parser") for car in soup.select(".car"): info = car.select_one(".top-info").get_text(strip=True, separator="|") info = info.split("|") if len(info) == 4: make, model, year, price = info else: make, year, price = info model = "N/A" dealer_name = car.select_one(".dealer-name h6").get_text( strip=True, separator=" " ) address = car.select_one(".address").get_text(strip=True) features = {} for feature in car.select(".car--features li"): k = feature.img["src"].split("/")[-1].split(".")[0] v = feature.span.text features[f"feature_{k}"] = v all_data.append( { "make": make, "model": model, "year": year, "price": price, "dealer_name": dealer_name, "address": address, "url": "https://jammer.ie" + car.select_one("a[href*=vehicle]")["href"], **features, } ) df = pd.DataFrame(all_data) # prints sample data to screen: print(df.tail().to_markdown(index=False)) # saves all data to CSV df.to_csv("data.csv", index=False) Prints: make model year price dealer_name address url feature_speed feature_engine feature_transmission feature_door-icon1 feature_petrol5 feature_hatchback feature_owner feature_paint SEAT Leon 2015 Price on application McNamara Motors Co. Cork https://jammer.ie/vehicle/166591-seat-leon-2015 45000 miles 1.2 litres Manual 5 doors Petrol Hatchback 2 previous owners Grey Toyota Verso 2012 €8,250 Amcc Co. 
Dublin https://jammer.ie/vehicle/166590-toyota-verso-2012 98179 miles 1.5 litres Automatic 4 doors Petrol MPV nan Purple Mazda Demio 2012 €7,950 Amcc Co. Dublin https://jammer.ie/vehicle/166589-mazda-demio-2012 82644 miles 1.3 litres Automatic 4 doors Petrol Hatchback nan Red Toyota Corolla 2017 €14,950 Amcc Co. Dublin https://jammer.ie/vehicle/166588-toyota-corolla-2017 78916 miles 1.5 litres Automatic 4 doors nan Estate nan Silver Mazda Demio 2013 €8,950 Amcc Co. Dublin https://jammer.ie/vehicle/166587-mazda-demio-2013 53439 miles 1.3 litres Automatic 4 doors Petrol Hatchback nan Grey
Why when web scraping does the valueError: Not enough values to unpack in BeautifulSoup happen and what does it mean
So I am scraping a [website][1] and I want to Retrieve the webpages based on these URLs and convert each into a beautifulsoup object Retrieve Car Manufacturing Year, Engine, Price, Dealer information (if it is available), and the URL (href) to access the detailed car information. When I run the code I get the error "ValueError: not enough values to unpack (expected 4, got 3)" when I remove one value instead of having make, model, year, and price I change it to make, model, and price and it comes up with another error "too many values to unpack (expected 3)" import requests import pandas as pd from bs4 import BeautifulSoup url = "https://jammer.ie/used-cars?page={}&per-page=12" all_data = [] for page in range(1, 3): # <-- increase number of pages here soup = BeautifulSoup(requests.get(url.format(page)).text, "html.parser") for car in soup.select(".car"): info = car.select_one(".top-info").get_text(strip=True, separator="|") make, model, year, price = info.split("|") dealer_name = car.select_one(".dealer-name h6").get_text( strip=True, separator=" " ) address = car.select_one(".address").get_text(strip=True) features = {} for feature in car.select(".car--features li"): k = feature.img["src"].split("/")[-1].split(".")[0] v = feature.span.text features[f"feature_{k}"] = v all_data.append( { "make": make, "model": model, "year": year, "price": price, "dealer_name": dealer_name, "address": address, "url": "https://jammer.ie" + car.select_one("a[href*=vehicle]")["href"], **features, } ) df = pd.DataFrame(all_data) # prints sample data to screen: print(df.tail().to_markdown(index=False)) # saves all data to CSV df.to_csv('data.csv', index=False)
[ "You can make a check if the car contains model or not:\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n\nurl = \"https://jammer.ie/used-cars?page={}&per-page=12\"\n\nall_data = []\n\nfor page in range(1, 3): # <-- increase number of pages here\n soup = BeautifulSoup(requests.get(url.format(page)).text, \"html.parser\")\n\n for car in soup.select(\".car\"):\n info = car.select_one(\".top-info\").get_text(strip=True, separator=\"|\")\n info = info.split(\"|\")\n if len(info) == 4:\n make, model, year, price = info\n else:\n make, year, price = info\n model = \"N/A\"\n dealer_name = car.select_one(\".dealer-name h6\").get_text(\n strip=True, separator=\" \"\n )\n address = car.select_one(\".address\").get_text(strip=True)\n\n features = {}\n for feature in car.select(\".car--features li\"):\n k = feature.img[\"src\"].split(\"/\")[-1].split(\".\")[0]\n v = feature.span.text\n features[f\"feature_{k}\"] = v\n\n all_data.append(\n {\n \"make\": make,\n \"model\": model,\n \"year\": year,\n \"price\": price,\n \"dealer_name\": dealer_name,\n \"address\": address,\n \"url\": \"https://jammer.ie\"\n + car.select_one(\"a[href*=vehicle]\")[\"href\"],\n **features,\n }\n )\n\ndf = pd.DataFrame(all_data)\n# prints sample data to screen:\nprint(df.tail().to_markdown(index=False))\n# saves all data to CSV\ndf.to_csv(\"data.csv\", index=False)\n\nPrints:\n\n\n\n\nmake\nmodel\nyear\nprice\ndealer_name\naddress\nurl\nfeature_speed\nfeature_engine\nfeature_transmission\nfeature_door-icon1\nfeature_petrol5\nfeature_hatchback\nfeature_owner\nfeature_paint\n\n\n\n\nSEAT\nLeon\n2015\nPrice on application\nMcNamara Motors\nCo. Cork\nhttps://jammer.ie/vehicle/166591-seat-leon-2015\n45000 miles\n1.2 litres\nManual\n5 doors\nPetrol\nHatchback\n2 previous owners\nGrey\n\n\nToyota\nVerso\n2012\n€8,250\nAmcc\nCo. Dublin\nhttps://jammer.ie/vehicle/166590-toyota-verso-2012\n98179 miles\n1.5 litres\nAutomatic\n4 doors\nPetrol\nMPV\nnan\nPurple\n\n\nMazda\nDemio\n2012\n€7,950\nAmcc\nCo. Dublin\nhttps://jammer.ie/vehicle/166589-mazda-demio-2012\n82644 miles\n1.3 litres\nAutomatic\n4 doors\nPetrol\nHatchback\nnan\nRed\n\n\nToyota\nCorolla\n2017\n€14,950\nAmcc\nCo. Dublin\nhttps://jammer.ie/vehicle/166588-toyota-corolla-2017\n78916 miles\n1.5 litres\nAutomatic\n4 doors\nnan\nEstate\nnan\nSilver\n\n\nMazda\nDemio\n2013\n€8,950\nAmcc\nCo. Dublin\nhttps://jammer.ie/vehicle/166587-mazda-demio-2013\n53439 miles\n1.3 litres\nAutomatic\n4 doors\nPetrol\nHatchback\nnan\nGrey\n\n\n\n" ]
[ 1 ]
[]
[]
[ "beautifulsoup", "pandas", "python", "request", "web_scraping" ]
stackoverflow_0074575234_beautifulsoup_pandas_python_request_web_scraping.txt
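The error in the title is not specific to scraping: a, b, c, d = something raises ValueError whenever the right-hand side does not yield exactly four items. A tiny standalone sketch of that rule, plus Python's extended unpacking, which tolerates a middle part of varying length (the sample strings echo the answer's output table):

info = "SEAT|2015|Price on application".split("|")

# three items into four names -> ValueError: not enough values to unpack
# make, model, year, price = info

# a starred target absorbs however many middle items there happen to be
make, *middle, price = info
print(make, middle, price)   # SEAT ['2015'] Price on application

info = "Toyota|Verso|2012|€8,250".split("|")
make, *middle, price = info
print(make, middle, price)   # Toyota ['Verso', '2012'] €8,250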
Q: Django Rest Framework Scope Throttling on function based view Wanted to ask if someone knows a way or a workaround to how to set different throttle scopes for different request methods in a function-based view. For example @api_view(['GET', 'POST']) def someFunction(request): if request.method == 'GET': # set scope for get requests elif request.method == 'POST': # set scope for post requests I tried looking around, but all answers are for class-based views only. Would appreciate the help, thanks. A: You can solve this by creating all the custom throttling classes first. Note: Only the throttles are in classes but the views are functions. class PostAnononymousRateThrottle(throttling.AnonRateThrottle): scope = 'post_anon' def allow_request(self, request, view): if request.method == "GET": return True return super().allow_request(request, view) class GetAnononymousRateThrottle(throttling.AnonRateThrottle): scope = 'get_anon' def allow_request(self, request, view): if request.method == "POST": return True return super().allow_request(request, view) class PostUserRateThrottle(throttling.UserRateThrottle): scope = 'post_user' def allow_request(self, request, view): if request.method == "GET": return True return super().allow_request(request, view) class GetUserRateThrottle(throttling.UserRateThrottle): scope = 'get_user' def allow_request(self, request, view): if request.method == "POST": return True return super().allow_request(request, view) You can choose to eliminate the classes if you are not looking for authentication or method type. Then you need to import this from rest_framework.decorators import api_view, throttle_classes Then you can wrap your function view with throttle_classes decorator with all the permissions created @api_view(['GET', 'POST']) @throttle_classes([PostAnononymousRateThrottle, GetAnononymousRateThrottle, PostUserRateThrottle, GetUserRateThrottle]) def someFunction(request): if request.method == 'POST': return Response({"message": "Got some data!", "data": request.data}) elif request.method == 'GET': return Response({"message": "Hello, world!"}) Don't forget to mention the throttle rate in the settings.py REST_FRAMEWORK = { 'DEFAULT_THROTTLE_RATES': { 'post_anon': '3/minute', 'get_anon': '1/minute', 'post_user': '2/minute', 'get_user': '2/minute' } } Reference: https://medium.com/analytics-vidhya/throttling-requests-with-django-rest-framework-for-different-http-methods-3ab0461044c A: I finally found out a workaround for function based views. Here is how I implemented it. As it was explained in previous answers, we need to extend the UserRateThrottle class or AnonRateThrottle class depending on our need. For my case, I was more interested in throttling requests from users. from rest_framework.throttling import UserRateThrottle class CustomThrottle(UserRateThrottle): scope = 'my_custom_scope' def allow_request(self, request, view): if request.method == 'GET': self.scope = 'get_scope' self.rate = '2/hour' return True return super().allow_request(request, view) And in the settings: 'DEFAULT_THROTTLE_RATES': { 'my_custom_scope': '3/day' } By default, this class will throttle POST requests based on the rate set in the settings file. The thing I added here is altering the scope and rate in case the request method was GET. Without this alteration, some problems may occur because of the default caching used by DRF Throttlers. 
We need to set the rate and scope inside the CustomThrottle class itself or else the scope affiliated with POST method will be applied on both GET and POST. Finally, we add the decorator on our function-based view. from rest_framework import api_view, throttle_classes import CustomThrottle @api_view(['GET', 'POST']) @throttle_classes([CustomThrottle]) def someFunction(request): if request.method == 'GET': # set scope for get requests elif request.method == 'POST': # set scope for post requests and that'd be it :D A: This is very simple but hard to find. Import throttle_classes from DRF as follows: from rest_framework.decorators import api_view, throttle_classes then use it as follows: @api_view(['POST']) @throttle_classes([UserRateThrottle]) def your_view(request): ....
Django Rest Framework Scope Throttling on function based view
Wanted to ask if someone knows a way or a workaround to how to set different throttle scopes for different request methods in a function-based view. For example @api_view(['GET', 'POST']) def someFunction(request): if request.method == 'GET': # set scope for get requests elif request.method == 'POST': # set scope for post requests I tried looking around, but all answers are for class-based views only. Would appreciate the help, thanks.
[ "You can solve this by creating all the custom throttling classes first. Note: Only the throttles are in classes but the views are functions.\nclass PostAnononymousRateThrottle(throttling.AnonRateThrottle):\n scope = 'post_anon'\n def allow_request(self, request, view):\n if request.method == \"GET\":\n return True\n return super().allow_request(request, view)\n\nclass GetAnononymousRateThrottle(throttling.AnonRateThrottle):\n scope = 'get_anon'\n def allow_request(self, request, view):\n if request.method == \"POST\":\n return True\n return super().allow_request(request, view)\n\nclass PostUserRateThrottle(throttling.UserRateThrottle):\n scope = 'post_user'\n def allow_request(self, request, view):\n if request.method == \"GET\":\n return True\n return super().allow_request(request, view)\n\nclass GetUserRateThrottle(throttling.UserRateThrottle):\n scope = 'get_user'\n def allow_request(self, request, view):\n if request.method == \"POST\":\n return True\n return super().allow_request(request, view)\n\nYou can choose to eliminate the classes if you are not looking for authentication or method type.\nThen you need to import this\nfrom rest_framework.decorators import api_view, throttle_classes\n\nThen you can wrap your function view with throttle_classes decorator with all the permissions created\n@api_view(['GET', 'POST'])\n@throttle_classes([PostAnononymousRateThrottle, GetAnononymousRateThrottle, PostUserRateThrottle, GetUserRateThrottle])\ndef someFunction(request):\n if request.method == 'POST':\n return Response({\"message\": \"Got some data!\", \"data\": request.data})\n elif request.method == 'GET':\n return Response({\"message\": \"Hello, world!\"})\n\nDon't forget to mention the throttle rate in the settings.py\nREST_FRAMEWORK = {\n 'DEFAULT_THROTTLE_RATES': {\n 'post_anon': '3/minute',\n 'get_anon': '1/minute',\n 'post_user': '2/minute',\n 'get_user': '2/minute'\n }\n}\n\nReference: https://medium.com/analytics-vidhya/throttling-requests-with-django-rest-framework-for-different-http-methods-3ab0461044c\n", "I finally found out a workaround for function based views.\nHere is how I implemented it.\nAs it was explained in previous answers, we need to extend the UserRateThrottle class or AnonRateThrottle class depending on our need.\nFor my case, I was more interested in throttling requests from users.\nfrom rest_framework.throttling import UserRateThrottle\n\nclass CustomThrottle(UserRateThrottle):\n scope = 'my_custom_scope'\n def allow_request(self, request, view):\n if request.method == 'GET':\n self.scope = 'get_scope'\n self.rate = '2/hour'\n return True\n return super().allow_request(request, view)\n\n\nAnd in the settings:\n'DEFAULT_THROTTLE_RATES': {\n 'my_custom_scope': '3/day'\n}\n\nBy default, this class will throttle POST requests based on the rate set in the settings file. The thing I added here is altering the scope and rate in case the request method was GET. Without this alteration, some problems may occur because of the default caching used by DRF Throttlers. 
We need to set the rate and scope inside the CustomThrottle class itself or else the scope affiliated with POST method will be applied on both GET and POST.\nFinally, we add the decorator on our function-based view.\nfrom rest_framework import api_view, throttle_classes\nimport CustomThrottle\n\n@api_view(['GET', 'POST'])\n@throttle_classes([CustomThrottle])\ndef someFunction(request):\n if request.method == 'GET':\n # set scope for get requests\n elif request.method == 'POST':\n # set scope for post requests\n\nand that'd be it :D\n", "This is very simple but hard to find. Import throttle_classes from DRF as follows:\nfrom rest_framework.decorators import api_view, throttle_classes\n\nthen use it as follows:\n@api_view(['POST'])\n@throttle_classes([UserRateThrottle])\ndef your_view(request):\n ....\n\n" ]
[ 4, 1, 0 ]
[]
[]
[ "django", "django_rest_framework", "python", "throttling" ]
stackoverflow_0063454449_django_django_rest_framework_python_throttling.txt
Q: What is the best way to sanitize inputs with Flask and when using MongoDB? I'm writing my application backend with Python Flask. As part of the registration process, I have a form that sends the new user's information to my backend and then adds it to my MongoDB database. I'm pretty new in this world and never wrote something that has to be secured.. My Python code looks like that: from flask import Flask, request app = Flask(__name__) @app.route('/register', methods=['POST']) def register(): username = request.form['username'] password = request.form['password'] Is there a Python library that sanitizes the request.form['username'] and request.form['password'] field for me? Something that make me safe from XSS and data leaks? Thanks ahead! A: One of the best way to avoid injections is to use ORM and avoid raw queries. For MongoDB it can be flask_mongoengine or motor. Both of them provide escaping out of the box (except for some cases). But you should take care of variable types that you pass to queries. For example, query = Model.objects.filter(field=value) Everything is fine while value is str or int, but what if it looks like {'$gte': ''}? So flask-wtf or marshmallow are strong recommended to avoid such problems. A: I have used python bleach in the past. https://github.com/mozilla/bleach I think it might be useful for your case.
What is the best way to sanitize inputs with Flask and when using MongoDB?
I'm writing my application backend with Python Flask. As part of the registration process, I have a form that sends the new user's information to my backend and then adds it to my MongoDB database. I'm pretty new in this world and never wrote something that has to be secured.. My Python code looks like that: from flask import Flask, request app = Flask(__name__) @app.route('/register', methods=['POST']) def register(): username = request.form['username'] password = request.form['password'] Is there a Python library that sanitizes the request.form['username'] and request.form['password'] field for me? Something that make me safe from XSS and data leaks? Thanks ahead!
[ "One of the best way to avoid injections is to use ORM and avoid raw queries. For MongoDB it can be flask_mongoengine or motor. Both of them provide escaping out of the box (except for some cases).\nBut you should take care of variable types that you pass to queries.\nFor example,\nquery = Model.objects.filter(field=value)\n\nEverything is fine while value is str or int, but what if it looks like {'$gte': ''}?\nSo flask-wtf or marshmallow are strong recommended to avoid such problems.\n", "I have used python bleach in the past. https://github.com/mozilla/bleach\nI think it might be useful for your case.\n" ]
[ 1, 0 ]
[]
[]
[ "flask", "mongodb", "python", "security" ]
stackoverflow_0043925397_flask_mongodb_python_security.txt
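Both answers above stay at the level of advice; a minimal sketch of what the register route could look like with those pieces combined, assuming bleach is installed and using werkzeug (which ships with Flask) for password hashing. The length rule and the response body are illustrative, and the actual MongoDB insert is left as a comment:

import bleach
from flask import Flask, request, abort
from werkzeug.security import generate_password_hash

app = Flask(__name__)

@app.route('/register', methods=['POST'])
def register():
    username = request.form.get('username', '')
    password = request.form.get('password', '')

    # reject anything that is not a plain string of reasonable length
    if not isinstance(username, str) or not (3 <= len(username) <= 30):
        abort(400)

    # strip any HTML tags so the value is safe to echo back in templates (XSS)
    username = bleach.clean(username, tags=[], strip=True)

    # never store the raw password; store a hash instead
    password_hash = generate_password_hash(password)

    # insert {'username': username, 'password': password_hash} via your ODM/driver here
    return {'status': 'ok'}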
Q: python list of dictionaries find paired values I have this csv containing some paired rows such as: LabebStoreId,catalog_uuid,lang,cat_0_name,cat_1_name,cat_2_name,cat_3_name,catalogname,description,properties,price,price_before_discount,externallink,Rating,delivery,discount,instock 6021,89028,en,Electronics & Appliances,Batteries & Power,Batteries,Alkaline Batteries,Energizer Max AA Alkaline Battery E91BP Pack of 8,,"{""Number of batteries included"": ""8"", ""Battery voltage"": ""1.5""}",41.5,,https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028,,,, 6021,89028,ar,الإلكترونيات والأجهزة المنزلية,البطاريات ومولدات الطاقة,بطاريات,بطاريات الكالاين,ENERGIZER BATTERY AAX8 MAX,,"{""Number of batteries included"": ""8"", ""طاقة البطارية"": ""1.5""}",41.5,,https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028,,,, 6021,742553,en,Electronics & Appliances,Batteries & Power,Batteries,Alkaline Batteries,Energizer Max AAA Alkaline Battery EP2BP Pack of 8,,"{""Number of batteries included"": ""8"", ""Battery voltage"": ""1.5""}",33.0,,https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553,,,, 6021,742553,ar,الإلكترونيات والأجهزة المنزلية,البطاريات ومولدات الطاقة,بطاريات,بطاريات الكالاين,ENERGIZER AAA/8 MAX ALKALINE,,"{""Number of batteries included"": ""8"", ""طاقة البطارية"": ""1.5""}",33.0,,https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553,,,, 6021,893379,en,Electronics & Appliances,Large Appliances,Fridges,Fridge 101L to 200L,First1 Free Standing 2 shelves Fridge FR-130L,,"{""Dimensions WxDxH"": ""W 49cm x D 45cm x H 83.5cm""}",549.0,,https://www.carrefouruae.com//mafuae/en/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379,,Free delivery,,1.0 6021,893379,ar,الإلكترونيات والأجهزة المنزلية,أجهزة منزلية كبيرة,الثلاجات,ثلاجة 101L إلى 200L,FIRST1 FR-130L FRIDGE,,"{""Dimensions WxDxH"": ""W 49cm x D 45cm x H 83.5cm""}",549.0,,https://www.carrefouruae.com//mafuae/ar/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379,,,,1.0 6021,915909,en,Electronics & Appliances,Batteries & Power,Power Adaptors & Sockets,Adaptors & Sockets,Elexon Universal Travel Adaptor PP7974A White,,{},14.5,,https://www.carrefouruae.com//mafuae/en/adaptors-sockets/elexon-universal-travel-adaptor/p/915909,,,, 6021,945671,en,Electronics & Appliances,Small Appliances,Food Preparation,Rice Cooker,Mychoice Rice Cooker 350W F-06RC White,,{},69.0,,https://www.carrefouruae.com//mafuae/en/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671,,,, 6021,945671,ar,الإلكترونيات والأجهزة المنزلية,أجهزة منزلية صغيرة,تحضير الطعام,طبخ الأرز,جهاز طهي الأرز الأول F-06RC 350 وات 0.6 لتر,,{},69.0,,https://www.carrefouruae.com//mafuae/ar/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671,,,, one row for English and one for Arabic and I'd like to setup some check like this: if catalog_uuid contains en and ar: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": 
d["instock"], } } payload = { "nextRow": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } elif catalog_uuid contains only en: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } elif catalog_uuid contains only ar: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } some rows only contains English and some only contains Arabic so I have to cover both aspects. I am using pandas to read the csv and transforming the dataframe to pd.to_json(orient="index") because I need to post that rows in a POST request as a json. 
Here is my recent try: import pandas as pd import requests import json import ast HEADERS = {"Accept": "*/*", "Content-Type": "application/json"} df = pd.read_csv("carrefour-uae-items-final.csv").fillna("").astype(str) dict_data = json.loads(df.to_json(orient="index")) list_dict = [d for d in dict_data.values()] for d in list_dict: cat_id = d["catalog_uuid"] if next(item for item in list_dict if item["catalog_uuid"] == cat_id): payload_rows = ["row", "nextRow"] for row in payload_rows: payload = { row: { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], "images": ast.literal_eval(d["encoded_images"]), } } print(json.dumps(payload, indent=4)) response = requests.post( "http://crawlerapi.labeb.com/api/PCCrawler/Crawl?StoreId=6021", headers=HEADERS, json=payload, ) print(response.content.decode()) else: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], "images": ast.literal_eval(d["encoded_images"]), } } print(json.dumps(payload, indent=4)) response = requests.post( "http://crawlerapi.labeb.com/api/PCCrawler/Crawl?StoreId=6021", headers=HEADERS, json=payload, ) print(response.content.decode()) Can anyone please help me figure out how to achieve the above logic? Thanks in advance! 
A: As I understand correctly, you can group the data by LabebStoreId and catalog_uuid and then you make payloads according each group: import csv data = {} with open("data.csv", "r") as f_in: reader = csv.DictReader(f_in) for row in reader: data.setdefault((row["LabebStoreId"], row["catalog_uuid"]), []).append( row ) for payload_no, v in enumerate(data.values(), 1): print("Payload number", payload_no) for d in v: print("Language", d["lang"]) print("\t", d) print("-" * 80) Prints: Payload number 1 Language en {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AA Alkaline Battery E91BP Pack of 8', 'description': '', 'properties': '{"Number of batteries included": "8", "Battery voltage": "1.5"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} Language ar {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER BATTERY AAX8 MAX', 'description': '', 'properties': '{"Number of batteries included": "8", "طاقة البطارية": "1.5"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} -------------------------------------------------------------------------------- Payload number 2 Language en {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AAA Alkaline Battery EP2BP Pack of 8', 'description': '', 'properties': '{"Number of batteries included": "8", "Battery voltage": "1.5"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} Language ar {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER AAA/8 MAX ALKALINE', 'description': '', 'properties': '{"Number of batteries included": "8", "طاقة البطارية": "1.5"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} -------------------------------------------------------------------------------- Payload number 3 Language en {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Large Appliances', 'cat_2_name': 'Fridges', 'cat_3_name': 'Fridge 101L to 200L', 'catalogname': 'First1 Free Standing 2 shelves Fridge FR-130L', 'description': '', 'properties': '{"Dimensions WxDxH": "W 49cm x D 45cm x H 83.5cm"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 
'https://www.carrefouruae.com//mafuae/en/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': 'Free delivery', 'discount': '', 'instock': '1.0'} Language ar {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية كبيرة', 'cat_2_name': 'الثلاجات', 'cat_3_name': 'ثلاجة 101L إلى 200L', 'catalogname': 'FIRST1 FR-130L FRIDGE', 'description': '', 'properties': '{"Dimensions WxDxH": "W 49cm x D 45cm x H 83.5cm"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': '', 'discount': '', 'instock': '1.0'} -------------------------------------------------------------------------------- Payload number 4 Language en {'LabebStoreId': '6021', 'catalog_uuid': '915909', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Power Adaptors & Sockets', 'cat_3_name': 'Adaptors & Sockets', 'catalogname': 'Elexon Universal Travel Adaptor PP7974A White', 'description': '', 'properties': '{}', 'price': '14.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/adaptors-sockets/elexon-universal-travel-adaptor/p/915909', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} -------------------------------------------------------------------------------- Payload number 5 Language en {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Small Appliances', 'cat_2_name': 'Food Preparation', 'cat_3_name': 'Rice Cooker', 'catalogname': 'Mychoice Rice Cooker 350W F-06RC White', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} Language ar {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية صغيرة', 'cat_2_name': 'تحضير الطعام', 'cat_3_name': 'طبخ الأرز', 'catalogname': 'جهاز طهي الأرز الأول F-06RC 350 وات 0.6 لتر', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''} -------------------------------------------------------------------------------- To have {'row':...} or {'row':..., 'nextRow':...} you can check lenght of the group: import csv data = {} with open("data.csv", "r") as f_in: reader = csv.DictReader(f_in) for row in reader: data.setdefault((row["LabebStoreId"], row["catalog_uuid"]), []).append( row ) for payload_no, v in enumerate(data.values(), 1): print("Payload number", payload_no) if len(v) == 1: payload = {"row": v[0]} else: payload = {"row": v[0], "nextRow": v[1]} print("\t", payload) print("-" * 80) Prints: Payload number 1 {'row': {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AA Alkaline Battery E91BP Pack of 8', 'description': '', 'properties': '{"Number of batteries included": "8", "Battery voltage": 
"1.5"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER BATTERY AAX8 MAX', 'description': '', 'properties': '{"Number of batteries included": "8", "طاقة البطارية": "1.5"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}} -------------------------------------------------------------------------------- Payload number 2 {'row': {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AAA Alkaline Battery EP2BP Pack of 8', 'description': '', 'properties': '{"Number of batteries included": "8", "Battery voltage": "1.5"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER AAA/8 MAX ALKALINE', 'description': '', 'properties': '{"Number of batteries included": "8", "طاقة البطارية": "1.5"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}} -------------------------------------------------------------------------------- Payload number 3 {'row': {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Large Appliances', 'cat_2_name': 'Fridges', 'cat_3_name': 'Fridge 101L to 200L', 'catalogname': 'First1 Free Standing 2 shelves Fridge FR-130L', 'description': '', 'properties': '{"Dimensions WxDxH": "W 49cm x D 45cm x H 83.5cm"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': 'Free delivery', 'discount': '', 'instock': '1.0'}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية كبيرة', 'cat_2_name': 'الثلاجات', 'cat_3_name': 'ثلاجة 101L إلى 200L', 'catalogname': 'FIRST1 FR-130L FRIDGE', 'description': '', 'properties': '{"Dimensions WxDxH": "W 49cm x D 45cm x H 83.5cm"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': '', 'discount': '', 'instock': '1.0'}} -------------------------------------------------------------------------------- Payload number 4 {'row': 
{'LabebStoreId': '6021', 'catalog_uuid': '915909', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Power Adaptors & Sockets', 'cat_3_name': 'Adaptors & Sockets', 'catalogname': 'Elexon Universal Travel Adaptor PP7974A White', 'description': '', 'properties': '{}', 'price': '14.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/adaptors-sockets/elexon-universal-travel-adaptor/p/915909', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}} -------------------------------------------------------------------------------- Payload number 5 {'row': {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Small Appliances', 'cat_2_name': 'Food Preparation', 'cat_3_name': 'Rice Cooker', 'catalogname': 'Mychoice Rice Cooker 350W F-06RC White', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية صغيرة', 'cat_2_name': 'تحضير الطعام', 'cat_3_name': 'طبخ الأرز', 'catalogname': 'جهاز طهي الأرز الأول F-06RC 350 وات 0.6 لتر', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}} --------------------------------------------------------------------------------
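Since the question already loads the CSV with pandas, the same pairing can also be expressed with groupby directly. This is only a sketch: the column names, headers and posting URL are taken from the question above, the row dictionaries are posted as-is, and the question's extra encoded_images column (not present in the CSV sample) is left out.

import pandas as pd
import requests

HEADERS = {"Accept": "*/*", "Content-Type": "application/json"}

df = pd.read_csv("carrefour-uae-items-final.csv").fillna("").astype(str)

# One group per (LabebStoreId, catalog_uuid); a group holds the en row, the ar row, or both.
for _, group in df.groupby(["LabebStoreId", "catalog_uuid"], sort=False):
    rows = group.to_dict(orient="records")
    payload = {"row": rows[0]}
    if len(rows) > 1:  # paired en/ar rows
        payload["nextRow"] = rows[1]
    response = requests.post(
        "http://crawlerapi.labeb.com/api/PCCrawler/Crawl?StoreId=6021",
        headers=HEADERS,
        json=payload,
    )
    print(response.status_code)

groupby with sort=False keeps the groups in the order the catalog_uuid values first appear in the file.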
python list of dictionaries find paired values
I have this csv containing some paired rows such as: LabebStoreId,catalog_uuid,lang,cat_0_name,cat_1_name,cat_2_name,cat_3_name,catalogname,description,properties,price,price_before_discount,externallink,Rating,delivery,discount,instock 6021,89028,en,Electronics & Appliances,Batteries & Power,Batteries,Alkaline Batteries,Energizer Max AA Alkaline Battery E91BP Pack of 8,,"{""Number of batteries included"": ""8"", ""Battery voltage"": ""1.5""}",41.5,,https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028,,,, 6021,89028,ar,الإلكترونيات والأجهزة المنزلية,البطاريات ومولدات الطاقة,بطاريات,بطاريات الكالاين,ENERGIZER BATTERY AAX8 MAX,,"{""Number of batteries included"": ""8"", ""طاقة البطارية"": ""1.5""}",41.5,,https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028,,,, 6021,742553,en,Electronics & Appliances,Batteries & Power,Batteries,Alkaline Batteries,Energizer Max AAA Alkaline Battery EP2BP Pack of 8,,"{""Number of batteries included"": ""8"", ""Battery voltage"": ""1.5""}",33.0,,https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553,,,, 6021,742553,ar,الإلكترونيات والأجهزة المنزلية,البطاريات ومولدات الطاقة,بطاريات,بطاريات الكالاين,ENERGIZER AAA/8 MAX ALKALINE,,"{""Number of batteries included"": ""8"", ""طاقة البطارية"": ""1.5""}",33.0,,https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553,,,, 6021,893379,en,Electronics & Appliances,Large Appliances,Fridges,Fridge 101L to 200L,First1 Free Standing 2 shelves Fridge FR-130L,,"{""Dimensions WxDxH"": ""W 49cm x D 45cm x H 83.5cm""}",549.0,,https://www.carrefouruae.com//mafuae/en/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379,,Free delivery,,1.0 6021,893379,ar,الإلكترونيات والأجهزة المنزلية,أجهزة منزلية كبيرة,الثلاجات,ثلاجة 101L إلى 200L,FIRST1 FR-130L FRIDGE,,"{""Dimensions WxDxH"": ""W 49cm x D 45cm x H 83.5cm""}",549.0,,https://www.carrefouruae.com//mafuae/ar/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379,,,,1.0 6021,915909,en,Electronics & Appliances,Batteries & Power,Power Adaptors & Sockets,Adaptors & Sockets,Elexon Universal Travel Adaptor PP7974A White,,{},14.5,,https://www.carrefouruae.com//mafuae/en/adaptors-sockets/elexon-universal-travel-adaptor/p/915909,,,, 6021,945671,en,Electronics & Appliances,Small Appliances,Food Preparation,Rice Cooker,Mychoice Rice Cooker 350W F-06RC White,,{},69.0,,https://www.carrefouruae.com//mafuae/en/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671,,,, 6021,945671,ar,الإلكترونيات والأجهزة المنزلية,أجهزة منزلية صغيرة,تحضير الطعام,طبخ الأرز,جهاز طهي الأرز الأول F-06RC 350 وات 0.6 لتر,,{},69.0,,https://www.carrefouruae.com//mafuae/ar/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671,,,, one row for English and one for Arabic and I'd like to setup some check like this: if catalog_uuid contains en and ar: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } payload = { "nextRow": { 
"LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } elif catalog_uuid contains only en: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } elif catalog_uuid contains only ar: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], } } some rows only contains English and some only contains Arabic so I have to cover both aspects. I am using pandas to read the csv and transforming the dataframe to pd.to_json(orient="index") because I need to post that rows in a POST request as a json. 
Here is my recent try: import pandas as pd import requests import json import ast HEADERS = {"Accept": "*/*", "Content-Type": "application/json"} df = pd.read_csv("carrefour-uae-items-final.csv").fillna("").astype(str) dict_data = json.loads(df.to_json(orient="index")) list_dict = [d for d in dict_data.values()] for d in list_dict: cat_id = d["catalog_uuid"] if next(item for item in list_dict if item["catalog_uuid"] == cat_id): payload_rows = ["row", "nextRow"] for row in payload_rows: payload = { row: { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], "images": ast.literal_eval(d["encoded_images"]), } } print(json.dumps(payload, indent=4)) response = requests.post( "http://crawlerapi.labeb.com/api/PCCrawler/Crawl?StoreId=6021", headers=HEADERS, json=payload, ) print(response.content.decode()) else: payload = { "row": { "LabebStoreId": d["LabebStoreId"], "catalog_uuid": d["catalog_uuid"], "lang": d["lang"], "cat_0_name": d["cat_0_name"], "cat_1_name": d["cat_1_name"], "cat_2_name": d["cat_2_name"], "cat_3_name": d["cat_3_name"], "catalogname": d["catalogname"], "description": d["description"], "properties": d["properties"], "price": d["price"], "price_before_discount": d["price_before_discount"], "externallink": d["externallink"], "Rating": d["Rating"], "delivery": d["delivery"], "discount": d["discount"], "instock": d["instock"], "images": ast.literal_eval(d["encoded_images"]), } } print(json.dumps(payload, indent=4)) response = requests.post( "http://crawlerapi.labeb.com/api/PCCrawler/Crawl?StoreId=6021", headers=HEADERS, json=payload, ) print(response.content.decode()) Can anyone please help me figure out how to achieve the above logic? Thanks in advance!
[ "As I understand correctly, you can group the data by LabebStoreId and catalog_uuid and then you make payloads according each group:\nimport csv\n\ndata = {}\nwith open(\"data.csv\", \"r\") as f_in:\n reader = csv.DictReader(f_in)\n for row in reader:\n data.setdefault((row[\"LabebStoreId\"], row[\"catalog_uuid\"]), []).append(\n row\n )\n\nfor payload_no, v in enumerate(data.values(), 1):\n print(\"Payload number\", payload_no)\n for d in v:\n print(\"Language\", d[\"lang\"])\n print(\"\\t\", d)\n print(\"-\" * 80)\n\nPrints:\nPayload number 1\nLanguage en\n {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AA Alkaline Battery E91BP Pack of 8', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"Battery voltage\": \"1.5\"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\nLanguage ar\n {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER BATTERY AAX8 MAX', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"طاقة البطارية\": \"1.5\"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\n--------------------------------------------------------------------------------\nPayload number 2\nLanguage en\n {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AAA Alkaline Battery EP2BP Pack of 8', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"Battery voltage\": \"1.5\"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\nLanguage ar\n {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER AAA/8 MAX ALKALINE', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"طاقة البطارية\": \"1.5\"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\n--------------------------------------------------------------------------------\nPayload number 3\nLanguage en\n {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Large Appliances', 'cat_2_name': 'Fridges', 'cat_3_name': 'Fridge 101L to 200L', 'catalogname': 'First1 Free Standing 2 shelves Fridge FR-130L', 'description': '', 'properties': '{\"Dimensions 
WxDxH\": \"W 49cm x D 45cm x H 83.5cm\"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': 'Free delivery', 'discount': '', 'instock': '1.0'}\nLanguage ar\n {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية كبيرة', 'cat_2_name': 'الثلاجات', 'cat_3_name': 'ثلاجة 101L إلى 200L', 'catalogname': 'FIRST1 FR-130L FRIDGE', 'description': '', 'properties': '{\"Dimensions WxDxH\": \"W 49cm x D 45cm x H 83.5cm\"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': '', 'discount': '', 'instock': '1.0'}\n--------------------------------------------------------------------------------\nPayload number 4\nLanguage en\n {'LabebStoreId': '6021', 'catalog_uuid': '915909', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Power Adaptors & Sockets', 'cat_3_name': 'Adaptors & Sockets', 'catalogname': 'Elexon Universal Travel Adaptor PP7974A White', 'description': '', 'properties': '{}', 'price': '14.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/adaptors-sockets/elexon-universal-travel-adaptor/p/915909', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\n--------------------------------------------------------------------------------\nPayload number 5\nLanguage en\n {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Small Appliances', 'cat_2_name': 'Food Preparation', 'cat_3_name': 'Rice Cooker', 'catalogname': 'Mychoice Rice Cooker 350W F-06RC White', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\nLanguage ar\n {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية صغيرة', 'cat_2_name': 'تحضير الطعام', 'cat_3_name': 'طبخ الأرز', 'catalogname': 'جهاز طهي الأرز الأول F-06RC 350 وات 0.6 لتر', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}\n--------------------------------------------------------------------------------\n\n\nTo have {'row':...} or {'row':..., 'nextRow':...} you can check lenght of the group:\nimport csv\n\ndata = {}\nwith open(\"data.csv\", \"r\") as f_in:\n reader = csv.DictReader(f_in)\n for row in reader:\n data.setdefault((row[\"LabebStoreId\"], row[\"catalog_uuid\"]), []).append(\n row\n )\n\nfor payload_no, v in enumerate(data.values(), 1):\n print(\"Payload number\", payload_no)\n if len(v) == 1:\n payload = {\"row\": v[0]}\n else:\n payload = {\"row\": v[0], \"nextRow\": v[1]}\n\n print(\"\\t\", payload)\n print(\"-\" * 80)\n\nPrints:\nPayload number 1\n {'row': {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 
'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AA Alkaline Battery E91BP Pack of 8', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"Battery voltage\": \"1.5\"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '89028', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER BATTERY AAX8 MAX', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"طاقة البطارية\": \"1.5\"}', 'price': '41.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke91bp-batery-8-aa/p/89028', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}}\n--------------------------------------------------------------------------------\nPayload number 2\n {'row': {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Batteries', 'cat_3_name': 'Alkaline Batteries', 'catalogname': 'Energizer Max AAA Alkaline Battery EP2BP Pack of 8', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"Battery voltage\": \"1.5\"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '742553', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'البطاريات ومولدات الطاقة', 'cat_2_name': 'بطاريات', 'cat_3_name': 'بطاريات الكالاين', 'catalogname': 'ENERGIZER AAA/8 MAX ALKALINE', 'description': '', 'properties': '{\"Number of batteries included\": \"8\", \"طاقة البطارية\": \"1.5\"}', 'price': '33.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/alkaline-batteries/energizer-max-alke92bp-batery-8-aaa/p/742553', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}}\n--------------------------------------------------------------------------------\nPayload number 3\n {'row': {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Large Appliances', 'cat_2_name': 'Fridges', 'cat_3_name': 'Fridge 101L to 200L', 'catalogname': 'First1 Free Standing 2 shelves Fridge FR-130L', 'description': '', 'properties': '{\"Dimensions WxDxH\": \"W 49cm x D 45cm x H 83.5cm\"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': 'Free delivery', 'discount': '', 'instock': '1.0'}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '893379', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية كبيرة', 'cat_2_name': 'الثلاجات', 'cat_3_name': 'ثلاجة 101L إلى 200L', 'catalogname': 'FIRST1 FR-130L FRIDGE', 'description': '', 'properties': '{\"Dimensions WxDxH\": \"W 49cm x D 45cm x H 83.5cm\"}', 'price': '549.0', 'price_before_discount': '', 'externallink': 
'https://www.carrefouruae.com//mafuae/ar/fridge-101l-to-200l/mychoice-first1-fridge-fr-130l-94l/p/893379', 'Rating': '', 'delivery': '', 'discount': '', 'instock': '1.0'}}\n--------------------------------------------------------------------------------\nPayload number 4\n {'row': {'LabebStoreId': '6021', 'catalog_uuid': '915909', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Batteries & Power', 'cat_2_name': 'Power Adaptors & Sockets', 'cat_3_name': 'Adaptors & Sockets', 'catalogname': 'Elexon Universal Travel Adaptor PP7974A White', 'description': '', 'properties': '{}', 'price': '14.5', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/adaptors-sockets/elexon-universal-travel-adaptor/p/915909', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}}\n--------------------------------------------------------------------------------\nPayload number 5\n {'row': {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'en', 'cat_0_name': 'Electronics & Appliances', 'cat_1_name': 'Small Appliances', 'cat_2_name': 'Food Preparation', 'cat_3_name': 'Rice Cooker', 'catalogname': 'Mychoice Rice Cooker 350W F-06RC White', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/en/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}, 'nextRow': {'LabebStoreId': '6021', 'catalog_uuid': '945671', 'lang': 'ar', 'cat_0_name': 'الإلكترونيات والأجهزة المنزلية', 'cat_1_name': 'أجهزة منزلية صغيرة', 'cat_2_name': 'تحضير الطعام', 'cat_3_name': 'طبخ الأرز', 'catalogname': 'جهاز طهي الأرز الأول F-06RC 350 وات 0.6 لتر', 'description': '', 'properties': '{}', 'price': '69.0', 'price_before_discount': '', 'externallink': 'https://www.carrefouruae.com//mafuae/ar/rice-cooker/mychoice-first1-rice-cooker-f-06rc/p/945671', 'Rating': '', 'delivery': '', 'discount': '', 'instock': ''}}\n--------------------------------------------------------------------------------\n\n" ]
[ 2 ]
[]
[]
[ "csv", "dictionary", "pandas", "python", "python_3.x" ]
stackoverflow_0074577770_csv_dictionary_pandas_python_python_3.x.txt
Q: How can I clear console but leave needed print-line? For example I have code:
print("Your status - online")
while(True):
    print("Searching for server")

And I need to clear console, but leave "Your status - online", so "Searching for server" isn't overlapping.
A: You can use a carriage return to get back to the beginning of the line. Then, when you print, it overrides the previously printed line.
print("Your status - online")
while True:
    print("Searching for server", end="\r")
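A small variation on the same idea, sketched with placeholder text and timing: flushing the output makes the line update reliably, and padding with ljust prevents leftover characters when a shorter message overwrites a longer one. Like the original, it loops forever.

import itertools
import time

print("Your status - online")
for dots in itertools.cycle([".", "..", "..."]):
    message = "Searching for server" + dots
    # ljust pads to a fixed width so the previous, longer text is fully overwritten
    print(message.ljust(40), end="\r", flush=True)
    time.sleep(0.5)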
How can I clear console but leave needed print-line?
For example I have code:
print("Your status - online")
while(True):
    print("Searching for server")

And I need to clear console, but leave "Your status - online", so "Searching for server" isn't overlapping.
[ "You can use a carriage return to get back to the beginning of the line. Then, when you print, it overrides the previously printed line.\nprint(\"Your status - online\")\nwhile True:\n print(\"Searching for server\", end=\"\\r\")\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074578153_python.txt
Q: Restricting access of some roles to some specific pages
@bp.route("/products/wishlist", methods=["GET"])
@login_required
@roles_required("ADMIN", "CUSTOMER_STORE_MANAGER")
def product_wishlist():
    return product_wishlist_page()

I have role restrictions like this, where each page has some role requirements. What I need to do is restrict some roles so that they can access only some specific pages; for example, when CUSTOMER_STORE_MANAGER logs into the webpage, they should only be able to view product_wishlist. I thought about defining pages for each role and checking whether a user is trying to access pages they are authorized for. But I wonder if there is a more convenient way to do this in Flask?
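One possible approach, sketched here rather than taken from any particular role extension: keep a per-role whitelist of endpoints and enforce it once in a before_request hook, so individual views do not each need their own role list. The role attribute on current_user and the endpoint name are assumptions based on the snippet above; for a blueprint view the endpoint string is "<blueprint_name>.product_wishlist".

from flask import Flask, request, abort
from flask_login import LoginManager, current_user

app = Flask(__name__)
login_manager = LoginManager(app)

# Roles listed here may only visit the named endpoints; unlisted roles keep full access.
ROLE_WHITELIST = {
    "CUSTOMER_STORE_MANAGER": {"product_wishlist"},
}

@app.before_request
def enforce_role_whitelist():
    if request.endpoint in (None, "static"):
        return
    if not current_user.is_authenticated:
        return  # anonymous users are already handled by @login_required
    allowed = ROLE_WHITELIST.get(getattr(current_user, "role", None))
    if allowed is not None and request.endpoint not in allowed:
        abort(403)

The views themselves can then keep only the roles that are positively allowed, while the hook blocks a restricted role from every endpoint outside its whitelist.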
Restricting access of some roles to some specific pages
@bp.route("/products/wishlist", methods=["GET"])
@login_required
@roles_required("ADMIN", "CUSTOMER_STORE_MANAGER")
def product_wishlist():
    return product_wishlist_page()

I have role restrictions like this, where each page has some role requirements. What I need to do is restrict some roles so that they can access only some specific pages; for example, when CUSTOMER_STORE_MANAGER logs into the webpage, they should only be able to view product_wishlist. I thought about defining pages for each role and checking whether a user is trying to access pages they are authorized for. But I wonder if there is a more convenient way to do this in Flask?
[]
[]
[ "Why don't you filter in the template based on the roles\n{% if current_user.role == CUSTOMER_STORE_MANAGER %}\n {# show what this role can view or do #}\n{% elif current_user.role == \"Admin\"%}\n ...\n{% endif %}\n\nThis is simple and rendered on the backend so it is the same as doing multiple templates with less code.\n" ]
[ -1 ]
[ "flask", "python" ]
stackoverflow_0074570576_flask_python.txt
Q: Stopping scroll on a specific page when LinkedIn scraping - python I am trying to webscrape jobs on LinkedIn based on url and the specific number of jobs. The code below uses infinite scrolling and scrolls until page 39, this creates 1000 elements in my 'jobs; lost but I only want 500. How can I make it stop so that it only scrolls to so that I have 500 elements. url = 'https://www.linkedin.com/jobs/search?keywords=Python%20%28Programming%20Language%29&location=Canada&geoId=101174742&position=1&pageNum=0' time.sleep(2) no_of_jobs = 500 driver = 'location of driver' driver.set_window_size(1024, 600) driver.maximize_window() driver.get(url) SCROLL_PAUSE_TIME = 1.5 i = 2 while i <= int(no_of_jobs/25) + 1: last_height = driver.execute_script("return document.body.scrollHeight") while True: driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") time.sleep(SCROLL_PAUSE_TIME) # Calculate new scroll height and compare with last scroll height new_height = driver.execute_script("return document.body.scrollHeight") if new_height == last_height: break last_height = new_height i = i + 1 try: driver.find_element(By.XPATH, '/html/body/div[1]/div/main/section[2]/button').click() time.sleep(2) except: pass time.sleep(2) job_lists = driver.find_element(By.CLASS_NAME, 'jobs-search__results-list') jobs = job_lists.find_elements(By.TAG_NAME, 'li') # return a list time.sleep(5) Thanks I know it is because my code is asking it to infinite scroll but when I had the code below, the program would run to the bottom of one page and then load the next page and scroll all the way up and start again and do that for every page. i = 2 while i <= int(no_of_jobs/25)+1: driver.execute_script('window.scrollTo(0, document.body.scrollHeight);') i = i+1 try: driver.find_element(By.XPATH, '/html/body/div[1]/div/main/section[2]/button').click() time.sleep(2) except: pass time.sleep(2) A: Looks like you are trying to scroll down while it's possible and only then extract jobs from resulting page. Try to extract jobs after every scroll down and add break statement when you reach 500 jobs. Also pass here is unnecessary: except: pass time.sleep(2) Your code should be like this: def extract_jobs(driver): job_lists = driver.find_element(By.CLASS_NAME, 'jobs-search__results-list') return job_lists.find_elements(By.TAG_NAME, 'li') # return a list jobs = [] last_height = driver.execute_script("return document.body.scrollHeight") while True: # Scroll down driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") time.sleep(SCROLL_PAUSE_TIME) # Extract jobs jobs = extract_jobs(driver) # Check if we have reached 500 if len(jobs) >= 500: break # Calculate new scroll height and compare with last scroll height new_height = driver.execute_script("return document.body.scrollHeight") if new_height == last_height: break last_height = new_height # Get you jobs here ... another_method(jobs) ...
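A compact sketch of the same stop-early idea: count the loaded results after every scroll, stop as soon as enough are present, and slice to the exact number wanted. It assumes a ready `driver` as in the question; the class name and the show-more-button XPath are copied from the question and may need adjusting if LinkedIn's markup changes.

import time
from selenium.webdriver.common.by import By

def collect_jobs(driver, wanted=500, pause=1.5):
    while True:
        jobs = driver.find_element(By.CLASS_NAME, "jobs-search__results-list") \
                     .find_elements(By.TAG_NAME, "li")
        if len(jobs) >= wanted:
            break  # enough results loaded; stop scrolling
        last_height = driver.execute_script("return document.body.scrollHeight")
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause)
        if driver.execute_script("return document.body.scrollHeight") == last_height:
            # page stopped growing: click "see more jobs" if present, otherwise give up
            try:
                driver.find_element(By.XPATH, "/html/body/div[1]/div/main/section[2]/button").click()
                time.sleep(2)
            except Exception:
                break
    return jobs[:wanted]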
Stopping scroll on a specific page when LinkedIn scraping - python
I am trying to webscrape jobs on LinkedIn based on url and the specific number of jobs. The code below uses infinite scrolling and scrolls until page 39, this creates 1000 elements in my 'jobs; lost but I only want 500. How can I make it stop so that it only scrolls to so that I have 500 elements. url = 'https://www.linkedin.com/jobs/search?keywords=Python%20%28Programming%20Language%29&location=Canada&geoId=101174742&position=1&pageNum=0' time.sleep(2) no_of_jobs = 500 driver = 'location of driver' driver.set_window_size(1024, 600) driver.maximize_window() driver.get(url) SCROLL_PAUSE_TIME = 1.5 i = 2 while i <= int(no_of_jobs/25) + 1: last_height = driver.execute_script("return document.body.scrollHeight") while True: driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") time.sleep(SCROLL_PAUSE_TIME) # Calculate new scroll height and compare with last scroll height new_height = driver.execute_script("return document.body.scrollHeight") if new_height == last_height: break last_height = new_height i = i + 1 try: driver.find_element(By.XPATH, '/html/body/div[1]/div/main/section[2]/button').click() time.sleep(2) except: pass time.sleep(2) job_lists = driver.find_element(By.CLASS_NAME, 'jobs-search__results-list') jobs = job_lists.find_elements(By.TAG_NAME, 'li') # return a list time.sleep(5) Thanks I know it is because my code is asking it to infinite scroll but when I had the code below, the program would run to the bottom of one page and then load the next page and scroll all the way up and start again and do that for every page. i = 2 while i <= int(no_of_jobs/25)+1: driver.execute_script('window.scrollTo(0, document.body.scrollHeight);') i = i+1 try: driver.find_element(By.XPATH, '/html/body/div[1]/div/main/section[2]/button').click() time.sleep(2) except: pass time.sleep(2)
[ "Looks like you are trying to scroll down while it's possible and only then extract jobs from resulting page.\nTry to extract jobs after every scroll down and add break statement when you reach 500 jobs.\nAlso pass here is unnecessary:\nexcept:\n pass\n time.sleep(2)\n\nYour code should be like this:\ndef extract_jobs(driver):\n job_lists = driver.find_element(By.CLASS_NAME, 'jobs-search__results-list')\n return job_lists.find_elements(By.TAG_NAME, 'li') # return a list\n\n\njobs = []\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\nwhile True:\n # Scroll down\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(SCROLL_PAUSE_TIME)\n \n # Extract jobs\n jobs = extract_jobs(driver)\n\n # Check if we have reached 500\n if len(jobs) >= 500:\n break\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n# Get you jobs here\n...\nanother_method(jobs)\n...\n\n" ]
[ 0 ]
[]
[]
[ "linkedin", "python", "scroll", "selenium", "web_scraping" ]
stackoverflow_0074578236_linkedin_python_scroll_selenium_web_scraping.txt
Q: I'm looking for Regular expressions to exclude a specific substring from a match
Basically I have these strings and I'm programming on Python 3.9:
'P425-TK-1501'
'P425-UN-1840'
'P900-TP-1001'
What if I want to match each of these strings EXCEPT the one with TP (P900-TP-1001)? As you can see, my challenge here was to INCLUDE P425-TK-1501 but EXCLUDE P900-TP-1001. I tried excluding but it doesn't seem to work with what I need.
[UPDATE] So basically, I've written code to extract specific strings from a txt file. In the text file, there are words, numbers, spaces, etc. The strings I want to extract have a specific format, which is searched for using this regex: '[BPC][3-9]\d{2}-[A-Z]{2}-[1-2]\d{3}'. What it does:
Searches for B, P or C for the first character.
Searches for a number from 3 to 9 for the second.
Searches for any number for the third & fourth.
Searches for a hyphen for the fifth.
Searches for a letter from A to Z for the sixth and seventh.
Etc...
What I would like is for the regex to EXCLUDE the specific string 'TP' (in that order), WITHOUT excluding 'TK', which may occur multiple times in the text file.
A: To exclude a substring, simply accept the strings which do not have it. No regex needed.
data = ['P425-TK-1501', 'P425-UN-1840', 'P900-TP-1001']

new = [x for x in data if 'P900' not in x]

produces
['P425-TK-1501', 'P425-UN-1840']
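If the exclusion has to stay inside the regex itself (for example when scanning the whole text file rather than filtering a list), a negative lookahead can veto just the literal TP pair while leaving TK and every other two-letter combination intact. This is a sketch built on the pattern quoted above:

import re

pattern = re.compile(r"[BPC][3-9]\d{2}-(?!TP)[A-Z]{2}-[1-2]\d{3}")

for s in ["P425-TK-1501", "P425-UN-1840", "P900-TP-1001"]:
    print(s, bool(pattern.fullmatch(s)))
# P425-TK-1501 True
# P425-UN-1840 True
# P900-TP-1001 False

The same pattern can be used with pattern.findall() on the file's full text to extract only the non-TP codes.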
I'm looking for Regular expressions to exclude a specific substring from a match
Basically I have these strings and I'm programming on Python 3.9:
'P425-TK-1501'
'P425-UN-1840'
'P900-TP-1001'
What if I want to match each of these strings EXCEPT the one with TP (P900-TP-1001)? As you can see, my challenge here was to INCLUDE P425-TK-1501 but EXCLUDE P900-TP-1001. I tried excluding but it doesn't seem to work with what I need.
[UPDATE] So basically, I've written code to extract specific strings from a txt file. In the text file, there are words, numbers, spaces, etc. The strings I want to extract have a specific format, which is searched for using this regex: '[BPC][3-9]\d{2}-[A-Z]{2}-[1-2]\d{3}'. What it does:
Searches for B, P or C for the first character.
Searches for a number from 3 to 9 for the second.
Searches for any number for the third & fourth.
Searches for a hyphen for the fifth.
Searches for a letter from A to Z for the sixth and seventh.
Etc...
What I would like is for the regex to EXCLUDE the specific string 'TP' (in that order), WITHOUT excluding 'TK', which may occur multiple times in the text file.
[ "To exclude a substring, simply accept the strings which do not have it. No regex needed.\ndata = ['P425-TK-1501', 'P425-UN-1840', 'P900-TP-1001']\n\nnew = [x for x in data if 'P900' not in x]\n\nproduces\n['P425-TK-1501', 'P425-UN-1840']\n\n" ]
[ 0 ]
[]
[]
[ "expression", "multiple_matches", "python", "regex" ]
stackoverflow_0074578160_expression_multiple_matches_python_regex.txt
Q: Keras multiprocessing model prediction I have a simple MNIST Keras model to make predictions and save the loss. I am running on a server with multiple CPUs, so I want to use multiprocessing for speedup. I have successfully used multiprocessing with some basic functions, but for model prediction these processes never finish, while using the non-multiprocessing approach, they work fine. I suspect that the issue might be with the model, as there is a single model it cannot be used in different parallel processes, so I loaded the model in each process, but it did not work. My code is this: from multiprocessing import Process import tensorflow as tf #make a prediction on a training sample def predict(idx, return_dict): x = tf.convert_to_tensor(np.expand_dims(x_train[idx],axis=0)) local_model=tf.keras.models.load_model('model.h5') y=local_model(x) print('this never gets printed') y_expanded=np.expand_dims(y_train[train_idx],axis=0) loss=tf.keras.losses.CategoricalCrossentropy(y_expanded,y) return_dict[i]=loss manager = multiprocessing.Manager() return_dict = manager.dict() jobs = [] for i in range(10): p = Process(target=predict, args=(i, return_dict)) jobs.append(p) p.start() for proc in jobs: proc.join() print(return_dict.values()) The print line in the predict function is never shown and the problem is with the model. Even without loading the model in the function and using a global one, the problem still persisted. I followed this this thread but it did not work. My questions are now these: How to solve the model issue Can I use the same X_train for all the processes? Thanks. A: I found the answer. First of all, Keras has issues with multiprocessing 1, 2. Moreover, TensorFlow should always have one session. So, it must be imported only in the function, not anywhere else. And the model should be loaded from the disk in each function. This may be the source of improvement (moving the model to RAM, serializing the model as a file, and passing it to function). Nevertheless, the below code works: def predict(idx, return_dict): import tensorflow as tf x=tf.convert_to_tensor(x_train[idx]) cce = tf.keras.losses.CategoricalCrossentropy() local=tf.keras.models.load_model('model.h5') y=local(np.expand_dims(x,axis=0)) y_expanded=np.expand_dims(y_train[train_idx],axis=0) loss=cce(y_expanded,y) return_dict[idx]=loss The same x_train may be used.
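The per-process model loading in the answer above can also be pushed into a Pool initializer, so each worker loads the model once and then reuses it for all of its samples. This is only a sketch under the same assumptions as the question (a trained classifier saved as model.h5 whose output matches one-hot labels); MNIST is loaded from tf.keras.datasets just to make the example self-contained.

import numpy as np
from multiprocessing import get_context

_model = None  # set once per worker by _init_worker

def _init_worker():
    global _model
    import tensorflow as tf  # import TF only inside the worker process
    _model = tf.keras.models.load_model("model.h5")

def _loss_for_sample(args):
    import tensorflow as tf
    x, y_true = args
    y_pred = _model(np.expand_dims(x, axis=0))
    cce = tf.keras.losses.CategoricalCrossentropy()
    return float(cce(np.expand_dims(y_true, axis=0), y_pred))

if __name__ == "__main__":
    import tensorflow as tf
    (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
    x_train = x_train.astype("float32") / 255.0
    y_train = tf.keras.utils.to_categorical(y_train, 10)
    samples = [(x_train[i], y_train[i]) for i in range(10)]
    # "spawn" gives every worker a clean interpreter, avoiding fork-related TF issues
    with get_context("spawn").Pool(processes=4, initializer=_init_worker) as pool:
        losses = pool.map(_loss_for_sample, samples)
    print(losses)

Because each worker only receives picklable numpy arrays, the same x_train can be shared by all processes without a Manager dict.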
Keras multiprocessing model prediction
I have a simple MNIST Keras model to make predictions and save the loss. I am running on a server with multiple CPUs, so I want to use multiprocessing for speedup. I have successfully used multiprocessing with some basic functions, but for model prediction these processes never finish, while using the non-multiprocessing approach, they work fine. I suspect that the issue might be with the model, as there is a single model it cannot be used in different parallel processes, so I loaded the model in each process, but it did not work. My code is this: from multiprocessing import Process import tensorflow as tf #make a prediction on a training sample def predict(idx, return_dict): x = tf.convert_to_tensor(np.expand_dims(x_train[idx],axis=0)) local_model=tf.keras.models.load_model('model.h5') y=local_model(x) print('this never gets printed') y_expanded=np.expand_dims(y_train[train_idx],axis=0) loss=tf.keras.losses.CategoricalCrossentropy(y_expanded,y) return_dict[i]=loss manager = multiprocessing.Manager() return_dict = manager.dict() jobs = [] for i in range(10): p = Process(target=predict, args=(i, return_dict)) jobs.append(p) p.start() for proc in jobs: proc.join() print(return_dict.values()) The print line in the predict function is never shown and the problem is with the model. Even without loading the model in the function and using a global one, the problem still persisted. I followed this this thread but it did not work. My questions are now these: How to solve the model issue Can I use the same X_train for all the processes? Thanks.
[ "I found the answer. First of all, Keras has issues with multiprocessing 1, 2. Moreover, TensorFlow should always have one session. So, it must be imported only in the function, not anywhere else. And the model should be loaded from the disk in each function. This may be the source of improvement (moving the model to RAM, serializing the model as a file, and passing it to function).\nNevertheless, the below code works:\ndef predict(idx, return_dict):\n\n import tensorflow as tf\n\n x=tf.convert_to_tensor(x_train[idx])\n cce = tf.keras.losses.CategoricalCrossentropy()\n\n local=tf.keras.models.load_model('model.h5')\n\n y=local(np.expand_dims(x,axis=0))\n y_expanded=np.expand_dims(y_train[train_idx],axis=0)\n loss=cce(y_expanded,y)\n \n return_dict[idx]=loss\n\nThe same x_train may be used.\n" ]
[ 0 ]
[]
[]
[ "keras", "multiprocessing", "python", "tensorflow" ]
stackoverflow_0074540699_keras_multiprocessing_python_tensorflow.txt
Q: how to crop a colour 8 bit per pixel png image and save in colour in python I have a png image that I want to crop, removing the top and bottom white space. I use the following code: from PIL import Image for f in pa_files: img = f im = Image.open(img) width, height = im.size pixels = list(im.getdata()) pixels = [pixels[i * width:(i + 1) * width] for i in range(height)] white_lines = 0 for line in pixels: white_count = sum([sum(x) for x in line]) - im.width * 255*4 if (white_count) == 0: white_lines += 1 else: break crop_from_top = white_lines pixels.reverse() white_lines = 0 for line in pixels: white_count = sum([sum(x) for x in line]) - im.width * 255*4 if (white_count) == 0: white_lines += 1 #print(white_count) else: break crop_from_bottom = white_lines crop_from_bottom, crop_from_top, im.size # Setting the points for cropped image left = 0 top = crop_from_top - 5 right = im.width bottom = im.height - (crop_from_bottom- 5) im1 = im.crop((left, top, right, bottom)) im1.save(img) this works for a 32 bit png but now I come across an 8 bit png, and tried running the same script, but came across this error: TypeError: 'int' object is not iterable Looking further, I see that each pixel is represented by 0:255 and we see pixel value 153 appears 2m times. I played around cropping with the following: im = Image.open(f).convert('L') im = im.crop((x1, y1, x2, y2)) im.save('_0.png') successfully, but then my image returned grayscale. before: after: it went from blue to grayscale. How is it possible to crop the margins dynamically of an 8bit type image, and save it again in colour? A: The thing is that you have to consider many different cases. 8 bits R,G,B,A images (that is what you have, apparently, at first) 8 bits R,G,B images 8 bits gray level 8 bits indexed images For 8 bits gray level, pixels are not 4-uplets (R,G,B,A) but numbers. So, sum(x) should be replaced by x. And then you can expect it to worth 255, not 255*4 for white (but that is not a sure thing neither. There are some 'MINISWHITE' format also. Since I don't have an example, and not very familiar with PIL (that you are obviously using), can't be sure if PIL would make this transparent (I mean, if it would convert it when loading). For example in the 1st part of your code white_lines = 0 for line in pixels: white_count = sum([x for x in line]) - im.width * 255 if (white_count) == 0: white_lines += 1 else: break For R,G,B image, your code would be OK, but white is not when sum is 255*4, but 255*3. Your second example is an indexed image. So 8 bits, but color anyway. By converting it to 'L', that is gray level, you got what you are complaining about. So, the simple answer here would be to advise you to convert everything to RGB or RGBA and then use your code. for f in pa_files: img = f im = Image.open(img) width, height = im.size pixels = list(im.convert('RGBA').getdata()) pixels = [pixels[i * width:(i + 1) * width] for i in range(height)] # Rest of code unchanged im1 = im.crop((left, top, right, bottom)) im1.save(img) The conversion would not impact the image (it won't convert a gray level image in a stupid RGBA image whose all R=G=B, wasting space), since the conversion is only to get pixel array used for computation of crop area, and crop is performed on the original unconverted image. I can't resist the urge to advise you to avoid iterating over pixels at all cost, tho. You could, instead of creating a python list (that you have to reshape yourself), get a numpy array view of the data. 
import numpy as np arr=np.asarray(im.convert('RGB')) Then, to check if a line i is white LineIisWhite=(arr[i]==255).all() (arr[i]==255) is an array of booleans, of the same shape of your line, that is, here W×3, with True where there where 255, and False elsewhere. (arr[i]==255).all() is a boolean saying whether all boolean in previous arrays are True or not. So if line is white. That still wouldn't avoid an iteration over lines. But we can do better. Restricting all to the 2 last axis (W and 3), by adding axis=(1,2) and applying on the whole image, we get an array of H booleans, that are True if all W×3 booleans are true in each line. whitelines=(arr==255).all(axis=(1,2)) In the example image I build, that result in array([ True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]) Then, no need to iterate over this array of booleans to count number of True at the beginning or at the end crop_from_top = np.argmin(whitelines) crop_from_bottom = np.argmin(whitelines[::-1]) So, altogether import numpy as np for f in pa_files: img = f im = Image.open(img) width, height = im.size arr = np.asarray(im.convert('RGB')) whitelines=(arr==255).all(axis=(1,2)) crop_from_top = np.argmin(whitelines) crop_from_bottom = np.argmin(whitelines[::-1]) # Setting the points for cropped image left = 0 top = crop_from_top - 5 right = im.width bottom = im.height - (crop_from_bottom- 5) im1 = im.crop((left, top, right, bottom)) im1.save(img) Last remark: because of indexation that has to approximate colors, or even because of JPEG encoding, white pixels may not be purely white. So you may want to change arr==255 by arr>=250 or something like that. Note that the numpy array here is used read-only. We only use it to compute how many lines to crop. A: We may simply convert every input image to RGB using im = im.convert('RGB'): im = Image.open(img) if im.mode != 'RGBA': # Keep RGBA (32 bits) images unmodified im = im.convert('RGB') ... If the input image is RGB, it is not going to change. If the image is indexed image (has say 8 bits per pixel) and palette, the image is converted to RGB. If the image is 8 bits grayscale, it is also converted to RGB (where R=G=B for each pixel). 
Code sample: from PIL import Image file_name = 'indexed_image.png' # file_name = 'rgb_image.png' # file_name = 'gray_image.png' im = Image.open(file_name) if im.mode != 'RGBA': # Keep RGBA images unmodified im = im.convert('RGB') # Convert indexed image to RGB image if required (pass each pixel through the palette). width, height = im.size pixels = list(im.getdata()) pixels = [pixels[i * width:(i + 1) * width] for i in range(height)] white_lines = 0 for line in pixels: white_count = sum([sum(x) for x in line]) - im.width * 255*4 if (white_count) == 0: white_lines += 1 else: break crop_from_top = white_lines pixels.reverse() white_lines = 0 for line in pixels: white_count = sum([sum(x) for x in line]) - im.width * 255*4 if (white_count) == 0: white_lines += 1 #print(white_count) else: break crop_from_bottom = white_lines crop_from_bottom, crop_from_top, im.size # Setting the points for cropped image left = 0 top = crop_from_top - 5 right = im.width bottom = im.height - (crop_from_bottom- 5) im1 = im.crop((left, top, right, bottom)) im1.save(file_name) Sample images (used for testing): RGB: Indexed image: Grayscale image: RGBA image:
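For completeness, a pure-PIL variant of the same trim, sketched under the assumption that the margins are pure white: invert an RGB copy so white becomes zero, let getbbox() find the non-white content, and crop the original image so its colour mode (indexed, grayscale, RGB or RGBA) is left untouched. The file name here is just a placeholder, and the 5-pixel margin mirrors the question.

from PIL import Image, ImageOps

def trim_white_margins(path, margin=5):
    im = Image.open(path)
    measure = ImageOps.invert(im.convert("RGB"))  # white pixels become (0, 0, 0)
    bbox = measure.getbbox()                      # bounding box of non-white content
    if bbox is None:
        return im                                 # entirely white image: nothing to trim
    _, top, _, bottom = bbox
    top = max(top - margin, 0)
    bottom = min(bottom + margin, im.height)
    # keep the full width and only trim the white bands above and below
    return im.crop((0, top, im.width, bottom))

trim_white_margins("plot.png").save("plot_trimmed.png")

Note that nearly-white margins (e.g. value 254 from JPEG or palette rounding) would not be trimmed by this sketch; that matches the earlier remark about replacing exact comparisons with a threshold.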
how to crop a colour 8 bit per pixel png image and save in colour in python
I have a png image that I want to crop, removing the top and bottom white space. I use the following code: from PIL import Image for f in pa_files: img = f im = Image.open(img) width, height = im.size pixels = list(im.getdata()) pixels = [pixels[i * width:(i + 1) * width] for i in range(height)] white_lines = 0 for line in pixels: white_count = sum([sum(x) for x in line]) - im.width * 255*4 if (white_count) == 0: white_lines += 1 else: break crop_from_top = white_lines pixels.reverse() white_lines = 0 for line in pixels: white_count = sum([sum(x) for x in line]) - im.width * 255*4 if (white_count) == 0: white_lines += 1 #print(white_count) else: break crop_from_bottom = white_lines crop_from_bottom, crop_from_top, im.size # Setting the points for cropped image left = 0 top = crop_from_top - 5 right = im.width bottom = im.height - (crop_from_bottom- 5) im1 = im.crop((left, top, right, bottom)) im1.save(img) this works for a 32 bit png but now I come across an 8 bit png, and tried running the same script, but came across this error: TypeError: 'int' object is not iterable Looking further, I see that each pixel is represented by 0:255 and we see pixel value 153 appears 2m times. I played around cropping with the following: im = Image.open(f).convert('L') im = im.crop((x1, y1, x2, y2)) im.save('_0.png') successfully, but then my image returned grayscale. before: after: it went from blue to grayscale. How is it possible to crop the margins dynamically of an 8bit type image, and save it again in colour?
[ "The thing is that you have to consider many different cases.\n\n8 bits R,G,B,A images (that is what you have, apparently, at first)\n8 bits R,G,B images\n8 bits gray level\n8 bits indexed images\n\nFor 8 bits gray level, pixels are not 4-uplets (R,G,B,A) but numbers. So, sum(x) should be replaced by x. And then you can expect it to worth 255, not 255*4 for white (but that is not a sure thing neither. There are some 'MINISWHITE' format also. Since I don't have an example, and not very familiar with PIL (that you are obviously using), can't be sure if PIL would make this transparent (I mean, if it would convert it when loading).\nFor example in the 1st part of your code\n\n white_lines = 0\n for line in pixels:\n white_count = sum([x for x in line]) - im.width * 255\n if (white_count) == 0:\n white_lines += 1\n else:\n break\n\nFor R,G,B image, your code would be OK, but white is not when sum is 255*4, but 255*3.\nYour second example is an indexed image. So 8 bits, but color anyway.\nBy converting it to 'L', that is gray level, you got what you are complaining about.\nSo, the simple answer here would be to advise you to convert everything to RGB or RGBA and then use your code.\nfor f in pa_files:\n img = f\n im = Image.open(img)\n width, height = im.size\n pixels = list(im.convert('RGBA').getdata())\n pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]\n# Rest of code unchanged\n im1 = im.crop((left, top, right, bottom))\n\n im1.save(img)\n\nThe conversion would not impact the image (it won't convert a gray level image in a stupid RGBA image whose all R=G=B, wasting space), since the conversion is only to get pixel array used for computation of crop area, and crop is performed on the original unconverted image.\nI can't resist the urge to advise you to avoid iterating over pixels at all cost, tho.\nYou could, instead of creating a python list (that you have to reshape yourself), get a numpy array view of the data.\nimport numpy as np\narr=np.asarray(im.convert('RGB'))\n\nThen, to check if a line i is white\nLineIisWhite=(arr[i]==255).all()\n\n(arr[i]==255) is an array of booleans, of the same shape of your line, that is, here W×3, with True where there where 255, and False elsewhere.\n(arr[i]==255).all() is a boolean saying whether all boolean in previous arrays are True or not. So if line is white.\nThat still wouldn't avoid an iteration over lines. 
But we can do better.\nRestricting all to the 2 last axis (W and 3), by adding axis=(1,2) and applying on the whole image, we get an array of H booleans, that are True if all W×3 booleans are true in each line.\nwhitelines=(arr==255).all(axis=(1,2))\n\nIn the example image I build, that result in\narray([ True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, False, False,\n False, False, False, False, False, False, False, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True])\n\nThen, no need to iterate over this array of booleans to count number of True at the beginning or at the end\ncrop_from_top = np.argmin(whitelines)\ncrop_from_bottom = np.argmin(whitelines[::-1])\n\nSo, altogether\nimport numpy as np\nfor f in pa_files:\n img = f\n im = Image.open(img)\n width, height = im.size\n arr = np.asarray(im.convert('RGB'))\n whitelines=(arr==255).all(axis=(1,2))\n crop_from_top = np.argmin(whitelines)\n crop_from_bottom = np.argmin(whitelines[::-1])\n\n # Setting the points for cropped image\n left = 0\n top = crop_from_top - 5\n right = im.width\n bottom = im.height - (crop_from_bottom- 5)\n\n im1 = im.crop((left, top, right, bottom))\n\n im1.save(img)\n\nLast remark: because of indexation that has to approximate colors, or even because of JPEG encoding, white pixels may not be purely white.\nSo you may want to change arr==255 by arr>=250 or something like that.\nNote that the numpy array here is used read-only. 
We only use it to compute how many lines to crop.\n", "We may simply convert every input image to RGB using im = im.convert('RGB'):\nim = Image.open(img)\n\nif im.mode != 'RGBA': # Keep RGBA (32 bits) images unmodified\n im = im.convert('RGB')\n...\n\n\nIf the input image is RGB, it is not going to change.\nIf the image is indexed image (has say 8 bits per pixel) and palette, the image is converted to RGB.\nIf the image is 8 bits grayscale, it is also converted to RGB (where R=G=B for each pixel).\n\n\nCode sample:\nfrom PIL import Image\n\nfile_name = 'indexed_image.png' # file_name = 'rgb_image.png' # file_name = 'gray_image.png'\nim = Image.open(file_name)\n\nif im.mode != 'RGBA': # Keep RGBA images unmodified\n im = im.convert('RGB') # Convert indexed image to RGB image if required (pass each pixel through the palette).\n\nwidth, height = im.size\npixels = list(im.getdata())\npixels = [pixels[i * width:(i + 1) * width] for i in range(height)]\n\nwhite_lines = 0\nfor line in pixels:\n white_count = sum([sum(x) for x in line]) - im.width * 255*4\n if (white_count) == 0:\n white_lines += 1\n else:\n break\n\ncrop_from_top = white_lines\n\npixels.reverse()\n\nwhite_lines = 0\nfor line in pixels:\n white_count = sum([sum(x) for x in line]) - im.width * 255*4\n if (white_count) == 0:\n white_lines += 1\n #print(white_count)\n else:\n break\n\ncrop_from_bottom = white_lines\n\ncrop_from_bottom, crop_from_top, im.size\n\n# Setting the points for cropped image\nleft = 0\ntop = crop_from_top - 5\nright = im.width\nbottom = im.height - (crop_from_bottom- 5)\n\nim1 = im.crop((left, top, right, bottom))\n\nim1.save(file_name)\n\n\nSample images (used for testing):\nRGB:\n\nIndexed image:\n\nGrayscale image:\n\nRGBA image:\n\n" ]
[ 0, 0 ]
[]
[]
[ "colors", "image", "python", "python_imaging_library" ]
stackoverflow_0074577896_colors_image_python_python_imaging_library.txt
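A compact sketch of the approach both answers converge on: convert a working copy of the image to RGB just for the white-row detection, then crop the original so indexed and grayscale PNGs keep their colour. The function name, the 5-pixel margin and the 250 near-white threshold are assumptions for illustration, not taken from the original post.

from PIL import Image
import numpy as np

def crop_white_margins(path, margin=5, white_threshold=250):
    im = Image.open(path)
    # Analyse an RGB copy only; the crop below is applied to the original image.
    arr = np.asarray(im.convert('RGB'))
    white_rows = (arr >= white_threshold).all(axis=(1, 2))
    top = int(np.argmin(white_rows))           # index of the first non-white row
    bottom = int(np.argmin(white_rows[::-1]))  # number of white rows at the bottom
    box = (0, max(top - margin, 0), im.width, im.height - max(bottom - margin, 0))
    return im.crop(box)

# Hypothetical usage:
# crop_white_margins('chart.png').save('chart_cropped.png')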
Q: How do you modify a dictionary from a text file when you only need to get specific values? So say we have some sort of file with maybe like 6 columns, and 6 rows. If I wanted to get one specific column that reads one line, and modify a current dictionary I have, how would I approach that? The output should be all the data with the the key being the second column, and the 2 values being the first column and 4th column? Can somebody please help me start this off? I've tried using: for line in file: (key, val) = line.split() data[int(key)] = val print (data) But, obviously this'll fail, since this expects only 2 values. I need 1 key value, and 2 value values. A: split returns a list. Use the list instead of expanding into named variables. data = {} for line in file: row = line.strip().split() data[int(row[1])] = row[0], row[3] print (data)
How do you modify a dictionary from a text file when you only need to get specific values?
So say we have some sort of file with maybe like 6 columns, and 6 rows. If I wanted to get one specific column that reads one line, and modify a current dictionary I have, how would I approach that? The output should be all the data with the the key being the second column, and the 2 values being the first column and 4th column? Can somebody please help me start this off? I've tried using: for line in file: (key, val) = line.split() data[int(key)] = val print (data) But, obviously this'll fail, since this expects only 2 values. I need 1 key value, and 2 value values.
[ "split returns a list. Use the list instead of expanding into named variables.\ndata = {}\nfor line in file:\n row = line.strip().split()\n data[int(row[1])] = row[0], row[3]\nprint (data)\n\n" ]
[ 0 ]
[]
[]
[ "dictionary", "file", "python" ]
stackoverflow_0074578298_dictionary_file_python.txt
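A tiny self-contained run of the accepted approach, using an in-memory stand-in for the file since the question does not show its actual contents (column positions follow the question: key from the second column, values from the first and fourth):

data = {}
lines = ["alpha 10 x 3.5 y z", "beta 20 x 7.1 y z"]  # stand-in for the file's rows
for line in lines:
    row = line.strip().split()
    data[int(row[1])] = (row[0], row[3])
print(data)  # {10: ('alpha', '3.5'), 20: ('beta', '7.1')}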
Q: Call OpenAI API with Python requests is missing a model parameter I'm trying to call OpenAI API from Python. I know they have their own openai package, but I want to use a generic solution. I chose the requests package for its flexibility. Here is my call >>> headers = {"Authorization": "Bearer xxx"} >>> url = 'https://api.openai.com/v1/completions' >>> data = {'model': 'text-davinci-002', 'prompt': 'Once upon a time'} >>> requests.get(url, headers=headers, data=data).content ... "error": {\n "message": "you must provide a model parameter" The header contains the API token. It's correct, I tried it. I also tried to pass the same dictionary as json, as data but as a json string. Always the same error message. Any idea how to make the call? Update: >>> requests.get(url, headers=headers, json=data).content >>> requests.get(url, headers=headers, json=json.dumps(data)).content >>> requests.get(url, headers=headers, data=json.dumps(data)).content >>> requests.get(url, headers=headers, data=json.dumps(data).encode()).content These all return the same error. I tried to add 'Content-Type': 'application/json' to the headers too. update2: It works for the completion endpoint with POST, but not for the edit endpoint. >>> completion_url = "https://api.openai.com/v1/completions" >>> completion_data = {'model': 'text-davinci-002', 'prompt': 'Once upon a time'} >>> requests.post(completion_url, headers=headers, json=completion_data).json() ... # it works >>> edit_url = "https://api.openai.com/v1/edits" >>> completion_data = {'model': 'text-davinci-002', 'input': 'Once upon a time', 'instruction': 'Continue'} >>> requests.get(edit_url, headers=headers, json=edit_data).json()['error']['message'] 'you must provide a model parameter' >>> requests.post(edit_url, headers=headers, json=edit_data).json()['error']['message'] 'Invalid URL (POST /v1/edits)' A: The API expects a JSON request body,not a form-encoded request. And, you need to use the requests.post() method to send the right HTTP method. Use the json argument, not the data argument, and the right method: requests.post(url, headers=headers, json=data) See the Create completion section of the OpenAI documentation, where the curl source code sample posts JSON: curl https://api.openai.com/v1/completions \ -H 'Content-Type: application/json' \ -H 'Authorization: Bearer YOUR_API_KEY' \ -d '{ "model": "text-davinci-002", "prompt": "Say this is a test", "max_tokens": 6, "temperature": 0 }' as well as the More complicated POST requests section of the documentation: Typically, you want to send some form-encoded data — much like an HTML form. To do this, simply pass a dictionary to the data argument. Your dictionary of data will automatically be form-encoded when the request is made[.] [...] There are times that you may want to send data that is not form-encoded. [...]. If you need [the application/json header] set and you don’t want to encode the dict yourself, you can also pass it directly using the json parameter (added in version 2.4.2) and it will be encoded automatically[.] (Bold emphasis mine, slightly edited for clarity). Demo: >>> import requests >>> key = "<APIKEY>" >>> headers = {"Authorization": f"Bearer {key}"} >>> data = {'model': 'text-davinci-002', 'prompt': 'Once upon a time'} >>> requests.post(url, headers=headers, json=data).json() {'id': 'cmpl-6HIPWd1eDo6veh3FkTRsv9aJyezBv', 'object': 'text_completion', 'created': 1669580366, 'model': 'text-davinci-002', 'choices': [{'text': ' there was a castle up in space. 
In this castle there was a queen who', 'index': 0, 'logprobs': None, 'finish_reason': 'length'}], 'usage': {'prompt_tokens': 4, 'completion_tokens': 16, 'total_tokens': 20}} The openai Python library uses the requests library under the hood but takes care of details like how to send HTTP requests correctly for you.
Call OpenAI API with Python requests is missing a model parameter
I'm trying to call OpenAI API from Python. I know they have their own openai package, but I want to use a generic solution. I chose the requests package for its flexibility. Here is my call >>> headers = {"Authorization": "Bearer xxx"} >>> url = 'https://api.openai.com/v1/completions' >>> data = {'model': 'text-davinci-002', 'prompt': 'Once upon a time'} >>> requests.get(url, headers=headers, data=data).content ... "error": {\n "message": "you must provide a model parameter" The header contains the API token. It's correct, I tried it. I also tried to pass the same dictionary as json, as data but as a json string. Always the same error message. Any idea how to make the call? Update: >>> requests.get(url, headers=headers, json=data).content >>> requests.get(url, headers=headers, json=json.dumps(data)).content >>> requests.get(url, headers=headers, data=json.dumps(data)).content >>> requests.get(url, headers=headers, data=json.dumps(data).encode()).content These all return the same error. I tried to add 'Content-Type': 'application/json' to the headers too. update2: It works for the completion endpoint with POST, but not for the edit endpoint. >>> completion_url = "https://api.openai.com/v1/completions" >>> completion_data = {'model': 'text-davinci-002', 'prompt': 'Once upon a time'} >>> requests.post(completion_url, headers=headers, json=completion_data).json() ... # it works >>> edit_url = "https://api.openai.com/v1/edits" >>> completion_data = {'model': 'text-davinci-002', 'input': 'Once upon a time', 'instruction': 'Continue'} >>> requests.get(edit_url, headers=headers, json=edit_data).json()['error']['message'] 'you must provide a model parameter' >>> requests.post(edit_url, headers=headers, json=edit_data).json()['error']['message'] 'Invalid URL (POST /v1/edits)'
[ "The API expects a JSON request body,not a form-encoded request. And, you need to use the requests.post() method to send the right HTTP method.\nUse the json argument, not the data argument, and the right method:\nrequests.post(url, headers=headers, json=data)\n\nSee the Create completion section of the OpenAI documentation, where the curl source code sample posts JSON:\ncurl https://api.openai.com/v1/completions \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: Bearer YOUR_API_KEY' \\\n -d '{\n \"model\": \"text-davinci-002\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 6,\n \"temperature\": 0\n}'\n\nas well as the More complicated POST requests section of the documentation:\n\nTypically, you want to send some form-encoded data — much like an HTML form. To do this, simply pass a dictionary to the data argument. Your dictionary of data will automatically be form-encoded when the request is made[.]\n[...]\nThere are times that you may want to send data that is not form-encoded.\n[...].\nIf you need [the application/json header] set and you don’t want to encode the dict yourself, you can also pass it directly using the json parameter (added in version 2.4.2) and it will be encoded automatically[.]\n\n(Bold emphasis mine, slightly edited for clarity).\nDemo:\n>>> import requests\n>>> key = \"<APIKEY>\"\n>>> headers = {\"Authorization\": f\"Bearer {key}\"}\n>>> data = {'model': 'text-davinci-002', 'prompt': 'Once upon a time'}\n>>> requests.post(url, headers=headers, json=data).json()\n{'id': 'cmpl-6HIPWd1eDo6veh3FkTRsv9aJyezBv', 'object': 'text_completion', 'created': 1669580366, 'model': 'text-davinci-002', 'choices': [{'text': ' there was a castle up in space. In this castle there was a queen who', 'index': 0, 'logprobs': None, 'finish_reason': 'length'}], 'usage': {'prompt_tokens': 4, 'completion_tokens': 16, 'total_tokens': 20}}\n\nThe openai Python library uses the requests library under the hood but takes care of details like how to send HTTP requests correctly for you.\n" ]
[ 2 ]
[]
[]
[ "api", "openai", "python", "python_requests" ]
stackoverflow_0074578315_api_openai_python_python_requests.txt
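The essential pattern from the answer, stripped to a minimal sketch: send the parameters as a JSON body with requests.post (the json= argument serialises the dict and sets the Content-Type header automatically). The API key and max_tokens value here are placeholders.

import requests

API_KEY = "sk-..."  # placeholder, not a real key
url = "https://api.openai.com/v1/completions"
headers = {"Authorization": f"Bearer {API_KEY}"}
payload = {"model": "text-davinci-002", "prompt": "Once upon a time", "max_tokens": 16}

# POST with json=, not GET with data=
response = requests.post(url, headers=headers, json=payload)
print(response.status_code, response.json())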
Q: Filling a template slot, content doesn't appear In my folder Templates I created 2 html files: main.html user.html The structure of the main.html is: <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>DJANGO</title> </head> <body> {% block userinfo %} {% endblock userinfo %} </body> </html> The structure of the user.html is: {% extends "main.html" %} {% block userinfo %} <h2>John Doe</h2> <p>Explorer of life.</p> {% endblock userinfo %} I don't understand why <h2>John Doe</h2> <p>Explorer of life.</p> doesn't appear in the browser when I call main.html I have tried writing in this way too {% extends "main.html" %} {% block userinfo %} <h2>John Doe</h2> <p>Explorer of life.</p> {% endblock %} without user in the endblock but it does not work. In settings.py file in Templates list and DIR list I added: os.path.join(BASE_DIR,'templates'), and I importend os too. In views.py file that I've created I have written from django.shortcuts import render from django.http import HttpResponse def main(request): return render(request,'main.html') In urls.py file that I've created I have written from django.urls import path from . import views urlpatterns = [ path('main/',views.main,name='') ] When I call the page with http://localhost:8000/main/ I don't have any error. The only problem is that the page is blank. And If I try to add some text in main.html it appers on the screen, but the content from user.html doesn't appear. Can someone help me? A: When you render main.html directly, your user.html gets ignored. If you render user.html from django, you will get expected result. To inject user.html contents to main.html you should use something like {% include "user.html" %} insead of blocks statement. A: Extension in templates follows the same logic that class inheritance, it goes the way down not up. Once said, main is the base template and you will never see the content of templates in which you are extending, its the opposite, you will see the content of the base template (the one you are extending for) in the child template (the one that has the extends). I guess the behavior that you are expecting to have is the one that gives the include tag main.html <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>DJANGO</title> </head> <body> {% include "user.html" %} </body> </html> user.html <h2>John Doe</h2> <p>Explorer of life.</p>
Filling a template slot, content doesn't appear
In my folder Templates I created 2 html files: main.html user.html The structure of the main.html is: <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>DJANGO</title> </head> <body> {% block userinfo %} {% endblock userinfo %} </body> </html> The structure of the user.html is: {% extends "main.html" %} {% block userinfo %} <h2>John Doe</h2> <p>Explorer of life.</p> {% endblock userinfo %} I don't understand why <h2>John Doe</h2> <p>Explorer of life.</p> doesn't appear in the browser when I call main.html I have tried writing in this way too {% extends "main.html" %} {% block userinfo %} <h2>John Doe</h2> <p>Explorer of life.</p> {% endblock %} without user in the endblock but it does not work. In settings.py file in Templates list and DIR list I added: os.path.join(BASE_DIR,'templates'), and I importend os too. In views.py file that I've created I have written from django.shortcuts import render from django.http import HttpResponse def main(request): return render(request,'main.html') In urls.py file that I've created I have written from django.urls import path from . import views urlpatterns = [ path('main/',views.main,name='') ] When I call the page with http://localhost:8000/main/ I don't have any error. The only problem is that the page is blank. And If I try to add some text in main.html it appers on the screen, but the content from user.html doesn't appear. Can someone help me?
[ "When you render main.html directly, your user.html gets ignored.\nIf you render user.html from django, you will get expected result.\nTo inject user.html contents to main.html you should use something like {% include \"user.html\" %} insead of blocks statement.\n", "Extension in templates follows the same logic that class inheritance, it goes the way down not up.\nOnce said, main is the base template and you will never see the content of templates in which you are extending, its the opposite, you will see the content of the base template (the one you are extending for) in the child template (the one that has the extends).\nI guess the behavior that you are expecting to have is the one that gives the include tag\nmain.html\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>DJANGO</title>\n</head>\n<body>\n {% include \"user.html\" %}\n</body>\n</html>\n\nuser.html\n <h2>John Doe</h2>\n <p>Explorer of life.</p>\n\n" ]
[ 0, 0 ]
[]
[]
[ "django", "jinja2", "python" ]
stackoverflow_0074578213_django_jinja2_python.txt
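Both answers resolve the blank page: either render the child template so its block fills main.html, or pull user.html into main.html with the include tag. A minimal view for the first route is sketched below; the module layout is assumed.

# views.py
from django.shortcuts import render

def main(request):
    # Rendering the child template lets {% extends "main.html" %} load the parent
    # and fill the userinfo block with the content defined in user.html.
    return render(request, 'user.html')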
Q: Replace space in between double quote to underscore I want to replace a space to underscore, if the space is in between double quotes. Example: given = 'hello "welcome to" python "blog"' expected = 'hello "welcome_to" python "blog"' My actual string is in SQL code and I need to transform it to use underscore for migration purpose. What I tried import re s = 'hello "welcome to" java 2 "blog"' a = re.sub('(\"[\w\s]+\")', '_', s) print (a) Also been trying and trying to google but can't find yet. How to do in Python? A: If you aren't forced to use regex, don't, because that's not a good option here. inp = 'hello "welcome to" python "blog"' data = inp.split('"') for i, part in enumerate(data[:-1]): if i % 2 == 1: data[i] = part.replace(' ', '_') out = '"'.join(data) print(out) 'hello "welcome_to" python "blog"' You can do this with a list comprehension if you want, but it looks worse imo '"'.join(s if i % 2 == 0 else s.replace(' ', '_') for i, s in enumerate(inp.split('"'))) or formatted '"'.join( s if i % 2 == 0 else s.replace(' ', '_') for i, s in enumerate(inp.split('"')[:-1]) ) A: Well I think you can use the loop for iterate your string and generate new string with the char of source string (change it when you found space between double quotes). But the bad side of my solution is a creating a new string every iteration. given = 'hello "welcome to" python "blog"' double_quote = False expected = '' for c in given: if double_quote: if c == ' ': c = '_' elif c == '"': double_quote = False elif c == '"': double_quote = True expected += c print(expected) Maybe we can optimize my code within a list given = 'hello "welcome to" python "blog"' double_quote = False expected = [] for c in given: if double_quote: if c == ' ': c = '_' elif c == '"': double_quote = False elif c == '"': double_quote = True expected.append(c) expected = ''.join(expected) A: Here's an example of how to replace spaces with underscores within double quotes using re.sub(): import re pat = re.compile(r'\"[^\"]+\"') def repl(m): return m[0].replace(' ', '_') inp = 'hello "welcome to" python "blog"' out = re.sub(pat, repl, inp) print(out) The regular expression will match a double quote \", followed by anything that isn't a double quote [^\"]+, followed by another double quote \" Then the replacement logic is in the repl() function, which just takes the first match of the group and replaces all the spaces with underscores. As long as the parentheses are balanced, this will work. From Python's re docs: If repl is a function, it is called for every non-overlapping occurrence of pattern. @Samathingamajig's solution is excellent, and probably a bit faster, but here's this option in case you're wanting to do it with regex (or see an example of how it might be done).
Replace space in between double quote to underscore
I want to replace a space to underscore, if the space is in between double quotes. Example: given = 'hello "welcome to" python "blog"' expected = 'hello "welcome_to" python "blog"' My actual string is in SQL code and I need to transform it to use underscore for migration purpose. What I tried import re s = 'hello "welcome to" java 2 "blog"' a = re.sub('(\"[\w\s]+\")', '_', s) print (a) Also been trying and trying to google but can't find yet. How to do in Python?
[ "If you aren't forced to use regex, don't, because that's not a good option here.\ninp = 'hello \"welcome to\" python \"blog\"'\ndata = inp.split('\"')\nfor i, part in enumerate(data[:-1]):\n if i % 2 == 1:\n data[i] = part.replace(' ', '_')\nout = '\"'.join(data)\nprint(out)\n\n'hello \"welcome_to\" python \"blog\"'\n\nYou can do this with a list comprehension if you want, but it looks worse imo\n'\"'.join(s if i % 2 == 0 else s.replace(' ', '_') for i, s in enumerate(inp.split('\"')))\n\nor formatted\n'\"'.join(\n s if i % 2 == 0\n else s.replace(' ', '_')\n for i, s in enumerate(inp.split('\"')[:-1])\n)\n\n", "Well I think you can use the loop for iterate your string and generate new string with the char of source string (change it when you found space between double quotes). But the bad side of my solution is a creating a new string every iteration.\ngiven = 'hello \"welcome to\" python \"blog\"'\ndouble_quote = False\nexpected = ''\nfor c in given:\n if double_quote:\n if c == ' ':\n c = '_'\n elif c == '\"':\n double_quote = False\n elif c == '\"':\n double_quote = True\n expected += c\nprint(expected)\n\nMaybe we can optimize my code within a list\ngiven = 'hello \"welcome to\" python \"blog\"'\ndouble_quote = False\nexpected = []\nfor c in given:\n if double_quote:\n if c == ' ':\n c = '_'\n elif c == '\"':\n double_quote = False\n elif c == '\"':\n double_quote = True\n expected.append(c)\nexpected = ''.join(expected)\n\n", "Here's an example of how to replace spaces with underscores within double quotes using re.sub():\nimport re\n\npat = re.compile(r'\\\"[^\\\"]+\\\"')\n\ndef repl(m):\n return m[0].replace(' ', '_')\n\ninp = 'hello \"welcome to\" python \"blog\"'\nout = re.sub(pat, repl, inp)\nprint(out)\n\nThe regular expression will match\n\na double quote \\\", followed by\nanything that isn't a double quote [^\\\"]+, followed by\nanother double quote \\\"\n\nThen the replacement logic is in the repl() function, which just takes the first match of the group and replaces all the spaces with underscores.\nAs long as the parentheses are balanced, this will work. From Python's re docs:\n\nIf repl is a function, it is called for every non-overlapping occurrence of pattern.\n\n@Samathingamajig's solution is excellent, and probably a bit faster, but here's this option in case you're wanting to do it with regex (or see an example of how it might be done).\n" ]
[ 5, 0, 0 ]
[]
[]
[ "python", "replace", "string" ]
stackoverflow_0074569246_python_replace_string.txt
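For reference, the regex route from the last answer can also be written inline with a lambda as the replacement; a quick check against the question's sample string:

import re

s = 'hello "welcome to" python "blog"'
out = re.sub(r'"[^"]*"', lambda m: m.group(0).replace(' ', '_'), s)
print(out)  # hello "welcome_to" python "blog"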
Q: How to set colors for nodes in NetworkX? I created my graph, everything looks great so far, but I want to update color of my nodes after creation. My goal is to visualize DFS, I will first show the initial graph and then color nodes step by step as DFS solves the problem. If anyone is interested, sample code is available on Github A: All you need is to specify a color map which maps a color to each node and send it to nx.draw function. To clarify, for a 20 node I want to color the first 10 in blue and the rest in green. The code will be as follows: G = nx.erdos_renyi_graph(20, 0.1) color_map = [] for node in G: if node < 10: color_map.append('blue') else: color_map.append('green') nx.draw(G, node_color=color_map, with_labels=True) plt.show() You will find the graph in the attached image. A: Refer to node_color parameter: nx.draw_networkx_nodes(G, pos, node_size=200, node_color='#00b4d9') A: has been answered before, but u can do this as well: # define color map. user_node = red, book_nodes = green color_map = ['red' if node == user_id else 'green' for node in G] graph = nx.draw_networkx(G,pos, node_color=color_map) # node lables A: In my case, I had 2 groups of nodes (from sklearn.model_selection import train_test_split). I wanted to change the color of each group (default color are awful!). It took me while to figure it out how to change it but, Tensor is numpy based and Matplotlib is the core of networkx library. Therefore ... test=data.y test=test.numpy() test=test.astype(np.str_) test[test == '0'] = '#C6442A' test[test == '1'] = '#9E2AC6' nx.draw(G, with_labels=True, node_color=test, node_size=400, font_color='whitesmoke') Long story short: convert the Tensor in numpy array with string type, check your best Hex color codes for HTML (https://htmlcolorcodes.com/) and you are ready to go!
How to set colors for nodes in NetworkX?
I created my graph, everything looks great so far, but I want to update color of my nodes after creation. My goal is to visualize DFS, I will first show the initial graph and then color nodes step by step as DFS solves the problem. If anyone is interested, sample code is available on Github
[ "All you need is to specify a color map which maps a color to each node and send it to nx.draw function. To clarify, for a 20 node I want to color the first 10 in blue and the rest in green. The code will be as follows:\nG = nx.erdos_renyi_graph(20, 0.1)\ncolor_map = []\nfor node in G:\n if node < 10:\n color_map.append('blue')\n else: \n color_map.append('green') \nnx.draw(G, node_color=color_map, with_labels=True)\nplt.show()\n\nYou will find the graph in the attached image.\n", "Refer to node_color parameter:\nnx.draw_networkx_nodes(G, pos, node_size=200, node_color='#00b4d9')\n\n", "has been answered before, but u can do this as well:\n# define color map. user_node = red, book_nodes = green\ncolor_map = ['red' if node == user_id else 'green' for node in G] \ngraph = nx.draw_networkx(G,pos, node_color=color_map) # node lables\n\n", "In my case, I had 2 groups of nodes (from sklearn.model_selection import train_test_split). I wanted to change the color of each group (default color are awful!). It took me while to figure it out how to change it but, Tensor is numpy based and Matplotlib is the core of networkx library. Therefore ...\ntest=data.y\ntest=test.numpy()\ntest=test.astype(np.str_)\ntest[test == '0'] = '#C6442A'\ntest[test == '1'] = '#9E2AC6'\n\nnx.draw(G, with_labels=True, node_color=test, node_size=400, font_color='whitesmoke')\n\nLong story short: convert the Tensor in numpy array with string type, check your best Hex color codes for HTML (https://htmlcolorcodes.com/) and you are ready to go!\n" ]
[ 126, 7, 2, 0 ]
[]
[]
[ "networkx", "python" ]
stackoverflow_0027030473_networkx_python.txt
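Since the question's goal is visualising DFS step by step, here is a rough sketch of the redraw-per-step idea built on the node_color list from the answers; the sample graph, the colours and the plt.pause timing are all illustrative assumptions.

import networkx as nx
import matplotlib.pyplot as plt

G = nx.path_graph(6)
pos = nx.spring_layout(G, seed=1)  # fixed positions so nodes don't jump between frames
visited = []

for node in nx.dfs_preorder_nodes(G, source=0):
    visited.append(node)
    colors = ['red' if n in visited else 'lightgray' for n in G.nodes]
    plt.clf()
    nx.draw(G, pos, node_color=colors, with_labels=True)
    plt.pause(0.5)  # short pause so each DFS step is visible

plt.show()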
Q: How to trigger DAG in Airflow everytime an external event state is True (Event based triggering) The basic concept of Airflow does not allow to trigger a Dag on an irregular interval. Actually I want to trigger a dag everytime a new file is placed on a remote server (like https, sftp, s3 ...) But Airflow requires a defined data_interval. Using e.g. HttpSensor works only once during the scheduled time window. In my current example I am using redis to persist the current file state. """ DAG for operational District heating """ import json from datetime import datetime import redis import requests from airflow import DAG from airflow.providers.amazon.aws.operators.aws_lambda import AwsLambdaInvokeFunctionOperator from airflow.providers.http.sensors.http import HttpSensor def check_up_for_new_file( response: requests.models.Response, ) -> bool: """ uses redis to check if a new file is on the server""" current_header = { key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value for key, value in response.headers.items() } conn = redis.Redis(host='redis', port=6379) recent_header = conn.hgetall("header_dict") recent_header = { key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value for key, value in recent_header.items() } if 'Content-Length' not in recent_header.keys(): conn.hmset("header_dict", current_header) return False if recent_header['Content-Length'] != current_header['Content-Length']: conn.hmset("header_dict", current_header) return True else: return False default_args = { 'owner': 'airflow', 'depends_on_past': False, 'email': ['info@airflow.com'], 'email_on_failure': True, 'email_on_retry': False, 'retries': 2, 'concurrency': 6 } with DAG( dag_id='check_ext', start_date=datetime(2022, 11, 24), tags=['test'], catchup=False, default_args=default_args, ) as dag: check_for_new_file = HttpSensor( task_id='check_up_for_new_file', http_conn_id='_conn_id', endpoint='<some-url>', poke_interval=20, dag=dag, response_check=check_up_for_new_file ) invoke_lambda_function = AwsLambdaInvokeFunctionOperator( task_id='run_process_with_external_files', function_name='LAMBDA_FUNCTION', payload=json.dumps({"source_type": "some stuff"}), ) check_for_new_file >> invoke_lambda_function How does this dag re-start after success to check again for new files? A: You have to take care about the following two points to have a Dag that runs everytime a sensor recognize an external event. 
schedule_interval: Use the preset None Use TriggerDagRunOperator It is by design to create an infinite loop to check out the external """ DAG for operational District heating """ import json from datetime import datetime import redis import requests from airflow import DAG from airflow.providers.amazon.aws.operators.aws_lambda import AwsLambdaInvokeFunctionOperator from airflow.providers.http.sensors.http import HttpSensor from airflow.operators.dagrun_operator import TriggerDagRunOperator def check_up_for_new_file( response: requests.models.Response, ) -> bool: """ uses redis to check if a new file is on the server""" current_header = { key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value for key, value in response.headers.items() } conn = redis.Redis(host='redis', port=6379) recent_header = conn.hgetall("header_dict") recent_header = { key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value for key, value in recent_header.items() } if 'Content-Length' not in recent_header.keys(): conn.hmset("header_dict", current_header) return False if recent_header['Content-Length'] != current_header['Content-Length']: conn.hmset("header_dict", current_header) return True else: return False default_args = { 'owner': 'airflow', 'depends_on_past': False, 'email': ['info@airflow.com'], 'email_on_failure': True, 'email_on_retry': False, 'retries': 2, 'concurrency': 6 } with DAG( dag_id='check_ext', start_date=datetime(2022, 11, 24), tags=['test'], catchup=False, schedule_interval=None, default_args=default_args, ) as dag: check_for_new_file = HttpSensor( task_id='check_up_for_new_file', http_conn_id='_conn_id', endpoint='<some-url>', poke_interval=20, dag=dag, response_check=check_up_for_new_file ) invoke_lambda_function = AwsLambdaInvokeFunctionOperator( task_id='run_process_with_external_files', function_name='LAMBDA_FUNCTION_NAME', payload=json.dumps({"source_type": "some stuff"}), ) restart_dag = TriggerDagRunOperator( task_id='restart_dag', trigger_dag_id='check_ext', dag=dag ) check_for_new_file >> invoke_lambda_function >> restart_dag For some of you not familiar with HttpSensor. The base path to the server has to be defined as environment variable with AIRFLOW_CONN_{_CONN_ID}=https://remote_server.com . Then you can call the connection by matching the _CONN_ID.
How to trigger DAG in Airflow everytime an external event state is True (Event based triggering)
The basic concept of Airflow does not allow to trigger a Dag on an irregular interval. Actually I want to trigger a dag everytime a new file is placed on a remote server (like https, sftp, s3 ...) But Airflow requires a defined data_interval. Using e.g. HttpSensor works only once during the scheduled time window. In my current example I am using redis to persist the current file state. """ DAG for operational District heating """ import json from datetime import datetime import redis import requests from airflow import DAG from airflow.providers.amazon.aws.operators.aws_lambda import AwsLambdaInvokeFunctionOperator from airflow.providers.http.sensors.http import HttpSensor def check_up_for_new_file( response: requests.models.Response, ) -> bool: """ uses redis to check if a new file is on the server""" current_header = { key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value for key, value in response.headers.items() } conn = redis.Redis(host='redis', port=6379) recent_header = conn.hgetall("header_dict") recent_header = { key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value for key, value in recent_header.items() } if 'Content-Length' not in recent_header.keys(): conn.hmset("header_dict", current_header) return False if recent_header['Content-Length'] != current_header['Content-Length']: conn.hmset("header_dict", current_header) return True else: return False default_args = { 'owner': 'airflow', 'depends_on_past': False, 'email': ['info@airflow.com'], 'email_on_failure': True, 'email_on_retry': False, 'retries': 2, 'concurrency': 6 } with DAG( dag_id='check_ext', start_date=datetime(2022, 11, 24), tags=['test'], catchup=False, default_args=default_args, ) as dag: check_for_new_file = HttpSensor( task_id='check_up_for_new_file', http_conn_id='_conn_id', endpoint='<some-url>', poke_interval=20, dag=dag, response_check=check_up_for_new_file ) invoke_lambda_function = AwsLambdaInvokeFunctionOperator( task_id='run_process_with_external_files', function_name='LAMBDA_FUNCTION', payload=json.dumps({"source_type": "some stuff"}), ) check_for_new_file >> invoke_lambda_function How does this dag re-start after success to check again for new files?
[ "You have to take care about the following two points to have a Dag that runs everytime a sensor recognize an external event.\n\nschedule_interval: Use the preset None\nUse TriggerDagRunOperator\n\nIt is by design to create an infinite loop to check out the external\n\"\"\" DAG for operational District heating \"\"\"\nimport json\nfrom datetime import datetime\n\nimport redis\nimport requests\nfrom airflow import DAG\nfrom airflow.providers.amazon.aws.operators.aws_lambda import AwsLambdaInvokeFunctionOperator\nfrom airflow.providers.http.sensors.http import HttpSensor\nfrom airflow.operators.dagrun_operator import TriggerDagRunOperator\n\n\ndef check_up_for_new_file(\n response: requests.models.Response,\n) -> bool:\n \"\"\" uses redis to check if a new file is on the server\"\"\"\n current_header = {\n key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value\n for key, value in response.headers.items()\n }\n\n conn = redis.Redis(host='redis', port=6379)\n recent_header = conn.hgetall(\"header_dict\")\n\n recent_header = {\n key.decode() if isinstance(key, bytes) else key: value.decode() if isinstance(value, bytes) else value\n for key, value in recent_header.items()\n }\n\n if 'Content-Length' not in recent_header.keys():\n conn.hmset(\"header_dict\", current_header)\n return False\n\n if recent_header['Content-Length'] != current_header['Content-Length']:\n conn.hmset(\"header_dict\", current_header)\n return True\n else:\n return False\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'email': ['info@airflow.com'],\n 'email_on_failure': True,\n 'email_on_retry': False,\n 'retries': 2,\n 'concurrency': 6\n}\n\nwith DAG(\n dag_id='check_ext',\n start_date=datetime(2022, 11, 24),\n tags=['test'],\n catchup=False,\n schedule_interval=None,\n default_args=default_args,\n) as dag:\n check_for_new_file = HttpSensor(\n task_id='check_up_for_new_file',\n http_conn_id='_conn_id',\n endpoint='<some-url>',\n poke_interval=20,\n dag=dag,\n response_check=check_up_for_new_file\n )\n invoke_lambda_function = AwsLambdaInvokeFunctionOperator(\n task_id='run_process_with_external_files',\n function_name='LAMBDA_FUNCTION_NAME',\n payload=json.dumps({\"source_type\": \"some stuff\"}),\n )\n restart_dag = TriggerDagRunOperator(\n task_id='restart_dag',\n trigger_dag_id='check_ext',\n dag=dag\n )\n check_for_new_file >> invoke_lambda_function >> restart_dag\n\n\nFor some of you not familiar with HttpSensor. The base path to the server has to be defined as environment variable with AIRFLOW_CONN_{_CONN_ID}=https://remote_server.com\n. Then you can call the connection by matching the _CONN_ID.\n" ]
[ 0 ]
[]
[]
[ "airflow", "event_handling", "python", "python_3.x" ]
stackoverflow_0074578403_airflow_event_handling_python_python_3.x.txt
Q: How to explore elements in a nested boolean list and get aggregated scores? Hello this is a pretty simple question but i wanted to follow the dry principles correctly and couldn't think of a way to do it without repeating code so given game outcomes in this format game outcome = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']] the gameoutcome[0][0] till gameoutcome[2][0] are all user game outcomes so for that paticular user he won and lost 2 whereas the computer won twice but lost once what i want to do is aggregate the users out comes and the computer outcomes based on the number of wins then finally in this scenario if(computer_outcome > user_outcome): computer wins else user wins A: Here you get the 2 results: game_outcome = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']] user_outcome = sum([i[0] == 'wins' for i in game_outcome]) computer_outcome = sum([i[1] == 'wins' for i in game_outcome]) A: Should you have only 2 possible outcomes, you can skip the second calculation: game_outcomes = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']] user_wins = sum(item[0] == 'wins' for item in game_outcomes) computer_wins = len(game_outcomes) - user_wins
How to explore elements in a nested boolean list and get aggregated scores?
Hello this is a pretty simple question but i wanted to follow the dry principles correctly and couldn't think of a way to do it without repeating code so given game outcomes in this format game outcome = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']] the gameoutcome[0][0] till gameoutcome[2][0] are all user game outcomes so for that paticular user he won and lost 2 whereas the computer won twice but lost once what i want to do is aggregate the users out comes and the computer outcomes based on the number of wins then finally in this scenario if(computer_outcome > user_outcome): computer wins else user wins
[ "Here you get the 2 results:\ngame_outcome = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']]\nuser_outcome = sum([i[0] == 'wins' for i in game_outcome])\ncomputer_outcome = sum([i[1] == 'wins' for i in game_outcome])\n\n", "Should you have only 2 possible outcomes, you can skip the second calculation:\ngame_outcomes = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']]\nuser_wins = sum(item[0] == 'wins' for item in game_outcomes)\ncomputer_wins = len(game_outcomes) - user_wins\n\n" ]
[ 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074522566_python.txt
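Putting the two answers together into a tiny end-to-end check with the question's data (variable names are illustrative):

game_outcomes = [['wins', 'loses'], ['loses', 'wins'], ['loses', 'wins']]

user_wins = sum(outcome[0] == 'wins' for outcome in game_outcomes)
computer_wins = sum(outcome[1] == 'wins' for outcome in game_outcomes)

if computer_wins > user_wins:
    print("computer wins", computer_wins, "to", user_wins)  # -> computer wins 2 to 1
else:
    print("user wins", user_wins, "to", computer_wins)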
Q: How to print a float with underscores separating thousandths? If possible, i want to format (via f-string or any older way) a float, so that it's thousandths get separated by underscores. I know you can do: print(f"{10000000:_}") # 10_000_000 But i want: print(f"{7.012345678:<something>}") # 7.012_345_678 A: Hopefully someone has a better idea than I do, but if worse comes to worse you could do something like this: def format_decimals(f): s = str(f) parts = s.split('.') if len(parts) != 2: raise ValueError(f'{s} not a valid decimal number') n,d = parts d = '_'.join(d[i:i+3] for i in range(0,len(d),3)) return f'{n}.{d}' import math print(format_decimals(math.pi)) #3.141_592_653_589_793
How to print a float with underscores separating thousandths?
If possible, I want to format (via f-string or any older way) a float, so that its thousandths get separated by underscores. I know you can do: print(f"{10000000:_}") # 10_000_000 But I want: print(f"{7.012345678:<something>}") # 7.012_345_678
[ "Hopefully someone has a better idea than I do, but if worse comes to worse you could do something like this:\ndef format_decimals(f):\n s = str(f)\n parts = s.split('.')\n if len(parts) != 2:\n raise ValueError(f'{s} not a valid decimal number')\n n,d = parts\n d = '_'.join(d[i:i+3] for i in range(0,len(d),3))\n return f'{n}.{d}'\n\nimport math\nprint(format_decimals(math.pi)) #3.141_592_653_589_793\n\n" ]
[ 0 ]
[]
[]
[ "mantissa", "printing", "python" ]
stackoverflow_0074578246_mantissa_printing_python.txt
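A variant of the same idea that also groups the integer part using the built-in _ format spec; the helper name is made up, and like the answer it assumes a plain decimal string rather than scientific notation:

def underscore_float(x):
    integer, _, fraction = str(x).partition('.')
    grouped = '_'.join(fraction[i:i + 3] for i in range(0, len(fraction), 3))
    return f"{int(integer):_}" + ('.' + grouped if grouped else '')

print(underscore_float(7.012345678))  # 7.012_345_678
print(underscore_float(12345678.9))   # 12_345_678.9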
Q: Creating new value inside registry Run key with Python? I am trying to create a new value under the Run key in Windows 7. I am using Python 3.5 and I am having trouble writing to the key. My current code is creating a new key under the key I am trying to modify the values of. from winreg import * aKey = OpenKey(HKEY_CURRENT_USER, "Software\Microsoft\Windows\CurrentVersion\Run", 0, KEY_ALL_ACCESS) SetValue(aKey, 'NameOfNewValue', REG_SZ, '%windir%\system32\calc.exe') When I run this, it makes a key under the Run and names it "NameOfNewKey" and then sets the default value to the calc.exe path. However, I want to add a new value to the Run key so that when I startup, calc.exe will run. EDIT: I found the answer. It should be the SetValueEx function instead of SetValue. A: Here is a function which can set/delete a run key. Code: def set_run_key(key, value): """ Set/Remove Run Key in windows registry. :param key: Run Key Name :param value: Program to Run :return: None """ # This is for the system run variable reg_key = winreg.OpenKey( winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Run', 0, winreg.KEY_SET_VALUE) with reg_key: if value is None: winreg.DeleteValue(reg_key, key) else: if '%' in value: var_type = winreg.REG_EXPAND_SZ else: var_type = winreg.REG_SZ winreg.SetValueEx(reg_key, key, 0, var_type, value) To set: set_run_key('NameOfNewValue', '%windir%\system32\calc.exe') To remove: set_run_key('NameOfNewValue', None) To import win32 libs: try: import _winreg as winreg except ImportError: # this has been renamed in python 3 import winreg A: I ran into this same problem. The source of confusion here is the poorly named functions: winreg.SetValue(): sets or creates a subkey winreg.SetValueEx(): sets or creates a named value Normally when a function has an "Ex" suffix, it means the caller can specify additional arguments for the same operation. In this case, the functions have different semantics; the "SetValue()" should have been named something like "SetKey()".
Creating new value inside registry Run key with Python?
I am trying to create a new value under the Run key in Windows 7. I am using Python 3.5 and I am having trouble writing to the key. My current code is creating a new key under the key I am trying to modify the values of. from winreg import * aKey = OpenKey(HKEY_CURRENT_USER, "Software\Microsoft\Windows\CurrentVersion\Run", 0, KEY_ALL_ACCESS) SetValue(aKey, 'NameOfNewValue', REG_SZ, '%windir%\system32\calc.exe') When I run this, it makes a key under the Run and names it "NameOfNewKey" and then sets the default value to the calc.exe path. However, I want to add a new value to the Run key so that when I startup, calc.exe will run. EDIT: I found the answer. It should be the SetValueEx function instead of SetValue.
[ "Here is a function which can set/delete a run key.\nCode:\ndef set_run_key(key, value):\n \"\"\"\n Set/Remove Run Key in windows registry.\n\n :param key: Run Key Name\n :param value: Program to Run\n :return: None\n \"\"\"\n # This is for the system run variable\n reg_key = winreg.OpenKey(\n winreg.HKEY_CURRENT_USER,\n r'Software\\Microsoft\\Windows\\CurrentVersion\\Run',\n 0, winreg.KEY_SET_VALUE)\n\n with reg_key:\n if value is None:\n winreg.DeleteValue(reg_key, key)\n else:\n if '%' in value:\n var_type = winreg.REG_EXPAND_SZ\n else:\n var_type = winreg.REG_SZ\n winreg.SetValueEx(reg_key, key, 0, var_type, value)\n\nTo set:\nset_run_key('NameOfNewValue', '%windir%\\system32\\calc.exe')\n\nTo remove:\nset_run_key('NameOfNewValue', None)\n\nTo import win32 libs:\ntry:\n import _winreg as winreg\nexcept ImportError:\n # this has been renamed in python 3\n import winreg\n\n", "I ran into this same problem. The source of confusion here is the poorly named functions:\n\nwinreg.SetValue(): sets or creates a subkey\nwinreg.SetValueEx(): sets or creates a named value\n\nNormally when a function has an \"Ex\" suffix, it means the caller can specify additional arguments for the same operation. In this case, the functions have different semantics; the \"SetValue()\" should have been named something like \"SetKey()\".\n" ]
[ 4, 0 ]
[]
[]
[ "python", "python_3.x", "pywin32" ]
stackoverflow_0042605055_python_python_3.x_pywin32.txt
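The core of the fix mentioned in the question's edit, reduced to the minimum (Windows only; the value name and program path are just the question's examples):

import winreg

key = winreg.OpenKey(
    winreg.HKEY_CURRENT_USER,
    r"Software\Microsoft\Windows\CurrentVersion\Run",
    0,
    winreg.KEY_SET_VALUE,
)
with key:
    # SetValueEx sets a *named value* under Run; SetValue would create a subkey instead.
    winreg.SetValueEx(key, "NameOfNewValue", 0, winreg.REG_EXPAND_SZ,
                      r"%windir%\system32\calc.exe")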
Q: First N elements of a 2D Numpy array where N is a list Say I have an M x N array, >>> M = 5 >>> N = 4 >>> a = np.ones((M, N)) array([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]]) and I would like to get the first x elements of each array but where x is a list of size M. So, if x is array([2, 3, 1, 4]) then my output would be [[1, 1], [1, 1, 1], [1], [1, 1, 1, 1]] I have attempted to do this using this technique and others like it. This method does not work because the index input cannot be an array as far as I can tell. x = [2, 3, 1, 4] a[:, :x] I know this could be done using a for-loop but I would like to avoid that since I am working with large amounts of data. Also, the output will most likely need to be a list of arrays rather than a 2D array since Numpy seems to have deprecated having an array of different-sized arrays. A: you could write what you want by generator but at the end of the way at is loop: import numpy as np M = 5 N = 4 a = np.ones((M, N)) d = np.array([2, 3, 1, 4]) out = list(map(lambda a:a[0][:a[1]], zip(a, d))) maybe you could save slightly compared to for loop (not sure even), but best is to see what is the purpose of something like that, if you really care to keep as such list or you only wants the values, to optimise.
First N elements of a 2D Numpy array where N is a list
Say I have an M x N array, >>> M = 5 >>> N = 4 >>> a = np.ones((M, N)) array([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]]) and I would like to get the first x elements of each array but where x is a list of size M. So, if x is array([2, 3, 1, 4]) then my output would be [[1, 1], [1, 1, 1], [1], [1, 1, 1, 1]] I have attempted to do this using this technique and others like it. This method does not work because the index input cannot be an array as far as I can tell. x = [2, 3, 1, 4] a[:, :x] I know this could be done using a for-loop but I would like to avoid that since I am working with large amounts of data. Also, the output will most likely need to be a list of arrays rather than a 2D array since Numpy seems to have deprecated having an array of different-sized arrays.
[ "you could write what you want by generator but at the end of the way at is loop:\nimport numpy as np\nM = 5\nN = 4\na = np.ones((M, N))\nd = np.array([2, 3, 1, 4])\n\nout = list(map(lambda a:a[0][:a[1]], zip(a, d)))\n\nmaybe you could save slightly compared to for loop (not sure even), but best is to see what is the purpose of something like that, if you really care to keep as such list or you only wants the values, to optimise.\n" ]
[ 0 ]
[]
[]
[ "arrays", "numpy", "python" ]
stackoverflow_0074578424_arrays_numpy_python.txt
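The map/lambda version in the answer reads a little more directly as a list comprehension over zip; note the question's example has 5 rows but only 4 lengths, so the lengths list here is assumed to match the row count:

import numpy as np

a = np.ones((4, 4))
lengths = [2, 3, 1, 4]

out = [row[:n] for row, n in zip(a, lengths)]
for piece in out:
    print(piece)
# [1. 1.]
# [1. 1. 1.]
# [1.]
# [1. 1. 1. 1.]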
Q: Python - How can I aggregate a pandas dataframe base on conditions on different rows? I have a pandas data frame with information about road segments. PRIRTECODE PRIM_BMP PRIM_EMP SEGMENT_LENGTH ELEMENT_ID RAMP CURVE_YEAR SEGMENT_TYPE 0001A 0 0.147 0.147 4850943 0 2019 Line 0001A 0.147 0.183 0.036 4850943 0 2019 Line 0001A 0.183 0.24 0.057 4850943 0 2019 Arc left 0001A 0.24 0.251 0.011 4850945 0 2019 Arc left 0001A 0.251 0.27 0.019 4850945 0 2019 Arc left 0001A 0.27 0.295 0.025 4048920 0 2019 Arc left 0001A 0.295 0.31 0.015 4048920 0 2019 Line 0001A 0.31 0.36 0.05 4048921 0 2019 Line 0001A 0.36 0.363 0.003 4048779 0 2019 Line 0001A 0.363 0.437 0.074 4048779 0 2019 Arc left 0001A 0.437 0.483 0.046 4048779 0 2019 Arc right 0001A 0.483 0.568 0.085 4048779 0 2019 Arc right 0001A 0.568 0.6 0.032 4048779 0 2019 Line I need to aggregate based on similar characteristics as SEGMENT TYPE, and sum the SEGMENT_LENGTH. I can do this with pandas group_by. However, I need to make sure that the segments to aggregate are contiguous. To do that, I need to look the following variables: PRIM_BMP: mile in which the segment begins. PRIM_EMP: mile in which the segment ends. So two segments are continuous if the PRIM_EMP of one segment is equal to the PRIM_BMP of the second segment. Also, I need to keep the PRIM_BMP of the first segment and the PRIM_EMP of the last segment. The end result should look like this: PRIRTECODE PRIM_BMP PRIM_EMP SEGMENT_LENGTH RAMP CURVE_YEAR SEGMENT_TYPE 0001A 0 0.183 0.183 0 2019 Line 0001A 0.183 0.295 0.112 0 2019 Arc left 0001A 0.295 0.363 0.068 0 2019 Line 0001A 0.363 0.568 0.205 0 2019 Arc right 0001A 0.568 0.6 0.032 0 2019 Line I have tried with groupby using the characteristic in which I need to aggregate the segments, but I have not found a way to aggregate solely the contiguous segments. A: Just in case you run out of options, here's convtools based solution. (I must confess - I'm the author). 
from convtools import conversion as c from convtools.contrib.tables import Table rows = [ {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.0, 'PRIM_EMP': 0.147, 'SEGMENT_LENGTH': 0.147, 'ELEMENT_ID': 4850943, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.147, 'PRIM_EMP': 0.183, 'SEGMENT_LENGTH': 0.036, 'ELEMENT_ID': 4850943, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.183, 'PRIM_EMP': 0.24, 'SEGMENT_LENGTH': 0.057, 'ELEMENT_ID': 4850943, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.24, 'PRIM_EMP': 0.251, 'SEGMENT_LENGTH': 0.011, 'ELEMENT_ID': 4850945, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.251, 'PRIM_EMP': 0.27, 'SEGMENT_LENGTH': 0.019, 'ELEMENT_ID': 4850945, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.27, 'PRIM_EMP': 0.295, 'SEGMENT_LENGTH': 0.025, 'ELEMENT_ID': 4048920, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.295, 'PRIM_EMP': 0.31, 'SEGMENT_LENGTH': 0.015, 'ELEMENT_ID': 4048920, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.31, 'PRIM_EMP': 0.36, 'SEGMENT_LENGTH': 0.05, 'ELEMENT_ID': 4048921, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.36, 'PRIM_EMP': 0.363, 'SEGMENT_LENGTH': 0.003, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.363, 'PRIM_EMP': 0.437, 'SEGMENT_LENGTH': 0.074, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.437, 'PRIM_EMP': 0.483, 'SEGMENT_LENGTH': 0.046, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc right'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.483, 'PRIM_EMP': 0.568, 'SEGMENT_LENGTH': 0.085, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc right'}, {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.568, 'PRIM_EMP': 0.6, 'SEGMENT_LENGTH': 0.032, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'}, ] iterable_of_results = ( c.chunk_by_condition( c.and_( c.CHUNK.item(-1, "SEGMENT_TYPE") == c.item("SEGMENT_TYPE"), c.CHUNK.item(-1, "PRIM_EMP") == c.item("PRIM_BMP"), ) ) .aggregate( { "SEGMENT_TYPE": c.ReduceFuncs.First(c.item("SEGMENT_TYPE")), "length": c.ReduceFuncs.Sum(c.item("SEGMENT_LENGTH")), } ) # should you want to reuse this conversion multiple times, run # .gen_converter() to get a function and store it for further reuse .execute(rows) ) Result: In [54]: list(iterable_of_results) Out[54]: [{'SEGMENT_TYPE': 'Line', 'length': 0.183}, {'SEGMENT_TYPE': 'Arc left', 'length': 0.11200000000000002}, {'SEGMENT_TYPE': 'Line', 'length': 0.068}, {'SEGMENT_TYPE': 'Arc left', 'length': 0.074}, {'SEGMENT_TYPE': 'Arc right', 'length': 0.131}, {'SEGMENT_TYPE': 'Line', 'length': 0.032}]
Python - How can I aggregate a pandas dataframe base on conditions on different rows?
I have a pandas data frame with information about road segments. PRIRTECODE PRIM_BMP PRIM_EMP SEGMENT_LENGTH ELEMENT_ID RAMP CURVE_YEAR SEGMENT_TYPE 0001A 0 0.147 0.147 4850943 0 2019 Line 0001A 0.147 0.183 0.036 4850943 0 2019 Line 0001A 0.183 0.24 0.057 4850943 0 2019 Arc left 0001A 0.24 0.251 0.011 4850945 0 2019 Arc left 0001A 0.251 0.27 0.019 4850945 0 2019 Arc left 0001A 0.27 0.295 0.025 4048920 0 2019 Arc left 0001A 0.295 0.31 0.015 4048920 0 2019 Line 0001A 0.31 0.36 0.05 4048921 0 2019 Line 0001A 0.36 0.363 0.003 4048779 0 2019 Line 0001A 0.363 0.437 0.074 4048779 0 2019 Arc left 0001A 0.437 0.483 0.046 4048779 0 2019 Arc right 0001A 0.483 0.568 0.085 4048779 0 2019 Arc right 0001A 0.568 0.6 0.032 4048779 0 2019 Line I need to aggregate based on similar characteristics as SEGMENT TYPE, and sum the SEGMENT_LENGTH. I can do this with pandas group_by. However, I need to make sure that the segments to aggregate are contiguous. To do that, I need to look the following variables: PRIM_BMP: mile in which the segment begins. PRIM_EMP: mile in which the segment ends. So two segments are continuous if the PRIM_EMP of one segment is equal to the PRIM_BMP of the second segment. Also, I need to keep the PRIM_BMP of the first segment and the PRIM_EMP of the last segment. The end result should look like this: PRIRTECODE PRIM_BMP PRIM_EMP SEGMENT_LENGTH RAMP CURVE_YEAR SEGMENT_TYPE 0001A 0 0.183 0.183 0 2019 Line 0001A 0.183 0.295 0.112 0 2019 Arc left 0001A 0.295 0.363 0.068 0 2019 Line 0001A 0.363 0.568 0.205 0 2019 Arc right 0001A 0.568 0.6 0.032 0 2019 Line I have tried with groupby using the characteristic in which I need to aggregate the segments, but I have not found a way to aggregate solely the contiguous segments.
[ "Just in case you run out of options, here's convtools based solution. (I must confess - I'm the author).\nfrom convtools import conversion as c\nfrom convtools.contrib.tables import Table\n\n\nrows = [\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.0, 'PRIM_EMP': 0.147, 'SEGMENT_LENGTH': 0.147, 'ELEMENT_ID': 4850943, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.147, 'PRIM_EMP': 0.183, 'SEGMENT_LENGTH': 0.036, 'ELEMENT_ID': 4850943, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.183, 'PRIM_EMP': 0.24, 'SEGMENT_LENGTH': 0.057, 'ELEMENT_ID': 4850943, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.24, 'PRIM_EMP': 0.251, 'SEGMENT_LENGTH': 0.011, 'ELEMENT_ID': 4850945, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.251, 'PRIM_EMP': 0.27, 'SEGMENT_LENGTH': 0.019, 'ELEMENT_ID': 4850945, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.27, 'PRIM_EMP': 0.295, 'SEGMENT_LENGTH': 0.025, 'ELEMENT_ID': 4048920, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.295, 'PRIM_EMP': 0.31, 'SEGMENT_LENGTH': 0.015, 'ELEMENT_ID': 4048920, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.31, 'PRIM_EMP': 0.36, 'SEGMENT_LENGTH': 0.05, 'ELEMENT_ID': 4048921, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.36, 'PRIM_EMP': 0.363, 'SEGMENT_LENGTH': 0.003, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.363, 'PRIM_EMP': 0.437, 'SEGMENT_LENGTH': 0.074, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc left'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.437, 'PRIM_EMP': 0.483, 'SEGMENT_LENGTH': 0.046, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc right'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.483, 'PRIM_EMP': 0.568, 'SEGMENT_LENGTH': 0.085, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Arc right'},\n {'PRIRTECODE': '0001A', 'PRIM_BMP': 0.568, 'PRIM_EMP': 0.6, 'SEGMENT_LENGTH': 0.032, 'ELEMENT_ID': 4048779, 'RAMP': 0, 'CURVE_YEAR': 2019, 'SEGMENT_TYPE': 'Line'},\n]\n\niterable_of_results = (\n c.chunk_by_condition(\n c.and_(\n c.CHUNK.item(-1, \"SEGMENT_TYPE\") == c.item(\"SEGMENT_TYPE\"),\n c.CHUNK.item(-1, \"PRIM_EMP\") == c.item(\"PRIM_BMP\"),\n )\n )\n .aggregate(\n {\n \"SEGMENT_TYPE\": c.ReduceFuncs.First(c.item(\"SEGMENT_TYPE\")),\n \"length\": c.ReduceFuncs.Sum(c.item(\"SEGMENT_LENGTH\")),\n }\n )\n # should you want to reuse this conversion multiple times, run\n # .gen_converter() to get a function and store it for further reuse\n .execute(rows)\n)\n\n\nResult:\nIn [54]: list(iterable_of_results)\nOut[54]:\n[{'SEGMENT_TYPE': 'Line', 'length': 0.183},\n {'SEGMENT_TYPE': 'Arc left', 'length': 0.11200000000000002},\n {'SEGMENT_TYPE': 'Line', 'length': 0.068},\n {'SEGMENT_TYPE': 'Arc left', 'length': 0.074},\n {'SEGMENT_TYPE': 'Arc right', 'length': 0.131},\n {'SEGMENT_TYPE': 'Line', 'length': 0.032}]\n\n" ]
[ 0 ]
[]
[]
[ "aggregation", "dataframe", "group_by", "python" ]
stackoverflow_0074504250_aggregation_dataframe_group_by_python.txt
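The answer above covers only the convtools route, while the question asks about pandas. A pandas-only sketch of the same chunk-by-contiguity idea is below; it assumes the frame is already ordered by PRIM_BMP within each route, the column names are taken from the question, and the helper name aggregate_contiguous is ours.

import pandas as pd

def aggregate_contiguous(df):
    # A new run starts when the route or the segment type changes, or when a
    # segment does not begin exactly where the previous one ended (exact float
    # comparison; round the mileposts first if they come from arithmetic).
    new_run = (
        (df["PRIRTECODE"] != df["PRIRTECODE"].shift())
        | (df["SEGMENT_TYPE"] != df["SEGMENT_TYPE"].shift())
        | (df["PRIM_BMP"] != df["PRIM_EMP"].shift())
    )
    run_id = new_run.cumsum()
    return (
        df.groupby(run_id)
        .agg(
            PRIRTECODE=("PRIRTECODE", "first"),
            PRIM_BMP=("PRIM_BMP", "first"),
            PRIM_EMP=("PRIM_EMP", "last"),
            SEGMENT_LENGTH=("SEGMENT_LENGTH", "sum"),
            RAMP=("RAMP", "first"),
            CURVE_YEAR=("CURVE_YEAR", "first"),
            SEGMENT_TYPE=("SEGMENT_TYPE", "first"),
        )
        .reset_index(drop=True)
    )

On the sample rows this yields the same six runs as the convtools result above, with PRIM_BMP and PRIM_EMP carried from the first and last segment of each run.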
Q: Python: Group and count number of consecutive repetitive values in a column in a dataframe I am desperate over a data analysis task that I would like to perform on a dataframe in python. So, this is the dataframe that I have: df = pd.DataFrame({"Person": ["P1", "P1","P1","P1","P1","P1","P1","P1","P1","P1", "P2", "P2","P2","P2","P2","P2","P2","P2","P2","P2"], "Activity": ["A", "A", "A", "B", "A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "A", "A", "B", "A", "B", "A"], "Time": ["0", "0", "1", "1", "1", "3", "5", "5", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6"] }) I would like to find the number of groups with more than 2 consecutive repetitive activities "A" per Person and to calculate the average time of consecutive repetitive "A"s as end time minus start time for each group divided by the number of groups I.e. the targeted resulting dataframe should look like this (AVGTime for P1 calculates as (1-0 + 6-1)/2): solution = pd.DataFrame({"Person": ["P1", "P2"], "Activity": ["A", "A"], "Count": [2, 1], "AVGTime": [3, 0]}) I understand there is kind of a close solution here: https://datascience-stackexchange-com.translate.goog/questions/41428/how-to-find-the-count-of-consecutive-same-string-values-in-a-pandas-dataframe?_x_tr_sl=en&_x_tr_tl=de&_x_tr_hl=de&_x_tr_pto=sc However, the solution does not aggregate over a col, such as "Person" in my case. Also the solution does not seem to perform well given that I have a dataframe with about 7 Mio. rows. I would really appreciate any hint! A: You can process the data as a stream without creating a dataframe, which should fit into memory. I'd suggest trying convtools library (I must confess - I'm the author). Since you already have a dataframe, let's use it as an input: import pandas as pd from convtools import conversion as c from convtools.contrib.tables import Table # fmt: off df = pd.DataFrame({ "Person": ["P1", "P1","P1","P1","P1","P1","P1","P1","P1","P1", "P2", "P2","P2","P2","P2","P2","P2","P2","P2","P2"], "Activity": ["A", "A", "A", "B", "A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "A", "A", "B", "A", "B", "A"], "Time": ["0", "0", "1", "1", "1", "3", "5", "5", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6"] }) # fmt: on # transforming DataFrame into an iterable of dicts not to allocate all rows at # once by df.to_dict("records") iter_rows = Table.from_rows( df.itertuples(index=False), header=list(df.columns) ).into_iter_rows(dict) result = ( # chunk by consecutive "person"+"activity" pairs c.chunk_by(c.item("Person"), c.item("Activity")) .aggregate( # each chunk gets transformed into a dict like this: { "Person": c.ReduceFuncs.First(c.item("Person")), "Activity": c.ReduceFuncs.First(c.item("Activity")), "length": c.ReduceFuncs.Count(), "time": ( c.ReduceFuncs.Last(c.item("Time")).as_type(float) - c.ReduceFuncs.First(c.item("Time")).as_type(float) ), } ) # remove short groups .filter(c.item("length") > 2) .pipe( # now group by "person"+"activity" pair to calculate avg time c.group_by(c.item("Person"), c.item("Activity")).aggregate( { "Person": c.item("Person"), "Activity": c.item("Activity"), "avg_time": c.ReduceFuncs.Average(c.item("time")), "number_of_groups": c.ReduceFuncs.Count(), } ) ) # should you want to reuse this conversion multiple times, run # .gen_converter() to get a function and store it for further reuse .execute(iter_rows) ) Result: In [37]: result Out[37]: [{'Person': 'P1', 'Activity': 'A', 'avg_time': 3.0, 'number_of_groups': 2}, {'Person': 'P2', 'Activity': 'A', 'avg_time': 0.0, 
'number_of_groups': 1}] A: Try: def group_func(x): groups = [] for _, g in x.groupby((x["Activity"] != x["Activity"].shift()).cumsum()): if len(g) > 2 and g["Activity"].iat[0] == "A": groups.append(g) avgs = sum(g["Time"].max() - g["Time"].min() for g in groups) / len(groups) return pd.Series( ["A", len(groups), avgs], index=["Activity", "Count", "AVGTime"] ) df["Time"] = df["Time"].astype(int) x = df.groupby("Person", as_index=False).apply(group_func) print(x) Prints: Person Activity Count AVGTime 0 P1 A 2 3.0 1 P2 A 1 0.0
Python: Group and count number of consecutive repetitive values in a column in a dataframe
I am desperate over a data analysis task that I would like to perform on a dataframe in python. So, this is the dataframe that I have: df = pd.DataFrame({"Person": ["P1", "P1","P1","P1","P1","P1","P1","P1","P1","P1", "P2", "P2","P2","P2","P2","P2","P2","P2","P2","P2"], "Activity": ["A", "A", "A", "B", "A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "A", "A", "B", "A", "B", "A"], "Time": ["0", "0", "1", "1", "1", "3", "5", "5", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6", "6"] }) I would like to find the number of groups with more than 2 consecutive repetitive activities "A" per Person and to calculate the average time of consecutive repetitive "A"s as end time minus start time for each group divided by the number of groups I.e. the targeted resulting dataframe should look like this (AVGTime for P1 calculates as (1-0 + 6-1)/2): solution = pd.DataFrame({"Person": ["P1", "P2"], "Activity": ["A", "A"], "Count": [2, 1], "AVGTime": [3, 0]}) I understand there is kind of a close solution here: https://datascience-stackexchange-com.translate.goog/questions/41428/how-to-find-the-count-of-consecutive-same-string-values-in-a-pandas-dataframe?_x_tr_sl=en&_x_tr_tl=de&_x_tr_hl=de&_x_tr_pto=sc However, the solution does not aggregate over a col, such as "Person" in my case. Also the solution does not seem to perform well given that I have a dataframe with about 7 Mio. rows. I would really appreciate any hint!
[ "You can process the data as a stream without creating a dataframe, which should fit into memory. I'd suggest trying convtools library (I must confess - I'm the author).\nSince you already have a dataframe, let's use it as an input:\nimport pandas as pd\n\nfrom convtools import conversion as c\nfrom convtools.contrib.tables import Table\n\n\n# fmt: off\ndf = pd.DataFrame({\n \"Person\": [\"P1\", \"P1\",\"P1\",\"P1\",\"P1\",\"P1\",\"P1\",\"P1\",\"P1\",\"P1\", \"P2\", \"P2\",\"P2\",\"P2\",\"P2\",\"P2\",\"P2\",\"P2\",\"P2\",\"P2\"], \n \"Activity\": [\"A\", \"A\", \"A\", \"B\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"A\", \"A\", \"B\", \"A\", \"B\", \"A\"],\n \"Time\": [\"0\", \"0\", \"1\", \"1\", \"1\", \"3\", \"5\", \"5\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\", \"6\"]\n})\n# fmt: on\n\n# transforming DataFrame into an iterable of dicts not to allocate all rows at\n# once by df.to_dict(\"records\")\niter_rows = Table.from_rows(\n df.itertuples(index=False), header=list(df.columns)\n).into_iter_rows(dict)\n\n\nresult = (\n # chunk by consecutive \"person\"+\"activity\" pairs\n c.chunk_by(c.item(\"Person\"), c.item(\"Activity\"))\n .aggregate(\n # each chunk gets transformed into a dict like this:\n {\n \"Person\": c.ReduceFuncs.First(c.item(\"Person\")),\n \"Activity\": c.ReduceFuncs.First(c.item(\"Activity\")),\n \"length\": c.ReduceFuncs.Count(),\n \"time\": (\n c.ReduceFuncs.Last(c.item(\"Time\")).as_type(float)\n - c.ReduceFuncs.First(c.item(\"Time\")).as_type(float)\n ),\n }\n )\n # remove short groups\n .filter(c.item(\"length\") > 2)\n .pipe(\n # now group by \"person\"+\"activity\" pair to calculate avg time\n c.group_by(c.item(\"Person\"), c.item(\"Activity\")).aggregate(\n {\n \"Person\": c.item(\"Person\"),\n \"Activity\": c.item(\"Activity\"),\n \"avg_time\": c.ReduceFuncs.Average(c.item(\"time\")),\n \"number_of_groups\": c.ReduceFuncs.Count(),\n }\n )\n )\n # should you want to reuse this conversion multiple times, run\n # .gen_converter() to get a function and store it for further reuse\n .execute(iter_rows)\n)\n\nResult:\nIn [37]: result\nOut[37]:\n[{'Person': 'P1', 'Activity': 'A', 'avg_time': 3.0, 'number_of_groups': 2},\n {'Person': 'P2', 'Activity': 'A', 'avg_time': 0.0, 'number_of_groups': 1}]\n\n", "Try:\ndef group_func(x):\n groups = []\n for _, g in x.groupby((x[\"Activity\"] != x[\"Activity\"].shift()).cumsum()):\n if len(g) > 2 and g[\"Activity\"].iat[0] == \"A\":\n groups.append(g)\n\n avgs = sum(g[\"Time\"].max() - g[\"Time\"].min() for g in groups) / len(groups)\n\n return pd.Series(\n [\"A\", len(groups), avgs], index=[\"Activity\", \"Count\", \"AVGTime\"]\n )\n\n\ndf[\"Time\"] = df[\"Time\"].astype(int)\nx = df.groupby(\"Person\", as_index=False).apply(group_func)\nprint(x)\n\nPrints:\n Person Activity Count AVGTime\n0 P1 A 2 3.0\n1 P2 A 1 0.0\n\n" ]
[ 0, 0 ]
[]
[]
[ "dataframe", "group_by", "pandas", "python" ]
stackoverflow_0074575294_dataframe_group_by_pandas_python.txt
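Both answers above loop over groups in Python; for the roughly 7 million rows mentioned in the question, a fully vectorised pandas variant of the same run-labelling idea may be worth trying. The frame and column names are the question's; the helper name summarize_runs and the "end time minus start time" span (the question's definition) are our additions.

import pandas as pd

def summarize_runs(df, activity="A", min_len=3):
    # Label consecutive runs of identical Person+Activity values.
    df = df.copy()
    df["Time"] = df["Time"].astype(int)
    run_id = (
        (df["Person"] != df["Person"].shift())
        | (df["Activity"] != df["Activity"].shift())
    ).cumsum()
    runs = df.groupby(run_id).agg(
        Person=("Person", "first"),
        Activity=("Activity", "first"),
        length=("Time", "size"),
        span=("Time", lambda s: s.iloc[-1] - s.iloc[0]),  # end minus start
    )
    # Keep only runs of the requested activity with more than two rows.
    runs = runs[(runs["Activity"] == activity) & (runs["length"] >= min_len)]
    return runs.groupby(["Person", "Activity"], as_index=False).agg(
        Count=("span", "size"), AVGTime=("span", "mean")
    )

For the sample frame this reproduces the target result: P1 with Count 2 and AVGTime 3.0, P2 with Count 1 and AVGTime 0.0.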
Q: How can I remove files that have an unknown number in them? I have some code that writes out files with names like this: body00123.txt body00124.txt body00125.txt body-1-2126.txt body-1-2127.txt body-1-2128.txt body-3-3129.txt body-3-3130.txt body-3-3131.txt Such that the first two numbers in the file can be 'negative', but the last 3 numbers are not. I have a list such as this: 123 127 129 And I want to remove all the files that don't end with one of these numbers. An example of the desired leftover files would be like this: body00123.txt body-1-2127.txt body-3-3129.txt My code is running in python, so I have tried: if i not in myList: os.system('rm body*' + str(i) + '.txt') And this resulted in every file being deleted. The solution should be such that any .txt file that ends with a number contained by myList should be kept. All other .txt files should be deleted. This is why I'm trying to use a wildcard in my attempt. A: Because all the files end in .txt, you can cut that part out and use the str.endswith() function. str.endswith() accepts a tuple of strings, and sees if your string ends in any of them. As a result, you can do something like this: all_file_list = [...] keep_list = [...] files_to_remove = [] file_to_remove_tup = tuple(files_to_remove) for name in all_file_list: if name[:-4].endswith(file_to_remove_tup) files_to_remove.append(name) # or os.remove(name)
How can I remove files that have an unknown number in them?
I have some code that writes out files with names like this: body00123.txt body00124.txt body00125.txt body-1-2126.txt body-1-2127.txt body-1-2128.txt body-3-3129.txt body-3-3130.txt body-3-3131.txt Such that the first two numbers in the file can be 'negative', but the last 3 numbers are not. I have a list such as this: 123 127 129 And I want to remove all the files that don't end with one of these numbers. An example of the desired leftover files would be like this: body00123.txt body-1-2127.txt body-3-3129.txt My code is running in python, so I have tried: if i not in myList: os.system('rm body*' + str(i) + '.txt') And this resulted in every file being deleted. The solution should be such that any .txt file that ends with a number contained by myList should be kept. All other .txt files should be deleted. This is why I'm trying to use a wildcard in my attempt.
[ "Because all the files end in .txt, you can cut that part out and use the str.endswith() function. str.endswith() accepts a tuple of strings, and sees if your string ends in any of them. As a result, you can do something like this:\nall_file_list = [...]\nkeep_list = [...]\n\nfiles_to_remove = []\n\nfile_to_remove_tup = tuple(files_to_remove)\n\nfor name in all_file_list:\n if name[:-4].endswith(file_to_remove_tup)\n files_to_remove.append(name)\n # or os.remove(name)\n\n" ]
[ 0 ]
[]
[]
[ "filenames", "python", "wildcard" ]
stackoverflow_0074578460_filenames_python_wildcard.txt
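The endswith idea in the answer above is sound, but as written its tuple is built from the still-empty files_to_remove list and the if line is missing a colon. A minimal working sketch along the same lines, assuming the files sit in the current directory and that the kept numbers are exactly the trailing three digits:

import glob
import os

keep_numbers = ["123", "127", "129"]                  # the list from the question
keep_suffixes = tuple(n + ".txt" for n in keep_numbers)

for path in glob.glob("body*.txt"):
    if not path.endswith(keep_suffixes):              # str.endswith accepts a tuple
        os.remove(path)

This keeps body00123.txt, body-1-2127.txt and body-3-3129.txt from the example listing and deletes the rest.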
Q: How do I remove the border of a Image Button in Tkinter? I know how to remove the border of a Tkinter Button and Image. It is done pretty much exactly like how you do it for everything else borderwidth=0 What I need help with if why, even though I put that in the widget's 'design parameters', it still has a border. My code is below. # Imports the tkinter library. from tkinter import * from tkmacosx import Button selectedBackground = "black" selectedForeground = "#22fd35" root = Tk() root.configure(bg=selectedBackground) def openCipher(): print("open cipher") def openDecipher(): print("open decipher") cipherButton = Button(root, text=" Cipher ", padx=40, pady=20, command=openCipher, borderwidth=0, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground) cipherButton.grid(row=1, column=0) decipherButton = Button(root, text="Decipher", padx=40, pady=20, command=openDecipher, borderwidth=0, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=1, column=2) spacer1 = Label(root, text=" ", padx=10, pady=1, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=4, column=1) quitButton = Button(root, text="Exit d3cryptt", padx=10, pady=5, command=root.quit, borderwidth=0, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=5, column=1) spacer2 = Label(root, text=" ", padx=10, pady=1, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=6, column=1, pady=30) # changecolour = Button(root, text="change colour", padx=1, pady=5, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground, command=lambda: changeColour(selectedBackground3, selectedForeground3)).grid(row=7, column=0) theme1 = PhotoImage(file = "/Documents/theme1button.png") theme1Button = Button(root, image=theme1, borderwidth=0, background=selectedBackground, command=openCipher) theme1Button.place(x=50, y=100) #Enter the event main loop root.mainloop() here is the image for the button if you want to test it out yourself. https://i.stack.imgur.com/OzB58.png The image appears on the screen with a border around it, even with borderwidth = 0, as seen in the image below. I am not sure of any other solutions on how to fix this. I have even tried changing it from .place to .grid and it still had the border around it. It may be because it is not on a canvas, but I will need someone to clarify for me if that is the problem. And if they could instruct me on how to do that, or a helpful video on how to do that, it would be greatly appreciated. I appreciate any advice given. A: try hightlightthickness: theme1Button = Button(root, image=theme1, borderwidth=0, highlightthickness=0, background=selectedBackground, command=openCipher) A: It's because of MacOs. I had a similar problem; the only fix was using a label instead and then binding the click event with a function. You could do something like: theme1Label = Label(root, image=theme1, borderwidth=0, background=selectedBackground) theme1Label.bind('<Button>', openCipher) theme1Label.place(x=50, y=100) And then in the function just pass the 'event' parameter: def openCipher(event): print("open cipher")
How do I remove the border of a Image Button in Tkinter?
I know how to remove the border of a Tkinter Button and Image. It is done pretty much exactly like how you do it for everything else borderwidth=0 What I need help with if why, even though I put that in the widget's 'design parameters', it still has a border. My code is below. # Imports the tkinter library. from tkinter import * from tkmacosx import Button selectedBackground = "black" selectedForeground = "#22fd35" root = Tk() root.configure(bg=selectedBackground) def openCipher(): print("open cipher") def openDecipher(): print("open decipher") cipherButton = Button(root, text=" Cipher ", padx=40, pady=20, command=openCipher, borderwidth=0, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground) cipherButton.grid(row=1, column=0) decipherButton = Button(root, text="Decipher", padx=40, pady=20, command=openDecipher, borderwidth=0, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=1, column=2) spacer1 = Label(root, text=" ", padx=10, pady=1, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=4, column=1) quitButton = Button(root, text="Exit d3cryptt", padx=10, pady=5, command=root.quit, borderwidth=0, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=5, column=1) spacer2 = Label(root, text=" ", padx=10, pady=1, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground).grid(row=6, column=1, pady=30) # changecolour = Button(root, text="change colour", padx=1, pady=5, background=selectedBackground, foreground=selectedForeground, highlightbackground=selectedForeground, command=lambda: changeColour(selectedBackground3, selectedForeground3)).grid(row=7, column=0) theme1 = PhotoImage(file = "/Documents/theme1button.png") theme1Button = Button(root, image=theme1, borderwidth=0, background=selectedBackground, command=openCipher) theme1Button.place(x=50, y=100) #Enter the event main loop root.mainloop() here is the image for the button if you want to test it out yourself. https://i.stack.imgur.com/OzB58.png The image appears on the screen with a border around it, even with borderwidth = 0, as seen in the image below. I am not sure of any other solutions on how to fix this. I have even tried changing it from .place to .grid and it still had the border around it. It may be because it is not on a canvas, but I will need someone to clarify for me if that is the problem. And if they could instruct me on how to do that, or a helpful video on how to do that, it would be greatly appreciated. I appreciate any advice given.
[ "try hightlightthickness:\ntheme1Button = Button(root, image=theme1, borderwidth=0, highlightthickness=0, background=selectedBackground, command=openCipher)\n\n", "It's because of MacOs. I had a similar problem; the only fix was using a label instead and then binding the click event with a function.\nYou could do something like:\ntheme1Label = Label(root, image=theme1, borderwidth=0, background=selectedBackground) \ntheme1Label.bind('<Button>', openCipher)\ntheme1Label.place(x=50, y=100)\n\nAnd then in the function just pass the 'event' parameter:\ndef openCipher(event):\n print(\"open cipher\")\n\n" ]
[ 4, 0 ]
[]
[]
[ "python", "tkinter", "tkinter_button", "tkinter_label" ]
stackoverflow_0072920042_python_tkinter_tkinter_button_tkinter_label.txt
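Putting the two answers together, a minimal sketch that tries highlightthickness=0 first and falls back to the Label workaround is below. It uses plain tkinter rather than tkmacosx, and the image path, colours and coordinates are placeholders, so behaviour under the macOS Aqua theme may still differ slightly.

import tkinter as tk

root = tk.Tk()
root.configure(bg="black")

theme1 = tk.PhotoImage(file="theme1button.png")       # placeholder path

def open_cipher(event=None):
    print("open cipher")

# Attempt 1: suppress both the relief border and the focus highlight ring.
button = tk.Button(root, image=theme1, command=open_cipher,
                   borderwidth=0, highlightthickness=0,
                   bg="black", activebackground="black")
button.place(x=50, y=100)

# Attempt 2: if the native theme still paints a frame, use a Label and bind clicks.
label = tk.Label(root, image=theme1, borderwidth=0, bg="black")
label.bind("<Button-1>", open_cipher)
label.place(x=50, y=200)

root.mainloop()

Because open_cipher takes event=None, the same function works both as a Button command and as a bind callback.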
Q: How to sort my output form my program without changing the order of my whole loop? I have this code: words = open(input('Enter the name of the file: ')).read().lower().split() number_of_words = int(input('Enter how many top words you want to see: ')) uniques = [] stop_words = ["a", "an", "and", "in", "is"] for word in words: check_special = False if word.isalnum(): check_special = True if word not in uniques and word not in stop_words and check_special: uniques.append(word) counts = [] for unique in uniques: count = 0 for word in words: if word == unique: count += 1 counts.append((count, unique)) counts.sort() counts.reverse() counts_dict = {count: [] for count, word in counts} for count, word in counts: counts_dict[count].append(word) sorted_count = sorted(counts_dict) count_num_word = 0 for count in counts_dict: if count_num_word >= number_of_words: break print('The following words appeared %d times each: %s' % (count, ', '.join(counts_dict[count]))) count_num_word += 1 It prints the most common words in a txt file. It outputs this: The following words appeared 8 times each: what, just, apple I want the output after "each:" to be sorted in an alphabetical order without changing the order lines print. How can I do this? Thanks! Something like this: The following words appeared 8 times each: apple, just, what A: Use sorted: print('The following words appeared %d times each: %s' % (count, ', '.join(sorted(counts_dict[count]))))
How to sort my output from my program without changing the order of my whole loop?
I have this code: words = open(input('Enter the name of the file: ')).read().lower().split() number_of_words = int(input('Enter how many top words you want to see: ')) uniques = [] stop_words = ["a", "an", "and", "in", "is"] for word in words: check_special = False if word.isalnum(): check_special = True if word not in uniques and word not in stop_words and check_special: uniques.append(word) counts = [] for unique in uniques: count = 0 for word in words: if word == unique: count += 1 counts.append((count, unique)) counts.sort() counts.reverse() counts_dict = {count: [] for count, word in counts} for count, word in counts: counts_dict[count].append(word) sorted_count = sorted(counts_dict) count_num_word = 0 for count in counts_dict: if count_num_word >= number_of_words: break print('The following words appeared %d times each: %s' % (count, ', '.join(counts_dict[count]))) count_num_word += 1 It prints the most common words in a txt file. It outputs this: The following words appeared 8 times each: what, just, apple I want the output after "each:" to be sorted in an alphabetical order without changing the order lines print. How can I do this? Thanks! Something like this: The following words appeared 8 times each: apple, just, what
[ "Use sorted:\n print('The following words appeared %d times each: %s' % (count, ', '.join(sorted(counts_dict[count]))))\n\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074578550_python_python_3.x.txt
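The one-line fix is the sorted() call shown above. If the surrounding counting code is also up for revision, collections.Counter can replace the nested loops; a sketch is below, where input.txt and the top-3 limit stand in for the two input() prompts.

from collections import Counter

stop_words = {"a", "an", "and", "in", "is"}
words = [w for w in open("input.txt").read().lower().split()
         if w.isalnum() and w not in stop_words]

number_of_words = 3                       # placeholder for the user's input
by_count = {}
for word, count in Counter(words).items():
    by_count.setdefault(count, []).append(word)

for count in sorted(by_count, reverse=True)[:number_of_words]:
    print('The following words appeared %d times each: %s'
          % (count, ', '.join(sorted(by_count[count]))))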
Q: I did it by the tutorial and it's not working I found some projects on youtube, and this is one of them. I'm trying with the password manager program. Here's the link: https://www.youtube.com/watch?v=DLn3jOsNRVE And here's my code: from cryptography.fernet import Fernet ''' def write_key(): key = Fernet.generate_key() with open("key.key", "wb") as key_file: key_file.write(key)''' def load_key(): file = open("key.key", "rb") key = file.read() file.close() return key key = load_key() fer = Fernet(key) def view(): with open('passwords.txt', 'r') as f: for line in f.readlines(): data = line.rstrip() user, passw = data.split("|") print("User:", user, "| Password:", fer.decrypt(passw.encode()).decode()) def add(): name = input('Account Name: ') pwd = input("Password: ") with open('passwords.txt', 'a') as f: f.write(name + "|" + fer.encrypt(pwd.encode()).decode() + "\n") while True: mode = input( "Would you like to add a new password or view existing ones (view, add), press q to quit? ").lower() if mode == "q": break if mode == "view": view() elif mode == "add": add() else: print("Invalid mode.") continue #this is a module that will alow you to encrypt txt-s # pass is used as a placeholder for future code # rstrip removes any traling chars #split will look for the char in the arg and it will split the string there #a append w write r read r+ read and write #with w mode, you completely overwrite the file so be careful #with a mode you can add smthing to the end I followed the instructions precisely and I have no idea what could cause my problem. When I run it, I get an error message: The guy even has his version of the code on github. I copy-pasted it and still doesn't work On the video, does the key.key file generate itself, or not? A: The only possible code that could write to the key.key file is both: commented out; and not called even if it were not commented out, So, no, it's not correct to say that "the key.key file generate[s] itself". Looking over the linked video, the presenter at some point (at 1:29:50, more precisely) had the code call that function to create the file, then removed that call and commented out that function. Likely cause of your problem is that you made a mistake in the process somewhere(1), and this resulted in the file not being created (or being created somewhere other than where it's expected). Suggest you go back to that point in the video and re-do it. Or, you could just create the key.key file, containing the content (taken from the video): Raq7IMZ4QkqK20j7lKT3bxJTgwxeJFYx4ADjTqVKdQY= That may get you going faster than revisiting the steps that the presenter took. (1) Re your comment that "[t]he guy even has his version of the code on github", it may be that you thought you could bypass the video and just go straight to the final code. If so, that was a mistake, as the final code expects you to have run the incomplete code in order to generate the key file. If so, I would consider that a failing of the presenter. It would have been far better to leave the keyfile-creating code in and call it, for example, when you ran the code with python the_code.py --make-key-file.
I did it by the tutorial and it's not working
I found some projects on youtube, and this is one of them. I'm trying with the password manager program. Here's the link: https://www.youtube.com/watch?v=DLn3jOsNRVE And here's my code: from cryptography.fernet import Fernet ''' def write_key(): key = Fernet.generate_key() with open("key.key", "wb") as key_file: key_file.write(key)''' def load_key(): file = open("key.key", "rb") key = file.read() file.close() return key key = load_key() fer = Fernet(key) def view(): with open('passwords.txt', 'r') as f: for line in f.readlines(): data = line.rstrip() user, passw = data.split("|") print("User:", user, "| Password:", fer.decrypt(passw.encode()).decode()) def add(): name = input('Account Name: ') pwd = input("Password: ") with open('passwords.txt', 'a') as f: f.write(name + "|" + fer.encrypt(pwd.encode()).decode() + "\n") while True: mode = input( "Would you like to add a new password or view existing ones (view, add), press q to quit? ").lower() if mode == "q": break if mode == "view": view() elif mode == "add": add() else: print("Invalid mode.") continue #this is a module that will alow you to encrypt txt-s # pass is used as a placeholder for future code # rstrip removes any traling chars #split will look for the char in the arg and it will split the string there #a append w write r read r+ read and write #with w mode, you completely overwrite the file so be careful #with a mode you can add smthing to the end I followed the instructions precisely and I have no idea what could cause my problem. When I run it, I get an error message: The guy even has his version of the code on github. I copy-pasted it and still doesn't work On the video, does the key.key file generate itself, or not?
[ "The only possible code that could write to the key.key file is both:\n\ncommented out; and\nnot called even if it were not commented out,\n\nSo, no, it's not correct to say that \"the key.key file generate[s] itself\".\n\nLooking over the linked video, the presenter at some point (at 1:29:50, more precisely) had the code call that function to create the file, then removed that call and commented out that function.\nLikely cause of your problem is that you made a mistake in the process somewhere(1), and this resulted in the file not being created (or being created somewhere other than where it's expected). Suggest you go back to that point in the video and re-do it.\nOr, you could just create the key.key file, containing the content (taken from the video):\nRaq7IMZ4QkqK20j7lKT3bxJTgwxeJFYx4ADjTqVKdQY=\n\nThat may get you going faster than revisiting the steps that the presenter took.\n\n(1) Re your comment that \"[t]he guy even has his version of the code on github\", it may be that you thought you could bypass the video and just go straight to the final code. If so, that was a mistake, as the final code expects you to have run the incomplete code in order to generate the key file.\nIf so, I would consider that a failing of the presenter. It would have been far better to leave the keyfile-creating code in and call it, for example, when you ran the code with python the_code.py --make-key-file.\n" ]
[ 0 ]
[]
[]
[ "file", "filenotfounderror", "python" ]
stackoverflow_0074578541_file_filenotfounderror_python.txt
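The missing piece the answer describes is the one-off key generation. A sketch that restores the commented-out write_key and only creates key.key when it does not already exist (overwriting an existing key would make previously saved passwords undecryptable) might look like this; the default path and the __main__ guard are our choices.

import os
from cryptography.fernet import Fernet

def write_key(path="key.key"):
    # Run once: create the key file the rest of the script expects.
    with open(path, "wb") as key_file:
        key_file.write(Fernet.generate_key())

def load_key(path="key.key"):
    with open(path, "rb") as key_file:
        return key_file.read()

if __name__ == "__main__":
    if not os.path.exists("key.key"):
        write_key()
    fer = Fernet(load_key())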
Q: how to print a string a certain number of times on a line then move to a new line I have the list ['a','b','c','d','e','f','g']. I want to print it a certain way like this: a b c d e f g this is what I've tried: result = '' for i in range(len(example)): result += example[i] + ' ' if len(result) == 3: print('\n') print(result) but with this I continue to get one single line A: Iterate over a range of indices and step by 3, creating slices of three elements at a time. >>> a = ['a','b','c','d','e','f','g'] >>> for i in range(0, len(a), 3): ... print(a[i:i+3]) ... ['a', 'b', 'c'] ['d', 'e', 'f'] ['g'] >>> To format the data, you could either join the slice with ' ' or expand it out and use the sep argument to print. >>> for i in range(0, len(a), 3): ... print(' '.join(a[i:i+3])) ... a b c d e f g >>> for i in range(0, len(a), 3): ... print(*a[i:i+3], sep=' ', end='\n') ... a b c d e f g >>> A: The problem with the example code above is that len(result) will never be 3. result is always increased by a character plus a space, so its length will always be a multiple of 2. While you could adjust the check value to compensate, it will break if the list elements are ever more than 1 character. Additionally, you don't need to explicitly print a "\n" in Python, as any print statement will automatically end with a new line character unless you pass a parameter to not do that. The following takes a different approach and will work for any list, printing 3 elements, separated by spaces, and then printing a new line after every third element. The end parameter of print is what should be added after the other arguments are printed. By default it is "\n", so here I use a space instead. Once the index counter exceeds the list size, we break out of the loop. i = 0 while True: print(example[i], end=' ') i += 1 if i >= len(example): break if i % 3 == 0: print() # new line
how to print a string a certain number of times on a line then move to a new line
I have the list ['a','b','c','d','e','f','g']. I want to print it a certain way like this: a b c d e f g this is what I've tried: result = '' for i in range(len(example)): result += example[i] + ' ' if len(result) == 3: print('\n') print(result) but with this I continue to get one single line
[ "Iterate over a range of indices and step by 3, creating slices of three elements at a time.\n>>> a = ['a','b','c','d','e','f','g']\n>>> for i in range(0, len(a), 3):\n... print(a[i:i+3])\n... \n['a', 'b', 'c']\n['d', 'e', 'f']\n['g']\n>>> \n\nTo format the data, you could either join the slice with ' ' or expand it out and use the sep argument to print.\n>>> for i in range(0, len(a), 3):\n... print(' '.join(a[i:i+3]))\n... \na b c\nd e f\ng\n>>> for i in range(0, len(a), 3):\n... print(*a[i:i+3], sep=' ', end='\\n')\n... \na b c\nd e f\ng\n>>> \n\n", "The problem with the example code above is that len(result) will never be 3. result is always increased by a character plus a space, so its length will always be a multiple of 2. While you could adjust the check value to compensate, it will break if the list elements are ever more than 1 character.\nAdditionally, you don't need to explicitly print a \"\\n\" in Python, as any print statement will automatically end with a new line character unless you pass a parameter to not do that.\nThe following takes a different approach and will work for any list, printing 3 elements, separated by spaces, and then printing a new line after every third element.\nThe end parameter of print is what should be added after the other arguments are printed. By default it is \"\\n\", so here I use a space instead.\nOnce the index counter exceeds the list size, we break out of the loop.\ni = 0\nwhile True:\n print(example[i], end=' ')\n i += 1\n if i >= len(example):\n break\n if i % 3 == 0:\n print() # new line\n\n" ]
[ 2, 0 ]
[ "Using enumerate\nexample = ['a','b','c','d','e','f','g'] \nmax_rows = 3\nresult = \"\"\nfor index, element in enumerate(example):\n if (index % max_rows) == 0:\n result += \"\\n\"\n result += element\n\nprint(result)\n\n\n" ]
[ -1 ]
[ "python" ]
stackoverflow_0074578552_python.txt
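The slicing answer above is the idiomatic route; for completeness, the original accumulator loop can also be repaired directly by counting items instead of characters and resetting the buffer after each printed line. The list literal is the question's example.

example = ['a', 'b', 'c', 'd', 'e', 'f', 'g']

result = ''
for i, item in enumerate(example, start=1):
    result += item + ' '
    if i % 3 == 0:            # three items collected: flush this line
        print(result.rstrip())
        result = ''
if result:                    # print any leftover items on a final line
    print(result.rstrip())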