content: string (lengths 85 to 101k)
title: string (lengths 0 to 150)
question: string (lengths 15 to 48k)
answers: list
answers_scores: list
non_answers: list
non_answers_scores: list
tags: list
name: string (lengths 35 to 137)
Q: How to pad edge with black color (0 value) in affine registration using SimpleITK I used SimpleITK to do affine registration and found that after the transform the moving image was scaled smaller than its original size, while the edges were padded with gray. How can I pad the edges with black (0 value) instead? The output I got: Moved Image The output I want: Expected Moved Image # Read moving and fixed images moving_path = os.path.join(DATA_DIR, 'image/original', df['MovingFile'][i]) fixed_path = os.path.join(DATA_DIR, 'image/original', df['FixedFile'][i]) moving = sitk.ReadImage(moving_path, sitk.sitkFloat32, imageIO='PNGImageIO') fixed = sitk.ReadImage(fixed_path, sitk.sitkFloat32, imageIO='PNGImageIO') # The affine registration initialTx = sitk.CenteredTransformInitializer(fixed, moving, sitk.AffineTransform(fixed.GetDimension())) R = sitk.ImageRegistrationMethod() shrinkFactors = [3, 2, 1] smoothingSigmas = [2.0, 1.0, 1.0] R.SetShrinkFactorsPerLevel(shrinkFactors) R.SetSmoothingSigmasPerLevel(smoothingSigmas) R.SetMetricAsJointHistogramMutualInformation(20) R.MetricUseFixedImageGradientFilterOff() lr = 1.0 iterations = 100 min_converge = 1e-6 window_size = 10 estimate_lr = R.EachIteration R.SetOptimizerAsGradientDescent(lr, iterations, min_converge, window_size, estimate_lr) R.SetOptimizerScalesFromIndexShift() R.SetInitialTransform(initialTx) R.SetInterpolator(sitk.sitkLinear) outTx = R.Execute(fixed, moving) # Save transform matrix pair_name = '{}_{}_{}-{}'.format( df['PatientID'][i], df['Time'][i], re.split('[-_.]', df['MovingFile'][i])[6], re.split('[-_.]', df['FixedFile'][i])[6]) sitk.WriteTransform(outTx, '{}/matric/{}/{}.mat'.format(DATA_DIR, METHOD, pair_name)) # Resampling resampler = sitk.ResampleImageFilter() resampler.SetReferenceImage(fixed) resampler.SetInterpolator(sitk.sitkLinear) resampler.SetDefaultPixelValue(100) resampler.SetTransform(outTx) # Transform out = resampler.Execute(moving) simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8) simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8) cimg = sitk.Compose(simg1, simg2, simg1 // 2.0 + simg2 // 2.0) # Save moved and diff image moved_path = '{}/image/moved/{}/moved_{}.png'.format(DATA_DIR, METHOD, pair_name) sitk.WriteImage(cimg, '{}/image/diff/{}/diff_{}.png'.format(DATA_DIR, METHOD, pair_name), imageIO='PNGImageIO') sitk.WriteImage(simg2, moved_path, imageIO='PNGImageIO') the output I got: Moved Image The output I want: Expected Moved Image A: You have the default pixel value set to 100 in the resampler. That's your grey. If you set it to 0, you'll get the black background you want.
How to pad edge with black color (0 value) in affine registration using SimpleITK
I used SimpleITK to do affine registration and found that after the transform the moving image was scaled smaller than its original size, while the edges were padded with gray. How can I pad the edges with black (0 value) instead? The output I got: Moved Image The output I want: Expected Moved Image # Read moving and fixed images moving_path = os.path.join(DATA_DIR, 'image/original', df['MovingFile'][i]) fixed_path = os.path.join(DATA_DIR, 'image/original', df['FixedFile'][i]) moving = sitk.ReadImage(moving_path, sitk.sitkFloat32, imageIO='PNGImageIO') fixed = sitk.ReadImage(fixed_path, sitk.sitkFloat32, imageIO='PNGImageIO') # The affine registration initialTx = sitk.CenteredTransformInitializer(fixed, moving, sitk.AffineTransform(fixed.GetDimension())) R = sitk.ImageRegistrationMethod() shrinkFactors = [3, 2, 1] smoothingSigmas = [2.0, 1.0, 1.0] R.SetShrinkFactorsPerLevel(shrinkFactors) R.SetSmoothingSigmasPerLevel(smoothingSigmas) R.SetMetricAsJointHistogramMutualInformation(20) R.MetricUseFixedImageGradientFilterOff() lr = 1.0 iterations = 100 min_converge = 1e-6 window_size = 10 estimate_lr = R.EachIteration R.SetOptimizerAsGradientDescent(lr, iterations, min_converge, window_size, estimate_lr) R.SetOptimizerScalesFromIndexShift() R.SetInitialTransform(initialTx) R.SetInterpolator(sitk.sitkLinear) outTx = R.Execute(fixed, moving) # Save transform matrix pair_name = '{}_{}_{}-{}'.format( df['PatientID'][i], df['Time'][i], re.split('[-_.]', df['MovingFile'][i])[6], re.split('[-_.]', df['FixedFile'][i])[6]) sitk.WriteTransform(outTx, '{}/matric/{}/{}.mat'.format(DATA_DIR, METHOD, pair_name)) # Resampling resampler = sitk.ResampleImageFilter() resampler.SetReferenceImage(fixed) resampler.SetInterpolator(sitk.sitkLinear) resampler.SetDefaultPixelValue(100) resampler.SetTransform(outTx) # Transform out = resampler.Execute(moving) simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8) simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8) cimg = sitk.Compose(simg1, simg2, simg1 // 2.0 + simg2 // 2.0) # Save moved and diff image moved_path = '{}/image/moved/{}/moved_{}.png'.format(DATA_DIR, METHOD, pair_name) sitk.WriteImage(cimg, '{}/image/diff/{}/diff_{}.png'.format(DATA_DIR, METHOD, pair_name), imageIO='PNGImageIO') sitk.WriteImage(simg2, moved_path, imageIO='PNGImageIO') the output I got: Moved Image The output I want: Expected Moved Image
[ "You have the default pixel value set to 100 in the resampled. That's your grey. If you set it to 0, you'll get the black background you want.\n" ]
[ 0 ]
[]
[]
[ "affinetransform", "padding", "python", "registration", "simpleitk" ]
stackoverflow_0074621536_affinetransform_padding_python_registration_simpleitk.txt
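A minimal sketch of the accepted fix above, assuming fixed, moving and outTx exist as in the question's code: set the resampler's default pixel value to 0 so pixels mapped from outside the moving image are filled with black instead of grey.

import SimpleITK as sitk

# Resample the moving image onto the fixed image's grid; any output pixel
# that maps outside the moving image's domain receives DefaultPixelValue.
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)  # 0 = black padding instead of 100 = grey
resampler.SetTransform(outTx)
out = resampler.Execute(moving)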
Q: Could not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found Using tensorflow 2.4.1 When I run my program, I'm getting this error and can't use my gpu. I'm using CUDA 11.0, cudnn 8.0 2021-02-07 03:36:18.132005: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cudart64_110.dll WARNING:tensorflow:From D:/PycharmProjects/pythonProject/models/kpş,i.py:5: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version. Instructions for updating: Use `tf.config.list_physical_devices('GPU')` instead. 2021-02-07 03:36:19.735127: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. 2021-02-07 03:36:19.739052: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library nvcuda.dll 2021-02-07 03:36:20.715634: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: pciBusID: 0000:01:00.0 name: GeForce GTX 1650 computeCapability: 7.5 coreClock: 1.56GHz coreCount: 16 deviceMemorySize: 4.00GiB deviceMemoryBandwidth: 119.24GiB/s 2021-02-07 03:36:20.716281: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cudart64_110.dll 2021-02-07 03:36:20.723519: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cublas64_11.dll 2021-02-07 03:36:20.724040: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cublasLt64_11.dll 2021-02-07 03:36:20.729436: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cufft64_10.dll 2021-02-07 03:36:20.731800: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library curand64_10.dll 2021-02-07 03:36:20.741580: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cusolver64_10.dll 2021-02-07 03:36:20.745576: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cusparse64_11.dll 2021-02-07 03:36:20.746657: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found 2021-02-07 03:36:20.746971: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1757] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform. Skipping registering GPU devices... 
2021-02-07 03:36:20.836861: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1261] Device interconnect StreamExecutor with strength 1 edge matrix: 2021-02-07 03:36:20.837144: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1267] 0 2021-02-07 03:36:20.837314: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1280] 0: N 2021-02-07 03:36:20.837493: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set A: I think I can help you with providing a cudnn64_8.dll file (this is the download link: https://www.dll-files.com/cudnn64_8.dll.html). When you get the file, you can just put it in your bin directory. For example, on the Windows platform, you can put it into C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\bin. A: The missing dll file is located in the cuDNN folder. I was able to resolve the issue by copying the cudnn64_8.dll file to the CUDA folder, i.e., C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6\bin. cuDNN is listed as a requirement for tensorflow to work and you can download it here. You need to register a developer account first though. A: Watch this video to solve this problem; this file-not-found error arises due to a missing Microsoft Visual Studio C++ redistributable file in the CUDA folder. Additionally, with PyTorch in a conda environment, there is no additional CUDA and cuDNN installation, because after you type conda install pytorch, conda installs both CUDA and cuDNN into that environment. A: I just had this issue as well after following all of the installation instructions for CuDNN. The root cause of the problem is very simple. In the installation instructions, it tells you to add <root>\NVIDIA\CUDNN\v8.x to your PATH. At least for Tensorflow, this happens to be wrong. You need to add <root>\NVIDIA\CUDNN\v8.x\bin to your PATH. That should fix the problem. It did for me. I see a couple answers talking about moving the cudnn64_8.dll file to C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6\bin. The reason why this works is that CUDA automatically adds that bin directory to your PATH when you install it. So, moving the cudnn64_8.dll there effectively adds it to your PATH. I'd rather keep things in their proper location so I like this way more. A: I've stumbled on this error as well, and what I've figured out is that cudnn requires a separate installation. Here is a guide for the installation and the packages required https://developer.nvidia.com/cudnn. *edit -> in the link above you can download the cudnn version that tensorflow requires, then you will place them in the needed folders and that will fix your issue. A: This will solve your problem: conda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0 A: I had the same error. The solution proposed in this link worked for me. They propose to install cuDNN and explain how to do so here. For me personally, out of the explanation above, all that was missing was ZLIB (or more specifically zlibwapi), which I installed via Anaconda. A: I would like to share the steps that allowed me to solve all the issues like Could not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found Could not load library cudnn_cnn_infer64_8.dll. Could not load library cudnn_ops_infer64_8.dll. First make sure that the driver for your NVIDIA GPU is installed. You can download the driver here. Install the CUDA Toolkit that you can download here. Download the cudnn zip file from here and unzip it. 
From the unzipped cudnn, copy-paste the files in the bin, include and lib subdirectories to the corresponding bin, include and lib subdirectories located at ...\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.x. Download the Zlib archive from here and unzip it. Copy-paste zlibwapi.dll from the Zlib archive to the bin subdirectory at ...\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.x.
Could not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found
Using tensorflow 2.4.1 When I run my program, I'm getting this error and can't use my gpu. I'm using CUDA 11.0, cudnn 8.0 2021-02-07 03:36:18.132005: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cudart64_110.dll WARNING:tensorflow:From D:/PycharmProjects/pythonProject/models/kpş,i.py:5: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version. Instructions for updating: Use `tf.config.list_physical_devices('GPU')` instead. 2021-02-07 03:36:19.735127: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags. 2021-02-07 03:36:19.739052: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library nvcuda.dll 2021-02-07 03:36:20.715634: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: pciBusID: 0000:01:00.0 name: GeForce GTX 1650 computeCapability: 7.5 coreClock: 1.56GHz coreCount: 16 deviceMemorySize: 4.00GiB deviceMemoryBandwidth: 119.24GiB/s 2021-02-07 03:36:20.716281: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cudart64_110.dll 2021-02-07 03:36:20.723519: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cublas64_11.dll 2021-02-07 03:36:20.724040: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cublasLt64_11.dll 2021-02-07 03:36:20.729436: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cufft64_10.dll 2021-02-07 03:36:20.731800: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library curand64_10.dll 2021-02-07 03:36:20.741580: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cusolver64_10.dll 2021-02-07 03:36:20.745576: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cusparse64_11.dll 2021-02-07 03:36:20.746657: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found 2021-02-07 03:36:20.746971: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1757] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform. Skipping registering GPU devices... 2021-02-07 03:36:20.836861: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1261] Device interconnect StreamExecutor with strength 1 edge matrix: 2021-02-07 03:36:20.837144: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1267] 0 2021-02-07 03:36:20.837314: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1280] 0: N 2021-02-07 03:36:20.837493: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set
[ "I think I can help you with providing a cudnn64_8.dll file (this is the download link: https://www.dll-files.com/cudnn64_8.dll.html). When you get the file, you can just put in your bin directory. For example, usually in windows platform, you can put it into C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.3\\bin.\n", "The missing dll file is located in the cuDNN folder. I was able to resolve the issue by copying the cudnn64_8.dll file to the CUDA folder, i.e., C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.6\\bin.\ncuDNN is listed as a requirement for tensorflow to work and you can download it here. You need to register a developer account first though.\n", "watch this video to solve this problem, \nthis file not found error arises due to the missing of Microsoft visual studio C++ reproducible file in the CUDA folder. \nadditional;\nwith the PyTorch in conda environment, there is no addition CUDA and Cudnn installation, because after type conda install pytorch, conda installs both CUDA and cudnn into that conda environment.\n", "I just had this issue as well after following all of the installation instructions for CuDNN. The root cause of the problem is very simple. In the installation instructions, it tells you to add <root>\\NVIDIA\\CUDNN\\v8.x to your PATH. At least for Tensorflow, this happens to be wrong. You need to add <root>\\NVIDIA\\CUDNN\\v8.x\\bin to your PATH. That should fix the problem. It did for me.\nI see a couple answers talking about moving the cudnn64_8.dll file to C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.6\\bin. The reason why this works is that CUDA automatically adds that bin directory to your PATH when you install it. So, moving the cudnn64_8.dll there effectively adds it to your PATH.\nI'd rather keep things in their proper location so I like this way more.\n", "I've stumbled at this error as well and what I've figured is that cudnn requires a different installation. Here is a guide for the installation and the packages required https://developer.nvidia.com/cudnn.\n*edit -> in the link above you can download the cuddn version that tensorflow requires, then you will place them in the needed folders and that will fix your issue.\n", "This will solve your problem:\nconda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0\n\n", "I had the same error. The solution proposed in this link worked for me. They propose to install cuDNN and explain how to do so here\nFor me personally, our of the explanation above all that was missing was ZLIB (or more specifically zlibwapi) which I installed via Anaconda.\n", "I would like to share the steps that allowed me to solve all the issues like\n\nCould not load dynamic library 'cudnn64_8.dll'; dlerror: cudnn64_8.dll not found\nCould not load library cudnn_cnn_infer64_8.dll.\nCould not load library cudnn_ops_infer64_8.dll.\n\n\nFirst make sure that the driver for your NVIDIA GPU is installed. You can download the driver here.\nInstall the CUDA Toolkit that you can download here.\nDownload the cudnn zip file from here and unzip it.\nFrom the unzipped cudnn, copy-paste the files in the bin, include and lib subdirectories to the corresponding bin, include and lib subdirectories located at ...\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.x.\nDownload the Zlib archive from here and unzip it.\nCopy-paste zlibwapi.dll from the Zlib archive to the bin subdirectory at ...\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.x.\n\n" ]
[ 24, 19, 9, 5, 1, 0, 0, 0 ]
[]
[]
[ "python", "tensorflow" ]
stackoverflow_0066083545_python_tensorflow.txt
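Before launching TensorFlow, a small diagnostic sketch (my suggestion, not taken from the answers above; Windows-only) can confirm whether the loader can actually locate cudnn64_8.dll on the current PATH:

import ctypes
import os

# Try to load the DLL the same way TensorFlow's loader will.
try:
    ctypes.WinDLL('cudnn64_8.dll')
    print('cudnn64_8.dll found and loadable')
except OSError:
    # List the PATH entries that actually contain the DLL, if any.
    hits = [d for d in os.environ.get('PATH', '').split(os.pathsep)
            if os.path.isfile(os.path.join(d, 'cudnn64_8.dll'))]
    print('cudnn64_8.dll not loadable; PATH entries containing it:', hits or 'none')

If no PATH entry contains the DLL, the cuDNN bin directory still needs to be added to PATH as described in the answers.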
Q: Can't import module - python (unknown location) EDIT: I was able to get the modules to work by doing import google.cloud.bigquery instead of from google.cloud import BigQuery - But I am not sure why that is.... I am trying to connect to BigQuery using python for my first time ever. I looked on here for answers, but I tried all the answers I saw to no avail (my steps are shown below). Before I even test out connecting with credentials, I just wanted to make sure I can get the modules working without error. Here are my steps: python -m venv .venv venv/scripts/activate (I'm on windows) pip install --upgrade google-cloud-BigQuery Here is the python file: import requests from google.cloud import BigQuery Here is the error: File "C:\Users\Jaton\Documents\Webapps\bigquery\test.py", line 2, in <module> from google.cloud import BigQuery ImportError: cannot import name 'BigQuery' from 'google.cloud' (unknown location) I also tried doing pip install google-cloud but that didn't work. I already tried these links: Importing error "from google.cloud import bigquery" ImportError: cannot import name 'bigquery' from 'google.cloud' (unknown location) ImportError: cannot import name 'bigquery' from 'google.cloud' (unknown location) persists A: To install the library, execute: pip install google-cloud Although the documentation mentions 'BigQuery', the case-sensitive spelling to use in the code is bigquery as in from google.cloud import bigquery One annoying thing about Python documentation for many libraries is that the internals (e.g., the bigquery members above) are not spelled out in detail (contrary to Java API documentation, which gives specific details).
Can't import module - python (unknown location)
EDIT: I was able to get the modules to work by doing import google.cloud.bigquery instead of from google.cloud import BigQuery - But I am not sure why that is.... I am trying to connect to BigQuery using python for my first time ever. I looked on here for answers, but I tried all the answers I saw to no avail (my steps are shown below). Before I even test out connecting with credentials, I just wanted to make sure I can get the modules working without error. Here are my steps: python -m venv .venv venv/scripts/activate (I'm on windows) pip install --upgrade google-cloud-BigQuery Here is the python file: import requests from google.cloud import BigQuery Here is the error: File "C:\Users\Jaton\Documents\Webapps\bigquery\test.py", line 2, in <module> from google.cloud import BigQuery ImportError: cannot import name 'BigQuery' from 'google.cloud' (unknown location) I also tried doing pip install google-cloud but that didn't work. I already tried these links: Importing error "from google.cloud import bigquery" ImportError: cannot import name 'bigquery' from 'google.cloud' (unknown location) ImportError: cannot import name 'bigquery' from 'google.cloud' (unknown location) persists
[ "\nTo install the library, execute: pip install google-cloud\nAlthough\nthe documentation mentions 'BigQuery', the case-sensitive spelling to\nuse in the code is bigquery as in from google.cloud import bigquery\n\nOne thing annoying about python documentations in many libraries is that the internals (e.g., bigquery members above) are not specified as a list in detail (contrary to java api documentations that give specific details).\n" ]
[ 1 ]
[]
[]
[ "google_bigquery", "python", "python_import" ]
stackoverflow_0074621095_google_bigquery_python_python_import.txt
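To complement the answer above, a minimal sketch of the correct import and a trivial query (this assumes the google-cloud-bigquery package is installed and application-default credentials are configured; the SELECT 1 query is just a placeholder):

# pip install google-cloud-bigquery
from google.cloud import bigquery  # lowercase 'bigquery', not 'BigQuery'

client = bigquery.Client()  # picks up application-default credentials
for row in client.query('SELECT 1 AS x').result():
    print(row.x)  # prints 1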
Q: How do I represent a string as a number? I need to represent a string as a number; however, it is 8928313 characters long. Note this string can contain more than just alphabet letters, and I have to be able to convert it back efficiently too. My current (too slow) code looks like this: alpha = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ,.?!@()+-=[]/*1234567890^*{}\'"$\\&#;|%<>:`~_' alphaLeng = len(alpha) def letterNumber(letters): letters = str(letters) cof = 1 nr = 0 for i in range(len(letters)): nr += cof*alpha.find(letters[i]) cof *= alphaLeng print(i,' ',len(letters)) return str(nr) A: Ok, since other people are giving awful answers, I'm going to step in. You shouldn't do this. You shouldn't do this. An integer and an array of characters are ultimately the same thing: bytes. You can access the values in the same way. Most number representations cap out at 8 bytes (64-bits). You're looking at 8 MB, or 1 million times the largest integer representation. You shouldn't do this. Really. You shouldn't do this. Your number will just be a custom, gigantic number type that would be identical under the hood. If you really want to do this, despite all the reasons above, here's how... Code def lshift(a, b): # bitwise left shift 8 return (a << (8 * b)) def string_to_int(data): sum_ = 0 r = range(len(data)-1, -1, -1) for a, b in zip(bytearray(data), r): sum_ += lshift(a, b) return sum_; DONT DO THIS Explanation Characters are essentially bytes: they can be encoded in different ways, but ultimately you can treat them within a given encoding as a sequence of bytes. In order to convert them to a number, we can shift them left 8-bits for their position in the sequence, creating a unique number. r, the range value, is the position in reverse order: the 4th element needs to go left 24 bytes (3*8), etc. After getting the range and converting our data to 8-bit integers, we can then transform the data and take the sum, giving us our unique identifier. It will be identical byte-wise (or in reverse byte-order) to the original number, but just "as a number". This is entirely futile. Don't do it. Performance Any performance is going to be outweighed by the fact that you're creating an identical object for no valid reason, but this solution is decently performant. 1,000 elements takes ~486 microseconds, 10,000 elements takes ~20.5 ms, while 100,000 elements takes about 1.5 seconds. It would work, but you shouldn't do it. This means it scales as O(n**2), which is likely due to memory overhead of reallocating the data each time the integer size gets larger. This might take ~4 hours to process all 8e6 elements (14365 seconds, calculated fitting the lower-order data to ax**2+bx+c). Remember, this is all to get the identical byte representation as the original data. Futility Remember, there are ~1e78 to 1e82 atoms in the entire universe, on current estimates. This is ~2^275. Your value will be able to represent 2^71426504, or about 260,000 times as many bits as you need to represent every atom in the universe. You don't need such a number. You never will. A: If there are only ASCII characters, you can use the ord() and chr() built-in functions. A: There are several optimizations you can perform. For example, the find method requires searching through your string for the corresponding letter. A dictionary would be faster. Even faster might be (benchmark!) the chr function (if you're not too picky about the letter ordering) and the ord function to reverse the chr. 
But if you're not picky about ordering, it might be better if you just left-NULL-padded your string and treated it as a big binary number in memory if you don't need to display the value in any particular format. You might get some speedup by iterating over characters instead of character indices. If you're using Python 2, a large range will be slow since a list needs to be generated (use xrange instead for Python 2); Python 3 uses a generator, so it's better. Your print function is going to slow down output a fair bit, especially if you're outputting to a tty. A big number library may also buy you speed-up: Handling big numbers in code A: Your alpha.find() function needs to iterate through alpha on each loop. You can probably speed things up by using a dict, as dictionary lookups are O(1): alpha = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ,.?!@()+-=[]/*1234567890^*{}\'"$\\&#;|%<>:`~_' alpha_dict = { letter: index for index, letter in enumerate(alpha)} print(alpha.find('$')) # 83 print(alpha_dict['$']) # 83 A: Store your strings in an array of distinct values; i.e. a string table. In your dataset, use a reference number. A reference number of n corresponds to the nth element of the string table array.
How do I represent a string as a number?
I need to represent a string as a number; however, it is 8928313 characters long. Note this string can contain more than just alphabet letters, and I have to be able to convert it back efficiently too. My current (too slow) code looks like this: alpha = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ,.?!@()+-=[]/*1234567890^*{}\'"$\\&#;|%<>:`~_' alphaLeng = len(alpha) def letterNumber(letters): letters = str(letters) cof = 1 nr = 0 for i in range(len(letters)): nr += cof*alpha.find(letters[i]) cof *= alphaLeng print(i,' ',len(letters)) return str(nr)
[ "Ok, since other people are giving awful answers, I'm going to step in.\n\nYou shouldn't do this.\nYou shouldn't do this.\nAn integer and an array of characters are ultimately the same thing: bytes. You can access the values in the same way.\nMost number representations cap out at 8 bytes (64-bits). You're looking at 8 MB, or 1 million times the largest integer representation. You shouldn't do this. Really.\nYou shouldn't do this. Your number will just be a custom, gigantic number type that would be identical under the hood.\nIf you really want to do this, despite all the reasons above, here's how...\n\nCode\ndef lshift(a, b):\n # bitwise left shift 8\n return (a << (8 * b))\n\ndef string_to_int(data):\n sum_ = 0\n r = range(len(data)-1, -1, -1)\n for a, b in zip(bytearray(data), r):\n sum_ += lshift(a, b)\n return sum_;\n\nDONT DO THIS\nExplanation\nCharacters are essentially bytes: they can be encoded in different ways, but ultimately you can treat them within a given encoding as a sequence of bytes. In order to convert them to a number, we can shift them left 8-bits for their position in the sequence, creating a unique number. r, the range value, is the position in reverse order: the 4th element needs to go left 24 bytes (3*8), etc.\nAfter getting the range and converting our data to 8-bit integers, we can then transform the data and take the sum, giving us our unique identifier. It will be identical byte-wise (or in reverse byte-order) of the original number, but just \"as a number\". This is entirely futile. Don't do it.\nPerformance\nAny performance is going to be outweighed by the fact that you're creating an identical object for no valid reason, but this solution is decently performant.\n1,000 elements takes ~486 microseconds, 10,000 elements takes ~20.5 ms, while 100,000 elements takes about 1.5 seconds. It would work, but you shouldn't do it. This means it's scaled as O(n**2), which is likely due to memory overhead of reallocating the data each time the integer size gets larger. This might take ~4 hours to process all 8e6 elements (14365 seconds, calculated fitting the lower-order data to ax**2+bx+c). Remember, this is all to get the identical byte representation as the original data.\nFutility\nRemember, there are ~1e78 to 1e82 atoms in the entire universe, on current estimates. This is ~2^275. Your value will be able to represent 2^71426504, or about 260,000 times as many bits as you need to represent every atom in the universe. You don't need such a number. You never will.\n", "If there are only ANSII characters. You can use ord() and chr().\nbuilt-in functions\n", "There are several optimizations you can perform. For example, the find method requires searching through your string for the corresponding letter. A dictionary would be faster. Even faster might be (benchmark!) the chr function (if you're not too picky about the letter ordering) and the ord function to reverse the chr. But if you're not picky about ordering, it might be better if you just left-NULL-padded your string and treated it as a big binary number in memory if you don't need to display the value in any particular format.\nYou might get some speedup by iterating over characters instead of character indices. 
If you're using Python 2, a large range will be slow since a list needs to be generated (use xrange instead for Python 2); Python 3 uses a generator, so it's better.\nYour print function is going to slow down output a fair bit, especially if you're outputting to a tty.\nA big number library may also buy you speed-up: Handling big numbers in code\n", "Your alpha.find() function needs to iterate through alpha on each loop.\nYou can probably speed things up by using a dict, as dictionary lookups are O(1):\nalpha = 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ,.?!@()+-=[]/*1234567890^*{}\\'\"$\\\\&#;|%<>:`~_'\n\nalpha_dict = { letter: index for index, letter in enumerate(alpha)}\nprint(alpha.find('$'))\n# 83\nprint(alpha_dict['$'])\n# 83\n\n", "Store your strings in an array of distinct values; i.e. a string table. In your dataset, use a reference number. A reference number of n corresponds to the nth element of the string table array.\n" ]
[ 2, 1, 0, 0, 0 ]
[]
[]
[ "numbers", "python", "python_2.7", "python_3.x" ]
stackoverflow_0044833017_numbers_python_python_2.7_python_3.x.txt
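As a footnote to the answers above (my addition, not from the original answers): in Python 3 this exact bytes-to-integer round trip is built in via int.from_bytes and int.to_bytes, which treat the encoded string as one big base-256 number:

s = 'hello world'

# Interpret the UTF-8 bytes of the string as a single big-endian integer.
n = int.from_bytes(s.encode('utf-8'), 'big')
print(n)  # 126207244316550804821666916

# Convert back: size the byte buffer from the bit length, then decode.
restored = n.to_bytes((n.bit_length() + 7) // 8, 'big').decode('utf-8')
print(restored)  # hello world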
Q: How to remove tuples in a list of tuples when the first value of the tuple is contained in another list? I have a list containing tuples and I would like to remove tuples that contain words in the first position of the tuple based on words from a second list. list_of_tuples = [ ("apple",2), ("banana",54), ("flower", 5), ("apple",4), ("fruit", 3) ] list_of_words = [ "apple", "banana" ] The final result should look like this: [("flower", 5), ("fruit", 3)] A: This code will do the trick: list_of_tuples = [ ("apple", 2), ("banana", 54), ("flower", 5), ("apple", 4), ("fruit", 3) ] list_of_words = [ "apple", "banana" ] final_list_of_tuples = [tup for tup in list_of_tuples if tup[0] not in list_of_words] print(final_list_of_tuples) The one liner technique is called a list comprehension. You can find more information about it here: Python List Comprehensions A: Rather than a complete solution, here is a breakdown of the various operations you can put together to accomplish your task. Hopefully it gives you a feel for the Python building blocks you can use for these types of problems in the future: list_of_tuples = [ ("apple",2), ("banana",54), ("flower", 5), ("apple",4), ("fruit", 3) ] list_of_words = ["apple", "banana"] # demonstrates tuple unpacking in Python word, quantity = list_of_tuples[0] print(word, quantity) # demonstrates how to test against a collection print(word in list_of_words) # demonstrates how to iterate over a list of tuples and unpack for word, quantity in list_of_tuples: print(f"word: {word}, quantity: {quantity}") # demonstrates how to create a new list from an existing list new_list_of_tuples = [] for word, quantity in list_of_tuples: if word != "flower": new_list_of_tuples.append((word, quantity)) print(new_list_of_tuples) Output: apple 2 True word: apple, quantity: 2 word: banana, quantity: 54 word: flower, quantity: 5 word: apple, quantity: 4 word: fruit, quantity: 3 [('apple', 2), ('banana', 54), ('apple', 4), ('fruit', 3)]
How to remove tuples in a list of tuples when the first value of the tuple is contained in another list?
I have a list containing tuples and I would like to remove tuples that contain words in the first position of the tuple based on words from a second list. list_of_tuples = [ ("apple",2), ("banana",54), ("flower", 5), ("apple",4), ("fruit", 3) ] list_of_words = [ "apple", "banana" ] The final result should look like this: [("flower", 5), ("fruit", 3)]
[ "This code will do the trick:\nlist_of_tuples = [\n (\"apple\", 2),\n (\"banana\", 54),\n (\"flower\", 5),\n (\"apple\", 4),\n (\"fruit\", 3)\n]\n\nlist_of_words = [\n \"apple\",\n \"banana\"\n]\n\nfinal_list_of_tuples = [tup for tup in list_of_tuples if tup[0] not in list_of_words]\n\nprint(final_list_of_tuples)\n\nThe one liner technique is called a list comprehension.\nYou can find more information about it here:\nPython List Comprehensions\n", "Rather than a complete solution, here is a breakdown of the various operations you can put together to accomplish your task. Hopefully it gives you a feel for the Python building blocks you can use for these types of problems in the future:\nlist_of_tuples = [\n (\"apple\",2),\n (\"banana\",54), \n (\"flower\", 5), \n (\"apple\",4), \n (\"fruit\", 3)\n]\n\nlist_of_words = [\"apple\", \"banana\"]\n\n# demonstrates tuple unpacking in Python\nword, quantity = list_of_tuples[0]\nprint(word, quantity)\n\n# demonstrates how to test against a collection\nprint(word in list_of_words)\n\n# demonstrates how to iterate over a list of tuples and unpack\nfor word, quantity in list_of_tuples:\n print(f\"word: {fruit}, quantity: {quantity}\")\n\n# demonstrates how to create a new list from an existing list\nnew_list_of_tuples = []\nfor word, quantity in list_of_tuples:\n if word != \"flower\":\n new_list_of_tuples.append((word, quantity))\nprint(new_list_of_tuples)\n\nOutput:\napple 2\nTrue\nword: apple, quantity: 2\nword: apple, quantity: 54\nword: apple, quantity: 5\nword: apple, quantity: 4\nword: apple, quantity: 3\n[('apple', 2), ('banana', 54), ('apple', 4), ('fruit', 3)]\n\n" ]
[ 1, 0 ]
[]
[]
[ "python", "tuples" ]
stackoverflow_0074621520_python_tuples.txt
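A small variant of the accepted list comprehension above (my addition, not from the answers): turning list_of_words into a set makes each membership test O(1) instead of O(n), which matters for large word lists:

list_of_tuples = [("apple", 2), ("banana", 54), ("flower", 5), ("apple", 4), ("fruit", 3)]
words = set(["apple", "banana"])  # set membership checks are O(1)

filtered = [(word, count) for word, count in list_of_tuples if word not in words]
print(filtered)  # [('flower', 5), ('fruit', 3)]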
Q: Can overlapping matches with the same start position be found using regex? I am looking for a regex or a regex flag in python/BigQuery that enables me to find overlapping occurrences. For example, I have the string 1.2.5.6.8.10.12 and I would like to extract: [1., 1.2., 1.2.5., 1.2.5.6., ..., 1.2.5.6.8.10.12] I tried running the python code re.findall("^(\d+(?:\.|$))+", string) and it resulted in ['12'] A: As the regex parser walks down the string, each position gets consumed. To extract substrings with the same starting position, it is necessary to look behind and capture matches towards the start. Capturing overlapping matches needs to be done inside a lookaround so as not to consume the captured parts. Python re does not support lookbehinds of variable length but PyPI regex does. import regex as re res = re.findall(r"(?<=(.*\d(?:\.|$)))", s) See this Python demo at tio.run or a Regex101 demo (captures will be in the first group). In the PyPI regex module there is even an overlapped=True option which avoids capturing inside the lookbehind. Together with (?r), another interesting flag for doing a reverse search, it can also be achieved. res = re.findall(r'(?r).*\d(?:\.|$)', s, overlapped=True)[::-1] The result just needs to be reversed afterwards to receive the desired order: Python demo Using standard re, an idea can be to reverse the string and do capturing inside a lookahead. The needed parts get captured from the reversed string and finally each list item gets reversed again before reversing the entire list. I don't know if this is worth the effort but it seems to work as well. res = [x[::-1] for x in re.findall(r'(?=((?:\.\d|^).*))', s[::-1])][::-1] Another Python demo at tio.run or a Regex101 demo (shows matching on the reversed string). A: Use below (BigQuery) select text, array( select regexp_extract(text, r'((?:[^.]+.){' || i || '})') from unnest(generate_array(1, array_length(split(text, '.')))) i ) as extracted from your_table with output
Can overlapping matches with the same start position be found using regex?
I am looking for a regex or a regex flag in python/BigQuery that enables me to find overlapping occurrences. For example, I have the string 1.2.5.6.8.10.12 and I would like to extract: [1., 1.2., 1.2.5., 1.2.5.6., ..., 1.2.5.6.8.10.12] I tried running the python code re.findall("^(\d+(?:\.|$))+", string) and it resulted in ['12']
[ "While the regex parser walks down the string each position gets consumed. To extract substrings with the same starting position it would be needed to look behind and capture matches towards start. Capturing overlapping matches needs to be done inside a lookaround for not consuming the captured parts. Python re does not support lookbehinds of variable length but PyPI regex does.\nimport regex as re\n\nres = re.findall(r\"(?<=(.*\\d(?:\\.|$)))\", s)\n\nSee this Python demo at tio.run or a Regex101 demo (captures will be in the first group).\nIn PyPI there is even an overlapped=True option which lets avoid to capture inside the lookbehind. Together with (?r) another interesting flag for doing a reverse search it could also be achieved.\nres = re.findall(r'(?r).*\\d(?:\\.|$)', s, overlapped=True)[::-1]\n\nThe result just needs to be reversed afterwards for receiving the desired order: Python demo\n\nUsing standard re an idea can be to reverse the string and do capturing inside a lookahead. The needed parts get captured from the reversed string and finally each list item gets reversed again before reversing the entire list. I don't know if this is worth the effort but it seems to work as well.\nres = [x[::-1] for x in re.findall(r'(?=((?:\\.\\d|^).*))', s[::-1])][::-1]\n\nAnother Python demo at tio.run or a Regex101 demo (shows matching on the reversed string).\n", "Use below (BigQuery)\nselect text, \n array(\n select regexp_extract(text, r'((?:[^.]+.){' || i || '})')\n from unnest(generate_array(1, array_length(split(text, '.')))) i\n ) as extracted\nfrom your_table \n\nwith output\n\n" ]
[ 0, 0 ]
[]
[]
[ "findall", "google_bigquery", "python", "python_3.x", "regex" ]
stackoverflow_0074618335_findall_google_bigquery_python_python_3.x_regex.txt
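If regex is not a hard requirement, the same prefixes can be built with plain string operations (an alternative sketch, not from the answers above):

s = '1.2.5.6.8.10.12'
parts = s.split('.')

# Every prefix except the full string keeps its trailing dot,
# matching the expected output in the question.
prefixes = ['.'.join(parts[:i]) + ('.' if i < len(parts) else '')
            for i in range(1, len(parts) + 1)]
print(prefixes)
# ['1.', '1.2.', '1.2.5.', '1.2.5.6.', '1.2.5.6.8.', '1.2.5.6.8.10.', '1.2.5.6.8.10.12']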
Q: Encrypting and Decrypting with python and nodejs I'm trying to encrypt some content in Python and decrypt it in a nodejs application. I'm struggling to get the two AES implementations to work together though. Here is where I am at. In node: var crypto = require('crypto'); var password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; var input = 'hello world'; var encrypt = function (input, password, callback) { var m = crypto.createHash('md5'); m.update(password) var key = m.digest('hex'); m = crypto.createHash('md5'); m.update(password + key) var iv = m.digest('hex'); // add padding while (input.length % 16 !== 0) { input += ' '; } var data = new Buffer(input, 'utf8').toString('binary'); var cipher = crypto.createCipheriv('aes-256-cbc', key, iv.slice(0,16)); var encrypted = cipher.update(data, 'binary') + cipher.final('binary'); var encoded = new Buffer(encrypted, 'binary').toString('base64'); callback(encoded); }; var decrypt = function (input, password, callback) { // Convert urlsafe base64 to normal base64 var input = input.replace('-', '+').replace('/', '_'); // Convert from base64 to binary string var edata = new Buffer(input, 'base64').toString('binary') // Create key from password var m = crypto.createHash('md5'); m.update(password) var key = m.digest('hex'); // Create iv from password and key m = crypto.createHash('md5'); m.update(password + key) var iv = m.digest('hex'); // Decipher encrypted data var decipher = crypto.createDecipheriv('aes-256-cbc', key, iv.slice(0,16)); var decrypted = decipher.update(edata, 'binary') + decipher.final('binary'); var plaintext = new Buffer(decrypted, 'binary').toString('utf8'); callback(plaintext); }; encrypt(input, password, function (encoded) { console.log(encoded); decrypt(encoded, password, function (output) { console.log(output); }); }); This produces the output: BXSGjDAYKeXlaRXVVJGuREKTPiiXeam8W9e96Nknt3E= hello world In python from Crypto.Cipher import AES from hashlib import md5 import base64 password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' input = 'hello world' def _encrypt(data, nonce, password): m = md5() m.update(password) key = m.hexdigest() m = md5() m.update(password + key) iv = m.hexdigest() # pad to 16 bytes data = data + " " * (16 - len(data) % 16) aes = AES.new(key, AES.MODE_CBC, iv[:16]) encrypted = aes.encrypt(data) return base64.urlsafe_b64encode(encrypted) def _decrypt(edata, nonce, password): edata = base64.urlsafe_b64decode(edata) m = md5() m.update(password) key = m.hexdigest() m = md5() m.update(password + key) iv = m.hexdigest() aes = AES.new(key, AES.MODE_CBC, iv[:16]) return aes.decrypt(edata) output = _encrypt(input, "", password) print(output) plaintext = _decrypt(output, "", password) print(plaintext) This produces the output BXSGjDAYKeXlaRXVVJGuRA== hello world Clearly they are very close, but node seems to be padding the output with something. Any ideas how I can get the two to interoperate? A: OK, I've figured it out, node uses OpenSSL which uses PKCS5 to do padding. PyCrypto doesn't handle the padding so I was doing it myself just add ' ' in both. If I add PKCS5 padding in the python code and remove the padding in the node code, it works. So updated working code. 
Node: var crypto = require('crypto'); var password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; var input = 'hello world'; var encrypt = function (input, password, callback) { var m = crypto.createHash('md5'); m.update(password) var key = m.digest('hex'); m = crypto.createHash('md5'); m.update(password + key) var iv = m.digest('hex'); var data = new Buffer(input, 'utf8').toString('binary'); var cipher = crypto.createCipheriv('aes-256-cbc', key, iv.slice(0,16)); // UPDATE: crypto changed in v0.10 // https://github.com/joyent/node/wiki/Api-changes-between-v0.8-and-v0.10 var nodev = process.version.match(/^v(\d+)\.(\d+)/); var encrypted; if( nodev[1] === '0' && parseInt(nodev[2]) < 10) { encrypted = cipher.update(data, 'binary') + cipher.final('binary'); } else { encrypted = cipher.update(data, 'utf8', 'binary') + cipher.final('binary'); } var encoded = new Buffer(encrypted, 'binary').toString('base64'); callback(encoded); }; var decrypt = function (input, password, callback) { // Convert urlsafe base64 to normal base64 var input = input.replace(/\-/g, '+').replace(/_/g, '/'); // Convert from base64 to binary string var edata = new Buffer(input, 'base64').toString('binary') // Create key from password var m = crypto.createHash('md5'); m.update(password) var key = m.digest('hex'); // Create iv from password and key m = crypto.createHash('md5'); m.update(password + key) var iv = m.digest('hex'); // Decipher encrypted data var decipher = crypto.createDecipheriv('aes-256-cbc', key, iv.slice(0,16)); // UPDATE: crypto changed in v0.10 // https://github.com/joyent/node/wiki/Api-changes-between-v0.8-and-v0.10 var nodev = process.version.match(/^v(\d+)\.(\d+)/); var decrypted, plaintext; if( nodev[1] === '0' && parseInt(nodev[2]) < 10) { decrypted = decipher.update(edata, 'binary') + decipher.final('binary'); plaintext = new Buffer(decrypted, 'binary').toString('utf8'); } else { plaintext = (decipher.update(edata, 'binary', 'utf8') + decipher.final('utf8')); } callback(plaintext); }; encrypt(input, password, function (encoded) { console.log(encoded); decrypt(encoded, password, function (output) { console.log(output); }); }); Python: from Crypto.Cipher import AES from hashlib import md5 import base64 password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' input = 'hello world' BLOCK_SIZE = 16 def pad (data): pad = BLOCK_SIZE - len(data) % BLOCK_SIZE return data + pad * chr(pad) def unpad (padded): pad = ord(chr(padded[-1])) return padded[:-pad] def get_key_iv (password): m = md5() m.update(password.encode('utf-8')) key = m.hexdigest() m = md5() m.update((password + key).encode('utf-8')) iv = m.hexdigest() return [key,iv] def _encrypt(data, password): key,iv = get_key_iv(password) data = pad(data) aes = AES.new(key, AES.MODE_CBC, iv[:16]) encrypted = aes.encrypt(data) return base64.urlsafe_b64encode(encrypted) def _decrypt(edata, password): edata = base64.urlsafe_b64decode(edata) key,iv = get_key_iv(password) aes = AES.new(key, AES.MODE_CBC, iv[:16]) return unpad(aes.decrypt(edata)) output = _encrypt(input, password) print(output) plaintext = _decrypt(output, password) print(plaintext) A: while trying to run the Python script using Python 3.8 I encountered the following error: m.update(password) TypeError: Unicode-objects must be encoded before hashing the password should be : password = b'abcd' I also got the following error : m.update(password + key) TypeError: can't concat str to bytes I was able to fix it by adding the following line after key: key = bytes.fromhex(key_) The python script should work this way : from 
Crypto.Cipher import AES from hashlib import md5 import base64 password = b'abcd' input = 'hello world' BLOCK_SIZE = 16 def pad (data): pad = BLOCK_SIZE - len(data) % BLOCK_SIZE return data + pad * chr(pad) def unpad (padded): pad = ord(chr(padded[-1])) return padded[:-pad] def _encrypt(data, nonce, password): m = md5() m.update(password) key_ = m.hexdigest() key = bytes.fromhex(key_) m = md5() m.update(password + key) iv = m.hexdigest() iv = bytes.fromhex(iv) data = pad(data) aes = AES.new(key, AES.MODE_CBC, iv[:16]) encrypted = aes.encrypt(data.encode('utf-8')) return base64.urlsafe_b64encode(encrypted) def _decrypt(edata, nonce, password): edata = base64.urlsafe_b64decode(edata) m = md5() m.update(password) key = m.hexdigest() key = bytes.fromhex(key) m = md5() m.update(password + key) iv = m.hexdigest() iv = bytes.fromhex(iv) aes = AES.new(key, AES.MODE_CBC, iv[:16]) return unpad(aes.decrypt(edata)) output = _encrypt(input, "", password) print(output) plaintext = _decrypt(output, "", password) print(plaintext) A: Just for any one that is similar to me, who was finding a simple way to do the encryption and decryption for AES in python that is doing the same thing in node.js. The class here supports different bits of AES and both hex and base64 encoding that produces same result in node.js. Also noted that if you are missing the package Crypto, you can simply install it by pip install pycrypto The code for python is as follows: import base64 import hashlib from Crypto.Cipher import AES class AESCrypto(object): def __init__(self, algorithm, password): self.algorithm = filter(lambda x: not x.isdigit(), algorithm).lower() self.bits = int(filter(str.isdigit, algorithm)) self.bs = 16 if not self.algorithm == 'aes': raise Exception('Only AES crypto is supported') if not self.bits % 8 == 0: raise Exception('Bits of crypto must be a multiply of 8.') self.bytes = self.bits / 8 self.password = password self.generateKeyAndIv() def generateKeyAndIv(self): last = '' allBytes = '' maxBytes = self.bytes + self.bs while len(allBytes) < maxBytes: last = hashlib.md5(last + self.password).digest() allBytes += last self.key = allBytes[:self.bytes] self.iv = allBytes[self.bytes:maxBytes] def encrypt(self, raw, outputEncoding): outputEncoding = outputEncoding.lower() raw = self._pad(raw) cipher = AES.new(self.key, AES.MODE_CBC, self.iv) encrypted = cipher.encrypt(raw) if outputEncoding == 'hex': return encrypted.encode('hex') elif outputEncoding == 'base64': return base64.b64encode(encrypted) else: raise Exception('Encoding is not supported.') def decrypt(self, data, inputEncoding): inputEncoding = inputEncoding.lower() if inputEncoding == 'hex': data = ''.join(map(chr, bytearray.fromhex(data))) elif inputEncoding == 'base64': data = base64.b64decode(data) cipher = AES.new(self.key, AES.MODE_CBC, self.iv) return self._unpad(cipher.decrypt(data)) def _pad(self, data): padding = self.bs - len(data) % self.bs return data + padding * chr(padding) @staticmethod def _unpad(data): return data[0:-ord(data[-1])] The following are examples to use the class: Encryption Example: password = 'some_random_password' content = 'content_to_be_encrypted' cipher = AESCrypto('aes192', password) encrypted = cipher.encrypt(content, 'hex') Decryption Example: password = 'some_random_password' content = 'encrypted_content' cipher = AESCrypto('aes192', password) decrypted = cipher.decrypt(content, 'hex') A: Because I spent way too much time on this with Python 3.10.7 and Node.js v18.6.0. 
Here is a working code totally compatible between two languages with examples. Only the secret is needed for getting same values as expected :) Note pycryptodome is needed for Python. Code should be tweaked for supporting different algorithms. const crypto = require('crypto') function get_crypto(secret, encode) { // Create hashed key from password/key let m = crypto.createHash('md5').update(secret) const key = m.digest('hex') m = crypto.createHash('md5').update(secret + key) const iv = m.digest('hex').slice(0, 16) // only in aes-256 return encode ? crypto.createCipheriv('aes-256-cbc', key, iv) : crypto.createDecipheriv('aes-256-cbc', key, iv) } const secret = 'f8abb29f13cb932704badb0de414ab08ca9f6c63' // crypto.randomBytes(20).toString('hex') const value = 'hello world' const data = Buffer.from(value, 'utf8').toString('binary') const cipher = get_crypto(secret, true) const encrypted = Buffer.concat([cipher.update(data, 'utf8'), cipher.final()]).toString('binary') const encoded = Buffer.from(encrypted, 'binary').toString('base64') console.log('encoded:', encoded) const edata = Buffer.from(encoded, 'base64').toString('binary') const decipher = get_crypto(secret, false) const decoded = Buffer.concat([decipher.update(edata, 'binary'), decipher.final()]).toString('utf-8') console.log('decoded:', decoded) # This script needs pycryptodome dependency # pip install pycryptodome from Crypto.Cipher import AES from hashlib import md5 import base64 BLOCK_SIZE = AES.block_size def get_aes(s): m = md5() m.update(s.encode('utf-8')) key = m.hexdigest() m = md5() m.update((s + key).encode('utf-8')) iv = m.hexdigest() return AES.new(key.encode("utf8"), AES.MODE_CBC, iv.encode("utf8")[:BLOCK_SIZE]) # pkcs5 padding def pad(byte_array): pad_len = BLOCK_SIZE - len(byte_array) % BLOCK_SIZE return byte_array + (bytes([pad_len]) * pad_len) # pkcs5 - unpadding def unpad(byte_array): return byte_array[:-ord(byte_array[-1:])] def _encrypt(s, data): data = pad(data.encode("UTF-8")) aes = get_aes(s) encrypted = aes.encrypt(data) return base64.urlsafe_b64encode(encrypted).decode('utf-8') def _decrypt(s, edata): edata = base64.urlsafe_b64decode(edata) aes = get_aes(s) return unpad(aes.decrypt(edata)).decode('utf-8') if __name__ == '__main__': secret = 'f8abb29f13cb932704badb0de414ab08ca9f6c63' value = 'hello world' encoded = _encrypt(secret, value) print('encoded:', encoded) decoded = _decrypt(secret, encoded) print('decoded:', decoded) Help from: Implementing AES/ECB/PKCS5 padding in Python Node.js - Set padding in crypto module Python Encrypting with PyCrypto AES
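The crux of the interop fixes above is PKCS#5/PKCS#7 padding; a minimal standalone sketch of the two helpers operating on bytes (assuming the 16-byte AES block size):

BLOCK_SIZE = 16  # AES block size in bytes

def pkcs7_pad(data: bytes) -> bytes:
    # Pad with 1..16 bytes; every pad byte holds the pad length itself.
    n = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return data + bytes([n]) * n

def pkcs7_unpad(padded: bytes) -> bytes:
    # The last byte says how many pad bytes to strip.
    return padded[:-padded[-1]]

assert pkcs7_unpad(pkcs7_pad(b'hello world')) == b'hello world'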
Encrypting and Decrypting with python and nodejs
I'm trying to encrypt some content in Python and decrypt it in a nodejs application. I'm struggling to get the two AES implementations to work together though. Here is where I am at. In node: var crypto = require('crypto'); var password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; var input = 'hello world'; var encrypt = function (input, password, callback) { var m = crypto.createHash('md5'); m.update(password) var key = m.digest('hex'); m = crypto.createHash('md5'); m.update(password + key) var iv = m.digest('hex'); // add padding while (input.length % 16 !== 0) { input += ' '; } var data = new Buffer(input, 'utf8').toString('binary'); var cipher = crypto.createCipheriv('aes-256-cbc', key, iv.slice(0,16)); var encrypted = cipher.update(data, 'binary') + cipher.final('binary'); var encoded = new Buffer(encrypted, 'binary').toString('base64'); callback(encoded); }; var decrypt = function (input, password, callback) { // Convert urlsafe base64 to normal base64 var input = input.replace('-', '+').replace('/', '_'); // Convert from base64 to binary string var edata = new Buffer(input, 'base64').toString('binary') // Create key from password var m = crypto.createHash('md5'); m.update(password) var key = m.digest('hex'); // Create iv from password and key m = crypto.createHash('md5'); m.update(password + key) var iv = m.digest('hex'); // Decipher encrypted data var decipher = crypto.createDecipheriv('aes-256-cbc', key, iv.slice(0,16)); var decrypted = decipher.update(edata, 'binary') + decipher.final('binary'); var plaintext = new Buffer(decrypted, 'binary').toString('utf8'); callback(plaintext); }; encrypt(input, password, function (encoded) { console.log(encoded); decrypt(encoded, password, function (output) { console.log(output); }); }); This produces the output: BXSGjDAYKeXlaRXVVJGuREKTPiiXeam8W9e96Nknt3E= hello world In python from Crypto.Cipher import AES from hashlib import md5 import base64 password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' input = 'hello world' def _encrypt(data, nonce, password): m = md5() m.update(password) key = m.hexdigest() m = md5() m.update(password + key) iv = m.hexdigest() # pad to 16 bytes data = data + " " * (16 - len(data) % 16) aes = AES.new(key, AES.MODE_CBC, iv[:16]) encrypted = aes.encrypt(data) return base64.urlsafe_b64encode(encrypted) def _decrypt(edata, nonce, password): edata = base64.urlsafe_b64decode(edata) m = md5() m.update(password) key = m.hexdigest() m = md5() m.update(password + key) iv = m.hexdigest() aes = AES.new(key, AES.MODE_CBC, iv[:16]) return aes.decrypt(edata) output = _encrypt(input, "", password) print(output) plaintext = _decrypt(output, "", password) print(plaintext) This produces the output BXSGjDAYKeXlaRXVVJGuRA== hello world Clearly they are very close, but node seems to be padding the output with something. Any ideas how I can get the two to interoperate?
[ "OK, I've figured it out, node uses OpenSSL which uses PKCS5 to do padding. PyCrypto doesn't handle the padding so I was doing it myself just add ' ' in both.\nIf I add PKCS5 padding in the python code and remove the padding in the node code, it works.\nSo updated working code.\nNode:\nvar crypto = require('crypto');\n\nvar password = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';\nvar input = 'hello world';\n\nvar encrypt = function (input, password, callback) {\n var m = crypto.createHash('md5');\n m.update(password)\n var key = m.digest('hex');\n\n m = crypto.createHash('md5');\n m.update(password + key)\n var iv = m.digest('hex');\n\n var data = new Buffer(input, 'utf8').toString('binary');\n\n var cipher = crypto.createCipheriv('aes-256-cbc', key, iv.slice(0,16));\n \n // UPDATE: crypto changed in v0.10\n // https://github.com/joyent/node/wiki/Api-changes-between-v0.8-and-v0.10 \n var nodev = process.version.match(/^v(\\d+)\\.(\\d+)/);\n var encrypted;\n\n if( nodev[1] === '0' && parseInt(nodev[2]) < 10) {\n encrypted = cipher.update(data, 'binary') + cipher.final('binary');\n } else {\n encrypted = cipher.update(data, 'utf8', 'binary') + cipher.final('binary');\n }\n\n var encoded = new Buffer(encrypted, 'binary').toString('base64');\n\n callback(encoded);\n};\n\nvar decrypt = function (input, password, callback) {\n // Convert urlsafe base64 to normal base64\n var input = input.replace(/\\-/g, '+').replace(/_/g, '/');\n // Convert from base64 to binary string\n var edata = new Buffer(input, 'base64').toString('binary')\n \n // Create key from password\n var m = crypto.createHash('md5');\n m.update(password)\n var key = m.digest('hex');\n\n // Create iv from password and key\n m = crypto.createHash('md5');\n m.update(password + key)\n var iv = m.digest('hex');\n\n // Decipher encrypted data\n var decipher = crypto.createDecipheriv('aes-256-cbc', key, iv.slice(0,16));\n\n // UPDATE: crypto changed in v0.10\n // https://github.com/joyent/node/wiki/Api-changes-between-v0.8-and-v0.10 \n var nodev = process.version.match(/^v(\\d+)\\.(\\d+)/);\n var decrypted, plaintext;\n\n if( nodev[1] === '0' && parseInt(nodev[2]) < 10) { \n decrypted = decipher.update(edata, 'binary') + decipher.final('binary'); \n plaintext = new Buffer(decrypted, 'binary').toString('utf8');\n } else {\n plaintext = (decipher.update(edata, 'binary', 'utf8') + decipher.final('utf8'));\n }\n\n callback(plaintext);\n};\n\nencrypt(input, password, function (encoded) {\n console.log(encoded);\n decrypt(encoded, password, function (output) {\n console.log(output);\n });\n});\n\nPython:\nfrom Crypto.Cipher import AES\nfrom hashlib import md5\nimport base64\n\npassword = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'\ninput = 'hello world'\n\nBLOCK_SIZE = 16\n\ndef pad (data):\n pad = BLOCK_SIZE - len(data) % BLOCK_SIZE\n return data + pad * chr(pad)\n\ndef unpad (padded):\n pad = ord(chr(padded[-1]))\n return padded[:-pad]\n\ndef get_key_iv (password):\n m = md5()\n m.update(password.encode('utf-8'))\n key = m.hexdigest()\n\n m = md5()\n m.update((password + key).encode('utf-8'))\n iv = m.hexdigest()\n \n return [key,iv]\n\ndef _encrypt(data, password):\n\n key,iv = get_key_iv(password)\n data = pad(data)\n\n aes = AES.new(key, AES.MODE_CBC, iv[:16])\n\n encrypted = aes.encrypt(data)\n return base64.urlsafe_b64encode(encrypted)\n\ndef _decrypt(edata, password):\n edata = base64.urlsafe_b64decode(edata)\n key,iv = get_key_iv(password)\n\n aes = AES.new(key, AES.MODE_CBC, iv[:16])\n return unpad(aes.decrypt(edata))\n\n\noutput = _encrypt(input, 
password) \nprint(output)\nplaintext = _decrypt(output, password)\nprint(plaintext)\n\n", "While trying to run the Python script using Python 3.8 I encountered the following error:\n m.update(password) \n TypeError: Unicode-objects must be encoded before hashing\n\nThe password should be:\n password = b'abcd'\n\nI also got the following error:\nm.update(password + key) \nTypeError: can't concat str to bytes\n\nI was able to fix it by adding the following line after key:\n key = bytes.fromhex(key_)\n\nThe Python script should work this way:\nfrom Crypto.Cipher import AES\nfrom hashlib import md5\nimport base64\n\n\npassword = b'abcd'\ninput = 'hello world'\n\nBLOCK_SIZE = 16\n\ndef pad (data):\n pad = BLOCK_SIZE - len(data) % BLOCK_SIZE\n return data + pad * chr(pad)\n\ndef unpad (padded):\n pad = ord(chr(padded[-1]))\n return padded[:-pad]\n\ndef _encrypt(data, nonce, password):\n m = md5()\n m.update(password)\n key_ = m.hexdigest()\n key = bytes.fromhex(key_)\n\n m = md5()\n m.update(password + key)\n iv = m.hexdigest()\n iv = bytes.fromhex(iv)\n\n data = pad(data)\n\n aes = AES.new(key, AES.MODE_CBC, iv[:16])\n\n encrypted = aes.encrypt(data.encode('utf-8'))\n return base64.urlsafe_b64encode(encrypted)\n\ndef _decrypt(edata, nonce, password):\n edata = base64.urlsafe_b64decode(edata)\n\n m = md5()\n m.update(password)\n key = m.hexdigest()\n key = bytes.fromhex(key)\n\n m = md5()\n m.update(password + key)\n iv = m.hexdigest()\n iv = bytes.fromhex(iv)\n\n aes = AES.new(key, AES.MODE_CBC, iv[:16])\n return unpad(aes.decrypt(edata))\n\noutput = _encrypt(input, \"\", password) \nprint(output)\nplaintext = _decrypt(output, \"\", password)\nprint(plaintext)\n\n", "Just for anyone who, like me, was looking for a simple way to do AES encryption and decryption in Python that does the same thing in node.js. 
The class here supports different bits of AES and both hex and base64 encoding that produces same result in node.js.\nAlso noted that if you are missing the package Crypto, you can simply install it by\npip install pycrypto\n\nThe code for python is as follows:\nimport base64\nimport hashlib\nfrom Crypto.Cipher import AES\n\nclass AESCrypto(object):\n def __init__(self, algorithm, password):\n self.algorithm = filter(lambda x: not x.isdigit(), algorithm).lower()\n self.bits = int(filter(str.isdigit, algorithm))\n self.bs = 16\n if not self.algorithm == 'aes':\n raise Exception('Only AES crypto is supported')\n if not self.bits % 8 == 0:\n raise Exception('Bits of crypto must be a multiply of 8.')\n self.bytes = self.bits / 8\n self.password = password\n self.generateKeyAndIv()\n\n def generateKeyAndIv(self):\n last = ''\n allBytes = ''\n maxBytes = self.bytes + self.bs\n while len(allBytes) < maxBytes:\n last = hashlib.md5(last + self.password).digest()\n allBytes += last\n self.key = allBytes[:self.bytes]\n self.iv = allBytes[self.bytes:maxBytes]\n\n def encrypt(self, raw, outputEncoding):\n outputEncoding = outputEncoding.lower()\n raw = self._pad(raw)\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n encrypted = cipher.encrypt(raw)\n if outputEncoding == 'hex':\n return encrypted.encode('hex')\n elif outputEncoding == 'base64':\n return base64.b64encode(encrypted)\n else:\n raise Exception('Encoding is not supported.')\n\n def decrypt(self, data, inputEncoding):\n inputEncoding = inputEncoding.lower()\n if inputEncoding == 'hex':\n data = ''.join(map(chr, bytearray.fromhex(data)))\n elif inputEncoding == 'base64':\n data = base64.b64decode(data)\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n return self._unpad(cipher.decrypt(data))\n\n def _pad(self, data):\n padding = self.bs - len(data) % self.bs\n return data + padding * chr(padding)\n\n @staticmethod\n def _unpad(data):\n return data[0:-ord(data[-1])]\n\nThe following are examples to use the class:\nEncryption Example:\npassword = 'some_random_password'\ncontent = 'content_to_be_encrypted'\ncipher = AESCrypto('aes192', password)\nencrypted = cipher.encrypt(content, 'hex')\n\nDecryption Example:\npassword = 'some_random_password'\ncontent = 'encrypted_content'\ncipher = AESCrypto('aes192', password)\ndecrypted = cipher.decrypt(content, 'hex')\n\n", "Because I spent way too much time on this with Python 3.10.7 and Node.js v18.6.0.\nHere is a working code totally compatible between two languages with examples.\nOnly the secret is needed for getting same values as expected :)\nNote pycryptodome is needed for Python. Code should be tweaked for supporting different algorithms.\nconst crypto = require('crypto')\n\nfunction get_crypto(secret, encode) {\n // Create hashed key from password/key\n let m = crypto.createHash('md5').update(secret)\n const key = m.digest('hex')\n m = crypto.createHash('md5').update(secret + key)\n const iv = m.digest('hex').slice(0, 16) // only in aes-256\n\n return encode\n ? 
crypto.createCipheriv('aes-256-cbc', key, iv)\n : crypto.createDecipheriv('aes-256-cbc', key, iv)\n}\n\nconst secret = 'f8abb29f13cb932704badb0de414ab08ca9f6c63' // crypto.randomBytes(20).toString('hex')\n\nconst value = 'hello world'\nconst data = Buffer.from(value, 'utf8').toString('binary')\nconst cipher = get_crypto(secret, true)\nconst encrypted = Buffer.concat([cipher.update(data, 'utf8'), cipher.final()]).toString('binary')\nconst encoded = Buffer.from(encrypted, 'binary').toString('base64')\nconsole.log('encoded:', encoded)\n\nconst edata = Buffer.from(encoded, 'base64').toString('binary')\nconst decipher = get_crypto(secret, false)\nconst decoded = Buffer.concat([decipher.update(edata, 'binary'), decipher.final()]).toString('utf-8')\nconsole.log('decoded:', decoded)\n\n# This script needs pycryptodome dependency\n# pip install pycryptodome\n\nfrom Crypto.Cipher import AES\nfrom hashlib import md5\nimport base64\n\nBLOCK_SIZE = AES.block_size\n\n\ndef get_aes(s):\n m = md5()\n m.update(s.encode('utf-8'))\n key = m.hexdigest()\n m = md5()\n m.update((s + key).encode('utf-8'))\n iv = m.hexdigest()\n\n return AES.new(key.encode(\"utf8\"), AES.MODE_CBC, iv.encode(\"utf8\")[:BLOCK_SIZE])\n\n\n# pkcs5 padding\ndef pad(byte_array):\n pad_len = BLOCK_SIZE - len(byte_array) % BLOCK_SIZE\n return byte_array + (bytes([pad_len]) * pad_len)\n\n\n# pkcs5 - unpadding\ndef unpad(byte_array):\n return byte_array[:-ord(byte_array[-1:])]\n\n\ndef _encrypt(s, data):\n data = pad(data.encode(\"UTF-8\"))\n aes = get_aes(s)\n encrypted = aes.encrypt(data)\n return base64.urlsafe_b64encode(encrypted).decode('utf-8')\n\n\ndef _decrypt(s, edata):\n edata = base64.urlsafe_b64decode(edata)\n aes = get_aes(s)\n return unpad(aes.decrypt(edata)).decode('utf-8')\n\n\nif __name__ == '__main__':\n secret = 'f8abb29f13cb932704badb0de414ab08ca9f6c63'\n value = 'hello world'\n encoded = _encrypt(secret, value)\n print('encoded:', encoded)\n decoded = _decrypt(secret, encoded)\n print('decoded:', decoded)\n\nHelp from:\n\nImplementing AES/ECB/PKCS5 padding in Python\nNode.js - Set padding in crypto module\nPython Encrypting with PyCrypto AES\n\n" ]
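As a follow-up to the answers above: recent pycryptodome releases ship PKCS#7 pad/unpad helpers, so the hand-rolled padding functions can be dropped. Below is a minimal sketch reusing the same MD5-based key/IV derivation the answers use (kept only so it should interoperate with the node snippets above; a real application should derive keys with a proper KDF):

from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from hashlib import md5
import base64

def _derive_key_iv(password: bytes):
    # Hex digest of the password is the 32-byte key (AES-256);
    # hex digest of password + key yields the 16-byte IV.
    key = md5(password).hexdigest().encode()
    iv = md5(password + key).hexdigest().encode()[:16]
    return key, iv

def encrypt(data: bytes, password: bytes) -> bytes:
    key, iv = _derive_key_iv(password)
    aes = AES.new(key, AES.MODE_CBC, iv)
    return base64.urlsafe_b64encode(aes.encrypt(pad(data, AES.block_size)))

def decrypt(edata: bytes, password: bytes) -> bytes:
    key, iv = _derive_key_iv(password)
    aes = AES.new(key, AES.MODE_CBC, iv)
    return unpad(aes.decrypt(base64.urlsafe_b64decode(edata)), AES.block_size)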
[ 27, 3, 0, 0 ]
[]
[]
[ "aes", "encryption", "node.js", "python" ]
stackoverflow_0010548973_aes_encryption_node.js_python.txt
Q: Extract part of string based on a template in Python I'd like to use Python to read in a list of directories and store data in variables based on a template such as /home/user/Music/%artist%/[%year%] %album%. An example would be: artist, year, album = None, None, None template = "/home/user/Music/%artist%/[%year%] %album%" path = "/home/user/Music/3 Doors Down/[2002] Away From The Sun" if text == "%artist%": artist = key if text == "%year%": year = key if text == "%album%": album = key print(artist) # 3 Doors Down print(year) # 2002 print(album) # Away From The Sun I can do the reverse easily enough with str.replace("%artist%", artist) but how can I extract the data? A: If your folder structure template is reliable, the following should work without the need for regular expressions. path = "/home/user/Music/3 Doors Down/[2002] Away From The Sun" path_parts = path.split("/") # divide up the path into an array by slashes print(path_parts) artist = path_parts[4] # get element of array at index 4 year = path_parts[5][1:5] # get characters at indices 1-4 (slice [1:5]) of the element of array at index 5 album = path_parts[5][7:] print(artist) # 3 Doors Down print(year) # 2002 print(album) # Away From The Sun # to put the path back together again using an F-string (No need for str.replace) reconstructed_path = f"/home/user/Music/{artist}/[{year}] {album}" print(reconstructed_path) output: ['', 'home', 'user', 'Music', '3 Doors Down', '[2002] Away From The Sun'] 3 Doors Down 2002 Away From The Sun /home/user/Music/3 Doors Down/[2002] Away From The Sun A: The following works for me: from difflib import SequenceMatcher def extract(template, text): seq = SequenceMatcher(None, template, text, True) return [text[c:d] for tag, a, b, c, d in seq.get_opcodes() if tag == 'replace'] template = "home/user/Music/%/[%] %" path = "home/user/Music/3 Doors Down/[2002] Away From The Sun" artist, year, album = extract(template, path) print(artist) print(year) print(album) Output: 3 Doors Down 2002 Away From The Sun Each template placeholder can be any single character as long as the character is not present in the value to be returned.
Extract part of string based on a template in Python
I'd like to use Python to read in a list of directories and store data in variables based on a template such as /home/user/Music/%artist%/[%year%] %album%. An example would be: artist, year, album = None, None, None template = "/home/user/Music/%artist%/[%year%] %album%" path = "/home/user/Music/3 Doors Down/[2002] Away From The Sun" if text == "%artist%": artist = key if text == "%year%": year = key if text == "%album%": album = key print(artist) # 3 Doors Down print(year) # 2002 print(album) # Away From The Sun I can do the reverse easily enough with str.replace("%artist%", artist) but how can I extract the data?
[ "If your folder structure template is reliable the following should work without the need for regular expressions.\npath = \"/home/user/Music/3 Doors Down/[2002] Away From The Sun\"\n\npath_parts = path.split(\"/\") # divide up the path into array by slashes\n\nprint(path_parts) \n\nartist = path_parts[4] # get element of array at index 4\n\nyear = path_parts[5][1:5] # get characters at index 1-5 for the element of array at index 5\n\nalbum = path_parts[5][7:]\n\nprint(artist)\n# 3 Doors Down\n\nprint(year)\n# 2002\n \nprint(album)\n# Away From The Sun\n \n# to put the path back together again using an F-string (No need for str.replace)\nreconstructed_path = f\"/home/user/Music/{artist}/[{year}] {album}\"\n \nprint(reconstructed_path)\n\noutput:\n['', 'home', 'user', 'Music', '3 Doors Down', '[2002] Away From The Sun']\n3 Doors Down\n2002\nAway From The Sun\n/home/user/Music/3 Doors Down/[2002] Away From The Sun\n\n", "The following works for me:\nfrom difflib import SequenceMatcher\n\ndef extract(template, text):\n seq = SequenceMatcher(None, template, text, True)\n return [text[c:d] for tag, a, b, c, d in seq.get_opcodes() if tag == 'replace']\n\ntemplate = \"home/user/Music/%/[%] %\"\npath = \"home/user/Music/3 Doors Down/[2002] Away From The Sun\"\n\nartist, year, album = extract(template, path)\n\nprint(artist)\nprint(year)\nprint(album)\n\nOutput:\n3 Doors Down\n2002\nAway From The Sun\n\nEach template placeholder can be any single character as long as the character is not present in the value to be returned.\n" ]
[ 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074023028_python.txt
Q: How To Display Popup Message in Python Currently have two functions, one to display for a win and one for a loss. I think I have it coded to where a popup occurs with a message and a button, but not exactly how I want it to be. I want the pop up to come up in the center of the screen, make the button larger, as well as the popup. Any help is appreciated. Thanks import tkinter as tk # popup window from tkinter import ttk # popup window def animate_win(): reg_font = ("Verdana", 22) msg = "You WON!" popup = tk.Tk() popup.wm_title("Result") popup.geometry("500x500") text = ttk.Label(popup, text=msg, font=reg_font) text.pack(side="top") b1 = ttk.Button(popup, text=":)", command=popup.destroy) b1.pack() popup.mainloop() def animate_loss(): reg_font = ("Verdana", 22) msg = "You LOST!" popup = tk.Tk() popup.wm_title("Result") popup.geometry("500x500") text = ttk.Label(popup, text=msg, font=reg_font) text.pack(side="top") b1 = ttk.Button(popup, text=":(", command=popup.destroy) b1.pack() popup.mainloop() A: The following code should resize your popup window and the button. I suppose you had problems with defining height and width because of imports done as follows: import tkinter as tk # popup window from tkinter import ttk # popup window In this code snippet the second import is redundant, and you should probably avoid it in your code as well. Check the following discussion: Python Tkitner : unknown option "-height". Can't change the size of button import tkinter as tk # popup window def animate_win(): reg_font = ("Verdana", 22) msg = "You WON!" popup = tk.Tk() popup.minsize(800, 600) # popup.wm_title("Result") popup.geometry("500x500") text = tk.Label(popup, text=msg, font=reg_font) text.pack(side="top") b1 = tk.Button(popup, text=":)", command=popup.destroy) b1.config(height = 20, width = 150) # b1.pack() popup.mainloop() def animate_loss(): reg_font = ("Verdana", 22) msg = "You LOST!" popup = tk.Tk() popup.minsize(800, 600) # popup.wm_title("Result") popup.geometry("500x500") text = tk.Label(popup, text=msg, font=reg_font) text.pack(side="top") b1 = tk.Button(popup, text=":(", command=popup.destroy) b1.config(height = 20, width = 150) # b1.pack() popup.mainloop() With this error out of the way, resizing the popup window and button is simple; the only thing left to do is centering the window, for which I would direct you to the following discussion: Tkinter Make My Pop Up Window in Middle of the Screen
How To Display Popup Message in Python
Currently have two functions, one to display for a win and one for a loss. I think I have it coded to where a popup occurs with a message and a button, but not exactly how I want it to be. I want the pop up to come up in the center of the screen, make the button larger, as well as the popup. Any help is appreciated. Thanks import tkinter as tk # popup window from tkinter import ttk # popup window def animate_win(): reg_font = ("Verdana", 22) msg = "You WON!" popup = tk.Tk() popup.wm_title("Result") popup.geometry("500x500") text = ttk.Label(popup, text=msg, font=reg_font) text.pack(side="top") b1 = ttk.Button(popup, text=":)", command=popup.destroy) b1.pack() popup.mainloop() def animate_loss(): reg_font = ("Verdana", 22) msg = "You LOST!" popup = tk.Tk() popup.wm_title("Result") popup.geometry("500x500") text = ttk.Label(popup, text=msg, font=reg_font) text.pack(side="top") b1 = ttk.Button(popup, text=":(", command=popup.destroy) b1.pack() popup.mainloop()
[ "The following code should resize your popup window and the button, I suppose you had problems with defining height and width beacuse of imports done as following:\nimport tkinter as tk # popup window\nfrom tkinter import ttk # popup window\n\nIn this code snippet second import is redundant and you should probbably avoid the second import in your code aswell. Check the following discusion:\nPython Tkitner : unknown option \"-height\". Can't change the size of button\nimport tkinter as tk # popup window\n\n\n\ndef animate_win():\n reg_font = (\"Verdana\", 22)\n msg = \"You WON!\"\n popup = tk.Tk()\n \n popup.minsize(800, 600) #\n \n popup.wm_title(\"Result\")\n popup.geometry(\"500x500\")\n text = tk.Label(popup, text=msg, font=reg_font)\n text.pack(side=\"top\")\n b1 = tk.Button(popup, text=\":)\", command=popup.destroy)\n \n b1.config(height = 20, width = 150) #\n \n \n b1.pack()\n popup.mainloop()\n\ndef animate_loss():\n reg_font = (\"Verdana\", 22)\n msg = \"You LOST!\"\n popup = tk.Tk()\n\n popup.minsize(800, 600) #\n \n popup.wm_title(\"Result\")\n popup.geometry(\"500x500\")\n text = tk.Label(popup, text=msg, font=reg_font)\n text.pack(side=\"top\")\n b1 = tk.Button(popup, text=\":(\", command=popup.destroy)\n \n b1.config(height = 20, width = 150) #\n \n b1.pack()\n popup.mainloop()\n\nWith this error out of the way, resizing the popup window and button is simple, only thing left to do is centering the window, for which I would direct you to the following discussion:\nTkinter Make My Pop Up Window in Middle of the Screen\n" ]
[ 0 ]
[]
[]
[ "python", "tkinter", "user_interface" ]
stackoverflow_0074621603_python_tkinter_user_interface.txt
Q: Why does Anaconda install pytorch cpuonly when I install cuda? I have created a Python 3.7 conda virtual environment and installed the following packages using this command: conda install pytorch torchvision torchaudio cudatoolkit=11.3 matplotlib scipy opencv -c pytorch They install fine, but then when I come to run my program I get the following error which suggests that a CUDA enabled device is not found: raise RuntimeError('Attempting to deserialize object on a CUDA ' RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU. I have an NVIDIA RTX 3060ti GPU, which as far as I am aware is cuda enabled, but whenever I go into the Python interactive shell within my conda environment I get False when evaluating torch.cuda.is_available() suggesting that perhaps CUDA is not installed properly or is not found. When I then perform a conda list to view my installed packages: # packages in environment at /home/user/anaconda3/envs/FGVC: # # Name Version Build Channel _libgcc_mutex 0.1 main _openmp_mutex 4.5 1_gnu blas 1.0 mkl brotli 1.0.9 he6710b0_2 bzip2 1.0.8 h7b6447c_0 ca-certificates 2021.10.26 h06a4308_2 cairo 1.16.0 hf32fb01_1 certifi 2021.10.8 py37h06a4308_2 cpuonly 1.0 0 pytorch cudatoolkit 11.3.1 h2bc3f7f_2 cycler 0.11.0 pyhd3eb1b0_0 dbus 1.13.18 hb2f20db_0 expat 2.4.4 h295c915_0 ffmpeg 4.0 hcdf2ecd_0 fontconfig 2.13.1 h6c09931_0 fonttools 4.25.0 pyhd3eb1b0_0 freeglut 3.0.0 hf484d3e_5 freetype 2.11.0 h70c0345_0 giflib 5.2.1 h7b6447c_0 glib 2.69.1 h4ff587b_1 graphite2 1.3.14 h23475e2_0 gst-plugins-base 1.14.0 h8213a91_2 gstreamer 1.14.0 h28cd5cc_2 harfbuzz 1.8.8 hffaf4a1_0 hdf5 1.10.2 hba1933b_1 icu 58.2 he6710b0_3 imageio 2.16.0 pypi_0 pypi imageio-ffmpeg 0.4.5 pypi_0 pypi imutils 0.5.4 pypi_0 pypi intel-openmp 2021.4.0 h06a4308_3561 jasper 2.0.14 hd8c5072_2 jpeg 9d h7f8727e_0 kiwisolver 1.3.2 py37h295c915_0 lcms2 2.12 h3be6417_0 ld_impl_linux-64 2.35.1 h7274673_9 libffi 3.3 he6710b0_2 libgcc-ng 9.3.0 h5101ec6_17 libgfortran-ng 7.5.0 ha8ba4b0_17 libgfortran4 7.5.0 ha8ba4b0_17 libglu 9.0.0 hf484d3e_1 libgomp 9.3.0 h5101ec6_17 libopencv 3.4.2 hb342d67_1 libopus 1.3.1 h7b6447c_0 libpng 1.6.37 hbc83047_0 libstdcxx-ng 9.3.0 hd4cf53a_17 libtiff 4.2.0 h85742a9_0 libuuid 1.0.3 h7f8727e_2 libuv 1.40.0 h7b6447c_0 libvpx 1.7.0 h439df22_0 libwebp 1.2.0 h89dd481_0 libwebp-base 1.2.0 h27cfd23_0 libxcb 1.14 h7b6447c_0 libxml2 2.9.12 h03d6c58_0 lz4-c 1.9.3 h295c915_1 matplotlib 3.5.0 py37h06a4308_0 matplotlib-base 3.5.0 py37h3ed280b_0 mkl 2021.4.0 h06a4308_640 mkl-service 2.4.0 py37h7f8727e_0 mkl_fft 1.3.1 py37hd3c417c_0 mkl_random 1.2.2 py37h51133e4_0 munkres 1.1.4 py_0 ncurses 6.3 h7f8727e_2 networkx 2.6.3 pypi_0 pypi ninja 1.10.2 py37hd09550d_3 numpy 1.21.2 py37h20f2e39_0 numpy-base 1.21.2 py37h79a1101_0 olefile 0.46 py37_0 opencv 3.4.2 py37h6fd60c2_1 openssl 1.1.1m h7f8727e_0 packaging 21.3 pyhd3eb1b0_0 pcre 8.45 h295c915_0 pillow 8.4.0 py37h5aabda8_0 pip 21.2.2 py37h06a4308_0 pixman 0.40.0 h7f8727e_1 py-opencv 3.4.2 py37hb342d67_1 pyparsing 3.0.4 pyhd3eb1b0_0 pyqt 5.9.2 py37h05f1152_2 python 3.7.11 h12debd9_0 python-dateutil 2.8.2 pyhd3eb1b0_0 pytorch 1.7.0 py3.7_cpu_0 [cpuonly] pytorch pywavelets 1.2.0 pypi_0 pypi qt 5.9.7 h5867ecd_1 readline 8.1.2 h7f8727e_1 scikit-image 0.19.1 pypi_0 pypi scipy 1.7.3 py37hc147768_0 setuptools 58.0.4 py37h06a4308_0 sip 4.19.8 py37hf484d3e_0 six 1.16.0 pyhd3eb1b0_1 sqlite 3.37.2 
hc218d9a_0 tifffile 2021.11.2 pypi_0 pypi tk 8.6.11 h1ccaba5_0 torchaudio 0.7.0 py37 pytorch torchvision 0.8.1 py37_cpu [cpuonly] pytorch tornado 6.1 py37h27cfd23_0 typing_extensions 3.10.0.2 pyh06a4308_0 wheel 0.37.1 pyhd3eb1b0_0 xz 5.2.5 h7b6447c_0 zlib 1.2.11 h7f8727e_4 zstd 1.4.9 haebb681_0 There seems to be a lot of things saying cpuonly, but I am not sure how they came about, since I did not install them. I am running Ubuntu version 20.04.4 LTS A: I ran into a similar problem when I tried to install Pytorch with CUDA 11.1. Although the anaconda site explicitly lists that a pre-built version of Pytorch with CUDA 11.1 is available, conda still tries to install the cpu-only version. After a lot of trial and error, I realized that the packages torchvision torchaudio are the root cause of the problem. So installing just PyTorch would fix this: conda install pytorch cudatoolkit=11.1 -c pytorch -c nvidia A: I believe I had the following things wrong that prevented me from using Cuda. Despite having cuda installed, the nvcc --version command indicated that Cuda was not installed and so what I did was add it to the path using this answer. Despite doing that and deleting my original conda environment and using the conda install pytorch torchvision torchaudio cudatoolkit=11.3 matplotlib scipy opencv -c pytorch command again I still got False when evaluating torch.cuda.is_available(). I then used this command conda install pytorch torchvision torchaudio cudatoolkit=10.2 matplotlib scipy opencv -c pytorch changing cudatoolkit from version 11.3 to version 10.2 and then it worked! Now torch.cuda.is_available() evaluates to True Unfortunately, Cuda version 10.2 was incompatible with my RTX 3060 gpu (and I'm assuming it is not compatible with all RTX 3000 cards). Cuda version 11.0 was giving me errors and Cuda version 11.3 only installs the CPU-only versions for some reason. Cuda version 11.1 worked perfectly though! This is the command I used to get it to work in the end: pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html A: Installing jupyter inside conda's virtual environment solved my issue. I was having the same issue: even though pytorch with cuda was installed and !nvidia-smi showed the GPU, jupyter notebook was showing only the cpu. From the command line torch was finding CUDA, but from jupyter it was not, so I just ran pip install jupyter in conda's virtual environment and after that the problem was solved. A: You can ask conda to install a specific build of your required package. pytorch builds supporting cuda have the phrase cuda somewhere in their build string, so you can ask conda to match that spec. For more information, have a look at conda's package match spec. $ conda install pytorch=*=*cuda* cudatoolkit -c pytorch
Why does Anaconda install pytorch cpuonly when I install cuda?
I have created a Python 3.7 conda virtual environment and installed the following packages using this command: conda install pytorch torchvision torchaudio cudatoolkit=11.3 matplotlib scipy opencv -c pytorch They install fine, but then when I come to run my program I get the following error which suggests that a CUDA enabled device is not found: raise RuntimeError('Attempting to deserialize object on a CUDA ' RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU. I have an NVIDIA RTX 3060ti GPU, which as far as I am aware is cuda enabled, but whenever I go into the Python interactive shell within my conda environment I get False when evaluating torch.cuda.is_available() suggesting that perhaps CUDA is not installed properly or is not found. When I then perform a conda list to view my installed packages: # packages in environment at /home/user/anaconda3/envs/FGVC: # # Name Version Build Channel _libgcc_mutex 0.1 main _openmp_mutex 4.5 1_gnu blas 1.0 mkl brotli 1.0.9 he6710b0_2 bzip2 1.0.8 h7b6447c_0 ca-certificates 2021.10.26 h06a4308_2 cairo 1.16.0 hf32fb01_1 certifi 2021.10.8 py37h06a4308_2 cpuonly 1.0 0 pytorch cudatoolkit 11.3.1 h2bc3f7f_2 cycler 0.11.0 pyhd3eb1b0_0 dbus 1.13.18 hb2f20db_0 expat 2.4.4 h295c915_0 ffmpeg 4.0 hcdf2ecd_0 fontconfig 2.13.1 h6c09931_0 fonttools 4.25.0 pyhd3eb1b0_0 freeglut 3.0.0 hf484d3e_5 freetype 2.11.0 h70c0345_0 giflib 5.2.1 h7b6447c_0 glib 2.69.1 h4ff587b_1 graphite2 1.3.14 h23475e2_0 gst-plugins-base 1.14.0 h8213a91_2 gstreamer 1.14.0 h28cd5cc_2 harfbuzz 1.8.8 hffaf4a1_0 hdf5 1.10.2 hba1933b_1 icu 58.2 he6710b0_3 imageio 2.16.0 pypi_0 pypi imageio-ffmpeg 0.4.5 pypi_0 pypi imutils 0.5.4 pypi_0 pypi intel-openmp 2021.4.0 h06a4308_3561 jasper 2.0.14 hd8c5072_2 jpeg 9d h7f8727e_0 kiwisolver 1.3.2 py37h295c915_0 lcms2 2.12 h3be6417_0 ld_impl_linux-64 2.35.1 h7274673_9 libffi 3.3 he6710b0_2 libgcc-ng 9.3.0 h5101ec6_17 libgfortran-ng 7.5.0 ha8ba4b0_17 libgfortran4 7.5.0 ha8ba4b0_17 libglu 9.0.0 hf484d3e_1 libgomp 9.3.0 h5101ec6_17 libopencv 3.4.2 hb342d67_1 libopus 1.3.1 h7b6447c_0 libpng 1.6.37 hbc83047_0 libstdcxx-ng 9.3.0 hd4cf53a_17 libtiff 4.2.0 h85742a9_0 libuuid 1.0.3 h7f8727e_2 libuv 1.40.0 h7b6447c_0 libvpx 1.7.0 h439df22_0 libwebp 1.2.0 h89dd481_0 libwebp-base 1.2.0 h27cfd23_0 libxcb 1.14 h7b6447c_0 libxml2 2.9.12 h03d6c58_0 lz4-c 1.9.3 h295c915_1 matplotlib 3.5.0 py37h06a4308_0 matplotlib-base 3.5.0 py37h3ed280b_0 mkl 2021.4.0 h06a4308_640 mkl-service 2.4.0 py37h7f8727e_0 mkl_fft 1.3.1 py37hd3c417c_0 mkl_random 1.2.2 py37h51133e4_0 munkres 1.1.4 py_0 ncurses 6.3 h7f8727e_2 networkx 2.6.3 pypi_0 pypi ninja 1.10.2 py37hd09550d_3 numpy 1.21.2 py37h20f2e39_0 numpy-base 1.21.2 py37h79a1101_0 olefile 0.46 py37_0 opencv 3.4.2 py37h6fd60c2_1 openssl 1.1.1m h7f8727e_0 packaging 21.3 pyhd3eb1b0_0 pcre 8.45 h295c915_0 pillow 8.4.0 py37h5aabda8_0 pip 21.2.2 py37h06a4308_0 pixman 0.40.0 h7f8727e_1 py-opencv 3.4.2 py37hb342d67_1 pyparsing 3.0.4 pyhd3eb1b0_0 pyqt 5.9.2 py37h05f1152_2 python 3.7.11 h12debd9_0 python-dateutil 2.8.2 pyhd3eb1b0_0 pytorch 1.7.0 py3.7_cpu_0 [cpuonly] pytorch pywavelets 1.2.0 pypi_0 pypi qt 5.9.7 h5867ecd_1 readline 8.1.2 h7f8727e_1 scikit-image 0.19.1 pypi_0 pypi scipy 1.7.3 py37hc147768_0 setuptools 58.0.4 py37h06a4308_0 sip 4.19.8 py37hf484d3e_0 six 1.16.0 pyhd3eb1b0_1 sqlite 3.37.2 hc218d9a_0 tifffile 2021.11.2 pypi_0 pypi tk 8.6.11 h1ccaba5_0 
torchaudio 0.7.0 py37 pytorch torchvision 0.8.1 py37_cpu [cpuonly] pytorch tornado 6.1 py37h27cfd23_0 typing_extensions 3.10.0.2 pyh06a4308_0 wheel 0.37.1 pyhd3eb1b0_0 xz 5.2.5 h7b6447c_0 zlib 1.2.11 h7f8727e_4 zstd 1.4.9 haebb681_0 There seems to be a lot of things saying cpuonly, but I am not sure how they came about, since I did not install them. I am running Ubuntu version 20.04.4 LTS
[ "I ran into a similar problem when I tried to install Pytorch with CUDA 11.1. Although the anaconda site explicitly lists a pre-built version of Pytorch with CUDA 11.1 is available, conda still tries to install the cpu-only version. After a lot of trial-and-fail, I realize that the packages torchvision torchaudio are the root cause of the problem. So installing just PyTorch would fix this:\nconda install pytorch cudatoolkit=11.1 -c pytorch -c nvidia\n\n", "I believe I had the following things wrong that prevented me from using Cuda. Despite having cuda installed the nvcc --version command indicated that Cuda was not installed and so what I did was add it to the path using this answer.\nDespite doing that and deleting my original conda environment and using the conda install pytorch torchvision torchaudio cudatoolkit=11.3 matplotlib scipy opencv -c pytorch command again I still got False when evaluating torch.cuda.is_available().\nI then used this command conda install pytorch torchvision torchaudio cudatoolkit=10.2 matplotlib scipy opencv -c pytorch changing cudatoolkit from verison 11.3 to version 10.2 and then it worked!\nNow torch.cuda.is_available() evaluates to True\nUnfortunately, Cuda version 10.2 was incompatible with my RTX 3060 gpu (and I'm assuming it is not compatible with all RTX 3000 cards). Cuda version 11.0 was giving me errors and Cuda version 11.3 only installs the CPU only versions for some reason. Cuda version 11.1 worked perfectly though!\nThis is the command I used to get it to work in the end:\npip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html\n", "Installing jupyter inside conda's virtual environment solve my issue. I was having the same issue, even pytorch with cuda is installed and !nvidia-smi showing GPU , but while trying to access jupyter notebook , it was showing only cpu.\nWhile I was trying from command line torch is finding CUDA but from jupyter is not showing, So I just pip install jupyter on virtual environment of conda and after that problem is solved .\n", "You can ask conda to install a specific build of your required package.pytorch builds supporting cuda have the phrase cuda somewhere in their build string, so you can ask conda to match that spec. For more information, have a look at conda's package match spec.\n$ conda install pytorch=*=*cuda* cudatoolkit -c pytorch\n\n" ]
[ 5, 1, 0, 0 ]
[]
[]
[ "anaconda", "conda", "python", "pytorch" ]
stackoverflow_0071162459_anaconda_conda_python_pytorch.txt
Q: Step Counter Python Lab I'm trying to solve this programming problem in Python: A pedometer treats walking 1 step as walking 2.5 feet. Define a function named feet_to_steps that takes a float as a parameter, representing the number of feet walked, and returns an integer that represents the number of steps walked. Then, write a main program that reads the number of feet walked as an input, calls function feet_to_steps() with the input as an argument, and outputs the number of steps. Use floating-point arithmetic to perform the conversion. Ex: If the input is: 150.5 the output is: 60 This is my code so far: def feet_to_steps(user_feet): steps_walked = user_feet / 2.5 return steps_walked if __name__ == '__main__': input_feet = float(input()) steps_walked = feet_to_steps(input_feet) print(int(steps_walked)) Two of my test cases passed where the input was 150.5 with an output of 60, and another with an input of 10000 with an output of 4000. The other two failed saying this: feet_to_steps(11) incorrectly returned 4.4 feet_to_steps(79.25) incorrectly returned 31.7 I know it's the second part of the prompt that is messing me up, but I can't figure out how to make the two different conditions work together with the same function. I also don't understand the if__name__=='main': part at all either. It was just included in the default template. Sorry for the long post, just wanted to be as descriptive as possible. Any help would be appreciated. A: Your code is mostly fine, however, you are converting the answer from a float to an int too late in your code (outside of the feet_to_steps function). Try replacing return steps_walked with return int(steps_walked) Lastly, if__name__=='main': is a way of telling Python to only run the next block of code if it is being run directly (i.e. not imported as a module). This makes it so if you were to create a second .py file and import this file, the code within the if__name__=='main': block would not run, but you would still be able to access and use the functions defined in the file. A: Your primary issue is that, by dividing by 2.5, the result is converted to a floating-point (decimal) value. We want the value to be an integer value, so instead of using / to do division, we can use // for floor division, which will produce an integer value from the operation, discarding any fractional value. def feet_to_steps(user_feet): steps_walked = user_feet // 2.5 return steps_walked if __name__ == '__main__': input_feet = float(input()) steps_walked = feet_to_steps(input_feet) print(int(steps_walked)) A: Sorry for being a bit late, but I wanted to provide some help here in case anyone else gets stuck on this. Now, I do not know what the correct inputs would be for the two that you missed because there are different types of division in Python that could dictate more than one "correct" answer based on your guidelines and/or expected output. However, because someone already explained whole/floor division (using // instead of /) and the directions say to use "floating point arithmetic", I will provide another means of rounding off a float into an integer. To start, I want to explain what happens in your example that may be causing the issues. When you data cast something into an integer such as the following: print(int(steps_walked)) You are actually rounding down to the nearest integer. So let's say your function produced a return value of 7.9999. If you data cast it like in the example above, you will get 7. 
Using whole or floor division will do the same thing, with the only difference in output being that you will have a trailing ".0" because you are using whole/floor division with floats. The reason your lab reported your floating-point values as incorrect may be due to how the lab tests your code. It may strictly be checking the function and may not even be referring to your print statement at the bottom. You can check this by data casting the return statement in the function rather than the print statement in main. Instead, what you may want to use is the round() function. It rounds floating points up if the decimal point is >= .5 or down if < .5. See below: def feet_to_steps(user_feet): steps_walked = user_feet / 2.5 return round(steps_walked) if __name__ == '__main__': input_feet = float(input()) steps_walked = feet_to_steps(input_feet) print(steps_walked) If you use the round() function, you will notice that you get the same output with the first three inputs (150.5, 10000, and 11) if you data cast or use whole/floor division. However, the difference comes up with your last input (79.25). When your division is evaluated to 31.7, using round() will push your quotient to 32 because of the .7. However, data casting and whole/floor division will push your quotient down to 31 and 31.0 respectively. round() can take one or two arguments. If you use one argument such as the following: round(31.789) It will round to the nearest whole number. However, if you specify what place you want it rounded to in a second argument such as this: round(31.789, 1) You will get 31.8 or 1 place to the right of the decimal. I hope this helps. Again, I do not know what division your lab intended, so even though the round() feature is more accurate, your lab may expect your floating points to be rounded down to get full points. If that is the case, at least you learned something new :) Happy coding!
Step Counter Python Lab
I'm trying to solve this programming problem in Python: A pedometer treats walking 1 step as walking 2.5 feet. Define a function named feet_to_steps that takes a float as a parameter, representing the number of feet walked, and returns an integer that represents the number of steps walked. Then, write a main program that reads the number of feet walked as an input, calls function feet_to_steps() with the input as an argument, and outputs the number of steps. Use floating-point arithmetic to perform the conversion. Ex: If the input is: 150.5 the output is: 60 This is my code so far: def feet_to_steps(user_feet): steps_walked = user_feet / 2.5 return steps_walked if __name__ == '__main__': input_feet = float(input()) steps_walked = feet_to_steps(input_feet) print(int(steps_walked)) Two of my test cases passed where the input was 150.5 with an output of 60, and another with an input of 10000 with an output of 4000. The other two failed saying this: feet_to_steps(11) incorrectly returned 4.4 feet_to_steps(79.25) incorrectly returned 31.7 I know it's the second part of the prompt that is messing me up, but I can't figure out how to make the two different conditions work together with the same function. I also don't understand the if__name__=='main': part at all either. It was just included in the default template. Sorry for the long post, just wanted to be as descriptive as possible. Any help would be appreciated.
[ "Your code is mostly fine, however, you are converting the answer from a float to an int too late in your code (outside of the feet_to_steps function). Try replacing return steps_walked with return int(steps_walked)\nLastly, if__name__=='main': is a way of telling Python to only run the next block of code if it is being run directly (i.e. not imported as a module).\nThis makes it so if you were to create a second .py file and import this file, the code within the if__name__=='main': block would not run, but you would still be able to access and use the functions defined in the file.\n", "Your primary issue is that, by dividing by 2.5, the result is converted to a floating-point (decimal) value. We want the value to be an integer value, so instead of using / to do division, we can use // for floor division, which will produce an integer value from the operation, discarding any fractional value.\ndef feet_to_steps(user_feet):\n steps_walked = user_feet // 2.5\n return steps_walked\n \n \nif __name__ == '__main__':\n \n input_feet = float(input())\n steps_walked = feet_to_steps(input_feet)\n print(int(steps_walked))\n\n", "Sorry for being a bit late, but I wanted to provide some help here in case anyone else gets stuck on this. Now, I do not know what the correct inputs would be for the two that you missed because there are different types of division in Python that could dictate more than one \"correct\" answer based on your guidelines and/or expected output. However, because someone already explained whole/floor division (using // instead of /) and the directions say to use \"floating point arithmetic\", I will provide another means of rounding off a float into an integer.\nTo start, I want to explain what happens in your example that may be causing the issues. When you data cast something into an integer such as the following:\nprint(int(steps_walked))\n\nYou are actually rounding down to the nearest integer. So let's say your function produced a return value of 7.9999. If you data cast it like in the example above, you will get 7. Using whole or floor division will do the same thing, with only difference in output being that you will have a trailing \".0\" because you are using whole/floor division with floats.\nThe reason your lab returned float points as an incorrect value may be due to how the lab tests your code. It may strictly be checking the function and may not even be referring to your print statement at the bottom. You can check this by data casting the return statement in the function rather than the print statement in main.\nInstead, what you may want to use is the round() function. It rounds floating points up if the decimal point is >= .5 or down if < .5. See below:\ndef feet_to_steps(user_feet):\n steps_walked = user_feet / 2.5\n return round(steps_walked)\n \n \nif __name__ == '__main__':\n \n input_feet = float(input())\n steps_walked = feet_to_steps(input_feet)\n print(steps_walked)\n\nIf you use the round() function, you will notice that you get the same output with the first three inputs (150.5, 10000, and 11) if you data cast or use whole/floor division. However, the difference comes up with your last input(79.25). When your division is evaluated to 31.7, using round() will push your quotient to 32 because of the .7. However, data casting and whole/floor division will push your quotient down to 31 and 31.0 respectively.\nround() can take one or two arguments. If you use one argument such as the following:\nround(31.789)\n\nIt will round to the nearest whole number. 
However, if you specify what place you want it rounded to in a second argument such as this:\nround(31.789, 1)\n\nYou will get 31.8 or 1 place to the right of the decimal.\nI hope this helps. Again, I do not know what division your lab intended, so even though the round() feature is more accurate, your lab may expect your floating points to be rounded down to get full points. If that is the case, at least you learned something new :) Happy coding!\n" ]
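To make the difference between the approaches in these answers concrete, here is the failing input worked through each of them:

feet = 79.25
steps = feet / 2.5           # 31.7

print(int(steps))            # 31 -- int() truncates the fractional part
print(feet // 2.5)           # 31.0 -- floor division, result stays a float
print(int(feet // 2.5))      # 31 -- floor division, then cast to int
print(round(steps))          # 32 -- round() rounds to the nearest whole number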
[ 1, 1, 0 ]
[]
[]
[ "python" ]
stackoverflow_0070372208_python.txt
Q: mypy complains about extended base class' attribute type I have two base classes A and B that are defined like this: class A(object): def common_function(self): pass class B(object): def __init__(self, a: A): self.a = a def another_common_function(self): pass Class A holds some management information, whereas class B holds some other information, which is based on the information contained in class A, and therefore it knows about its instance of A. I also have two derived classes dA and dB that are defined like this: class dA(A): def __init__(self, t: B): self.t = t class dB(B): def __init__(self, a: dA): super(A, self).__init__(a) These classes (among others (dA1, dB1, dA2, dB2) ... that are similarly designed) are used for some special operation and therefore they need to store some more information, e.g. the t from the example for this pair of classes; other classes have different stuff to store. The problem is that mypy complains about the usage of dB.a.t: class dB(B): def __init__(self, a: dA): super(A, self).__init__(a) def do(self): if self.a.t is None: print("something") test.py: error: "A" has no attribute "t" The complaint is actually right. A doesn't have an attribute t. I also told mypy that B.a is of type A, but in this particular case I use dB.a as if it were of type dA, which actually has a t, but I explicitly told mypy otherwise. The questions are: Is this a violation of the Liskov principle? If not 1, is there a way to tell mypy that in this particular case dB.a is of type dA? Do I need to use a TypeVar? If 1, is there a way to restructure the classes to not violate the Liskov principle as well as have the type checker be able to recognize the correct types? I found the question mypy: base class has no attribute x, how to type hint in base class, however, the solution to extend the base class is not feasible, as this would make t available in all derived classes, not only dA (which somehow smells bad). A: It is possible to ensure that self.a is of type dA by using assert: class dB(B): def __init__(self, a: dA): super(A, self).__init__(a) def do(self): assert isinstance(self.a, dA) if self.a.t is None: print("something") This assert is recognized by mypy, so that self.a is known as an instance of dA afterwards and thus has an attribute t. A: You are able to specify the type of self.a in this instance by doing the following: class dB(B): a: dA ... # Rest as before It should be noted that I was not able to replicate the code provided by OP, as mypy was complaining that the second argument to super in dB was not of the same instance as argument 1. Nonetheless, this should work for you as it did me in a situation similar to this.
mypy complains about extended base class' attribute type
I have two base classes A and B that are defined like this: class A(object): def common_function(self): pass class B(object): def __init__(self, a: A): self.a = a def another_common_function(self): pass Class A holds some management information, whereas class B holds some other information, which is based on the information contained in class A, and therefore it knows about its instance of A. I also have two derived classes dA and dB that are defined like this: class dA(A): def __init__(self, t: B): self.t = t class dB(B): def __init__(self, a: dA): super(A, self).__init__(a) These classes (among others (dA1, dB1, dA2, dB2) ... that are similarly designed) are used for some special operation and therefore they need to store some more information, e.g. the t from the example for this pair of classes; other classes have different stuff to store. The problem is that mypy complains about the usage of dB.a.t: class dB(B): def __init__(self, a: dA): super(A, self).__init__(a) def do(self): if self.a.t is None: print("something") test.py: error: "A" has no attribute "t" The complaint is actually right. A doesn't have an attribute t. I also told mypy that B.a is of type A, but in this particular case I use dB.a as if it were of type dA, which actually has a t, but I explicitly told mypy otherwise. The questions are: Is this a violation of the Liskov principle? If not 1, is there a way to tell mypy that in this particular case dB.a is of type dA? Do I need to use a TypeVar? If 1, is there a way to restructure the classes to not violate the Liskov principle as well as have the type checker be able to recognize the correct types? I found the question mypy: base class has no attribute x, how to type hint in base class, however, the solution to extend the base class is not feasible, as this would make t available in all derived classes, not only dA (which somehow smells bad).
[ "It is possible to ensure that self.a is of type dA by using assert:\nclass dB(B):\n def __init__(self, a: dA):\n super(A, self).__init__(a)\n def do(self):\n assert isinstance(self.a, dA)\n if self.a.t is None:\n print(\"something\")\n\nThis assert is recognized by mypy, so that self.a is known as instance of dA afterwards and thus has an attribute t.\n", "You are able to specify the type of self.a in this instance by doing the following;\nclass dB(B):\n a: dA\n\n ... # Rest as before\n\nIt should be noted that I was not able to replicate the code provided by OP, as mypy was complaining that the second argument to super in dB was not of the same instance as argument 1. None the less, this should work for you as it did me in a situation similar to this.\n" ]
[ 1, 0 ]
[]
[]
[ "derived_class", "mypy", "python", "typechecking" ]
stackoverflow_0056479404_derived_class_mypy_python_typechecking.txt
Q: How to get a subproject from commit list I'm trying to get all the commits from a GitLab repository, and it was all going smoothly and fine. Because of another unrelated problem, I had to update my python from 3.7 to 3.9. Since then, every time I run my program I get this specific error: Exception has occurred: ValueError SHA b'7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53' could not be resolved, git returned: b'7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53 missing' Meanwhile, I discovered that this specific commit that is causing the error, besides modifying a file, also has this: Subproject commit 7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53 Does anyone know how I can fix this problem? My code is as follows: try: repo = Repository(repo_name).traverse_commits() except: print("Repository not found") exit(0) for commit in repo: for f in commit.modified_files: #(error here) #(get info) A: A "subproject commit" (as printed this way by Git itself) is actually a gitlink, which is a very specific kind of item stored in one of two places: in Git's index, as a path name and mode 160000 plus a hash ID; or in a tree object, as a component name, mode 160000, and hash ID. The hash ID in this case is the 7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53 value, which is not itself a byte string but would often be represented as a byte string in Python. The trick here is that this is the hash ID of a commit that should exist in some other Git repository. Unless the "other" Git repository is in fact this Git repository—a rare but not unheard-of situation1—it will not exist in this Git repository. You therefore can't look it up using this Git repository. To get the commit, you must: Clone the other Git repository, if you have not yet done so: you'll find its URL in the .gitmodules file in the commit that contains this gitlink. If this gitlink's path is P, you must read the .gitmodules file and find each [submodule "name"] entry and under that entry find each url and path value. If the path value matches P, the URL value is the URL for the submodule. Now that the submodule is cloned, attempt to load the specified commit from the other Git repository. It may not exist: if that's the case, this is simply a bad / invalid gitlink, and cannot be used. If the specified commit does exist, that's the commit that this superproject commit says should be checked out, if this particular commit is also to be checked out. Note that no commit contains any modified files: for f in commit.modified_files: At a guess, this refers to the pydriller library, which provides such a field. The problem here is that Git doesn't have this. Instead, Git commits have snapshots and metadata. One can synthetically compute a list of "modified files" by obtaining not only this commit's set of files in its snapshot, but also some previous commit's set of files, in its snapshot, and then comparing the two. Computing this list—the "modified files" implied by comparing a commit to its (single) parent—is very useful, so both Pydriller and Git itself have a way to do that: in Git you can run git show or git diff-tree for instance, while Pydriller simply Just Does It. But if you're not careful with this synthesized information, you will be led astray, just as you were here. It's important to realize that this list of modified files is an illusion, albeit a useful one. In some situations, it's less useful than others. When working with submodules, a difference of the form old gitlink was <hash1>, new gitlink is <hash2> is just that: a difference in gitlinks.
It's up to you to realize that these are both gitlinks and hence both refer to some commit that should (but may or may not, at this point) exist in some other repository. 1If a repository refers to itself in a gitlink, this is a bit recursive and you may need to use even more care here. The only case I know of where it's common is with GitHub Pages, where people will insert a gitlink to the original repository, but carefully store different commits in different commit-chains so that the recursion terminates immediately.
How to get a subproject from commit list
I'm trying to get all the commits from a GitLab repository, and it was all going smoothly and fine. Because of another unrelated problem, I had to update my python from 3.7 to 3.9. Since then, every time I run my program I get this specific error: Exception has occurred: ValueError SHA b'7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53' could not be resolved, git returned: b'7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53 missing' Meanwhile, I discovered that this specific commit that is causing the error, besides modifying a file, also has this: Subproject commit 7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53 Does anyone know how I can fix this problem? My code is as follows: try: repo = Repository(repo_name).traverse_commits() except: print("Repository not found") exit(0) for commit in repo: for f in commit.modified_files: #(error here) #(get info)
[ "A \"subproject commit\" (as printed this way by Git itself) is actually a gitlink, which is a very specific kind of item stored in one of two places:\n\nin Git's index, as a path name and mode 160000 plus a hash ID; or\nin a tree object, as a component name, mode 160000, and hash ID.\n\nThe hash ID in this case is the 7e944e65ee1a628e7ba0d53aac7a7bb13e79fe53 value, which is not itself a byte string but would often be represented as a byte string in Python.\nThe trick here is that this is the hash ID of a commit that should exist in some other Git repository. Unless the \"other\" Git repository is in fact this Git repository—a rare but not unheard-of situation1—it will not exist in this Git repository. You therefore can't look it up using this Git repository.\nTo get the commit, you must:\n\nClone the other Git repository, if you have not yet done so: you'll find its URL in the .gitmodules file in the commit that contains this gitlink. If this gitlink's path is P, you must read the .gitmodules file and find each [submodule \"name\"] entry and under that entry find each url and path value. If the path value matches P, the URL value is the URL for the submodule.\nNow that the submodule is cloned, attempt to load the specified commit from the other Git repository. It may not exist: if that's the case, this is simply a bad / invalid gitlink, and cannot be used. If the specified commit does exist, that's the commit that this superproject commit says should be checked out, if this particular commit is also to be checked out.\n\nNote that no commit contains any modified files:\n\nfor f in commit.modified_files:\n\n\nAt a guess, this refers to the pydriller library, which provides such a field. The problem here is that Git doesn't have this. Instead, Git commits have snapshots and metadata. One can synthetically compute a list of \"modified files\" by obtaining not only this commit's set of files in its snapshot, but also some previous commit's set of files, in its snapshot, and then comparing the two.\nComputing this list—the \"modified files\" implied by comparing a commit to its (single) parent—is very useful, so both Pydriller and Git itself have a way to do that: in Git you run can run git show or git diff-tree for instance, while Pydriller simply Just Does It. But if you're not careful with this synthesized information, you will be led astray, just as you were here. It's important to realize that this list of modified files is an illusion, albeit a useful one. In some situations, it's less useful than others. When working with submodules, a difference of the form old gitlink was <hash1>, new gitlink is <hash2> is just that: a difference in gitlinks. It's up to you to realize that these are both gitlinks and hence both refer to some commit that should (but may or may not, at this point) exist in some other repository.\n\n1If a repository refers to itself in a gitlink, this is a bit recursive and you may need to use even more care here. The only case I know of where it's common is with GitHub Pages, where people will insert a gitlink to the original repository, but carefully store different commits in different commit-chains so that the recursion terminates immediately.\n" ]
[ 1 ]
[]
[]
[ "git", "gitlab", "python", "repository", "sha" ]
stackoverflow_0074616438_git_gitlab_python_repository_sha.txt
Q: Different strategies of memoization lead to vastly different runtime I was trying to solve leetcode problem 416 - https://leetcode.com/problems/partition-equal-subset-sum/description/ Given a non-empty array nums containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal. I encountered some interesting results trying to optimize my solution. I tried to optimize my recursive solution to avoid the dreaded "time limit exceeded" like so: class Solution: def canPartition(self, nums: List[int]) -> bool: totalSum = sum(nums) if totalSum % 2 == 1: return False @cache def helper(index, setOne): if setOne < 0: return False if setOne == 0: return True for x in range(index, len(nums)): if helper(x +1, setOne - nums[x]): return True return False return helper(0,totalSum // 2) It "helped" to improve the runtime, 36 -> 74 test cases passed, but I am still getting TLE. I took a look at the accepted solutions and I have calculated that my solution should have the same exact runtime, yet different thought process. Here is the solution link - https://leetcode.com/problems/partition-equal-subset-sum/solutions/1624939/c-python-5-simple-solutions-w-explanation-optimization-from-brute-force-to-dp-to-bitmask/ and the python code which I am referring to: class Solution: def canPartition(self, nums): @cache def subsetSum(s, i): if s == 0: return True if i >= len(nums) or s < 0: return False return subsetSum(s-nums[i], i+1) or subsetSum(s, i+1) total_sum = sum(nums) return total_sum & 1 == 0 and subsetSum(total_sum // 2, 0) At this point the closest answer I have is that there is some hidden variable I have forgotten to memoize. Would love to get some insight into why my code is inefficient. Side Note: At first I thought that my code was inefficient, i.e. O(n * 2^n), but further analysis leads me to believe that it is O(2^n) as the for loop only makes a binary choice for each index (choose index or not), please correct me if I am wrong! Edit: I have listened to user2357112's advice, but still get TLE. I have edited the improved code above. It improved the runtime significantly, but still not accepted. I am guessing this is probably an issue with the leetcode algorithm at this point. A: You forgot to stop recursing if you overshoot the target. Unrelated: note that some of the "solutions" in that solution link don't actually work. For example "solution" 3 fails on input [100, 125, 185, 60, 195, 25], because the memoization logic is broken - memoizing the index as well as the subset sum really is necessary.
Different strategies of memoization lead to vastly different runtime
I was trying to solve leetcode problem 416 - https://leetcode.com/problems/partition-equal-subset-sum/description/
Given a non-empty array nums containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal.
I encountered some interesting results trying to optimize my solution. I tried to optimize my recursive solution to avoid the dreaded "time limit exceeded" like so:
class Solution:
    def canPartition(self, nums: List[int]) -> bool:
        totalSum = sum(nums)
        if totalSum % 2 == 1: return False

        @cache
        def helper(index, setOne):
            if setOne < 0:
                return False
            if setOne == 0:
                return True
            for x in range(index, len(nums)):
                if helper(x +1, setOne - nums[x]):
                    return True
            return False

        return helper(0,totalSum // 2)

It "helped" to improve the runtime, 36 -> 74 test cases passed, but I am still getting TLE. I took a look at the accepted solutions and I have calculated that my solution should have the exact same runtime, yet a different thought process. Here is the solution link - https://leetcode.com/problems/partition-equal-subset-sum/solutions/1624939/c-python-5-simple-solutions-w-explanation-optimization-from-brute-force-to-dp-to-bitmask/ and the python code which I am referring to:
class Solution:
    def canPartition(self, nums):
        @cache
        def subsetSum(s, i):
            if s == 0: return True
            if i >= len(nums) or s < 0: return False
            return subsetSum(s-nums[i], i+1) or subsetSum(s, i+1)

        total_sum = sum(nums)
        return total_sum & 1 == 0 and subsetSum(total_sum // 2, 0)

At this point the closest answer I have is that there is some hidden variable I have forgotten to memoize. Would love to get some insight into why my code is inefficient.
Side Note: At first I thought that my code was inefficient, i.e. O(n * 2^n), but further analysis leads me to believe that it is O(2^n) as the for loop only makes a binary choice for each index (choose index or not), please correct me if I am wrong!
Edit: I have listened to user2357112's advice, but still get TLE. I have edited the improved code above. It improved the runtime significantly, but still not accepted. I guess this is probably an issue with the leetcode algorithm at this point.
[ "You forgot to stop recursing if you overshoot the target.\nUnrelated: note that some of the \"solutions\" in that solution link don't actually work. For example \"solution\" 3 fails on input [100, 125, 185, 60, 195, 25], because the memoization logic is broken - memoizing the index as well as the subset sum really is necessary.\n" ]
[ 1 ]
[]
[]
[ "algorithm", "python", "recursion" ]
stackoverflow_0074621729_algorithm_python_recursion.txt
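To illustrate the accepted point (stop recursing once the target is overshot), here is one hedged sketch of a pruned helper; it is one possible fix, not the official solution. Sorting nums first lets the loop break at the first value that exceeds the remaining target, since every later value is at least as large.

from functools import cache

def can_partition(nums):
    total = sum(nums)
    if total % 2 == 1:
        return False
    nums = sorted(nums)  # ascending, so the first overshoot ends the loop

    @cache
    def helper(index, target):
        if target == 0:
            return True
        for x in range(index, len(nums)):
            if nums[x] > target:  # every later value overshoots too
                break             # stop recursing instead of exploring dead ends
            if helper(x + 1, target - nums[x]):
                return True
        return False

    return helper(0, total // 2)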
Q: import module from s3 in sagemaker I have a .py file in an s3 bucket which I am trying to load in as a python module within SageMaker.
I've tried adding the file path to the sys path with: sys.path.append('foo') but get an error with: import bar.py
I can read the py file with: pd.read_csv('foo/bar.py') but get an error with: open('foo/bar.py')
Please can you guide me on how to import this .py file as a module.
A: You can first download the file from S3:
import os
os.system("aws s3 cp s3://<S3location> .")

Then you can import the file.
import module from s3 in sagemaker
I have a .py file in an s3 bucket which I am trying to load in as a python module within SageMaker.
I've tried adding the file path to the sys path with: sys.path.append('foo') but get an error with: import bar.py
I can read the py file with: pd.read_csv('foo/bar.py') but get an error with: open('foo/bar.py')
Please can you guide me on how to import this .py file as a module.
[ "You can first download the file from S3:\nimport os\nos.system(\"aws s3 cp s3://<S3location> .\")\n\nThen you can import the file.\n" ]
[ 0 ]
[]
[]
[ "amazon_s3", "amazon_sagemaker", "python", "python_module" ]
stackoverflow_0074602697_amazon_s3_amazon_sagemaker_python_python_module.txt
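A boto3-based variant of the same idea (a sketch; the bucket, key, and module names below are placeholders): download the module somewhere importable, extend sys.path, and import it without the .py suffix.

import sys
import boto3

s3 = boto3.client("s3")
s3.download_file("my-bucket", "modules/bar.py", "/tmp/bar.py")  # placeholder names

sys.path.append("/tmp")  # make the download directory importable
import bar               # note: no ".py" suffix in an import statement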
Q: I have 2 dataframes. The first main dataframe has a column missing information that the second contains. I just need to add the missing column to the first I am trying to move information from one dataframe to add to the main dataframe. They look like this
DF1 =
|Year | V1 | V2 | V3 | V4 |
|-----------------------------|
|2023 | X0 | Y0 | Z0 | A0 |
|2022 | X1 | Y1 | Z1 | A1 |
|2021 | X2 | Y2 | Z2 | A2 |
|2020 | NAN | Y3 | Z3 | A3 |
|2019 | NAN | Y4 | Z4 | A4 |

DF2 =
|Year | V1 |
|-----------|
|2020 | X3 |
|2019 | X4 |

My desired goal is:
DF3 =
|Year | V1 | V2 | V3 | V4 |
|-----------------------------|
|2023 | X0 | Y0 | Z0 | A0 |
|2022 | X1 | Y1 | Z1 | A1 |
|2021 | X2 | Y2 | Z2 | A2 |
|2020 | X3 | Y3 | Z3 | A3 |
|2019 | X4 | Y4 | Z4 | A4 |

New to Python, not sure how to go about this. I have tried merge with no success.
df3 = pd.merge(df1, df2, how='inner', on='Year')

This will get rid of the previous years and add many x_x, x_y type variables.
A: here is one way to do it using map
# map the value of V1 from DF2 based on year.
# fill null mapping result with value from the DF

df['V1']=df['Year'].map(df2.set_index('Year')['V1']).fillna(df['V1'])
df

Year V1 V2 V3 V4
0 2023 X0 Y0 Z0 A0
1 2022 X1 Y1 Z1 A1
2 2021 X2 Y2 Z2 A2
3 2020 X3 Y3 Z3 A3
4 2019 X4 Y4 Z4 A4
I have 2 dataframes. The first main dataframe has a column missing information that the second contains. I just need to add the missing column to the first
I am trying to move information from one dataframe to add to the main dataframe. They look like this
DF1 =
|Year | V1 | V2 | V3 | V4 |
|-----------------------------|
|2023 | X0 | Y0 | Z0 | A0 |
|2022 | X1 | Y1 | Z1 | A1 |
|2021 | X2 | Y2 | Z2 | A2 |
|2020 | NAN | Y3 | Z3 | A3 |
|2019 | NAN | Y4 | Z4 | A4 |

DF2 =
|Year | V1 |
|-----------|
|2020 | X3 |
|2019 | X4 |

My desired goal is:
DF3 =
|Year | V1 | V2 | V3 | V4 |
|-----------------------------|
|2023 | X0 | Y0 | Z0 | A0 |
|2022 | X1 | Y1 | Z1 | A1 |
|2021 | X2 | Y2 | Z2 | A2 |
|2020 | X3 | Y3 | Z3 | A3 |
|2019 | X4 | Y4 | Z4 | A4 |

New to Python, not sure how to go about this. I have tried merge with no success.
df3 = pd.merge(df1, df2, how='inner', on='Year')

This will get rid of the previous years and add many x_x, x_y type variables.
[ "here is one way to do it using map\n# map the value of V1 from DF2 based on year.\n# fill null mapping result with value from the DF\n\ndf['V1']=df['Year'].map(df2.set_index('Year')['V1']).fillna(df['V1'])\ndf\n\nYear V1 V2 V3 V4\n0 2023 X0 Y0 Z0 A0\n1 2022 X1 Y1 Z1 A1\n2 2021 X2 Y2 Z2 A2\n3 2020 X3 Y3 Z3 A3\n4 2019 X4 Y4 Z4 A4\n\n" ]
[ 1 ]
[]
[]
[ "concatenation", "join", "merge", "pandas", "python" ]
stackoverflow_0074621808_concatenation_join_merge_pandas_python.txt
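For reference, the same fill can also be expressed with combine_first, which aligns both frames on Year and only fills the missing cells; a small self-contained sketch (toy data mirroring the question):

import pandas as pd

df1 = pd.DataFrame({"Year": [2023, 2022, 2021, 2020, 2019],
                    "V1": ["X0", "X1", "X2", None, None],
                    "V2": ["Y0", "Y1", "Y2", "Y3", "Y4"]})
df2 = pd.DataFrame({"Year": [2020, 2019], "V1": ["X3", "X4"]})

# align on Year; values present in df1 win, gaps are taken from df2
df3 = (df1.set_index("Year")
          .combine_first(df2.set_index("Year"))
          .reset_index())
print(df3)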
Q: Automate the execution of a .ipynb file in AWS SageMaker by Websocket I have an issue. Before November 10th, my Lambda code used a WebSocket to communicate with SageMaker, which worked for automatic execution of a .ipynb file. The code is below
import boto3
import time
from botocore.vendored import requests
import websocket

def lambda_handler(event, context):
    sm_client = boto3.client('sagemaker')
    notebook_instance_name = 'test'
    url = sm_client.create_presigned_notebook_instance_url(NotebookInstanceName=notebook_instance_name)['AuthorizedUrl']
    url_tokens = url.split('/')
    http_proto = url_tokens[0]
    http_hn = url_tokens[2].split('?')[0].split('#')[0]
    s = requests.Session()
    r = s.get(url)
    cookies = "; ".join(key + "=" + value for key, value in s.cookies.items())
    ws = websocket.create_connection ("wss://{}/terminals/websocket/1".format(http_hn), cookie=cookies, host=http_hn, origin=http_proto + "//" + http_hn, header=["User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"])
    ws.send("""[ "stdin", "jupyter nbconvert --execute --to notebook --inplace /home/ec2-user/SageMaker/Scikit.ipynb --ExecutePreprocessor.kernel_name=python3 --ExecutePreprocessor.timeout=1500\\r" ]""")
    time.sleep(1)
    ws.close()
    return None

But on November 18th, I discovered that the WebSocket connection gets an error. The error code is WebSocketBadStatusException: Handshake status 200 OK. I haven't been able to figure it out for a few days. Does anyone have a suggestion / solution? I have surveyed articles about this for many days, but I haven't found the answer.
A: You could look at automating your notebook using SageMaker Processing Jobs.
Kindly see this blog post here: https://aws.amazon.com/blogs/machine-learning/scheduling-jupyter-notebooks-on-sagemaker-ephemeral-instances/
Automate the execution of a .ipynb file in AWS SageMaker by Websocket
I have an issue. Before November 10th, my Lambda code used a WebSocket to communicate with SageMaker, which worked for automatic execution of a .ipynb file. The code is below
import boto3
import time
from botocore.vendored import requests
import websocket

def lambda_handler(event, context):
    sm_client = boto3.client('sagemaker')
    notebook_instance_name = 'test'
    url = sm_client.create_presigned_notebook_instance_url(NotebookInstanceName=notebook_instance_name)['AuthorizedUrl']
    url_tokens = url.split('/')
    http_proto = url_tokens[0]
    http_hn = url_tokens[2].split('?')[0].split('#')[0]
    s = requests.Session()
    r = s.get(url)
    cookies = "; ".join(key + "=" + value for key, value in s.cookies.items())
    ws = websocket.create_connection ("wss://{}/terminals/websocket/1".format(http_hn), cookie=cookies, host=http_hn, origin=http_proto + "//" + http_hn, header=["User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"])
    ws.send("""[ "stdin", "jupyter nbconvert --execute --to notebook --inplace /home/ec2-user/SageMaker/Scikit.ipynb --ExecutePreprocessor.kernel_name=python3 --ExecutePreprocessor.timeout=1500\\r" ]""")
    time.sleep(1)
    ws.close()
    return None

But on November 18th, I discovered that the WebSocket connection gets an error. The error code is WebSocketBadStatusException: Handshake status 200 OK. I haven't been able to figure it out for a few days. Does anyone have a suggestion / solution? I have surveyed articles about this for many days, but I haven't found the answer.
[ "You could look at automating your notebook using SageMaker Processing Jobs.\nKindly see this blog post here: https://aws.amazon.com/blogs/machine-learning/scheduling-jupyter-notebooks-on-sagemaker-ephemeral-instances/\n" ]
[ 0 ]
[]
[]
[ "amazon_sagemaker", "amazon_web_services", "python", "websocket" ]
stackoverflow_0074597843_amazon_sagemaker_amazon_web_services_python_websocket.txt
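The scheduled-job approach in the linked post ultimately boils down to executing the notebook headlessly rather than through a terminal WebSocket. A minimal sketch of that step using papermill (assumed installed; paths taken from the question):

import papermill as pm

pm.execute_notebook(
    "/home/ec2-user/SageMaker/Scikit.ipynb",      # input notebook
    "/home/ec2-user/SageMaker/Scikit-out.ipynb",  # executed copy with outputs
    kernel_name="python3",
)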
Q: How can I delete a couple lines of text that I inputted into a text file in Python? I am making a small simple password manager in python. I have the functions of creating an account which has 3 inputs, Username, Password, and Website. I have a function to view all the accounts which shows the contents of the file info.txt where all that information goes. I'm trying to create a function to delete an entry but I'm not sure how to make the function delete all the lines of information associated with the Username. I want an input asking "Which account to delete"; you put the username, and it will delete all information associated with the username in info.txt
Code:
import os.path #Imports os module using path for file access

def checkExistence(): #Checking for existence of file
    if os.path.exists("info.txt"):
        pass #pass is used as a placeholder because an if statement with no code raises an error.
    else:
        file = open("info.txt", "w") #creates file with name of info.txt and W for write access
        file.close()


def appendNew(): #This function will append a new password in the txt file
    file = open("info.txt", "a") #Open info.txt use a for appending IMPORTANT: opening a file with w for write will write over all existing data
    userName = input("Enter username: ")
    print(userName)
    os.system('cls')
    password = input("Enter password: ")
    print(password)
    os.system('cls')
    website = input("Enter website: ")
    print(website)
    os.system('cls')
    print()
    print()
    usrnm = "Username: " + userName + "\n" #Makes the variable usrnm have a value of "Username: {our username}" and a new line
    pwd = "Password: " + password + "\n"
    web = "Website: " + website + "\n"
    file.write("----------------------------------\n")
    file.write(usrnm)
    file.write(pwd)
    file.write(web)
    file.write("----------------------------------\n")
    file.write("\n")
    file.close()

def readPasswords():
    file = open("info.txt", "r") #Open info.txt with r for read
    content = file.read() # Content is everything read from file variable (info.txt)
    file.close()
    print(content)

checkExistence()
while True:
    choice = input("Do you want to: \n 1. Add account\n 2. View accounts\n 3. Delete account\n")
    print(choice)
    if choice == "1":
        os.system('cls')
        appendNew()
    elif choice == "2":
        os.system('cls')
        readPasswords()
    elif choice == "3":
        os.system('cls')
    else:
        os.system('cls')
        print("huh? thats not an input.. Try again.\n")

I tried making a delete account function by deleting the line which matched the username. My only problem is that it only deletes the line in info.txt with the username, but not the password and website associated with that username.
A: Firstly, you're using the wrong tool for the problem. A good library to try is pandas, using .csv files (which one can think of as more program-oriented excel files). 
However, if you really want to use the text file based approach, your solution would look something like this:
with open(textfile, 'r+') as f:
    lines = [line.replace('\n', '') for line in f.readlines()]
    # The above makes a list of all lines in the file without \n char
    index = lines.index(username)
    # Find index of username in these lines
    for i in range(5):
        lines.pop(index)
    # Delete the next five lines - check your 'appendNew' function
    # you're using five lines to write each user's data
    print(lines)
    f.seek(0)
    f.truncate()
    # Rewind and drop the old contents so we overwrite instead of appending
    f.write("\n".join(lines))
    # Finally, write the lines back with the '\n' char we removed in line 2


# Here is your readymade function:

def removeName(username):
    with open("info.txt", 'r+') as f:
        lines = [line.replace('\n', '') for line in f.readlines()]
        try:
            index = lines.index(username)
        except ValueError:
            print("Username not in file!")
            return
        for i in range(5):
            lines.pop(index)
        print(lines)
        f.seek(0)
        f.truncate()
        f.write("\n".join(lines))


# Function that also asks for username by itself

def removeName_2():
    username = input("Enter username to remove:\t")
    with open("info.txt", 'r+') as f:
        lines = [line.replace('\n', '') for line in f.readlines()]
        try:
            index = lines.index(username)
        except ValueError:
            print("Username not in file!")
            return
        for i in range(5):
            lines.pop(index)
        print(lines)
        f.seek(0)
        f.truncate()
        f.write("\n".join(lines))


# Usage:
removeName(some_username_variable)
removeName_2()

Again, this is a rather clunky and error prone approach. If you ever change the format in which each user's details are stored, you would have to change the number of lines deleted in the for loop. Try pandas and csv files, they save a lot of time.
If you're uncomfortable with those or you're just starting to code, try the json library and .json files - at a high level they're simple ways of storing data into files and they can be parsed with the json library in a single line of code. You should be able to find plenty of advice online about pandas and json.
If you're unable to follow what the function does, try reading up on try-except blocks and function parameters (as well as maybe global variables).
How can I delete a couple lines of text that I inputted into a text file in Python?
I am making a small simple password manager in python. I have the functions of creating an account which has 3 inputs, Username, Password, and Website. I have a function to view all the accounts which shows the contents of the file info.txt where all that information goes. I'm trying to create a function to delete an entry but I'm not sure how to make the function delete all the lines of information associated with the Username. I want an input asking "Which account to delete"; you put the username, and it will delete all information associated with the username in info.txt
Code:
import os.path #Imports os module using path for file access

def checkExistence(): #Checking for existence of file
    if os.path.exists("info.txt"):
        pass #pass is used as a placeholder because an if statement with no code raises an error.
    else:
        file = open("info.txt", "w") #creates file with name of info.txt and W for write access
        file.close()


def appendNew(): #This function will append a new password in the txt file
    file = open("info.txt", "a") #Open info.txt use a for appending IMPORTANT: opening a file with w for write will write over all existing data
    userName = input("Enter username: ")
    print(userName)
    os.system('cls')
    password = input("Enter password: ")
    print(password)
    os.system('cls')
    website = input("Enter website: ")
    print(website)
    os.system('cls')
    print()
    print()
    usrnm = "Username: " + userName + "\n" #Makes the variable usrnm have a value of "Username: {our username}" and a new line
    pwd = "Password: " + password + "\n"
    web = "Website: " + website + "\n"
    file.write("----------------------------------\n")
    file.write(usrnm)
    file.write(pwd)
    file.write(web)
    file.write("----------------------------------\n")
    file.write("\n")
    file.close()

def readPasswords():
    file = open("info.txt", "r") #Open info.txt with r for read
    content = file.read() # Content is everything read from file variable (info.txt)
    file.close()
    print(content)

checkExistence()
while True:
    choice = input("Do you want to: \n 1. Add account\n 2. View accounts\n 3. Delete account\n")
    print(choice)
    if choice == "1":
        os.system('cls')
        appendNew()
    elif choice == "2":
        os.system('cls')
        readPasswords()
    elif choice == "3":
        os.system('cls')
    else:
        os.system('cls')
        print("huh? thats not an input.. Try again.\n")

I tried making a delete account function by deleting the line which matched the username. My only problem is that it only deletes the line in info.txt with the username, but not the password and website associated with that username.
[ "Firstly, you're using the wrong tool for the problem. A good library to try is pandas, using .csv files (which one can think of as more program-oriented excel files). However, if you really want to use the text file based approach, your solution would look something like this:\nwith open(textfile, 'r+') as f:\n    lines = [line.replace('\\n', '') for line in f.readlines()]\n    # The above makes a list of all lines in the file without \\n char\n    index = lines.index(username)\n    # Find index of username in these lines\n    for i in range(5):\n        lines.pop(index)\n    # Delete the next five lines - check your 'appendNew' function\n    # you're using five lines to write each user's data\n    print(lines)\n    f.seek(0)\n    f.truncate()\n    # Rewind and drop the old contents so we overwrite instead of appending\n    f.write(\"\\n\".join(lines))\n    # Finally, write the lines back with the '\\n' char we removed in line 2\n\n\n# Here is your readymade function:\n\ndef removeName(username):\n    with open(\"info.txt\", 'r+') as f:\n        lines = [line.replace('\\n', '') for line in f.readlines()]\n        try:\n            index = lines.index(username)\n        except ValueError:\n            print(\"Username not in file!\")\n            return\n        for i in range(5):\n            lines.pop(index)\n        print(lines)\n        f.seek(0)\n        f.truncate()\n        f.write(\"\\n\".join(lines))\n\n\n# Function that also asks for username by itself\n\ndef removeName_2():\n    username = input(\"Enter username to remove:\\t\")\n    with open(\"info.txt\", 'r+') as f:\n        lines = [line.replace('\\n', '') for line in f.readlines()]\n        try:\n            index = lines.index(username)\n        except ValueError:\n            print(\"Username not in file!\")\n            return\n        for i in range(5):\n            lines.pop(index)\n        print(lines)\n        f.seek(0)\n        f.truncate()\n        f.write(\"\\n\".join(lines))\n\n\n# Usage:\nremoveName(some_username_variable)\nremoveName_2()\n\nAgain, this is a rather clunky and error prone approach. If you ever change the format in which each user's details are stored, you would have to change the number of lines deleted in the for loop. Try pandas and csv files, they save a lot of time.\nIf you're uncomfortable with those or you're just starting to code, try the json library and .json files - at a high level they're simple ways of storing data into files and they can be parsed with the json library in a single line of code. You should be able to find plenty of advice online about pandas and json.\nIf you're unable to follow what the function does, try reading up on try-except blocks and function parameters (as well as maybe global variables).\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074621844_python_python_3.x.txt
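Following the answer's closing suggestion, a hedged sketch of the JSON-backed variant, where deleting an account removes the name, password, and website in one dictionary operation. The file name info.json and the schema (username mapped to its details) are assumptions, not the asker's code.

import json
import os

DB = "info.json"  # assumed file name

def load_accounts():
    if not os.path.exists(DB):
        return {}
    with open(DB) as f:
        return json.load(f)

def save_accounts(accounts):
    with open(DB, "w") as f:
        json.dump(accounts, f, indent=2)

def delete_account(username):
    accounts = load_accounts()
    # pop removes the whole record (password and website included) at once
    if accounts.pop(username, None) is None:
        print("Username not in file!")
        return
    save_accounts(accounts)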
Q: Wrong attribution to second condition in permutation I want my code to print:
Backflip Complete
Backflip Hyper
180 Round Complete
180 Round Mega
Gumbi Complete

But it instead prints:
Backflip Complete
Backflip Hyper
180 Round Complete
180 Round Hyper
Gumbi Complete
Gumbi Hyper

It looks like it only takes the first if argument for landing in def landings(tricks), so all of the tricks are printing the landing for backflip, instead of a different argument for each of the tricks. Why does it do this and how do I fix it? Is it getting iterated over or something or am I not assigning something correctly?
(Edit: I'm going to add a lot more variables and read these from an Excel sheet, so I'd like to not manually type out each one like I did below. I just don't understand how to make it a tree so to say, and assign the dependencies correctly.)
My Code:
import itertools

backflip = "Backflip"
one80round = "180 Round"
gumbi = "Gumbi"

tricks = [backflip,one80round,gumbi]

complete = " Complete"
hyper = " Hyper"
mega = " Mega"

backflip_landing = [complete,hyper]
one80round_landing = [complete,mega]
gumbi_landing = [complete]

def landings(tricks):
    for i in tricks:
        if i == backflip:
            landing = backflip_landing
        elif i == one80round:
            landing = one80round_landing
        elif i == gumbi:
            landing = gumbi_landing
    return landing

for i in itertools.product(tricks, landings(tricks)):
    print(i[0] + i[1])

A: You could eliminate most of your variables since there is little point in having one variable for each string. You could also forget about itertools since you really don't want a product at all. Furthermore, the function landings can be replaced by a dictionary of the same name (the fact that you made landings a function of tricks rather than of individual tricks was one of your bugs). After making that dictionary, a simple nested for-loop prints what you want:
tricks = ['Backflip','180 Round','Gumbi']

landings = {'Backflip': ['Complete','Hyper'],
            '180 Round': ['Complete','Mega'],
            'Gumbi': ['Complete']}
 
for trick in tricks:
    for landing in landings[trick]:
        print(trick,landing, sep = ' ')

Output:
Backflip Complete
Backflip Hyper
180 Round Complete
180 Round Mega
Gumbi Complete
Wrong attribution to second condition in permutation
I want my code to print:
Backflip Complete
Backflip Hyper
180 Round Complete
180 Round Mega
Gumbi Complete

But it instead prints:
Backflip Complete
Backflip Hyper
180 Round Complete
180 Round Hyper
Gumbi Complete
Gumbi Hyper

It looks like it only takes the first if argument for landing in def landings(tricks), so all of the tricks are printing the landing for backflip, instead of a different argument for each of the tricks. Why does it do this and how do I fix it? Is it getting iterated over or something or am I not assigning something correctly?
(Edit: I'm going to add a lot more variables and read these from an Excel sheet, so I'd like to not manually type out each one like I did below. I just don't understand how to make it a tree so to say, and assign the dependencies correctly.)
My Code:
import itertools

backflip = "Backflip"
one80round = "180 Round"
gumbi = "Gumbi"

tricks = [backflip,one80round,gumbi]

complete = " Complete"
hyper = " Hyper"
mega = " Mega"

backflip_landing = [complete,hyper]
one80round_landing = [complete,mega]
gumbi_landing = [complete]

def landings(tricks):
    for i in tricks:
        if i == backflip:
            landing = backflip_landing
        elif i == one80round:
            landing = one80round_landing
        elif i == gumbi:
            landing = gumbi_landing
    return landing

for i in itertools.product(tricks, landings(tricks)):
    print(i[0] + i[1])
[ "You could eliminate most of your variables since there is little point in having one variable for each string. You could also forget about itertools since you really don't want a product at all. Furthermore, the function landings can be replaced by a dictionary of the same name (the fact that you made landings a function of tricks rather than of individual tricks was one of your bugs). After making that dictionary, a simple nested for-loop prints what you want:\ntricks = ['Backflip','180 Round','Gumbi']\n\nlandings = {'Backflip': ['Complete','Hyper'],\n '180 Round': ['Complete','Mega'],\n 'Gumbi': ['Complete']}\n \nfor trick in tricks:\n for landing in landings[trick]:\n print(trick,landing, sep = ' ')\n\nOutput:\nBackflip Complete\nBackflip Hyper\n180 Round Complete\n180 Round Mega\nGumbi Complete\n\n" ]
[ 0 ]
[]
[]
[ "function", "loops", "permutation", "python" ]
stackoverflow_0074621799_function_loops_permutation_python.txt
Q: How to properly use a dictionary? This function was built to try to learn how to use a dictionary properly.
dict(d, 'bonjour')
hello
Unknown
Unknown
Unknown

It returns hello, and then Unknown. Why? It should only return hello.
Help would be appreciated! Thanks,
A: def dict(d,s):

    s = s.lower()

    for e,f in d.items():
        if s == e:
            print (f) 
            return
        elif s == f:
            print (e)
            return
    print ('Unknown')

 
d = {"hello":"bonjour","Goodbye":"aurevoir","eat":"mange","world":"monde"}
dict(d,'eat')

Python dictionaries provide powerful mapping capabilities out of the box. This solution maximizes that fact:
from collections import ChainMap

e_to_f = {"hello":"bonjour","goodbye":"aurevoir","eat":"mange","world":"monde"}
f_to_e = {v: k for k, v in e_to_f.items()} # flip e_to_f dictionary

c = ChainMap(e_to_f, f_to_e) # create a virtual dictionary of all words

def translate(word):
    return c.get(word.lower(), "Unknown")

for word in ["hello", "bonjour", "WorLd", "fruit fly"]:
    print(f"{word}: {translate(word)}")

Output:
hello: bonjour
bonjour: hello
WorLd: monde
fruit fly: Unknown
How to properly use a dictionary?
This function was built to try to learn how to use a dictionary properly.
dict(d, 'bonjour')
hello
Unknown
Unknown
Unknown

It returns hello, and then Unknown. Why? It should only return hello.
Help would be appreciated! Thanks,
[ "def dict(d,s):\n\n s = s.lower()\n\n for e,f in d.items():\n if s == e:\n print (f) \n return\n elif s == f:\n print (e)\n return\n print ('Unknown')\n\n \nd = {\"hello\":\"bonjour\",\"Goodbye\":\"aurevoir\",\"eat\":\"mange\",\"world\":\"monde\"}\ndict(d,'eat')\n\n", "Python dictionaries provide powerful mapping capabilities out of the box. This solution maximizes that fact:\nfrom collections import ChainMap\n\ne_to_f = {\"hello\":\"bonjour\",\"goodbye\":\"aurevoir\",\"eat\":\"mange\",\"world\":\"monde\"}\nf_to_e = {v: k for k, v in e_to_f.items()} # flip e_to_f dictionary\n\nc = ChainMap(e_to_f, f_to_e) # create a virtual dictionary of all words\n\ndef translate(word):\n return c.get(word.lower(), \"Unknown\")\n\nfor word in [\"hello\", \"bonjour\", \"WorLd\", \"fruit fly\"]:\n print(f\"{word}: {translate(word)}\")\n\nOutput:\nhello: bonjour\nbonjour: hello\nWorLd: monde\nfruit fly: Unknown\n\n" ]
[ 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074621877_python.txt
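A guess at what went wrong in the original function, whose body isn't shown in the question: printing 'Unknown' inside the loop for every non-matching pair produces exactly one match followed by several Unknowns. A for/else version avoids that, and also avoids shadowing the built-in dict by using a different name; this is a hedged reconstruction, not the asker's code.

def lookup(d, s):
    s = s.lower()
    for e, f in d.items():
        if s == e:
            print(f)
            break
        if s == f:
            print(e)
            break
    else:  # the else clause runs only if the loop never hit break
        print("Unknown")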
Q: Get all children of self-referencing Django model in nested hierarchy Introduction We’re currently working on a Django REST Framework project. It connects to a Postgres database that holds some hierarchical (tree structure) data, that goes a number of levels deep. We should offer an endpoint for GET requests that returns the entire nested tree structure (parent, children, grandchildren etc.) when no parameter is offered.
Sample data The table below shows the sample data of regions, where each region can have a parent, indicating the hierarchy of regions. In this example, the hierarchy is three levels deep (world>continent>country). But in reality, the tree could go much deeper, having an unknown number of levels (world>continent>country>province>city>neighborhood>etc.).
id  region     parent_region_id
1   world      NULL
2   europe     1
3   asia       1
4   africa     1
5   belgium    2
6   germany    2
7   spain      2
8   japan      3
9   indonesia  3
10  vietnam    3
11  tanzania   4
12  egypt      4
13  senegal    4
Our goal The JSON output shown below is what we try to achieve. It’s the goal for the response body of the GET request for the /region resource.
{
   "id":1,
   "region":"world",
   "children":[
      {
         "id":2,
         "region":"europe",
         "children":[
            {
               "id":5,
               "region":"belgium"
            },
            {
               "id":6,
               "region":"germany"
            },
            {
               "id":7,
               "region":"spain"
            }
         ]
      },
      {
         "id":3,
         "region":"asia",
         "children":[
            {
               "id":8,
               "region":"japan"
            },
            {
               "id":9,
               "region":"indonesia"
            },
            {
               "id":10,
               "region":"vietnam"
            }
         ]
      },
      {
         "id":4,
         "region":"africa",
         "children":[
            {
               "id":11,
               "region":"tanzania"
            },
            {
               "id":12,
               "region":"egypt"
            },
            {
               "id":13,
               "region":"senegal"
            }
         ]
      }
   ]
}

What we’ve tried and achieved so far Here’s how we tried to achieve our goal. See code below for models, serializers and views:
Models.py
________
class HierarchyData(models.Model):
    region = models.CharField(max_length=100, null=False, default=None)
    parent = models.ForeignKey("self", models.DO_NOTHING, null=True, blank=True, db_column='parent', related_name="children")

Serializers.py
__________
class HeirarchyDataSerializer(serializers.ModelSerializer):
    class Meta:
        model = HierarchyData
        fields = ["id", "region", "children"]

Views.py
__________
class ListHierarchyData(generics.ListAPIView):
    queryset = HierarchyData.objects.all()
    serializer_class = HeirarchyDataSerializer
    permission_classes = [IsAuthenticated]

When I call the end point for given scenario, I get the JSON response in the following format:
{ "id": 1, "region": "world", "children": [ 2,3,4] }
Related Stack Overflow questions that didn’t seem to answer my issue

How to recursively query in django efficiently?
Django - Models - Recursively retrieve parents of a leaf node
Django self-recursive foreignkey filter query for all childs

Above mentioned question partially solves my problem but I’m still unable to get the desired result. See details below:
1: I can’t touch database directly, I have to interact with database with ORM only.
2: Recursive time out and can’t serialize, saying object of type “Model” is not serializable.
3: This one partially worked for me: Based on this post, I tried to add the following in the model:
def get_children(self):
    children = list()
    children.append(self)
    for child in self.children.all():
        children.extend(child.get_children())
    return children

I then get all nested children, but all nested values are on the same level. For example world has children [2,3,4] and those have (grand)children themselves. Then it lists those on the same line, e.g children = [2,3,4,5,6,7,8,9, 10, 11,12,13]. This doesn’t represent the levels in the sample data. 
Then I tried the following solution for the model:
def get_all_children(self, include_self=True):
    r = []
    if include_self:
        r.append(self)
    for c in HierarchyData.objects.filter(parent=self):
        _r = c.get_all_children(include_self=True)
        if 0 < len(_r):
            r.extend(_r)
    return r

That one works; it finds the nested children but it creates two issues:
a. It gives me serializer errors when I use the code as it is, but if I add ‘get_all_children’ in the serializer and add a different serializer for that attribute, then it serializes the objects, which I’m ok with.
b. It is unable to append them in a nested fashion, it just nests a list inside another list without having children. It shows the data like this (limited to Europe, to not have a huge example shown here):
{
   "id":1,
   "region":"world",
   "get_all_children":[
      [
         {
            "id":2,
            "region":"europe"
         }
      ],
      [
         [
            {
               "id":5,
               "region":"belgium"
            }
         ],
         [
            {
               "id":6,
               "region":"germany"
            }
         ],
         [
            {
               "id":7,
               "region":"spain"
            }
         ]
      ]
   ]
}

Now the data is fine except that after Europe it doesn’t start to nest the children inside the same array, it just starts a new array for the children and appends them to the outer list. It basically adds a nested structure, without nesting it inside the parent.
Our question How can we return the output mentioned in ‘our goal’ for this region data, that holds a tree structure that goes an unknown amount of levels deep? Of course, it's of finite depth. The only constraint I have to follow is that I can't edit the views part!
A: You can use the depth attribute on your serializer, i.e.
class Meta:
    model = Model
    fields = ['id', 'region', 'children', 'parent']
    depth = 2

Or use the to_representation method on your serializer:
def to_representation(self, instance):
    self.fields['parent'] = SerializerClass(many=False, read_only=True)
    self.fields['children'] = SerializerClass(many=True, read_only=True)
    return super(SerializerClass, self).to_representation(instance)

So this will allow you to query children with the related_name set on the model, as well as the parent
Get all children of self-referencing Django model in nested hierarchy
Introduction We’re currently working on a Django REST Framework project. It connects to a Postgres database that holds some hierarchical (tree structure) data, that goes a number of levels deep. We should offer an endpoint for GET requests that returns the entire nested tree structure (parent, children, grandchildren etc.) when no parameter is offered.
Sample data The table below shows the sample data of regions, where each region can have a parent, indicating the hierarchy of regions. In this example, the hierarchy is three levels deep (world>continent>country). But in reality, the tree could go much deeper, having an unknown number of levels (world>continent>country>province>city>neighborhood>etc.).
id  region     parent_region_id
1   world      NULL
2   europe     1
3   asia       1
4   africa     1
5   belgium    2
6   germany    2
7   spain      2
8   japan      3
9   indonesia  3
10  vietnam    3
11  tanzania   4
12  egypt      4
13  senegal    4
Our goal The JSON output shown below is what we try to achieve. It’s the goal for the response body of the GET request for the /region resource.
{
   "id":1,
   "region":"world",
   "children":[
      {
         "id":2,
         "region":"europe",
         "children":[
            {
               "id":5,
               "region":"belgium"
            },
            {
               "id":6,
               "region":"germany"
            },
            {
               "id":7,
               "region":"spain"
            }
         ]
      },
      {
         "id":3,
         "region":"asia",
         "children":[
            {
               "id":8,
               "region":"japan"
            },
            {
               "id":9,
               "region":"indonesia"
            },
            {
               "id":10,
               "region":"vietnam"
            }
         ]
      },
      {
         "id":4,
         "region":"africa",
         "children":[
            {
               "id":11,
               "region":"tanzania"
            },
            {
               "id":12,
               "region":"egypt"
            },
            {
               "id":13,
               "region":"senegal"
            }
         ]
      }
   ]
}

What we’ve tried and achieved so far Here’s how we tried to achieve our goal. See code below for models, serializers and views:
Models.py
________
class HierarchyData(models.Model):
    region = models.CharField(max_length=100, null=False, default=None)
    parent = models.ForeignKey("self", models.DO_NOTHING, null=True, blank=True, db_column='parent', related_name="children")

Serializers.py
__________
class HeirarchyDataSerializer(serializers.ModelSerializer):
    class Meta:
        model = HierarchyData
        fields = ["id", "region", "children"]

Views.py
__________
class ListHierarchyData(generics.ListAPIView):
    queryset = HierarchyData.objects.all()
    serializer_class = HeirarchyDataSerializer
    permission_classes = [IsAuthenticated]

When I call the end point for given scenario, I get the JSON response in the following format:
{ "id": 1, "region": "world", "children": [ 2,3,4] }
Related Stack Overflow questions that didn’t seem to answer my issue

How to recursively query in django efficiently?
Django - Models - Recursively retrieve parents of a leaf node
Django self-recursive foreignkey filter query for all childs

Above mentioned question partially solves my problem but I’m still unable to get the desired result. See details below:
1: I can’t touch database directly, I have to interact with database with ORM only.
2: Recursive time out and can’t serialize, saying object of type “Model” is not serializable.
3: This one partially worked for me: Based on this post, I tried to add the following in the model:
def get_children(self):
    children = list()
    children.append(self)
    for child in self.children.all():
        children.extend(child.get_children())
    return children

I then get all nested children, but all nested values are on the same level. For example world has children [2,3,4] and those have (grand)children themselves. Then it lists those on the same line, e.g children = [2,3,4,5,6,7,8,9, 10, 11,12,13]. This doesn’t represent the levels in the sample data. 
Then I tried the following solution for the model:
def get_all_children(self, include_self=True):
    r = []
    if include_self:
        r.append(self)
    for c in HierarchyData.objects.filter(parent=self):
        _r = c.get_all_children(include_self=True)
        if 0 < len(_r):
            r.extend(_r)
    return r

That one works; it finds the nested children but it creates two issues:
a. It gives me serializer errors when I use the code as it is, but if I add ‘get_all_children’ in the serializer and add a different serializer for that attribute, then it serializes the objects, which I’m ok with.
b. It is unable to append them in a nested fashion, it just nests a list inside another list without having children. It shows the data like this (limited to Europe, to not have a huge example shown here):
{
   "id":1,
   "region":"world",
   "get_all_children":[
      [
         {
            "id":2,
            "region":"europe"
         }
      ],
      [
         [
            {
               "id":5,
               "region":"belgium"
            }
         ],
         [
            {
               "id":6,
               "region":"germany"
            }
         ],
         [
            {
               "id":7,
               "region":"spain"
            }
         ]
      ]
   ]
}

Now the data is fine except that after Europe it doesn’t start to nest the children inside the same array, it just starts a new array for the children and appends them to the outer list. It basically adds a nested structure, without nesting it inside the parent.
Our question How can we return the output mentioned in ‘our goal’ for this region data, that holds a tree structure that goes an unknown amount of levels deep? Of course, it's of finite depth. The only constraint I have to follow is that I can't edit the views part!
[ "You can use the depth attribute on your serializer, i.e.\nclass Meta:\n    model = Model\n    fields = ['id', 'region', 'children', 'parent']\n    depth = 2\n\nOr use the to_representation method on your serializer:\ndef to_representation(self, instance):\n    self.fields['parent'] = SerializerClass(many=False, read_only=True)\n    self.fields['children'] = SerializerClass(many=True, read_only=True)\n    return super(SerializerClass, self).to_representation(instance)\n\nSo this will allow you to query children with the related_name set on the model, as well as the parent\n" ]
[ 0 ]
[]
[]
[ "django", "django_models", "django_rest_framework", "django_viewsets", "python" ]
stackoverflow_0074074768_django_django_models_django_rest_framework_django_viewsets_python.txt
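Since depth is a fixed number while the question's tree is arbitrarily deep, one common alternative (a sketch, not the only design) is a recursive SerializerMethodField; it assumes the HierarchyData model and the children related_name from the question. Note it issues one query per node, which may matter for large trees.

from rest_framework import serializers

class RegionSerializer(serializers.ModelSerializer):
    children = serializers.SerializerMethodField()

    class Meta:
        model = HierarchyData  # assumes the model from the question
        fields = ["id", "region", "children"]

    def get_children(self, obj):
        # serialize each child with this same serializer, recursing
        # until a node has no children
        return RegionSerializer(obj.children.all(), many=True).data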
Q: List to dataframe conversion Having data as below:
my_list=[(B_BC,0.3140561085683502, 0.27612272457883213)
(BR_BR,0.1968307181527823, 0.18806346643096217)]

I need to convert this to a dataframe with 3 columns. The first column should contain the location, and the second and third columns should be a and b.
Expected Output
A: I am assuming you are using Python, then this would work:
my_list = [('B_BC',0.3140561085683502, 0.27612272457883213),
('BR_BR',0.1968307181527823, 0.18806346643096217)]
 
import pandas as pd

df = pd.DataFrame(my_list, columns = ['Location','A','B'])

print(df)

Location A B
 B_BC 0.314056 0.276123
 BR_BR 0.196831 0.188063
List to dataframe conversion
Having data as below:
my_list=[(B_BC,0.3140561085683502, 0.27612272457883213)
(BR_BR,0.1968307181527823, 0.18806346643096217)]

I need to convert this to a dataframe with 3 columns. The first column should contain the location, and the second and third columns should be a and b.
Expected Output
[ "I am assuming you are using Python, then this would work:\nmy_list = [('B_BC',0.3140561085683502, 0.27612272457883213),\n('BR_BR',0.1968307181527823, 0.18806346643096217)]\n \nimport pandas as pd\n\ndf = pd.DataFrame(my_list, columns = ['Location','A','B'])\n\nprint(df)\n\nLocation A B\n B_BC 0.314056 0.276123\n BR_BR 0.196831 0.188063\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "list", "pandas", "python" ]
stackoverflow_0074620235_dataframe_list_pandas_python.txt
Q: Python variable not updating Basically I'm creating a program to help with my work. It will send emails to people in an Excel list and move down to the next first name and email address in the list until it's done. Here's the code so far
`#AutoMail Version 2
#Goal of new version is to run on any computer. With minimal or no mouse and keyboard input
import pandas as pd
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

#Random Variables
sender_address = str(input("Please enter your email address!: "))
sender_pass = str(input("Please enter your email password (No data is stored anywhere!): "))
count = 0

#This prompts user to input the file path of their CSV file.
file_path = "C:/Users/Spring/Documents/test_book_py.csv" #Change to input later!!!!!!
df = pd.read_csv(file_path, usecols=['First Name', 'Email Address'])
amount = int(input("How many emails would you like to send? "))

#Important Variables
cell_value = 0 #Which cell the info is coming from

#Cell Variables
name_cell = df["First Name"].values[cell_value]
email_cell = df["Email Address"].values[cell_value]

#Gmail info Variables
receiver_address = email_cell
email_subj = "This is a test subject"
email_body = "Hello " + name_cell + ",\n\nThis is a test body"
message = MIMEMultipart()

#Create SMTP session for sending the mail
session = smtplib.SMTP('smtp.gmail.com', 587) #use gmail with port
session.starttls() #enable security
session.login(sender_address, sender_pass) #login with mail_id and password

#Emailing Process Start
message['From'] = sender_address
message['To'] = receiver_address
message['Subject'] = email_subj
message.attach(MIMEText(email_body, 'plain'))
text = message.as_string()

#Email sending
while count < amount:
    session.sendmail(sender_address, receiver_address, text)
    cell_value = cell_value + 1
    count = count + 1
    print(cell_value)`

I've tried every fix I could find online for variables not updating. When I print the "cell_value" variable it prints with the updated value; however, the other lines in the code, specifically lines 21 and 22, use that variable and they aren't using the updated variable, so it is always at a constant 0 value when it should be cell_value + 1 every time the loop repeats. Is there a different way I should loop the variable updating? I need it to change that value by +1 every time so that it continues to move down the list.
Keep in mind that I am a huge beginner so my code probably looks very confusing.
A: The issue is updating cell_value doesn't automatically update all the data that was calculated with cell_value's old value. Once "Hello " + name_cell + ",\n\nThis is a test body" evaluates, for example, the resulting string has no relation to name_cell, and won't change when name_cell changes. If you want that string to change when name_cell changes, you need to rerun the code that created that string.
For your case here, it looks like you could just loop over the latter half of the code. 
The closest to what you already have would be:
# i instead of cell_value for clarity
for i in range(amount):
    name_cell = df["First Name"].values[i]
    email_cell = df["Email Address"].values[i]
    
    receiver_address = email_cell
    email_subj = "This is a test subject"
    email_body = "Hello " + name_cell + ",\n\nThis is a test body"
    message = MIMEMultipart()
    
    session = smtplib.SMTP('smtp.gmail.com', 587) #use gmail with port
    session.starttls() #enable security
    session.login(sender_address, sender_pass) #login with mail_id and password
    
    message['From'] = sender_address
    message['To'] = receiver_address
    message['Subject'] = email_subj
    message.attach(MIMEText(email_body, 'plain'))
    text = message.as_string()
    
    session.sendmail(sender_address, receiver_address, text)

Arguably, it may be considered more idiomatic to zip the two .values objects that you're looping over, then islice amount-many elements from that, but I think this is cleaner.
Python variable not updating
Basically I'm creating a program to help with my work. It will send emails to people in an Excel list and move down to the next first name and email address in the list until it's done. Here's the code so far
`#AutoMail Version 2
#Goal of new version is to run on any computer. With minimal or no mouse and keyboard input
import pandas as pd
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

#Random Variables
sender_address = str(input("Please enter your email address!: "))
sender_pass = str(input("Please enter your email password (No data is stored anywhere!): "))
count = 0

#This prompts user to input the file path of their CSV file.
file_path = "C:/Users/Spring/Documents/test_book_py.csv" #Change to input later!!!!!!
df = pd.read_csv(file_path, usecols=['First Name', 'Email Address'])
amount = int(input("How many emails would you like to send? "))

#Important Variables
cell_value = 0 #Which cell the info is coming from

#Cell Variables
name_cell = df["First Name"].values[cell_value]
email_cell = df["Email Address"].values[cell_value]

#Gmail info Variables
receiver_address = email_cell
email_subj = "This is a test subject"
email_body = "Hello " + name_cell + ",\n\nThis is a test body"
message = MIMEMultipart()

#Create SMTP session for sending the mail
session = smtplib.SMTP('smtp.gmail.com', 587) #use gmail with port
session.starttls() #enable security
session.login(sender_address, sender_pass) #login with mail_id and password

#Emailing Process Start
message['From'] = sender_address
message['To'] = receiver_address
message['Subject'] = email_subj
message.attach(MIMEText(email_body, 'plain'))
text = message.as_string()

#Email sending
while count < amount:
    session.sendmail(sender_address, receiver_address, text)
    cell_value = cell_value + 1
    count = count + 1
    print(cell_value)`

I've tried every fix I could find online for variables not updating. When I print the "cell_value" variable it prints with the updated value; however, the other lines in the code, specifically lines 21 and 22, use that variable and they aren't using the updated variable, so it is always at a constant 0 value when it should be cell_value + 1 every time the loop repeats. Is there a different way I should loop the variable updating? I need it to change that value by +1 every time so that it continues to move down the list.
Keep in mind that I am a huge beginner so my code probably looks very confusing.
[ "The issue is updating cell_value doesn't automatically update all the data that was calculated with cell_value's old value. Once \"Hello \" + name_cell + \",\\n\\nThis is a test body\" evaluates, for example, the resulting string has no relation to name_cell, and won't change when name_cell changes. If you want that string to change when name_cell changes, you need to rerun the code that created that string.\nFor your case here, it looks like you could just loop over the latter half of the code. The closest to what you already have would be:\n# i instead of cell_value for clarity\nfor i in range(amount):\n    name_cell = df[\"First Name\"].values[i]\n    email_cell = df[\"Email Address\"].values[i]\n    \n    receiver_address = email_cell\n    email_subj = \"This is a test subject\"\n    email_body = \"Hello \" + name_cell + \",\\n\\nThis is a test body\"\n    message = MIMEMultipart()\n    \n    session = smtplib.SMTP('smtp.gmail.com', 587) #use gmail with port\n    session.starttls() #enable security\n    session.login(sender_address, sender_pass) #login with mail_id and password\n    \n    message['From'] = sender_address\n    message['To'] = receiver_address\n    message['Subject'] = email_subj\n    message.attach(MIMEText(email_body, 'plain'))\n    text = message.as_string()\n    \n    session.sendmail(sender_address, receiver_address, text)\n\nArguably, it may be considered more idiomatic to zip the two .values objects that you're looping over, then islice amount-many elements from that, but I think this is cleaner.\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x", "smtplib", "variables", "while_loop" ]
stackoverflow_0074621873_python_python_3.x_smtplib_variables_while_loop.txt
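For completeness, the zip/islice variant the answer alludes to might look like this (df and amount as defined in the question; the message-building and sending steps stay the same as in the loop above):

from itertools import islice

pairs = zip(df["First Name"].values, df["Email Address"].values)
for name_cell, email_cell in islice(pairs, amount):
    email_body = "Hello " + name_cell + ",\n\nThis is a test body"
    # ... build and send the message exactly as in the answer's loop ...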
Q: How to communicate between 2 html pages in the same widget I am using jupyter notebook to develop a sort of proof of concept for a project of mine, right now I have the 2 pages loaded in the same iframe in one jupyter notebook cell. Right now I don't know what approach to take to solve the communication between these 2 pages in the same widget.
My 2 pages:
<!DOCTYPE html>
<!-- PAGE 1 -->
<html>
   <body>
      <button type="button" class="snd_Button">Click</button>
   </body>
</html>

---------------

<!DOCTYPE html>
<!-- PAGE 2 -->
<html>
   <body>
      <h1 class ="Listener">I must react.</h1>
   </body>
</html>

As you can see they are quite simple, what I wanted to know is what would be a good approach to communicate between them. I want to make it so that when I click on the button of page 1, the text of page 2 dynamically changes.
I am planning on using javascript and searched solutions around it but I am not sure of how to continue, as I am new in JS programming and not familiar with its libraries (I've found some frameworks like node.js or electron but I am not sure they should be applied here).
A: HTML doesn't support modifying the contents of another html page in this way. You'll likely need some server-side code to facilitate this.
In your case, I'd recommend using Websockets. There are plenty of youtube tutorials for similar functionality - search "Websockets Chat App" which should give you a good understanding of how to fire an event on one page, and then listen for that event on the second page.

Edit: You mentioned you have two pages rendering inside the same iframe - that doesn't sound right as iframes can only point to a single source.
In the case that you want to interact between an iframe and its HTML parent, maybe using cross-document messaging like this will work
How to communicate between 2 html pages in the same widget
I am using jupyter notebook to develop a sort of proof of concept for a project of mine, right now I have the 2 pages loaded in the same iframe in one jupyter notebook cell. Right now I don't know what approach to take to solve the communication between these 2 pages in the same widget.
My 2 pages:
<!DOCTYPE html>
<!-- PAGE 1 -->
<html>
   <body>
      <button type="button" class="snd_Button">Click</button>
   </body>
</html>

---------------

<!DOCTYPE html>
<!-- PAGE 2 -->
<html>
   <body>
      <h1 class ="Listener">I must react.</h1>
   </body>
</html>

As you can see they are quite simple, what I wanted to know is what would be a good approach to communicate between them. I want to make it so that when I click on the button of page 1, the text of page 2 dynamically changes.
I am planning on using javascript and searched solutions around it but I am not sure of how to continue, as I am new in JS programming and not familiar with its libraries (I've found some frameworks like node.js or electron but I am not sure they should be applied here).
[ "HTML doesn't support modifying the contents of another html page in this way. You'll likely need some server-side code to facilitate this.\nIn your case, I'd recommend using Websockets. There are plenty of youtube tutorials for similar functionality - search \"Websockets Chat App\" which should give you a good understanding of how to fire an event on one page, and then listen for that event on the second page.\n\nEdit: You mentioned you have two pages rendering inside the same iframe - that doesn't sound right as iframes can only point to a single source.\nIn the case that you want to interact between an iframe and its HTML parent, maybe using cross-document messaging like this will work\n" ]
[ 0 ]
[]
[]
[ "html", "javascript", "jupyter_notebook", "node.js", "python" ]
stackoverflow_0074621832_html_javascript_jupyter_notebook_node.js_python.txt
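For the WebSocket route the answer recommends, the server side could be a small broadcast relay; a hedged Python sketch using the third-party websockets package (assumed installed, recent versions of which pass the handler a single connection argument). Each page would connect to ws://localhost:8765, and a message sent from one page is forwarded to the others.

import asyncio
import websockets

CLIENTS = set()

async def relay(ws):
    CLIENTS.add(ws)
    try:
        async for message in ws:
            # forward to every other connected page
            for other in CLIENTS - {ws}:
                await other.send(message)
    finally:
        CLIENTS.remove(ws)

async def main():
    async with websockets.serve(relay, "localhost", 8765):
        await asyncio.Future()  # run forever

asyncio.run(main())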
Q: Trouble Understanding Pytest I'm working through the auditor version of CS50P and am a bit confused on how pytest works, specifically on the test_twttr exercise. The main program is to remove any vowels from a string and the below code is intended to test it
I believe my code is set up properly for pytest to test my functions; however, when I run it I always get back a "no test ran" response. Shouldn't this be printing out each function with a "no error" type of message if it is functioning properly? I also tried the course checker and got an error on the first check. Am I missing something with how this feature works?
from twttr import shorten

def main():
    upperV()
    lowerV()
    number()
    symbol()

def upperV():
    assert shorten("sAo") == "so"
    assert shorten("sEo") == "so"
    assert shorten("sIo") == "so"
    assert shorten("sOo") == "so"
    assert shorten("sUo") == "so"

def lowerV():
    assert shorten("sao") == "so"
    assert shorten("seo") == "so"
    assert shorten("sio") == "so"
    assert shorten("soo") == "so"
    assert shorten("suo") == "so"

def number():
    assert shorten("s1o") == "s1o"
    assert shorten("s0o") == "s0o"
    assert shorten("s2o") == "s2o"
    assert shorten("s4o") == "s4o"
    assert shorten("s5o") == "s5o"

def space():
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"

def symbol():
    assert shorten("s@o") == "s@o"
    assert shorten("s#o") == "s#o"

if __name__ == "__main__":
    main()

response message from pytest
A: Pytest doesn't run your test code like a script. It loads the content as a module and then looks for functions/methods that start with test_ to create a list of items it needs to test. And because it's not run as a script, you don't need to create any main method yourself, that's part of what's offered by the pytest framework itself.
eg.
from twttr import shorten


def test_upperV():
    assert shorten("sAo") == "so"
    assert shorten("sEo") == "so"
    assert shorten("sIo") == "so"
    assert shorten("sOo") == "so"
    assert shorten("sUo") == "so"

P.S. pytest also has a number of handy tools like parametrize which can further simplify your test setups!
import pytest
from twttr import shorten

@pytest.mark.parametrize("given, expected", [
    # upperV
    ("sAo", "so"), ("sEo", "so"), ("sIo", "so"), ("sOo", "so"), ("sUo", "so"),
    # lowerV
    ("sao", "so"), ("seo", "so"), ("sio", "so"), ("soo", "so"), ("suo", "so"),
    # number
    ("s1o", "s1o"), ("s0o", "s0o"), ("s2o", "s2o"), ("s4o", "s4o"), ("s5o", "s5o"),
    # space
    ("s o", "s o"), ("s o", "s o"), ("s o", "s o"), ("s o", "s o"), ("s o", "s o"),
    # symbol
    ("s@o", "s@o"), ("s#o", "s#o"),
])
def test_shorten(given, expected):
    assert shorten(given) == expected
Trouble Understanding Pytest
I'm working through the auditor version of CS50P and am a bit confused on how pytest works, specifically on the test_twttr exercise. The main program is to remove any vowels from a string and the below code is intended to test it
I believe my code is set up properly for pytest to test my functions; however, when I run it I always get back a "no test ran" response. Shouldn't this be printing out each function with a "no error" type of message if it is functioning properly? I also tried the course checker and got an error on the first check. Am I missing something with how this feature works?
from twttr import shorten

def main():
    upperV()
    lowerV()
    number()
    symbol()

def upperV():
    assert shorten("sAo") == "so"
    assert shorten("sEo") == "so"
    assert shorten("sIo") == "so"
    assert shorten("sOo") == "so"
    assert shorten("sUo") == "so"

def lowerV():
    assert shorten("sao") == "so"
    assert shorten("seo") == "so"
    assert shorten("sio") == "so"
    assert shorten("soo") == "so"
    assert shorten("suo") == "so"

def number():
    assert shorten("s1o") == "s1o"
    assert shorten("s0o") == "s0o"
    assert shorten("s2o") == "s2o"
    assert shorten("s4o") == "s4o"
    assert shorten("s5o") == "s5o"

def space():
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"
    assert shorten("s o") == "s o"

def symbol():
    assert shorten("s@o") == "s@o"
    assert shorten("s#o") == "s#o"

if __name__ == "__main__":
    main()

response message from pytest
[ "Pytest doesn't run your test code like a script. It loads the content as a module and then looks for functions/methods that start with test_ to create a list of items it needs to test. And because it's not run as a script, you don't need to create any main method yourself, that's part of what's offered by the pytest framework itself.\neg.\nfrom twttr import shorten\n\n\ndef test_upperV():\n    assert shorten(\"sAo\") == \"so\"\n    assert shorten(\"sEo\") == \"so\"\n    assert shorten(\"sIo\") == \"so\"\n    assert shorten(\"sOo\") == \"so\"\n    assert shorten(\"sUo\") == \"so\"\n\nP.S. pytest also has a number of handy tools like parametrize which can further simplify your test setups!\nimport pytest\nfrom twttr import shorten\n\n@pytest.mark.parametrize(\"given, expected\", [\n    # upperV\n    (\"sAo\", \"so\"), (\"sEo\", \"so\"), (\"sIo\", \"so\"), (\"sOo\", \"so\"), (\"sUo\", \"so\"),\n    # lowerV\n    (\"sao\", \"so\"), (\"seo\", \"so\"), (\"sio\", \"so\"), (\"soo\", \"so\"), (\"suo\", \"so\"),\n    # number\n    (\"s1o\", \"s1o\"), (\"s0o\", \"s0o\"), (\"s2o\", \"s2o\"), (\"s4o\", \"s4o\"), (\"s5o\", \"s5o\"),\n    # space\n    (\"s o\", \"s o\"), (\"s o\", \"s o\"), (\"s o\", \"s o\"), (\"s o\", \"s o\"), (\"s o\", \"s o\"),\n    # symbol\n    (\"s@o\", \"s@o\"), (\"s#o\", \"s#o\"),\n])\ndef test_shorten(given, expected):\n    assert shorten(given) == expected\n\n" ]
[ 1 ]
[]
[]
[ "cs50", "pytest", "python", "testing" ]
stackoverflow_0074621975_cs50_pytest_python_testing.txt
Q: Visual Studio Code: Syntax Error: invalid syntax Unfamiliar error message in VS Code when using Python. SyntaxError: invalid syntax and <stdin> ????? Yesterday, I was doing normal Python work and some assignments. Everything was normal. On VS Code on Mac, with the official Python plugin, all running the latest version. But today, when I run this very simple code
while True:
    plyName = input("Enter player Name (10 characters)\t: ")
    if len(plyName) > 10:
        print("Invalid Player name.")
        continue
    else:
        break

this error shows up
>>> /usr/local/bin/python3 "/Users/tharindumunasinghe/Documents/VSC/Python/DnD text game/GameplayOff.py"
  File "<stdin>", line 1
    /usr/local/bin/python3 "/Users/tharindumunasinghe/Documents/VSC/Python/DnD text game/GameplayOff.py"
    ^
SyntaxError: invalid syntax
>>>

I'm not used to macOS as I recently started using it. Same for VS Code. I'd like to receive advice for this sudden issue that is occurring now.
A: You should distinguish two types of terminals: the VS Code integrated terminal and the Python interactive terminal.
If you create a new terminal directly in VS Code, the integrated terminal will be opened, which is the same as an external shell window.
If you execute the python command in the terminal, the Python interactive terminal will open, and you can clearly see the >>> symbol at the beginning of the line, where you can directly enter Python code and run it.
Now what happens to you is that you open the Python interactive terminal and then use the play button to run the code. Actually that button runs the script file using a shell command. So what happens is that you execute a shell command in a Python interactive terminal, and then of course a syntax error is raised because it's not Python code.
You can type exit() (or press Ctrl+D on macOS) to leave the Python interactive terminal, or delete this terminal directly. Then execute the file using the play button.
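An illustrative transcript of the fix (comments only, not runnable code; the script path stands in for the one above):
# The ">>>" prompt means the Python REPL is active, and shell commands are a
# SyntaxError there. Leave the REPL first, then run the script from the shell:
#
#   >>> exit()        # leave the REPL; Ctrl+D also works on macOS
#   $ /usr/local/bin/python3 "/path/to/GameplayOff.py"   # now a shell command
#
# The play button does the second step for you -- it just needs a shell
# prompt, not a ">>>" prompt, in the active terminal.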
Visual Studio Code: Syntax Error: invalid syntax
Unfamiliar error message in VScode when using Python. SyntaxError: invalid syntax and <stdin> ????? Yesterday, I was doing a normal python work and some assignments. Everything was normal. On VS code on Mac, with python official plugin. all running latest version. But today, when I run this very simple code while True: plyName = input("Enter player Name (10 characters)\t: ") if len(plyName) > 10: print("Invalid Player name.") continue else: break But this error show up >>> /usr/local/bin/python3 "/Users/tharindumunasinghe/Documents/VSC/Python/DnD text game/GameplayOff.py" File "<stdin>", line 1 /usr/local/bin/python3 "/Users/tharindumunasinghe/Documents/VSC/Python/DnD text game/GameplayOff.py" ^ SyntaxError: invalid syntax >>> I'M not used to Mac OS as I recently start using it. Same for VS code. I like to receive advice for this sudden issue that occuring now.
[ "You should distinguish two types of terminals: vscode integrated terminal and python interactive terminal.\nIf you create a new terminal directly in vscode, the vscode integrated terminal will be opened, which is the same as the external powershell window.\n\nIf you execute the python command in the terminal, the python interactive terminal will open, and you can clearly see the >>> symbol at the beginning of the line, where you can directly enter the python code and run it.\n\nNow what happens to you is that you open the python interactive terminal and then use the play button to run the code. Actually that button is to run the script file using the poweshell command. So what happens is that you execute a shell command in a python interactive terminal, then of course a syntax error is raised because it's not python code.\n\nYou can use CTRL+Z to exit the python interactive terminal, or delete this terminal directly. Then execute the file using the play button.\n\n" ]
[ 0 ]
[]
[]
[ "python", "syntax_error", "visual_studio_code" ]
stackoverflow_0074611311_python_syntax_error_visual_studio_code.txt
Q: Optimizing apply and lambda function with pandas I am trying to optimize a function returning the value (wage) of a variable given a condition (largest enrollment within MSA) for every year. I thought combining apply and lambda would be efficient, but my actual dataset is large (shape of 321681x272), making the computation extremely slow. Is there a faster way of going about this? I think vectorizing the operations instead of iterating through df could be a solution, but I am unsure of the structure it would follow as an alternative to df.apply and lambda
df = pd.DataFrame({'year': [2000, 2000, 2001, 2001],
                   'msa': ['NYC-Newark', 'NYC-Newark', 'NYC-Newark', 'NYC-Newark'],
                   'leaname':['NYC School District', 'Newark School District', 'NYC School District', 'Newark School District'],
                   'enroll': [100000,50000,110000,60000],
                   'wage': [5,2,7,3] 
                  })

def function1(x,y, var):
    '''
    Returns the selected variable's value for school district with largest enrollment in a given year
    '''
    t = df[(df['msa'] == x) & (df['year'] == y)]
    e = pd.DataFrame(t.groupby(['msa',var]).mean()['enroll'])
    return e.loc[e.groupby(level=[0])['enroll'].idxmax()].reset_index()[var]

df['main_city_wage'] = df.apply(lambda x: function1(x['msa'], x['year'], 'wage'), axis = 1)

Sample Output
    year    msa         leaname                 enroll  wage    main_wage
0   2000    NYC-Newark  NYC School District     100000  5       5
1   2000    NYC-Newark  Newark School District  50000   2       5
2   2001    NYC-Newark  NYC School District     110000  7       7
3   2001    NYC-Newark  Newark School District  60000   3       7

A: Something like
df['main_wage'] = df.set_index('wage').groupby(['year', 'msa'])['enroll'].transform('idxmax').values
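A runnable check of the answer's one-liner on a trimmed version of the toy frame (column subset assumed; the output column name is taken from the question). The trick: with wage as the index, idxmax over enroll returns, per group, the wage label of the row with the largest enrollment, in one vectorized pass instead of one filter-and-groupby per row.
import pandas as pd

df = pd.DataFrame({
    "year": [2000, 2000, 2001, 2001],
    "msa": ["NYC-Newark"] * 4,
    "enroll": [100000, 50000, 110000, 60000],
    "wage": [5, 2, 7, 3],
})

# transform('idxmax') broadcasts each group's winning index label (a wage
# value here) back to every row of that group.
df["main_city_wage"] = (
    df.set_index("wage").groupby(["year", "msa"])["enroll"].transform("idxmax").values
)
print(df)  # main_city_wage: 5, 5, 7, 7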
Optimizing apply and lambda function with pandas
I am trying to optimize a function returning the value (wage)of a variable given a condition (largest enrollment within MSA) for every year. I thought combining apply and lambda would be efficient, but my actual dataset is large (shape of 321681x272) making the computation extremely slow. Is there a faster way of going about this ? I think vectorizing the operations instead of iterating through df could be a solution, but I am unsure of the structure it would follow as an alternative to df.apply and lambda df = pd.DataFrame({'year': [2000, 2000, 2001, 2001], 'msa': ['NYC-Newark', 'NYC-Newark', 'NYC-Newark', 'NYC-Newark'], 'leaname':['NYC School District', 'Newark School District', 'NYC School District', 'Newark School District'], 'enroll': [100000,50000,110000,60000], 'wage': [5,2,7,3] }) def function1(x,y, var): ''' Returns the selected variable's value for school district with largest enrollment in a given year ''' t = df[(df['msa'] == x) & (df['year'] == y)] e = pd.DataFrame(t.groupby(['msa',var]).mean()['enroll']) return e.loc[e.groupby(level=[0])['enroll'].idxmax()].reset_index()[var] df['main_city_wage'] = df.apply(lambda x: function1(x['msa'], x['year'], 'wage'), axis = 1) Sample Output year msa leaname enroll wage main_wage 0 2000 NYC-Newark NYC School District 100000 5 5 1 2000 NYC-Newark Newark School District 50000 2 5 2 2001 NYC-Newark NYC School District 110000 7 7 3 2001 NYC-Newark Newark School District 60000 3 7
[ "Something like\ndf['main_wage'] = df.set_index('wage').groupby(['year', 'msa'])['enroll'].transform('idxmax').values\n\n" ]
[ 1 ]
[]
[]
[ "apply", "lambda", "pandas", "python" ]
stackoverflow_0074621775_apply_lambda_pandas_python.txt
Q: How to display ID for each form in Django Formset Need to make the UI more ordered; can I have indexing for the forms in the formset, or access the form ID?
<div class="card">
    <div class="card-body">
        <div id="form-container">
            {% csrf_token %}
            {{ formset1.management_form }}
            {% for form in formset1 %}
            <div class="test-form">
                {% crispy form %}
            </div>
            {% endfor %}
            <button id="add-form" type="button" class="btn btn-primary">Add Another Request </button>
            <button type="submit" class="btn btn-primary">Submit</button>
        </div>
    </div>
</div>

A: Yeah, you can use forloop.counter within your for loop like this, and assign the value to the name or id attribute
<div class="card">
    <div class="card-body">
        <div id="form-container">
            {% csrf_token %}
            {{ formset1.management_form }}
            {% for form in formset1 %}
            <div class="test-form" id="form-{{ forloop.counter }}">
                {% crispy form %}
            </div>
            {% endfor %}

            <button id="add-form" type="button" class="btn btn-primary"> Add Another Request </button>
            <button type="submit" class="btn btn-primary">Submit</button>
        </div>
    </div>
</div>
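A standalone sketch (not tied to the project above) showing the same idea with Django's template engine used directly: forloop.counter is a 1-based index available inside every {% for %} block, which is what gives each rendered form a stable id.
from django.template import Engine, Context

tpl = Engine().from_string(
    '{% for form in formset %}'
    '<div class="test-form" id="form-{{ forloop.counter }}">{{ form }}</div>\n'
    '{% endfor %}'
)
print(tpl.render(Context({"formset": ["first", "second"]})))
# <div class="test-form" id="form-1">first</div>
# <div class="test-form" id="form-2">second</div>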
How to display ID for each form in Django Formset
Need to make the UI more ordered, can i have indexing for the forms in formset or access the form ID? <div class="card"> <div class="card-body"> <div id="form-container"> {% csrf_token %} {{ formset1.management_form }} {% for form in formset1 %} <div class="test-form"> {% crispy form %} </div> {% endfor %} <button id="add-form" type="button" class="btn btn-primary">Add Another Request </button> <button type="submit" class="btn btn-primary">Submit</button> </div> </div> </div>
[ "Yeah, you can use forloop.counter within your forloop like this, can assign the value to name or id\n<div class=\"card\">\n <div class=\"card-body\">\n <div id=\"form-container\">\n {% csrf_token %}\n {{ formset1.management_form }}\n {% for form in formset1 %}\n {% crispy form %}\n </div>\n\n{% endfor %}\n\n <button id=\"add-form\" type=\"button\" class=\"btn btn-primary\"> Add Another Request </button>\n <button type=\"submit\" class=\"btn btn-primary\">Submit</button>\n </div>\n </div>\n</div>\n\n" ]
[ 0 ]
[]
[]
[ "django", "django_forms", "django_views", "python" ]
stackoverflow_0072695194_django_django_forms_django_views_python.txt
Q: Use Regex to exclude numbers based on certain conditions I am trying to match and extract numbers if: They are not a single 2 They are not a single 4 They are not a 4-digit number *Note: Placement of numbers in the string is completely random - the numbers can occur at the beginning, middle, or end and can be any length other than 4. Here is a table with examples of strings and desired matches. Text Desired Match(es) HELLO123 123 B4UGO 1984 ANIMAL FARM 45 45 GOT 2 GO SOME OTHER 1000 22 AND 44 AND 1234567 22, 44, 1234567 TEST567TRUE 567 I found an SO article that begins to address the single 2 and single 4 issue here. The regex I have thus far is '\b(?!2\b|4\b|\d{4})\d+\b', but that requires the numbers to be standalone (surrounded by spaces) and also will not extract numbers that have 4 digits, but exceed it (e.g. 1234567). I'd appreciate some help if anyone has some ideas. A: You could use negative lookarounds (?<!\d) and (?!\d) as boundaries: (?<!\d)(?!([24]|\d{4})(?!\d))\d+ See this demo at regex101 Inside the first negative lookahead disallowed numbers get alternated in a group.
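A runnable check of the answer's pattern against the question's table. The inner group is made non-capturing here so re.findall returns whole matches rather than the (empty, never-matched) group:
import re

pattern = re.compile(r"(?<!\d)(?!(?:[24]|\d{4})(?!\d))\d+")

tests = ["HELLO123", "B4UGO", "1984", "ANIMAL FARM 45", "GOT 2 GO",
         "SOME OTHER 1000", "22 AND 44 AND 1234567", "TEST567TRUE"]
for t in tests:
    print(t, "->", pattern.findall(t))
# HELLO123 -> ['123'], B4UGO -> [], 1984 -> [], ANIMAL FARM 45 -> ['45'],
# GOT 2 GO -> [], SOME OTHER 1000 -> [], 22 AND 44 AND 1234567 -> ['22', '44', '1234567'],
# TEST567TRUE -> ['567']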
Use Regex to exclude numbers based on certain conditions
I am trying to match and extract numbers if: They are not a single 2 They are not a single 4 They are not a 4-digit number *Note: Placement of numbers in the string is completely random - the numbers can occur at the beginning, middle, or end and can be any length other than 4. Here is a table with examples of strings and desired matches. Text Desired Match(es) HELLO123 123 B4UGO 1984 ANIMAL FARM 45 45 GOT 2 GO SOME OTHER 1000 22 AND 44 AND 1234567 22, 44, 1234567 TEST567TRUE 567 I found an SO article that begins to address the single 2 and single 4 issue here. The regex I have thus far is '\b(?!2\b|4\b|\d{4})\d+\b', but that requires the numbers to be standalone (surrounded by spaces) and also will not extract numbers that have 4 digits, but exceed it (e.g. 1234567). I'd appreciate some help if anyone has some ideas.
[ "You could use negative lookarounds (?<!\\d) and (?!\\d) as boundaries:\n(?<!\\d)(?!([24]|\\d{4})(?!\\d))\\d+\n\nSee this demo at regex101\nInside the first negative lookahead disallowed numbers get alternated in a group.\n" ]
[ 2 ]
[]
[]
[ "python", "regex" ]
stackoverflow_0074621957_python_regex.txt
Q: How do I replace column values of a dataframe with values of another dataframe based on a common column? I have two dataframes, one that looks like this: hec_df: accident year factor age 2007 1.5 13 2008 1.6 11 2009 1.7 15 and hec_ldfs: accident year factor 2007 1.6 2008 1.64 2009 1.7 My goal is to replace the factor value of df1 with the factor value of df2. My code for this is hec_df['factor'] = hec_df['factor'].map(hec_ldfs.set_index('accident year')['factor']) But it returns NaN on the factor column. Does anyone know why this is happening? EDIT: I'm not sure why my first dataframe is formatted like that, does anyone know how to fix it? A: you're mapping factor to the accident_year, instead of hec_df.accident_year to the hec_df.accident year hec_df['factor'] = hec_df['accident year'].map(hec_ldfs.set_index('accident year')['factor']).fillna(hec_df['factor']) hec_df accident year factor age 0 2007 1.60 13 1 2008 1.64 11 2 2009 1.70 15
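A runnable version of the answer on the question's two frames, for completeness. The key point is mapping the shared key column ('accident year'), not the column being replaced; fillna keeps the old factor for any year missing from hec_ldfs:
import pandas as pd

hec_df = pd.DataFrame({"accident year": [2007, 2008, 2009],
                       "factor": [1.5, 1.6, 1.7],
                       "age": [13, 11, 15]})
hec_ldfs = pd.DataFrame({"accident year": [2007, 2008, 2009],
                         "factor": [1.6, 1.64, 1.7]})

hec_df["factor"] = (hec_df["accident year"]
                    .map(hec_ldfs.set_index("accident year")["factor"])
                    .fillna(hec_df["factor"]))
print(hec_df)  # factor becomes 1.60, 1.64, 1.70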
How do I replace column values of a dataframe with values of another dataframe based on a common column?
I have two dataframes, one that looks like this: hec_df: accident year factor age 2007 1.5 13 2008 1.6 11 2009 1.7 15 and hec_ldfs: accident year factor 2007 1.6 2008 1.64 2009 1.7 My goal is to replace the factor value of df1 with the factor value of df2. My code for this is hec_df['factor'] = hec_df['factor'].map(hec_ldfs.set_index('accident year')['factor']) But it returns NaN on the factor column. Does anyone know why this is happening? EDIT: I'm not sure why my first dataframe is formatted like that, does anyone know how to fix it?
[ "you're mapping factor to the accident_year, instead of hec_df.accident_year to the hec_df.accident year\nhec_df['factor'] = hec_df['accident year'].map(hec_ldfs.set_index('accident year')['factor']).fillna(hec_df['factor'])\nhec_df\n\naccident year factor age\n0 2007 1.60 13\n1 2008 1.64 11\n2 2009 1.70 15\n\n" ]
[ 3 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074622022_pandas_python.txt
Q: How to normalize email addresses without regex The problem is that no matter what I do, I cannot seem to make a function that normalizes (not validates) without the help of regex. For example, instead of my code printing invalid or valid email address, I want it to: filter out + signs, . signs, etc. that are BEFORE the @gmail.com part; make it so that the program doesn't discriminate between capitalized and uncapitalized letters (return the same thing). Here are the current email addresses I'm trying to filter: johnsmith+panerabread@gmail.com # should return johnsmith@gmail.com jOhN.sMiTh@gmail.com # should return johnsmith@gmail.com Here's what I worked on so far: def normalizeEmail(emailIn): if emailIn != regex: ch_1 = '+' ch_2 = '.' new_emailIn = (emailIn.lower().split(ch_1, 1)[0]).replace() + '@gmail.com' if emailIn.endswith('@gmail.com'): return new_emailIn if new_emailIn != new_emailIn.endswith('gmailcom'): return (new_emailIn.lower().split(ch_2, 1)[0]) if __name__ == "__main__": print(normalizeEmail('johnsmith+panerabread@gmail.com')) print(normalizeEmail('jOhN.sMitH@gmail.com')) The . needs to be replaced, and anything after the + should be removed. Earlier, I tried it with regex, but it never seemed to properly normalize the emails and would return my customized exception error: Invalid Email Address With regex, I tried: import re def normalizeEmail(emailIn) regex = '/^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/' if emailIn == regex: return 'emailIn' if emailIn != regex return 'Invalid Email Address' Output: Invalid Email Address I've been cracking at this program for awhile, and it's really frustrating me that I cannot crack it. Mind you, the function cannot be too specific because it needs to normalize a dictionary of 35 other email addresses. Then, I'll sum down the list to the normalized emails and create a list of them. Please help. This is my first 3 months of programming and I'm already dying. I tried regex and expected the expected email format to be returned, but it wasn't, so I am seeking a solution without regex. A: This seems like a trivial task for the str class methods: addresses = [ 'johnsmith+panerabread@gmail.com', 'jOhN.sMiTh@gmail.com' ] for address in addresses: name, domain = map(str.lower, address.split('@')) if domain == 'gmail.com': name = name.replace('.', '') if '+' in name: name, tag = name.split('+', 1) print(f'{name}@{domain}') The code above will result in the following output: johnsmith@gmail.com johnsmith@gmail.com Or, if you want it as a function: def normalize_address(address): name, domain = map(str.lower, address.split('@')) if domain == 'gmail.com': name = name.replace('.', '') if '+' in name: name, tag = name.split('+', 1) return f'{name}@{domain}' addresses = [ 'johnsmith+panerabread@gmail.com', 'jOhN.sMiTh@gmail.com' ] for address in addresses: print(normalize_address(address))
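A quick check of edge cases, assuming the normalize_address function from the answer is in scope. Dot-stripping is a Gmail convention and is applied only for that domain, while the plus-tag split in this version applies to any domain:
# Mixed case and a plus tag on a Gmail address:
print(normalize_address("First.Last+news@GMAIL.com"))  # firstlast@gmail.com

# Non-Gmail addresses keep their dots (only the case is normalized):
print(normalize_address("First.Last@example.com"))     # first.last@example.com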
How to normalize email addresses without regex
The problem is that no matter what I do, I cannot seem to make a function that normalizes (not validates) without the help of regex. For example, instead of my code printing invalid or valid email address, I want it to: filter out + signs, . signs, etc. that are BEFORE the @gmail.com part; make it so that the program doesn't discriminate between capitalized and uncapitalized letters (return the same thing). Here are the current email addresses I'm trying to filter: johnsmith+panerabread@gmail.com # should return johnsmith@gmail.com jOhN.sMiTh@gmail.com # should return johnsmith@gmail.com Here's what I worked on so far: def normalizeEmail(emailIn): if emailIn != regex: ch_1 = '+' ch_2 = '.' new_emailIn = (emailIn.lower().split(ch_1, 1)[0]).replace() + '@gmail.com' if emailIn.endswith('@gmail.com'): return new_emailIn if new_emailIn != new_emailIn.endswith('gmailcom'): return (new_emailIn.lower().split(ch_2, 1)[0]) if __name__ == "__main__": print(normalizeEmail('johnsmith+panerabread@gmail.com')) print(normalizeEmail('jOhN.sMitH@gmail.com')) The . needs to be replaced, and anything after the + should be removed. Earlier, I tried it with regex, but it never seemed to properly normalize the emails and would return my customized exception error: Invalid Email Address With regex, I tried: import re def normalizeEmail(emailIn) regex = '/^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$/' if emailIn == regex: return 'emailIn' if emailIn != regex return 'Invalid Email Address' Output: Invalid Email Address I've been cracking at this program for awhile, and it's really frustrating me that I cannot crack it. Mind you, the function cannot be too specific because it needs to normalize a dictionary of 35 other email addresses. Then, I'll sum down the list to the normalized emails and create a list of them. Please help. This is my first 3 months of programming and I'm already dying. I tried regex and expected the expected email format to be returned, but it wasn't, so I am seeking a solution without regex.
[ "This seems like a trivial task for the str class methods:\naddresses = [\n 'johnsmith+panerabread@gmail.com',\n 'jOhN.sMiTh@gmail.com'\n]\n\nfor address in addresses:\n name, domain = map(str.lower, address.split('@'))\n if domain == 'gmail.com':\n name = name.replace('.', '')\n if '+' in name:\n name, tag = name.split('+', 1)\n print(f'{name}@{domain}')\n\nThe code above will result in the following output:\njohnsmith@gmail.com\njohnsmith@gmail.com\n\nOr, if you want it as a function:\ndef normalize_address(address):\n name, domain = map(str.lower, address.split('@'))\n if domain == 'gmail.com':\n name = name.replace('.', '')\n if '+' in name:\n name, tag = name.split('+', 1)\n return f'{name}@{domain}'\n\n\naddresses = [\n 'johnsmith+panerabread@gmail.com',\n 'jOhN.sMiTh@gmail.com'\n]\n\nfor address in addresses:\n print(normalize_address(address))\n\n" ]
[ 2 ]
[]
[]
[ "python", "regex" ]
stackoverflow_0074622111_python_regex.txt
Q: ImportError: The 'enchant' C library was not found. Please install it via your OS package manager, or use a pre-built binary wheel from PyPI The question is why I see the error message in the title when trying to import enchant. I am using Win64.
A: On Ubuntu, run sudo apt-get install libenchant1c2a
A: I found the answer in this GitHub page.
In a nutshell, they have not shipped a wheel for the win_amd64 platform yet.
A: Resolved: On Win7-64 I ran
pip3 install pyenchant==1.6.6
which seems to be the latest version of PyEnchant that still shipped with Win-64 binaries. Newer versions did not install for me, but this one did.
A: For an Amazon Linux instance, use
yum install enchant 

and then 
pip install pyenchant

A: For me, the problem I ran into was that I had an old version of pip. I installed the latest version and was able to download the pyenchant library.
pip install -U pip
A: On Windows x64 I've solved this problem as follows:

Click link https://pypi.org/project/pyenchant/#files and download pyenchant-2.0.0.win32.exe
Launch it, and while installing it using the installation wizard you must specify your Python interpreter location, which in my case is:

C:\Users\Asus\AppData\Local\Programs\Python\Python36\python.exe

Important: If you use a 32-bit Python interpreter you must specify this location:

C:\Users\Asus\AppData\Local\Programs\Python\Python36-32\python.exe

if you use the Python 3.5 interpreter version your location may be like this:

C:\Users\Asus\AppData\Local\Programs\Python\Python35\python.exe

and finish the installation. Pip commands will work now for other dependency packages.
A: On MacOS, you can install it via brew:
brew install enchant
pip install pyenchant

A: I have fixed the bugs on Colab.
!apt update
!apt install enchant --fix-missing
After fixing the missing packages, you can run enchant.
A: To run with docker on AWS:

With apt-get:

sudo apt-get update
sudo apt-get -y install enchant-2


With apt:

sudo apt update
sudo apt -y install enchant-2

Thank you
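Once the C library and pyenchant are both installed, a quick smoke test confirms the import works (dictionary availability depends on the platform's language packages, so en_US is an assumption here):
import enchant

d = enchant.Dict("en_US")
print(d.check("hello"))       # True if the dictionary loaded
print(d.suggest("helo")[:3])  # a few spelling suggestions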
ImportError: The 'enchant' C library was not found. Please install it via your OS package manager, or use a pre-built binary wheel from PyPI
The question is why I see the error message in the title when trying to import enchant. I am using Win64.
[ "On Ubuntu, run sudo apt-get install libenchant1c2a\n", "I found the answer in this GitHub page.\nIn a nutshell, they have not shipped a wheel for the win_amd64 platform yet.\n", "Resolved: On Win7-64 I ran\npip3 install pyenchant==1.6.6\nwhich seems to be the latest version of PyEnchant that still shipped with Win-64 binaries. Newer versions did not install for me, but this one did.\n", "For amazon ubuntu instance use.\nyum install enchant \n\nand then \npip install pyenchant\n\n", "For me, the problem I ran into was that I had an old version of pip. I installed the latest version and was able to download the pyenchant library.\npip install -U pip\n", "On Windows x64 I've solved this problem as follows:\n\nClick link https://pypi.org/project/pyenchant/#files and download pyenchant-2.0.0.win32.exe\nLaunch it and while installing it using the installation wizard you must specify your python interpreter location which in my case is:\n\n\nC:\\Users\\Asus\\AppData\\Local\\Programs\\Python\\Python36\\python.exe\n\nImportant:İf you use python32 interpreter you must specify this location:\n\nC:\\Users\\Asus\\AppData\\Local\\Programs\\Python\\Python36-32\\python.exe\n\nif you use python 3.5 interpreter version your location may be like this:\n\nC:\\Users\\Asus\\AppData\\Local\\Programs\\Python\\Python35\\python.exe\n\nand finish the installation. Pip commands will work now for other dependency packages.\n", "On MacOS, you can install it via brew:\nbrew install enchant\npip install pyenchant\n\n", "I have fix the bugs on the colab.\n!apt update\n!apt install enchant --fix-missing\nAfter fixing the missing files, you could run the enchant.\n", "To run with docker on AWS:\n\nWith apt-get:\n\nsudo apt-get update\nsudo apt-get -y install enchant-2\n\n\nWith apt:\n\nsudo apt update\nsudo apt -y install enchant-2\n\nThank you\n" ]
[ 24, 13, 13, 7, 3, 2, 1, 0, 0 ]
[]
[]
[ "enchant", "file_not_found", "import", "python", "win64" ]
stackoverflow_0029381919_enchant_file_not_found_import_python_win64.txt
Q: How to handle typing of member variable that is initialized during __post_init__() of dataclass The variable below is initialized as none, but during __post_init__ it is replaced with an instance of outlook client. @dataclass class Config: """Outlook configuration""" mailbox: str inbox: str mailbox_obj: Union["Mailbox", None] = None However, static type analysis correctly informs that mailbox_obj has no members (...is not a known member of "None"). I don't want to guard everything with if mailbox_obj just to satisfy the type analysis. Is there another way using a dataclass field or something? The problem would go away if I just used a regular class since I can initialize the problem variable in init where the type will be inferred to it's set value, but then I have to write that extra boilerplate. Writing this question has reminded me of the below, which is probably what I'm looking for: mailbox_obj: "Mailbox" = field(init=False) Is that the right way? A: Yes, you want to specify that it is not an init field, so you just want something like this: import dataclasses class Mailbox: pass @dataclasses.dataclass class Config: """Outlook configuration""" mailbox: str inbox: str mailbox_obj: "Mailbox" = dataclasses.field(init=False) def __post_init__(self): # do some stuff... self.mailbox_obj = Mailbox() I saved the above code in a file called test_typing.py and here is mypy: (py310) Juans-MBP:test juan$ mypy test_typing.py Success: no issues found in 1 source file
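A short usage check, assuming the Config and Mailbox definitions from the answer above:
# field(init=False) removes mailbox_obj from the generated __init__ entirely,
# so it is neither required nor accepted there; __post_init__ fills it in.
cfg = Config(mailbox="team", inbox="INBOX")
print(type(cfg.mailbox_obj))   # <class '__main__.Mailbox'>

# Passing it explicitly would fail:
# Config(mailbox="team", inbox="INBOX", mailbox_obj=Mailbox())  # TypeError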
How to handle typing of member variable that is initialized during __post_init__() of dataclass
The variable below is initialized as none, but during __post_init__ it is replaced with an instance of outlook client. @dataclass class Config: """Outlook configuration""" mailbox: str inbox: str mailbox_obj: Union["Mailbox", None] = None However, static type analysis correctly informs that mailbox_obj has no members (...is not a known member of "None"). I don't want to guard everything with if mailbox_obj just to satisfy the type analysis. Is there another way using a dataclass field or something? The problem would go away if I just used a regular class since I can initialize the problem variable in init where the type will be inferred to it's set value, but then I have to write that extra boilerplate. Writing this question has reminded me of the below, which is probably what I'm looking for: mailbox_obj: "Mailbox" = field(init=False) Is that the right way?
[ "Yes, you want to specify that it is not an init field, so you just want something like this:\nimport dataclasses\n\nclass Mailbox:\n pass\n\n@dataclasses.dataclass\nclass Config:\n \"\"\"Outlook configuration\"\"\"\n\n mailbox: str\n inbox: str\n mailbox_obj: \"Mailbox\" = dataclasses.field(init=False)\n\n def __post_init__(self):\n # do some stuff...\n self.mailbox_obj = Mailbox()\n\nI saved the above code in a file called test_typing.py and here is mypy:\n(py310) Juans-MBP:test juan$ mypy test_typing.py\nSuccess: no issues found in 1 source file\n\n" ]
[ 1 ]
[]
[]
[ "python", "python_3.x", "python_dataclasses", "python_typing" ]
stackoverflow_0074621969_python_python_3.x_python_dataclasses_python_typing.txt
Q: Kivy Access ids from .kv file to .py file I'm new to Kivy and I want to make an Android app. I've almost finished the GUI, the front-end part, but I have a very big problem. I've searched all over the internet but without an answer. I don't know how to access ids from .kv to use them in .py functions. I've tried everything I've found on the internet, but it didn't work. I want to access ids from the .kv file to work with them. For example, I have a profile screen where the user writes his first name and last name, and on the next page I want to show his first and last name by using a function. Here's the .kv profile page:
<Profile>
    FloatLayout:
        canvas.before:
            Color:
                rgba:(1,1,1,1)
            Rectangle:
                source:"CreateProfileImg.png"
                size: root.width, root.height
                pos: self.pos

        Label:
            pos_hint: {"top": 1, "left": 1}
            size_hint: 1, .1
            text:"Create your profile"
            font_size: 65
            font_name:"FreeSansBoldOblique-BYJ3.otf"
            color: rgba(247,251,246,255)
            id: profile_label

        Label:
            text: "First Name: "
            font_size: 45
            color: rgba(247,251,246,255)
            size_hint: 0.1, 0.1
            pos_hint: {"x":0.20, "top":0.8}

        TextInput:
            id: name
            multiline: False
            size_hint: 0.5, 0.1
            pos_hint: {"x": 0.35, "top": 0.8}

        Label:
            text: "Last Name: "
            font_size: 45
            color: rgba(247,251,246,255)
            size_hint: 0.1, 0.1
            pos_hint: {"x":0.16, "top":0.7}

        TextInput:
            id: prenume
            multiline: False
            size_hint: 0.5, 0.1
            pos_hint: {"x": 0.35, "top": 0.7}

        Label:
            text: "Currency: "
            font_size: 45
            color: rgba(247,251,246,255)
            size_hint: 0.1, 0.1
            pos_hint: {"x":0.18, "top":0.6}

        Spinner:
            id: moneda
            text:"Select currency"
            color: 0, 0, 0 ,1
            background_normal:"MoneyButton.png"
            size_hint: 0.5, 0.1
            pos_hint: {"x":0.35, "top":0.6}
            values: ['Ron', 'Euro', 'Dolar','Lira Sterlina']
            sync_height: True
            #on_text: root.currency_clicked(moneda.text)

        GridLayout:
            rows:1
            pos_hint:{"top": .2, "left": 1}
            size_hint: 1, .2

            ImageButton:
                source:"Next_Button_On_Press.png"
                on_press:
                    self.source = "Next_Button_On_Release.png"
                    app.printname()
                on_release:
                    self.source = "Next_Button_On_Press.png"
                    app.change_screen("page1")

I have more .kv files with lots of ids, but I think if I learn how to work with these two, the next ones will be easier. I want to specify that I have a 'main.kv' which contains: 1. name 2. ids from all over my .kv files. I use those ids to navigate between pages. Here's the code:
#:include homescreen.kv
#:include page1.kv
#:include profile.kv

GridLayout:
    cols: 1
    ScreenManager:
        id: screen_manager
        HomeScreen:
            name: "home_screen"
            id: home_screen
        Profile:
            name: "profile"
            id: profile
        Page1:
            name: "page1"
            id: page1

Let me explain one last time what I want to do; maybe it will be useful for you to understand and try to help me. As you see, in 'profile' I have 3 ids. When the user enters TextInput, those ids store the information. I want to use that information on the next page, where I want to say [[[" Hello " + ids]]]. So, please help me!! Help me understand!
A: I also had trouble with this when I was first learning Kivy a couple of years back, but I finally figured out the way. There is a bit of boilerplate required to maintain the connection.
In this example my_label is a kivy id and I am connecting it to a Python object of the same name. This is done with the line: my_label: my_label
<Screen2>:
    # Python: KIVY id(s)
    my_label: my_label
    BoxLayout:
        Label:
            text: "Screen2"
        MDLabel:
            id: my_label
            text: "-"

and in the Python code matching that object there is a line in the class definition my_label: MDLabel
which is providing a type hint. If you are using an IDE such as PyCharm this can help you to have auto-complete in your code according to the object type.
class Screen2(Screen):
    my_label: MDLabel

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.my_label.text = "test"

and the line self.my_label.text = "test" is of course to show how you can use the object.
You can also do more complicated things such as put multiple items from a kv layout into a list or even a dictionary.
kivy code:
    # a list of spinners kv_spinner_list is a ListProperty
    kv_spinner_list: [kv_spinner_0, kv_spinner_1, kv_spinner_2,]

and this can be a more organized way to bring multiple items into the Python code.
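A complementary sketch for the original question's goal, using the ids dictionary that kv builds automatically: every widget given an id inside a rule's kv lands in self.ids on that rule's root widget, so no extra property declarations are needed. The ids name, prenume, and moneda are the ones from the profile rule above:
from kivy.uix.screenmanager import Screen

class Profile(Screen):
    def greeting(self):
        # Read the user's inputs straight off the kv ids.
        first = self.ids.name.text
        last = self.ids.prenume.text
        currency = self.ids.moneda.text
        return f"Hello {first} {last} ({currency})"

The returned string can then be assigned to a Label on the next screen via the ScreenManager (e.g. through the screen_manager id in main.kv).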
Kivy Access ids from .kv file to .py file
. I'm new into kivy and I want to make an android app. I almost finish GUI, the front-end part, but I have a very big problem. I've searched all over the internet but without answer. I don't know how to access ids from .kv to use them into .py functions. I've tried all of what I've found on the internet, but didn't work. I want to access ids from .kv file to work with them. For exemple, I have a profile screen, where user write his first name, and last name, and in the next page I want to show his first and last name by using a function. Here's the .kv profile page: <Profile> FloatLayout: canvas.before: Color: rgba:(1,1,1,1) Rectangle: source:"CreateProfileImg.png" size: root.width, root.height pos: self.pos Label: pos_hint: {"top": 1, "left": 1} size_hint: 1, .1 text:"Create your profile" font_size: 65 font_name:"FreeSansBoldOblique-BYJ3.otf" color: rgba(247,251,246,255) id: profile_label Label: text: "First Name: " font_size: 45 color: rgba(247,251,246,255) size_hint: 0.1, 0.1 pos_hint: {"x":0.20, "top":0.8} TextInput: id: name multiline: False size_hint: 0.5, 0.1 pos_hint: {"x": 0.35, "top": 0.8} Label: text: "Last Name: " font_size: 45 color: rgba(247,251,246,255) size_hint: 0.1, 0.1 pos_hint: {"x":0.16, "top":0.7} TextInput: id: prenume multiline: False size_hint: 0.5, 0.1 pos_hint: {"x": 0.35, "top": 0.7} Label: text: "Currency: " font_size: 45 color: rgba(247,251,246,255) size_hint: 0.1, 0.1 pos_hint: {"x":0.18, "top":0.6} Spinner: id: moneda text:"Select currency" color: 0, 0, 0 ,1 background_normal:"MoneyButton.png" size_hint: 0.5, 0.1 pos_hint: {"x":0.35, "top":0.6} values: ['Ron', 'Euro', 'Dolar','Lira Sterlina'] sync_height: True #on_text: root.currency_clicked(moneda.text) GridLayout: rows:1 pos_hint:{"top": .2, "left": 1} size_hint: 1, .2 ImageButton: source:"Next_Button_On_Press.png" on_press: self.source = "Next_Button_On_Release.png" app.printname() on_release: self.source = "Next_Button_On_Press.png" app.change_screen("page1") I have more .kv files with lots of ids, but I think if I learn how to work with thise two, next will be easier. I want to specify that I have a 'main.kv' which contains: 1. name 2.ids from all over my .kv files. I use those ids to navigate between pages. Here's the code: #:include homescreen.kv #:include page1.kv #:include profile.kv GridLayout: cols: 1 ScreenManager: id: screen_manager HomeScreen: name: "home_screen" id: home_screen Profile: name: "profile" id: profile Page1: name: "page1" id: page1 Let me explain for the last time what I want to do, maybe will be usefull for you to understand and trying to help me. As you see, into 'profile' I have 3 ids. When get TextInput from user, those ids store the information. I want to use those information into next page, where I want to say [[[" Hello " + ids]]]. So, please help me!! Make me to understand!
[ "I also had trouble with this as well when I was first leaning Kivy a couple years back, but I finally figured out the way. there is a bit of boilier-plate required to maintain the connection.\nin this example my_label is a kivy id and I am connecting it to a Python object of the same name. this is done with the line: my_label: my_label\n<Screen2>:\n # Python: KIVY id(s)\n my_label: my_label\n BoxLayout:\n Label:\n text: \"Screen2\"\n MDLabel:\n id: my_label\n text: \"-\"\n\nand in the Python code matching that object there is a line in the class definitionl my_label: MDLabel\nwhich is providing a type hint. If you are using an IDE such as PyCharm this can help you to have auto-complete in your code according to the object type.\nclass Screen2(Screen):\n my_label: MDLabel\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.my_label.text = \"test\"\n\nand the line self.my_label.text = \"test\" is of course to show how you can use the object.\nyou can also do more complicated things such as put multiple items from a kv layout into a list or even a dictionary.\nkivy code:\n # a list of spinners kv_spinner_list is a ListProperty\n kv_spinner_list: [kv_spinner_0, kv_spinner_1, kv_spinner_2,]\n\nand this can be a more organized way to bring multiple items into the Python code.\n" ]
[ 0 ]
[]
[]
[ "kivy", "kivymd", "python" ]
stackoverflow_0074620837_kivy_kivymd_python.txt
Q: Can't scrape table BeautifulSoup I'm trying to scrape the following table from this URL: https://baseballsavant.mlb.com/leaderboard/outs_above_average?type=Fielder&startYear=2022&endYear=2022&split=no&team=&range=year&min=10&pos=of&roles=&viz=show This is my code: import requests from bs4 import BeautifulSoup url = "https://baseballsavant.mlb.com/leaderboard/outs_above_average?type=Fielder&startYear=2022&endYear=2022&split=no&team=&range=year&min=10&pos=of&roles=&viz=show" r = requests.get(url) soup = BeautifulSoup(r.content, "lxml") table = soup.find("table") for row in table.findAll("tr"): print([i.text for i in row.findAll("td")]) However, my variable table returns None, even though there is clearly a table tag in the HTML code of the website. How do I get it? A: The webpage is loaded dynamically and relies on JavaScript, therefore requests won't support it. You could use another parser library such as selenium. from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support import expected_conditions as EC driver = webdriver.Firefox() url = "https://baseballsavant.mlb.com/leaderboard/outs_above_average?type=Fielder&startYear=2022&endYear=2022&split=no&team=&range=year&min=10&pos=of&roles=&viz=show" driver.get(url) wait = WebDriverWait(driver, 10) wait.until(EC.presence_of_element_located((By.TAG_NAME, 'table'))) table = driver.find_element(By.TAG_NAME, 'table') table_html = table.get_attribute('innerHTML') # print('table html:', table_html) for tr_web_element in table.find_elements(By.TAG_NAME, 'tr'): for td_web_element in tr_web_element.find_elements(By.TAG_NAME, 'td'): print(td_web_element.text) driver.close() Or see this answer to incorporate Selenium with BeautifulSoup.
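As the link at the end of the answer suggests, the two libraries combine naturally: let Selenium render the JavaScript, then hand the finished HTML to BeautifulSoup so the original parsing code still works (driver is the one created in the answer above):
from bs4 import BeautifulSoup

# driver.page_source holds the DOM *after* JavaScript ran, unlike requests.
soup = BeautifulSoup(driver.page_source, "lxml")
table = soup.find("table")
for row in table.find_all("tr"):
    print([cell.get_text(strip=True) for cell in row.find_all("td")])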
Can't scrape table BeautifulSoup
I'm trying to scrape the following table from this URL: https://baseballsavant.mlb.com/leaderboard/outs_above_average?type=Fielder&startYear=2022&endYear=2022&split=no&team=&range=year&min=10&pos=of&roles=&viz=show This is my code: import requests from bs4 import BeautifulSoup url = "https://baseballsavant.mlb.com/leaderboard/outs_above_average?type=Fielder&startYear=2022&endYear=2022&split=no&team=&range=year&min=10&pos=of&roles=&viz=show" r = requests.get(url) soup = BeautifulSoup(r.content, "lxml") table = soup.find("table") for row in table.findAll("tr"): print([i.text for i in row.findAll("td")]) However, my variable table returns None, even though there is clearly a table tag in the HTML code of the website. How do I get it?
[ "The webpage is loaded dynamically and relies on JavaScript, therefore requests won't support it. You could use another parser library such as selenium.\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Firefox()\n\nurl = \"https://baseballsavant.mlb.com/leaderboard/outs_above_average?type=Fielder&startYear=2022&endYear=2022&split=no&team=&range=year&min=10&pos=of&roles=&viz=show\"\n\ndriver.get(url)\n\nwait = WebDriverWait(driver, 10)\nwait.until(EC.presence_of_element_located((By.TAG_NAME, 'table')))\n\ntable = driver.find_element(By.TAG_NAME, 'table')\n\ntable_html = table.get_attribute('innerHTML')\n\n# print('table html:', table_html)\n\nfor tr_web_element in table.find_elements(By.TAG_NAME, 'tr'):\n for td_web_element in tr_web_element.find_elements(By.TAG_NAME, 'td'):\n print(td_web_element.text)\n\ndriver.close()\n\nOr see this answer to incorporate Selenium with BeautifulSoup.\n" ]
[ 2 ]
[]
[]
[ "beautifulsoup", "python", "web_scraping" ]
stackoverflow_0074621973_beautifulsoup_python_web_scraping.txt
Q: python mysql delete statement not working Here I am trying to remove any users which contain a " in their email/username.
def removeQuote(self, tbl,record):
    """ Updates the record """
    statmt="select id from %s WHERE `email` LIKE '%%\"%%'" % (tbl)
    self.cursor.execute(statmt)
    rows=list(self.cursor.fetchall())
    for idx, val in enumerate(rows):
        id= val[0]
        delstatmt = "DELETE FROM `maillist_subscription` WHERE id = '%s'" % id
        print delstatmt
        self.cursor.execute(delstatmt)

The output of this shows as if the action completed successfully, but the record remains in the database. The output also shows a correct MySQL statement:
DELETE FROM `maillist_subscription` WHERE id = '8288754'
Thanks for all your help!
A: You need to commit the change, using the commit() method on the connection object. Most DBAPI interfaces use implicit transactions.
Also, don't use string formatting for SQL query generation! It will open you up to SQL injections:
UNSAFE!!
# What happens if id = "1'; DROP DATABASE somedb" ?
delstatmt = "DELETE FROM `maillist_subscription` WHERE id = '%s'" % (id,)
cursor.execute(delstatmt)
conn.commit()

SAFE!
delstatmt = "DELETE FROM `maillist_subscription` WHERE id = ?"
cursor.execute(delstatmt, (id,))
conn.commit()

A: cursor.execute("DELETE FROM maillist_subscription WHERE id = '"+id+"'")
conn.commit()
A: I am trying to execute the following Redshift SQL in a Python script, but records are not being deleted. There is no error, either.
sql_del = "DELETE FROM table_name where id in (select id from table2)"
cursor.execute(sql_del)
conn.commit()
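One caveat worth adding to the accepted answer: the ? placeholder is sqlite3's paramstyle; MySQL drivers such as MySQLdb and pymysql expect %s as the placeholder (still passed separately, not string-formatted). A hedged sketch of the question's loop rewritten with batching; the cursor/rows names are from the question and self.connection is an assumed attribute holding the connection:
# MySQLdb/pymysql use the %s paramstyle; executemany also avoids one
# round trip per deleted row.
ids = [val[0] for val in rows]   # rows as fetched above
self.cursor.executemany(
    "DELETE FROM `maillist_subscription` WHERE id = %s",
    [(i,) for i in ids],
)
self.connection.commit()         # commit once, after the batch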
python mysql delete statement not working
here I am trying to remove any users which containt a " in their email/username. def removeQuote(self, tbl,record): """ Updates the record """ statmt="select id from %s WHERE `email` LIKE '%%\"%%'" % (tbl) self.cursor.execute(statmt) rows=list(self.cursor.fetchall()) for idx, val in enumerate(rows): id= val[0] delstatmt = "DELETE FROM `maillist_subscription` WHERE id = '%s'" % id print delstatmt self.cursor.execute(delstatmt) The output of this shows as if the action completed successfully, but the record remains in the database. Output also shows a correct mysql statement: DELETE FROM `maillist_subscription` WHERE id = '8288754' Thanks for all your help!
[ "You need to commit the change, using the commit() method on the connection object. Most DBAPI interfaces use implicit transactions.\nAlso, don't use string formatting for SQL query generation! It will open you up to SQL injections:\nUNSAFE!!\n# What happens if id = \"1'; DROP DATABASE somedb\" ?\ndelstatmt = \"DELETE FROM `maillist_subscription` WHERE id = '%s'\" % (id,)\ncursor.execute(delstatmt)\nconn.commit()\n\nSAFE!\ndelstatmt = \"DELETE FROM `maillist_subscription` WHERE id = ?\"\ncursor.execute(delstatmt, (id,))\nconn.commit()\n\n", "cursor.execute(\"DELETE FROM maillist_subscription WHERE id = '\"+id+\"'\")\nconn.commit()\n", "I am trying to execute the following Redshift SQL in a Python script, but records are not being deleted. There is no error, either.\nsql_del = \"DELETE FROM table_name where id in (select id from table2)\"\ncursor.execute(sql_del)\nconn.commit()`\n\n" ]
[ 27, 1, 0 ]
[]
[]
[ "python", "sql", "transactional" ]
stackoverflow_0012082360_python_sql_transactional.txt
Q: connect to a postgres database inside of a docker container from django running on host machine I have a postgres database running inside a container with pgadmin connected to it, the docker-compose.yml is as follows: postgres: image: postgres:13.0-alpine volumes: - postgres:/var/lib/postgresql/data ports: - "5432:5432" env_file: - $ENV_FILE pgadmin: image: dpage/pgadmin4 volumes: - pgadmin:/var/lib/pgadmin ports: - "${PGADMIN_PORT:-5050}:80" restart: unless-stopped depends_on: - postgres env_file: - $ENV_FILE my django database settings are: DATABASES = { "default": { "ENGINE": os.environ.get("POSTGRES_ENGINE", "django.db.backends.postgresql"), "NAME": os.environ.get("POSTGRES_NAME", "postgres"), "USER": os.environ.get("POSTGRES_USER", "admin"), "PASSWORD": os.environ.get("POSTGRES_PASS", "admin"), "HOST": os.environ.get("POSTGRES_HOST", "127.0.0.1"), "PORT": os.environ.get("POSTGRES_PORT", "5432"), } } The traceback is: Traceback (most recent call last): File "C:\Users\liam.obrien\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1009, in _bootstrap_inner self.run() File "C:\Users\liam.obrien\AppData\Local\Programs\Python\Python310\lib\threading.py", line 946, in run self._target(*self._args, **self._kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\autoreload.py", line 64, in wrapper fn(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\channels\management\commands\runserver.py", line 76, in inner_run self.check_migrations() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\core\management\base.py", line 576, in check_migrations executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS]) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\executor.py", line 18, in __init__ self.loader = MigrationLoader(self.connection) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\loader.py", line 58, in __init__ self.build_graph() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\loader.py", line 235, in build_graph self.applied_migrations = recorder.applied_migrations() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\recorder.py", line 81, in applied_migrations if self.has_table(): File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\recorder.py", line 57, in has_table with self.connection.cursor() as cursor: File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 284, in cursor return self._cursor() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 260, in _cursor self.ensure_connection() File 
"C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 243, in ensure_connection with self.wrap_database_errors: File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\utils.py", line 91, in __exit__ raise dj_exc_value.with_traceback(traceback) from exc_value File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 244, in ensure_connection self.connect() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 225, in connect self.connection = self.get_new_connection(conn_params) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\postgresql\base.py", line 203, in get_new_connection connection = Database.connect(**conn_params) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\psycopg2\__init__.py", line 122, in connect conn = _connect(dsn, connection_factory=connection_factory, **kwasync) django.db.utils.OperationalError: connection to server at "127.0.0.1", port 5432 failed: FATAL: password authentication failed for user "admin" Absolutely no idea what is going on as my env file has the exact same password in it as the settings.py in my django project. To be clear, i am running the postgres containers in docker and then trying to run the django test server locally on the host machine, and the error is thrown on startup. A: So for anyone that finds this, i was helped along hugely by this thread; Connecting to Postgresql in a docker container from outside The problem ended up being that for some reason port 5432 was in use, i think by django itself (although if anyone knows the real answer that knowledge would be greatly appreciated), and the solution was to change the port mapping to another port, in my case: postgres: ports: - "6543:5432" you can then test if you can acces your postgres db from a terminal on your host machine through a uri as follows: psql postgresql://<postgres_user_name>:<postgres_pass>@localhost:6543/postgres another problem i ran into is that to get the psql command, you need to add postgres to your system environment variables path as follows: %PROGRAMFILES%/PostgreSQL/<postgres_version_num>/bin/
connect to a postgres database inside of a docker container from django running on host machine
I have a postgres database running inside a container with pgadmin connected to it, the docker-compose.yml is as follows: postgres: image: postgres:13.0-alpine volumes: - postgres:/var/lib/postgresql/data ports: - "5432:5432" env_file: - $ENV_FILE pgadmin: image: dpage/pgadmin4 volumes: - pgadmin:/var/lib/pgadmin ports: - "${PGADMIN_PORT:-5050}:80" restart: unless-stopped depends_on: - postgres env_file: - $ENV_FILE my django database settings are: DATABASES = { "default": { "ENGINE": os.environ.get("POSTGRES_ENGINE", "django.db.backends.postgresql"), "NAME": os.environ.get("POSTGRES_NAME", "postgres"), "USER": os.environ.get("POSTGRES_USER", "admin"), "PASSWORD": os.environ.get("POSTGRES_PASS", "admin"), "HOST": os.environ.get("POSTGRES_HOST", "127.0.0.1"), "PORT": os.environ.get("POSTGRES_PORT", "5432"), } } The traceback is: Traceback (most recent call last): File "C:\Users\liam.obrien\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1009, in _bootstrap_inner self.run() File "C:\Users\liam.obrien\AppData\Local\Programs\Python\Python310\lib\threading.py", line 946, in run self._target(*self._args, **self._kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\autoreload.py", line 64, in wrapper fn(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\channels\management\commands\runserver.py", line 76, in inner_run self.check_migrations() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\core\management\base.py", line 576, in check_migrations executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS]) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\executor.py", line 18, in __init__ self.loader = MigrationLoader(self.connection) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\loader.py", line 58, in __init__ self.build_graph() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\loader.py", line 235, in build_graph self.applied_migrations = recorder.applied_migrations() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\recorder.py", line 81, in applied_migrations if self.has_table(): File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\migrations\recorder.py", line 57, in has_table with self.connection.cursor() as cursor: File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 284, in cursor return self._cursor() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 260, in _cursor self.ensure_connection() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", 
line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 243, in ensure_connection with self.wrap_database_errors: File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\utils.py", line 91, in __exit__ raise dj_exc_value.with_traceback(traceback) from exc_value File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 244, in ensure_connection self.connect() File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\base\base.py", line 225, in connect self.connection = self.get_new_connection(conn_params) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\utils\asyncio.py", line 26, in inner return func(*args, **kwargs) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\django\db\backends\postgresql\base.py", line 203, in get_new_connection connection = Database.connect(**conn_params) File "C:\Users\liam.obrien\AppData\Local\pypoetry\Cache\virtualenvs\tdd-framework-FOYpVOaj-py3.10\lib\site-packages\psycopg2\__init__.py", line 122, in connect conn = _connect(dsn, connection_factory=connection_factory, **kwasync) django.db.utils.OperationalError: connection to server at "127.0.0.1", port 5432 failed: FATAL: password authentication failed for user "admin" Absolutely no idea what is going on as my env file has the exact same password in it as the settings.py in my django project. To be clear, i am running the postgres containers in docker and then trying to run the django test server locally on the host machine, and the error is thrown on startup.
[ "So for anyone that finds this, i was helped along hugely by this thread; Connecting to Postgresql in a docker container from outside\nThe problem ended up being that for some reason port 5432 was in use, i think by django itself (although if anyone knows the real answer that knowledge would be greatly appreciated), and the solution was to change the port mapping to another port, in my case:\npostgres:\n ports:\n - \"6543:5432\"\n\nyou can then test if you can acces your postgres db from a terminal on your host machine through a uri as follows:\npsql postgresql://<postgres_user_name>:<postgres_pass>@localhost:6543/postgres\n\nanother problem i ran into is that to get the psql command, you need to add postgres to your system environment variables path as follows:\n%PROGRAMFILES%/PostgreSQL/<postgres_version_num>/bin/\n" ]
[ 1 ]
[]
[]
[ "django", "docker_compose", "postgresql", "python" ]
stackoverflow_0074621910_django_docker_compose_postgresql_python.txt
Q: Can't adding my camera widget to the Screen to make the camera open in kivymd form .kv I made a WebCamScreen in which I made two functions for my webcam to start/launch and read the live feed using OpenCV. When I run my code, the webcam starts, but it's not showing on the app screen. Below is the code of my main.py file.
class WebCamScreen(Screen):
    def do_start(self):
        self.capture = cv2.VideoCapture(0)
        Clock.schedule_interval(self.load_video, 1.0 / 24.0)

    def load_video(self, *args):
        ret, frame = self.capture.read()
        # self.image_frame = frame
        buffer = cv2.flip(frame, 0).tostring()
        image_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
        image_texture.blit_buffer(buffer, colorfmt="bgr", bufferfmt="ubyte")
        self.texture = image_texture


class MainApp(MDApp):
    def build(self):
        screen_manger = ScreenManager()
        screen_manger.add_widget(LoginScreen(name="login"))
        screen_manger.add_widget(RegistrationScreen(name="registration"))
        screen_manger.add_widget(HomeScreen(name="home"))
        screen_manger.add_widget(WebCamScreen(name="camera"))
        return screen_manger


if __name__ == "__main__":
    MainApp().run()

and my .kv file code is
<WebCamScreen>
    MDBoxLayout:
        MDRaisedButton:
            text: "Start Camera"
            size_hint_x: None
            size_hint_y: None
            md_bg_color: "orange"
            pos_hint: {"center_x": 0.2, "center_y": 0.5}
            on_press:
                root.do_start()

How can I put my webcam live feed on this WebCamScreen?
A: You can just add an Image to your kv:
<WebCamScreen>:
    MDBoxLayout:
        MDRaisedButton:
            text: "Start Camera"
            size_hint_x: None
            size_hint_y: None
            md_bg_color: "orange"
            pos_hint: {"center_x": 0.2, "center_y": 0.5}
            on_press:
                root.do_start()
        Image:
            id: img

Then set the texture of that Image to the camera output:
def load_video(self, *args):
    ret, frame = self.capture.read()
    # self.image_frame = frame
    buffer = cv2.flip(frame, 0).tostring()
    image_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
    image_texture.blit_buffer(buffer, colorfmt="bgr", bufferfmt="ubyte")
    self.ids.img.texture = image_texture
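A small follow-on sketch (a method one might add to WebCamScreen, not part of the answer): stop the scheduled callback and release the camera when leaving the screen, otherwise OpenCV keeps the device open after the feed stops being shown.
def do_stop(self):
    # Undo what do_start() set up: cancel the 24 fps callback, free the camera.
    Clock.unschedule(self.load_video)
    if self.capture is not None:
        self.capture.release()
        self.capture = None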
Can't add my camera widget to the Screen to make the camera open in KivyMD from .kv
I made a WebCamScreen in which I wrote two functions for my webcam: one to start/launch it and one to read the live feed using OpenCV. When I run my code the webcam starts, but it's not showing on the app screen. Below is the code of my main.py file. ` class WebCamScreen(Screen): def do_start(self): self.capture = cv2.VideoCapture(0) Clock.schedule_interval(self.load_video, 1.0 / 24.0) def load_video(self, *args): ret, frame = self.capture.read() # self.image_frame = frame buffer = cv2.flip(frame, 0).tostring() image_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr") image_texture.blit_buffer(buffer, colorfmt="bgr", bufferfmt="ubyte") self.texture = image_texture class MainApp(MDApp): def build(self): screen_manger = ScreenManager() screen_manger.add_widget(LoginScreen(name="login")) screen_manger.add_widget(RegistrationScreen(name="registration")) screen_manger.add_widget(HomeScreen(name="home")) screen_manger.add_widget(WebCamScreen(name="camera")) return screen_manger if __name__ == "__main__": MainApp().run() ` and my .kv file code is <WebCamScreen> MDBoxLayout: MDRaisedButton: text: "Start Camera" size_hint_x: None size_hint_y: None md_bg_color: "orange" pos_hint: {"center_x": 0.2, "center_y": 0.5} on_press: root.do_start() How can I put my webcam live feed on this WebCamScreen?
[ "You can just add an Image to your kv:\n<WebCamScreen>:\n MDBoxLayout:\n MDRaisedButton:\n text: \"Start Camera\"\n size_hint_x: None\n size_hint_y: None\n md_bg_color: \"orange\"\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.5}\n on_press:\n root.do_start()\n Image:\n id: img\n\nThen set the texture of that Image to the camera output:\ndef load_video(self, *args):\n ret, frame = self.capture.read()\n # self.image_frame = frame\n buffer = cv2.flip(frame, 0).tostring()\n image_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt=\"bgr\")\n image_texture.blit_buffer(buffer, colorfmt=\"bgr\", bufferfmt=\"ubyte\")\n self.ids.img.texture = image_texture\n\n" ]
[ 0 ]
[]
[]
[ "cross_platform", "kivy", "kivymd", "python" ]
stackoverflow_0074619958_cross_platform_kivy_kivymd_python.txt
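Putting the pieces from the answer together, here is a minimal self-contained sketch (an illustration, not the asker's full app: it assumes kivymd and opencv-python are installed, loads the kv rule from a string, and keeps only the camera screen; ids and names are illustrative).

import cv2
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen, ScreenManager
from kivymd.app import MDApp

KV = """
<WebCamScreen>:
    MDBoxLayout:
        orientation: "vertical"
        MDRaisedButton:
            text: "Start Camera"
            size_hint: None, None
            md_bg_color: "orange"
            pos_hint: {"center_x": 0.5}
            on_press: root.do_start()
        Image:
            id: img
"""


class WebCamScreen(Screen):
    def do_start(self):
        self.capture = cv2.VideoCapture(0)
        Clock.schedule_interval(self.load_video, 1.0 / 24.0)

    def load_video(self, *args):
        ret, frame = self.capture.read()
        if not ret:
            return
        # tobytes() replaces the deprecated numpy tostring()
        buf = cv2.flip(frame, 0).tobytes()
        tex = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
        tex.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
        # Assign the texture to the Image widget, not to the Screen itself.
        self.ids.img.texture = tex


class MainApp(MDApp):
    def build(self):
        Builder.load_string(KV)
        sm = ScreenManager()
        sm.add_widget(WebCamScreen(name="camera"))
        return sm


if __name__ == "__main__":
    MainApp().run()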
Q: Pandas lagged rolling average on aggregate data with multiple groups and missing dates I'd like to calculate a lagged rolling average on a complicated time-series dataset. Consider the toy example as follows: import numpy as np import pandas as pd np.random.seed(101) fruit = ['apples', 'apples', 'apples', 'oranges', 'apples', 'oranges', 'oranges', 'oranges', 'apples', 'oranges', 'apples', 'apples'] people = ['alice']*6+['bob']*6 date = ['2022-01-01', '2022-01-03', '2022-01-04', '2022-01-04', '2022-01-11', '2022-01-11', '2022-01-04', '2022-01-05', '2022-01-05', '2022-01-20', '2022-01-20', '2022-01-25'] count = np.random.poisson(4,size=12) weight_per = np.round(np.random.uniform(1,3,size=12),2) df = pd.DataFrame({'date':date, 'people':people, 'fruit':fruit, 'count':count, 'weight':weight_per*count}) df['date'] = pd.to_datetime(df.date) This results in the following DataFrame: date people fruit count weight 0 2022-01-01 alice apples 2 2.72 1 2022-01-03 alice apples 6 11.28 2 2022-01-04 alice apples 5 13.80 3 2022-01-04 alice oranges 3 8.70 4 2022-01-11 alice apples 2 3.92 5 2022-01-11 alice oranges 3 5.76 6 2022-01-04 bob oranges 8 18.16 7 2022-01-05 bob oranges 5 8.25 8 2022-01-05 bob apples 5 6.20 9 2022-01-20 bob oranges 4 4.40 10 2022-01-20 bob apples 2 4.56 11 2022-01-25 bob apples 2 5.24 Now I'd like to add a column representing the average weight per fruit for the previous 7 days: wgt_per_frt_prev_7d. It should be defined as the sum of all the fruit weights divided by the sum of all the fruit counts for the past 7 days, not including the current day. While there are many ways to brute force this answer, I'm looking for something with relatively good time complexity. If I were to calculate this column by hand, these would be the calculations and expected results: df['wgt_per_frt_prev_7d'] = np.nan df.loc[1, 'wgt_per_frt_prev_7d'] = 2.72/2 # row 0 df.loc[2, 'wgt_per_frt_prev_7d'] = (2.72+11.28)/(2+6) # row 0 and 1 df.loc[3, 'wgt_per_frt_prev_7d'] = (2.72+11.28)/(2+6) df.loc[4, 'wgt_per_frt_prev_7d'] = (8.70+13.80+6.20+8.25+18.16)/(3+5+5+5+8) # row 2,3,6,7,8 df.loc[5, 'wgt_per_frt_prev_7d'] = (8.70+13.80+6.20+8.25+18.16)/(3+5+5+5+8) df.loc[6, 'wgt_per_frt_prev_7d'] = (2.72+11.28)/(2+6) # row 0,1 df.loc[7, 'wgt_per_frt_prev_7d'] = (8.70+13.80+2.72+11.28+18.16)/(3+5+6+2+8) # row 0,1,2,3,6 df.loc[8, 'wgt_per_frt_prev_7d'] = (8.70+13.80+2.72+11.28+18.16)/(3+5+6+2+8) df.loc[11, 'wgt_per_frt_prev_7d'] = (4.40+4.56)/(2+4) # row 9,10 Final DF: date people fruit count weight wgt_per_frt_prev_7d 0 2022-01-01 alice apples 2 2.72 NaN 1 2022-01-03 alice apples 6 11.28 1.360000 2 2022-01-04 alice apples 5 13.80 1.750000 3 2022-01-04 alice oranges 3 8.70 1.750000 4 2022-01-11 alice apples 2 3.92 2.119615 5 2022-01-11 alice oranges 3 5.76 2.119615 6 2022-01-04 bob oranges 8 18.16 1.750000 7 2022-01-05 bob oranges 5 8.25 2.277500 8 2022-01-05 bob apples 5 6.20 2.277500 9 2022-01-20 bob oranges 4 4.40 NaN 10 2022-01-20 bob apples 2 4.56 NaN 11 2022-01-25 bob apples 2 5.24 1.493333 EDIT The final column I'd like to add is wgt_per_apl_prev_7d, which only considers the apple weights when calculating this field, but still applies to all rows, even rows with just oranges. 
The output of this calculation should be as follows: date people fruit count weight wgt_per_frt_prev_7d wgt_per_apl_prev_7d 0 2022-01-01 alice apples 2 2.72 NaN NaN 1 2022-01-03 alice apples 6 11.28 1.360000 1.360000 2 2022-01-04 alice apples 5 13.80 1.750000 1.750000 3 2022-01-04 alice oranges 3 8.70 1.750000 1.750000 4 2022-01-11 alice apples 2 3.92 2.119615 2.000000 5 2022-01-11 alice oranges 3 5.76 2.119615 2.000000 6 2022-01-04 bob oranges 8 18.16 1.750000 1.750000 7 2022-01-05 bob oranges 5 8.25 2.277500 2.138462 8 2022-01-05 bob apples 5 6.20 2.277500 2.138462 9 2022-01-20 bob oranges 4 4.40 NaN NaN 10 2022-01-20 bob apples 2 4.56 NaN NaN 11 2022-01-25 bob apples 2 5.24 1.493333 2.280000 A: import numpy as np import pandas as pd import datetime np.random.seed(101) fruit = ['apples', 'apples', 'apples', 'oranges', 'apples', 'oranges', 'oranges', 'oranges', 'apples', 'oranges', 'apples', 'apples'] people = ['alice']*6+['bob']*6 date = ['2022-01-01', '2022-01-03', '2022-01-04', '2022-01-04', '2022-01-11', '2022-01-11', '2022-01-04', '2022-01-05', '2022-01-05', '2022-01-20', '2022-01-20', '2022-01-25'] count = np.random.poisson(4,size=12) weight_per = np.round(np.random.uniform(1,3,size=12),2) df = pd.DataFrame({'date':date, 'people':people, 'fruit':fruit, 'count':count, 'weight':weight_per*count}) df['date'] = pd.to_datetime(df.date) df['date_ini'] = df['date'].apply(lambda x: x - datetime.timedelta(days=8)) df['wgt_per_frt_prev_7d'] = df.apply(lambda x: df[(df['date'] > x['date_ini']) & (df['date'] < x['date'])]['weight'].sum()/df[(df['date'] > x['date_ini']) & (df['date'] < x['date'])]['count'].sum() if df[(df['date'] > x['date_ini']) & (df['date'] < x['date'])]['count'].sum()>0 else np.nan, axis=1) df = df.drop('date_ini', axis=1) df A: Try this df2 = df[['date', 'count', 'weight']].groupby('date').sum() df2 = df2.rolling('8D').apply(np.sum, raw=True) - df2 df = df.merge((df2['weight']/df2['count']).rename('avg').to_frame().reset_index(), on='date', how='left') df2 = df[df['fruit'] == 'apples'][['date', 'count', 'weight']].groupby('date').sum() df2 = df2.rolling('8D').apply(np.sum, raw=True) - df2 df = df.merge((df2['weight']/df2['count']).rename('avg_apple').to_frame().reset_index(), on='date', how='left') Output date people fruit count weight avg avg_apple 0 2022-01-01 alice apples 2 2.72 NaN NaN 1 2022-01-03 alice apples 6 11.28 1.360000 1.360000 2 2022-01-04 alice apples 5 13.80 1.750000 1.750000 3 2022-01-04 alice oranges 3 8.70 1.750000 1.750000 4 2022-01-11 alice apples 2 3.92 2.119615 2.000000 5 2022-01-11 alice oranges 3 5.76 2.119615 2.000000 6 2022-01-04 bob oranges 8 18.16 1.750000 1.750000 7 2022-01-05 bob oranges 5 8.25 2.277500 2.138462 8 2022-01-05 bob apples 5 6.20 2.277500 2.138462 9 2022-01-20 bob oranges 4 4.40 NaN NaN 10 2022-01-20 bob apples 2 4.56 NaN NaN 11 2022-01-25 bob apples 2 5.24 1.493333 2.280000
Pandas lagged rolling average on aggregate data with multiple groups and missing dates
I'd like to calculate a lagged rolling average on a complicated time-series dataset. Consider the toy example as follows: import numpy as np import pandas as pd np.random.seed(101) fruit = ['apples', 'apples', 'apples', 'oranges', 'apples', 'oranges', 'oranges', 'oranges', 'apples', 'oranges', 'apples', 'apples'] people = ['alice']*6+['bob']*6 date = ['2022-01-01', '2022-01-03', '2022-01-04', '2022-01-04', '2022-01-11', '2022-01-11', '2022-01-04', '2022-01-05', '2022-01-05', '2022-01-20', '2022-01-20', '2022-01-25'] count = np.random.poisson(4,size=12) weight_per = np.round(np.random.uniform(1,3,size=12),2) df = pd.DataFrame({'date':date, 'people':people, 'fruit':fruit, 'count':count, 'weight':weight_per*count}) df['date'] = pd.to_datetime(df.date) This results in the following DataFrame: date people fruit count weight 0 2022-01-01 alice apples 2 2.72 1 2022-01-03 alice apples 6 11.28 2 2022-01-04 alice apples 5 13.80 3 2022-01-04 alice oranges 3 8.70 4 2022-01-11 alice apples 2 3.92 5 2022-01-11 alice oranges 3 5.76 6 2022-01-04 bob oranges 8 18.16 7 2022-01-05 bob oranges 5 8.25 8 2022-01-05 bob apples 5 6.20 9 2022-01-20 bob oranges 4 4.40 10 2022-01-20 bob apples 2 4.56 11 2022-01-25 bob apples 2 5.24 Now I'd like to add a column representing the average weight per fruit for the previous 7 days: wgt_per_frt_prev_7d. It should be defined as the sum of all the fruit weights divided by the sum of all the fruit counts for the past 7 days, not including the current day. While there are many ways to brute force this answer, I'm looking for something with relatively good time complexity. If I were to calculate this column by hand, these would be the calculations and expected results: df['wgt_per_frt_prev_7d'] = np.nan df.loc[1, 'wgt_per_frt_prev_7d'] = 2.72/2 # row 0 df.loc[2, 'wgt_per_frt_prev_7d'] = (2.72+11.28)/(2+6) # row 0 and 1 df.loc[3, 'wgt_per_frt_prev_7d'] = (2.72+11.28)/(2+6) df.loc[4, 'wgt_per_frt_prev_7d'] = (8.70+13.80+6.20+8.25+18.16)/(3+5+5+5+8) # row 2,3,6,7,8 df.loc[5, 'wgt_per_frt_prev_7d'] = (8.70+13.80+6.20+8.25+18.16)/(3+5+5+5+8) df.loc[6, 'wgt_per_frt_prev_7d'] = (2.72+11.28)/(2+6) # row 0,1 df.loc[7, 'wgt_per_frt_prev_7d'] = (8.70+13.80+2.72+11.28+18.16)/(3+5+6+2+8) # row 0,1,2,3,6 df.loc[8, 'wgt_per_frt_prev_7d'] = (8.70+13.80+2.72+11.28+18.16)/(3+5+6+2+8) df.loc[11, 'wgt_per_frt_prev_7d'] = (4.40+4.56)/(2+4) # row 9,10 Final DF: date people fruit count weight wgt_per_frt_prev_7d 0 2022-01-01 alice apples 2 2.72 NaN 1 2022-01-03 alice apples 6 11.28 1.360000 2 2022-01-04 alice apples 5 13.80 1.750000 3 2022-01-04 alice oranges 3 8.70 1.750000 4 2022-01-11 alice apples 2 3.92 2.119615 5 2022-01-11 alice oranges 3 5.76 2.119615 6 2022-01-04 bob oranges 8 18.16 1.750000 7 2022-01-05 bob oranges 5 8.25 2.277500 8 2022-01-05 bob apples 5 6.20 2.277500 9 2022-01-20 bob oranges 4 4.40 NaN 10 2022-01-20 bob apples 2 4.56 NaN 11 2022-01-25 bob apples 2 5.24 1.493333 EDIT The final column I'd like to add is wgt_per_apl_prev_7d, which only considers the apple weights when calculating this field, but still applies to all rows, even rows with just oranges. 
The output of this calculation should be as follows: date people fruit count weight wgt_per_frt_prev_7d wgt_per_apl_prev_7d 0 2022-01-01 alice apples 2 2.72 NaN NaN 1 2022-01-03 alice apples 6 11.28 1.360000 1.360000 2 2022-01-04 alice apples 5 13.80 1.750000 1.750000 3 2022-01-04 alice oranges 3 8.70 1.750000 1.750000 4 2022-01-11 alice apples 2 3.92 2.119615 2.000000 5 2022-01-11 alice oranges 3 5.76 2.119615 2.000000 6 2022-01-04 bob oranges 8 18.16 1.750000 1.750000 7 2022-01-05 bob oranges 5 8.25 2.277500 2.138462 8 2022-01-05 bob apples 5 6.20 2.277500 2.138462 9 2022-01-20 bob oranges 4 4.40 NaN NaN 10 2022-01-20 bob apples 2 4.56 NaN NaN 11 2022-01-25 bob apples 2 5.24 1.493333 2.280000
[ "import numpy as np\nimport pandas as pd\nimport datetime\n\nnp.random.seed(101)\n\nfruit = ['apples', 'apples', 'apples', 'oranges', 'apples', 'oranges', 'oranges',\n 'oranges', 'apples', 'oranges', 'apples', 'apples']\npeople = ['alice']*6+['bob']*6\ndate = ['2022-01-01', '2022-01-03', '2022-01-04', '2022-01-04', '2022-01-11', '2022-01-11',\n '2022-01-04', '2022-01-05', '2022-01-05', '2022-01-20', '2022-01-20', '2022-01-25']\ncount = np.random.poisson(4,size=12)\nweight_per = np.round(np.random.uniform(1,3,size=12),2)\n\ndf = pd.DataFrame({'date':date, 'people':people, 'fruit':fruit,\n 'count':count, 'weight':weight_per*count})\ndf['date'] = pd.to_datetime(df.date)\ndf['date_ini'] = df['date'].apply(lambda x: x - datetime.timedelta(days=8))\ndf['wgt_per_frt_prev_7d'] = df.apply(lambda x: df[(df['date'] > x['date_ini']) & (df['date'] < x['date'])]['weight'].sum()/df[(df['date'] > x['date_ini']) & (df['date'] < x['date'])]['count'].sum() if df[(df['date'] > x['date_ini']) & (df['date'] < x['date'])]['count'].sum()>0 else np.nan, axis=1)\ndf = df.drop('date_ini', axis=1)\ndf\n\n", "Try this\ndf2 = df[['date', 'count', 'weight']].groupby('date').sum()\ndf2 = df2.rolling('8D').apply(np.sum, raw=True) - df2\ndf = df.merge((df2['weight']/df2['count']).rename('avg').to_frame().reset_index(), on='date', how='left')\n\ndf2 = df[df['fruit'] == 'apples'][['date', 'count', 'weight']].groupby('date').sum()\ndf2 = df2.rolling('8D').apply(np.sum, raw=True) - df2\ndf = df.merge((df2['weight']/df2['count']).rename('avg_apple').to_frame().reset_index(), on='date', how='left')\n\nOutput\n date people fruit count weight avg avg_apple\n0 2022-01-01 alice apples 2 2.72 NaN NaN\n1 2022-01-03 alice apples 6 11.28 1.360000 1.360000\n2 2022-01-04 alice apples 5 13.80 1.750000 1.750000\n3 2022-01-04 alice oranges 3 8.70 1.750000 1.750000\n4 2022-01-11 alice apples 2 3.92 2.119615 2.000000\n5 2022-01-11 alice oranges 3 5.76 2.119615 2.000000\n6 2022-01-04 bob oranges 8 18.16 1.750000 1.750000\n7 2022-01-05 bob oranges 5 8.25 2.277500 2.138462\n8 2022-01-05 bob apples 5 6.20 2.277500 2.138462\n9 2022-01-20 bob oranges 4 4.40 NaN NaN\n10 2022-01-20 bob apples 2 4.56 NaN NaN\n11 2022-01-25 bob apples 2 5.24 1.493333 2.280000\n\n" ]
[ 1, 1 ]
[]
[]
[ "dataframe", "pandas", "python", "rolling_computation", "time_series" ]
stackoverflow_0074620602_dataframe_pandas_python_rolling_computation_time_series.txt
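The rolling-sum trick from the second answer generalizes into a small helper that produces both columns with one function; a sketch rebuilding the question's sample frame (the helper name prev_7d_avg is ours, not from either answer):

import numpy as np
import pandas as pd

np.random.seed(101)
df = pd.DataFrame({
    "date": pd.to_datetime(["2022-01-01", "2022-01-03", "2022-01-04", "2022-01-04",
                            "2022-01-11", "2022-01-11", "2022-01-04", "2022-01-05",
                            "2022-01-05", "2022-01-20", "2022-01-20", "2022-01-25"]),
    "people": ["alice"] * 6 + ["bob"] * 6,
    "fruit": ["apples", "apples", "apples", "oranges", "apples", "oranges",
              "oranges", "oranges", "apples", "oranges", "apples", "apples"],
    "count": np.random.poisson(4, size=12),
})
df["weight"] = np.round(np.random.uniform(1, 3, size=12), 2) * df["count"]

def prev_7d_avg(frame, colname):
    """Average weight per fruit over the 7 days before each date (current day excluded)."""
    daily = frame[["date", "count", "weight"]].groupby("date").sum()
    prev = daily.rolling("8D").sum() - daily   # 8-day window minus "today"
    return (prev["weight"] / prev["count"]).rename(colname).reset_index()

df = df.merge(prev_7d_avg(df, "wgt_per_frt_prev_7d"), on="date", how="left")
df = df.merge(prev_7d_avg(df[df["fruit"] == "apples"], "wgt_per_apl_prev_7d"),
              on="date", how="left")
print(df)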
Q: Pandas: `NaNs` when reading `.txt` file I have a .txt file that I am attempting to read in pandas. When I open the .txt file, I see it has the content and data I expect. However, when I read the file in pandas, the data is missing and I only see NaNs. Here's sample content from the .txt file: 980145115 189699454 SD Vacant Land Agricultural/Horticultural/Forest Vacant Land 3290522 216200 43.585481 -96.626588 10255 46099 I 707951172 189699522 AZ Government, Special Purpose Religious 91630 26730 102-55-008 4013 I I have tried different parameters of encoding and sep in read_csv. import pandas as pd df = pd.read_csv('s3://filepath', encoding='latin-1', sep="\t") Is there anything else I can try to read the data? A: It's probably due to the separator you have chosen in pandas.read_csv. Try to use whitespaces instead with sep="\s\s+" : df = pd.read_csv('s3://filepath', encoding='latin-1', sep="\s\s+", engine="python", header=None) Or with delim_whitespace=True : df = pd.read_csv('s3://filepath', encoding='latin-1', delim_whitespace=True, header=None) A: The short answer I would change two things in your call to read_csv: pass the header=None argument, pass the na_filter=False argument. df = pd.read_csv('s3://filepath', encoding='latin-1', sep="\t", header=None, na_filter=False) Details I copied your two sample data lines into a file called file.tsv on my computer. No headers You say your file does not contain a header, and by default read_csv() tries to interpret the first line as column headers. Just doing that, I get NaNs only for fields that were empty in your sample: >>> pd.read_csv('file.tsv', encoding='latin-1', sep="\t", header=None) 0 1 2 3 4 5 6 7 8 ... 10 11 12 13 14 15 16 17 18 0 980145115 189699454 NaN NaN SD NaN NaN NaN Vacant Land ... NaN NaN 3290522 216200 43.585481 -96.626588 10255 46099 I 1 707951172 189699522 NaN NaN AZ NaN NaN NaN Government, Special Purpose ... NaN NaN 91630 26730 NaN NaN 102-55-008 4013 I No NaN filter The manual for pd.read_csv() says that na_filter=True is the default, and that means logic is applied to detect missing values. If that's not useful for you, and you just want to keep empty fields as empty values in your DF, turn that off: >>> pd.read_csv('file.tsv', encoding='latin-1', sep="\t", header=None, na_filter=False) 0 1 2 3 4 5 6 7 8 ... 10 11 12 13 14 15 16 17 18 0 980145115 189699454 SD Vacant Land ... 3290522 216200 43.585481 -96.626588 10255 46099 I 1 707951172 189699522 AZ Government, Special Purpose ... 91630 26730 102-55-008 4013 I A lot of empty values Now, notice that with no NaN filter, columns 2, 3, 5, 6, 7 are all empty, and several others too further down. If you look at the actual data carefully, you will see that you have several consecutive tab characters, which means actual empty values in your data. That's just fine, presumably those fields were optional in the original database, but they're behind a lot of the NaNs that show up when not using na_filter=False.
Pandas: `NaNs` when reading `.txt` file
I have a .txt file that I am attempting to read in pandas. When I open the .txt file, I see it has the content and data I expect. However, when I read the file in pandas, the data is missing and I only see NaNs. Here's sample content from the .txt file: 980145115 189699454 SD Vacant Land Agricultural/Horticultural/Forest Vacant Land 3290522 216200 43.585481 -96.626588 10255 46099 I 707951172 189699522 AZ Government, Special Purpose Religious 91630 26730 102-55-008 4013 I I have tried different parameters of encoding and sep in read_csv. import pandas as pd df = pd.read_csv('s3://filepath', encoding='latin-1', sep="\t") Is there anything else I can try to read the data?
[ "It's probably due to the separator you have choosen in pandas.read_csv.\nTry to use whitespaces instead with sep=\"\\s\\s+\" :\ndf = pd.read_csv('s3://filepath', encoding='latin-1', sep=\"\\s\\s+\", engine=\"python\", header=None)\n\nOr with delim_whitespace=True :\ndf = pd.read_csv('s3://filepath', encoding='latin-1', delim_whitespace=True, header=None)\n\n", "The short answer\nI would change two things to your call to read_csv:\n\npass the header=None argument,\npass the na_filter=False argument.\n\n\ndf = pd.read_csv('s3://filepath', encoding='latin-1', sep=\"\\t\", header=None, na_filter=False)\n\nDetails\nI copied your two sample data lines in a file called file.tsv on my computer.\nNo headers\nYou say your file does not contain a header, and by default read_csv() tries to interpret the first line as column headers. Just doing that, I get NaNs only for fields that were empty in your sample:\n>>> pd.read_csv('file.tsv', encoding='latin-1', sep=\"\\t\", header=None)\n 0 1 2 3 4 5 6 7 8 ... 10 11 12 13 14 15 16 17 18\n0 980145115 189699454 NaN NaN SD NaN NaN NaN Vacant Land ... NaN NaN 3290522 216200 43.585481 -96.626588 10255 46099 I\n1 707951172 189699522 NaN NaN AZ NaN NaN NaN Government, Special Purpose ... NaN NaN 91630 26730 NaN NaN 102-55-008 4013 I\n\nNo NaN filter\nThe manual for pd.read_csv() says that na_filter=True is the default, and that means logic is applied to detect missing values. If that's not useful for you, and you just want to keep empty fields as empty values in your DF, turn that off:\n>>> pd.read_csv('file.tsv', encoding='latin-1', sep=\"\\t\", header=None, na_filter=False)\n 0 1 2 3 4 5 6 7 8 ... 10 11 12 13 14 15 16 17 18\n0 980145115 189699454 SD Vacant Land ... 3290522 216200 43.585481 -96.626588 10255 46099 I\n1 707951172 189699522 AZ Government, Special Purpose ... 91630 26730 102-55-008 4013 I\n\nA lot of empty values\nNow, notice that with no NaN filter, columns 2, 3, 5, 6, 7 are all empty, and several others too further down. If you look at the actual data carefully, you will see that you have several consecutive tab characters, which means actual empty values in your data. That's just fine, presumably those fields were optional in the original database, but they're behind a lot of the NaNs that show up when not using na_filter=False.\n" ]
[ 1, 1 ]
[]
[]
[ "csv", "pandas", "python" ]
stackoverflow_0074620973_csv_pandas_python.txt
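A self-contained repro of the second answer's fix, using a hypothetical two-row sample with empty tab-separated fields instead of the S3 file:

import io
import pandas as pd

raw = "980145115\t189699454\t\t\tSD\n707951172\t189699522\t\t\tAZ\n"
df = pd.read_csv(io.StringIO(raw), sep="\t", header=None, na_filter=False)
print(df)  # empty fields stay as empty strings instead of becoming NaN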
Q: How can I learn to CREATE a data visualization tool in Python or in any other Language? I want to understand and learn how a data visualization tool works and is made. I tried searching for it on Google but didn't find anything, only matplotlib tutorials. I don't want to learn to use a tool; I want to learn to create one. I'm expecting to get an online documentation/tutorial/course/YT playlist to learn from. A long answer will work too. I will read it. A: I would start with learning to create plots from scratch. Since you tagged python, look up python visual libraries like tk or PIL to get started. Once you make some basic visuals of dots, try to make a graph template, then add some dummy data on top of it.
How can I learn to CREATE a data visualization tool in Python or in any other Language?
I want to understand and learn how a data visualization tool works and is made. I tried searching for it on Google but didn't find anything, only matplotlib tutorials. I don't want to learn to use a tool; I want to learn to create one. I'm expecting to get an online documentation/tutorial/course/YT playlist to learn from. A long answer will work too. I will read it.
[ "I would start with learning to create plots from scratch. Since you tagged python, look up python visual libraries like tk or PIL to get started. Once you make some basic visuals of dots, try to make a graph template, then add some dummy data on top of it.\n" ]
[ 0 ]
[]
[]
[ "c++", "python", "visualization" ]
stackoverflow_0074622230_c++_python_visualization.txt
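In the spirit of the answer, a from-scratch plotting sketch using only the standard library's tkinter: draw the axes yourself, scale the data into pixel coordinates, then place dummy points on top (all names and numbers here are illustrative).

import tkinter as tk

W, H, PAD = 400, 300, 40
data = [(1, 2), (2, 5), (3, 3), (4, 8), (5, 6)]  # dummy (x, y) points

root = tk.Tk()
canvas = tk.Canvas(root, width=W, height=H, bg="white")
canvas.pack()

# Axes
canvas.create_line(PAD, H - PAD, W - PAD, H - PAD)  # x axis
canvas.create_line(PAD, H - PAD, PAD, PAD)          # y axis

# Scale data into pixel coordinates and plot each point as a small circle.
xmax = max(x for x, _ in data)
ymax = max(y for _, y in data)
for x, y in data:
    px = PAD + (x / xmax) * (W - 2 * PAD)
    py = (H - PAD) - (y / ymax) * (H - 2 * PAD)
    canvas.create_oval(px - 3, py - 3, px + 3, py + 3, fill="steelblue")

root.mainloop()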
Q: How do I write a Python code to check if the given sequence is a palindrome or not? I wish to learn more about rev_string. I tried to see if "MOM" is a palindrome, and I wanted the result to be yes/no. A: It's very simple. First, reverse the string that you want to check. Then compare the reversed string with the input one; if they are the same, it's a palindrome, else it's not. string = "MoM" revstring = "".join(reversed(string)) print("Yes" if string == revstring else "No")
How do I write a Python code to check if the given sequence is a palindrome or not?
I wish to learn more about rev_string. I tried to see if "MOM" is a palindrome, and I wanted the result to be yes/no.
[ "It's very simple, First, you need to reverse the String that you want to check whether it's a palindrome or not. Then Compare the reverse String with the input one. If it's the same then it's a palindrome else it's not.\nstring = \"MoM\"\nrevstring = \"\".join(reversed(string))\n\nprint(\"Yes\" if string == revstring else \"No\")\n\n" ]
[ 0 ]
[]
[]
[ "arrays", "palindrome", "python", "reverse", "string" ]
stackoverflow_0074622176_arrays_palindrome_python_reverse_string.txt
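An equally common idiom uses slicing instead of reversed()/join(); lowercasing first makes the check case-insensitive (an extra assumption on our part, since the question mixes "MOM" and "MoM"):

def is_palindrome(s: str) -> str:
    s = s.lower()
    return "Yes" if s == s[::-1] else "No"

print(is_palindrome("MOM"))   # Yes
print(is_palindrome("mice"))  # No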
Q: Number Recognition on 7 segment using python I am writing a code on Jupyter notebook using python to recognize the number on the device with 7segment(FND). I used opencv and got the edge of the image. import cv2 import matplotlib.pyplot as plt def detect_edge(image): ''' function Detecting Edges ''' image_with_edges = cv2.Canny(image , 100, 200) images = [image , image_with_edges] location = [121, 122] for loc, img in zip(location, images): plt.subplot(loc) plt.imshow(img, cmap='gray') plt.savefig('edge.png') plt.show() image = cv2.imread('/Users/USER/Desktop/test/test2.png', 0) detect_edge(image) This is the screenshot of the sample input and output data I got form the code above: I am not sure how to proceed from here. I want to get the recognize the number 51.12 in this case. Should I crop the FND part that the numbers are on first before I run deep learning? And how should I proceed from here? A: I feel like using a CNN is overkill for a problem like this. Especially given that this is a 7-segment display we should be able to solve this without resorting to that kind of complexity. You've marked out the corners so I'll assume that you can reliably crop out and un-rotate (make it flat) the display. We want to grab just the numbers. In this case I first converted to LAB and thresholded on the b-channel. Then I used opencv's findContours to mark out the perimeters: After that I cropped out each individual number: and then I looked for each segment individually and determined the number based on which segments were active (I used a special case for 1 where I checked the ratio of the width and height). Here's the code I used (two files) segments.py import numpy as np class Segments: def __init__(self): # create a 7seg model self.flags = []; self.segments = []; h1 = [[0, 1.0],[0, 0.1]]; # 0 h2 = [[0, 1.0],[0.45, 0.55]]; # 1 h3 = [[0, 1.0],[0.9, 1.0]]; # 2 vl1 = [[0, 0.2],[0, 0.5]]; # 3 # upper-left vl2 = [[0, 0.2],[0.5, 1.0]]; # 4 vr1 = [[0.8, 1.0],[0, 0.5]]; # 5 # upper-right vr2 = [[0.8, 1.0], [0.5, 1.0]]; # 6 self.segments.append(h1); self.segments.append(h2); self.segments.append(h3); self.segments.append(vl1); self.segments.append(vl2); self.segments.append(vr1); self.segments.append(vr2); # process an image and set flags def digest(self, number): # reset flags self.flags = []; # check res to see if it's a one h, w = number.shape[:2]; if w < 0.5 * h: self.flags.append(5); self.flags.append(6); return; # check for segments for a in range(len(self.segments)): seg = self.segments[a]; # get bounds xl, xh = seg[0]; yl, yh = seg[1]; # convert to pix coords xl = int(xl * w); xh = int(xh * w); yl = int(yl * h); yh = int(yh * h); sw = xh - xl; sh = yh - yl; # check count = np.count_nonzero(number[yl:yh, xl:xh] == 255); if count / (sh * sw) > 0.5: # 0.5 is a sensitivity measure self.flags.append(a); # returns the stored number (stored in self.flags) def getNum(self): # hardcoding outputs if self.flags == [0,2,3,4,5,6]: return 0; if self.flags == [5,6]: return 1; if self.flags == [0,1,2,4,5]: return 2; if self.flags == [0,1,2,5,6]: return 3; if self.flags == [1,3,5,6]: return 4; if self.flags == [0,1,2,3,6]: return 5; if self.flags == [0,1,2,3,4,6]: return 6; if self.flags == [0,5,6]: return 7; if self.flags == [0,1,2,3,4,5,6]: return 8; if self.flags == [0,1,2,3,5,6]: return 9; # ERROR return -1; main.py import cv2 import numpy as np from segments import Segments # load image img = cv2.imread("seg7.jpg"); # crop img = img[300:800,100:800,:]; # lab lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB); 
l,a,b = cv2.split(lab); # show cv2.imshow("orig", img); # closing operation kernel = np.ones((5,5), np.uint8); # threshold params low = 165; high = 200; iters = 3; # make copy copy = b.copy(); # threshold thresh = cv2.inRange(copy, low, high); # dilate for a in range(iters): thresh = cv2.dilate(thresh, kernel); # erode for a in range(iters): thresh = cv2.erode(thresh, kernel); # show image cv2.imshow("thresh", thresh); cv2.imwrite("threshold.jpg", thresh); # start processing _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE); # draw for contour in contours: cv2.drawContours(img, [contour], 0, (0,255,0), 3); # get res of each number bounds = []; h, w = img.shape[:2]; for contour in contours: left = w; right = 0; top = h; bottom = 0; for point in contour: point = point[0]; x, y = point; if x < left: left = x; if x > right: right = x; if y < top: top = y; if y > bottom: bottom = y; tl = [left, top]; br = [right, bottom]; bounds.append([tl, br]); # crop out each number cuts = []; number = 0; for bound in bounds: tl, br = bound; cut_img = thresh[tl[1]:br[1], tl[0]:br[0]]; cuts.append(cut_img); number += 1; cv2.imshow(str(number), cut_img); # font font = cv2.FONT_HERSHEY_SIMPLEX; # create a segment model model = Segments(); index = 0; for cut in cuts: # save image cv2.imwrite(str(index) + "_" + str(number) + ".jpg", cut); # process model.digest(cut); number = model.getNum(); print(number); cv2.imshow(str(index), cut); # draw and save again h, w = cut.shape[:2]; drawn = np.zeros((h, w, 3), np.uint8); drawn[:, :, 0] = cut; drawn = cv2.putText(drawn, str(number), (10,30), font, 1, (0,0,255), 2, cv2.LINE_AA); cv2.imwrite("drawn" + str(index) + "_" + str(number) + ".jpg", drawn); index += 1; # cv2.waitKey(0); # show cv2.imshow("contours", img); cv2.imwrite("contours.jpg", img); cv2.waitKey(0); I can't guarantee that this always works, but it should be usable given a little tweaking. Remember to un-rotate the image if it isn't flat. The segment model assumes the numbers are mostly upright. A: If you want to use deep learning, one way to approach this would be to use a convolutional neural network (CNN). Whether you first want to crop the images depends on your application. Do you want to recognize the display from a picture like the one you attached? Then you should not crop the image manually. Furthermore you would need a lot of data to train your own CNN. An alternative would be to use an off-the-shelf Optical Character Recognition engine such as tesseract pytesseract. These are already trained and can achieve good results. I have no experience with detecting 7 segment displays though, so it could be that they do not work for 7 segment displays. They have tried OCR with tesseract for 7 segment displays here: ocr + 7 segment display. Last thing you could try is first detect the display from a large picture and then feed the cropped region that was detected to an OCR engine. A: dot point Issue ~ the dot Point on right bottom of each Numbers seem to impact especially the recognition rate of right botton side variable vr2 #6 checking while checking Numpy.NonZero(in your sample code) when dot point was light on (while image threshed & findcontours)
Number Recognition on 7 segment using python
I am writing a code on Jupyter notebook using python to recognize the number on the device with 7segment(FND). I used opencv and got the edge of the image. import cv2 import matplotlib.pyplot as plt def detect_edge(image): ''' function Detecting Edges ''' image_with_edges = cv2.Canny(image , 100, 200) images = [image , image_with_edges] location = [121, 122] for loc, img in zip(location, images): plt.subplot(loc) plt.imshow(img, cmap='gray') plt.savefig('edge.png') plt.show() image = cv2.imread('/Users/USER/Desktop/test/test2.png', 0) detect_edge(image) This is the screenshot of the sample input and output data I got form the code above: I am not sure how to proceed from here. I want to get the recognize the number 51.12 in this case. Should I crop the FND part that the numbers are on first before I run deep learning? And how should I proceed from here?
[ "I feel like using a CNN is overkill for a problem like this. Especially given that this is a 7-segment display we should be able to solve this without resorting to that kind of complexity.\nYou've marked out the corners so I'll assume that you can reliably crop out and un-rotate (make it flat) the display.\nWe want to grab just the numbers. In this case I first converted to LAB and thresholded on the b-channel.\n\nThen I used opencv's findContours to mark out the perimeters:\n\nAfter that I cropped out each individual number:\n\nand then I looked for each segment individually and determined the number based on which segments were active (I used a special case for 1 where I checked the ratio of the width and height).\n\nHere's the code I used (two files)\nsegments.py\nimport numpy as np\n\nclass Segments:\n def __init__(self):\n # create a 7seg model\n self.flags = [];\n self.segments = [];\n h1 = [[0, 1.0],[0, 0.1]]; # 0\n h2 = [[0, 1.0],[0.45, 0.55]]; # 1\n h3 = [[0, 1.0],[0.9, 1.0]]; # 2\n vl1 = [[0, 0.2],[0, 0.5]]; # 3 # upper-left\n vl2 = [[0, 0.2],[0.5, 1.0]]; # 4\n vr1 = [[0.8, 1.0],[0, 0.5]]; # 5 # upper-right\n vr2 = [[0.8, 1.0], [0.5, 1.0]]; # 6\n self.segments.append(h1);\n self.segments.append(h2);\n self.segments.append(h3);\n self.segments.append(vl1);\n self.segments.append(vl2);\n self.segments.append(vr1);\n self.segments.append(vr2);\n\n # process an image and set flags\n def digest(self, number):\n # reset flags\n self.flags = [];\n\n # check res to see if it's a one\n h, w = number.shape[:2];\n if w < 0.5 * h:\n self.flags.append(5);\n self.flags.append(6);\n return;\n\n # check for segments\n for a in range(len(self.segments)):\n seg = self.segments[a];\n # get bounds\n xl, xh = seg[0];\n yl, yh = seg[1];\n # convert to pix coords\n xl = int(xl * w);\n xh = int(xh * w);\n yl = int(yl * h);\n yh = int(yh * h);\n sw = xh - xl;\n sh = yh - yl;\n # check\n count = np.count_nonzero(number[yl:yh, xl:xh] == 255);\n if count / (sh * sw) > 0.5: # 0.5 is a sensitivity measure\n self.flags.append(a);\n\n # returns the stored number (stored in self.flags)\n def getNum(self):\n # hardcoding outputs\n if self.flags == [0,2,3,4,5,6]:\n return 0;\n if self.flags == [5,6]:\n return 1;\n if self.flags == [0,1,2,4,5]:\n return 2;\n if self.flags == [0,1,2,5,6]:\n return 3;\n if self.flags == [1,3,5,6]:\n return 4;\n if self.flags == [0,1,2,3,6]:\n return 5;\n if self.flags == [0,1,2,3,4,6]:\n return 6;\n if self.flags == [0,5,6]:\n return 7;\n if self.flags == [0,1,2,3,4,5,6]:\n return 8;\n if self.flags == [0,1,2,3,5,6]:\n return 9;\n # ERROR\n return -1;\n\nmain.py\nimport cv2\nimport numpy as np\nfrom segments import Segments\n\n# load image\nimg = cv2.imread(\"seg7.jpg\");\n\n# crop\nimg = img[300:800,100:800,:];\n\n# lab\nlab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB);\nl,a,b = cv2.split(lab);\n\n# show\ncv2.imshow(\"orig\", img);\n\n# closing operation\nkernel = np.ones((5,5), np.uint8);\n\n# threshold params\nlow = 165;\nhigh = 200;\niters = 3;\n\n# make copy\ncopy = b.copy();\n\n# threshold\nthresh = cv2.inRange(copy, low, high);\n\n# dilate\nfor a in range(iters):\n thresh = cv2.dilate(thresh, kernel);\n\n# erode\nfor a in range(iters):\n thresh = cv2.erode(thresh, kernel);\n\n# show image\ncv2.imshow(\"thresh\", thresh);\ncv2.imwrite(\"threshold.jpg\", thresh);\n\n# start processing\n_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);\n\n# draw\nfor contour in contours:\n cv2.drawContours(img, [contour], 0, (0,255,0), 3);\n\n# get res of each 
number\nbounds = [];\nh, w = img.shape[:2];\nfor contour in contours:\n left = w;\n right = 0;\n top = h;\n bottom = 0;\n for point in contour:\n point = point[0];\n x, y = point;\n if x < left:\n left = x;\n if x > right:\n right = x;\n if y < top:\n top = y;\n if y > bottom:\n bottom = y;\n tl = [left, top];\n br = [right, bottom];\n bounds.append([tl, br]);\n\n# crop out each number\ncuts = [];\nnumber = 0;\nfor bound in bounds:\n tl, br = bound;\n cut_img = thresh[tl[1]:br[1], tl[0]:br[0]];\n cuts.append(cut_img);\n number += 1;\n cv2.imshow(str(number), cut_img);\n\n# font \nfont = cv2.FONT_HERSHEY_SIMPLEX;\n\n# create a segment model\nmodel = Segments();\nindex = 0;\nfor cut in cuts:\n # save image\n cv2.imwrite(str(index) + \"_\" + str(number) + \".jpg\", cut);\n\n # process\n model.digest(cut);\n number = model.getNum();\n print(number);\n cv2.imshow(str(index), cut);\n\n # draw and save again\n h, w = cut.shape[:2];\n drawn = np.zeros((h, w, 3), np.uint8);\n drawn[:, :, 0] = cut;\n drawn = cv2.putText(drawn, str(number), (10,30), font, 1, (0,0,255), 2, cv2.LINE_AA);\n cv2.imwrite(\"drawn\" + str(index) + \"_\" + str(number) + \".jpg\", drawn);\n \n index += 1;\n # cv2.waitKey(0);\n\n\n# show\ncv2.imshow(\"contours\", img);\ncv2.imwrite(\"contours.jpg\", img);\ncv2.waitKey(0);\n\nI can't guarantee that this always works, but it should be usable given a little tweaking. Remember to un-rotate the image if it isn't flat. The segment model assumes the numbers are mostly upright.\n", "If you want to use deep learning, one way to approach this would be to use a convolutional neural network (CNN). Whether you first want to crop the images depends on your application. Do you want to recognize the display from a picture like the one you attached? Then you should not crop the image manually. Furthermore you would need a lot of data to train your own CNN.\nAn alternative would be to use an off-the-shelf Optical Character Recognition engine such as tesseract pytesseract. These are already trained and can achieve good results. I have no experience with detecting 7 segment displays though, so it could be that they do not work for 7 segment displays. They have tried OCR with tesseract for 7 segment displays here: ocr + 7 segment display.\nLast thing you could try is first detect the display from a large picture and then feed the cropped region that was detected to an OCR engine.\n", "dot point Issue ~ the dot Point on right bottom of each Numbers seem to impact especially the recognition rate of right botton side variable vr2 #6 checking while checking Numpy.NonZero(in your sample code) when dot point was light on (while image threshed & findcontours)\n" ]
[ 6, 2, 0 ]
[]
[]
[ "artificial_intelligence", "deep_learning", "image_processing", "opencv", "python" ]
stackoverflow_0065559254_artificial_intelligence_deep_learning_image_processing_opencv_python.txt
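One portability note on the first answer's main.py: cv2.findContours returns three values on OpenCV 3.x but only two on 4.x, so the `_, contours, _ = ...` unpacking raises a ValueError on newer builds. A version-proof variant (dummy input for illustration):

import cv2
import numpy as np

thresh = np.zeros((100, 100), np.uint8)  # stand-in for the thresholded image
res = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = res[0] if len(res) == 2 else res[1]  # OpenCV 4.x vs 3.x
print(len(contours))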
Q: Pandas time-series: aggregate by date and transpose I have the following time series dataframe: dataframe = pd.DataFrame({ 'date': pd.to_datetime([ '2020-04-01', '2020-04-02', '2020-04-03', '2020-04-01', '2020-04-02', '2020-04-03']), 'Ticker': ['A', 'A', 'A', 'AAPL', 'AAPL', 'AAPL'], 'Price': ['8', '10', '12', '100', '200', '50']}) date Ticker Price 0 2020-04-01 A 8 1 2020-04-02 A 10 2 2020-04-03 A 12 3 2020-04-01 AAPL 100 4 2020-04-02 AAPL 200 5 2020-04-03 AAPL 50 The final result should look like: dataframe_2 = pd.DataFrame({ 'date': pd.to_datetime(['2020-04-01', '2020-04-02','2020-04-03']), 'A': [8, 10, 12], 'AAPL': [100, 200, 50]}) date A AAPL 0 2020-04-01 8 100 1 2020-04-02 10 200 2 2020-04-03 12 50 Initially I tried using the groupby function but with not much success. A: The operation you are trying to do is called pivoting. That is, creating new columns from the categorical values of a column. You can do either of these (same results): df = dataframe.set_index("date").pivot(columns="Ticker", values="Price") df = dataframe.pivot(index="date", columns="Ticker", values="Price") It is important to set the index; otherwise, the pivot will not know how to combine rows and you will get extra rows with NaN values. For the sample data, without the index, it would not know to treat rows 0 and 3 in your base data as the same date.
Pandas time-series: aggregate by date and transpose
I have the following time series dataframe: dataframe = pd.DataFrame({ 'date': pd.to_datetime([ '2020-04-01', '2020-04-02', '2020-04-03', '2020-04-01', '2020-04-02', '2020-04-03']), 'Ticker': ['A', 'A', 'A', 'AAPL', 'AAPL', 'AAPL'], 'Price': ['8', '10', '12', '100', '200', '50']}) date Ticker Price 0 2020-04-01 A 8 1 2020-04-02 A 10 2 2020-04-03 A 12 3 2020-04-01 AAPL 100 4 2020-04-02 AAPL 200 5 2020-04-03 AAPL 50 The final result should look like: dataframe_2 = pd.DataFrame({ 'date': pd.to_datetime(['2020-04-01', '2020-04-02','2020-04-03']), 'A': [8, 10, 12], 'AAPL': [100, 200, 50]}) date A AAPL 0 2020-04-01 8 100 1 2020-04-02 10 200 2 2020-04-03 12 50 Initially I tried using the groupby function but with not much success.
[ "The operation you are trying to do is called pivoting. That is, creating new columns from the categorical values of a column.\nYou can do either of these (same results):\ndf = dataframe.set_index(\"date\").pivot(columns=\"Ticker\", values=\"Price\")\n\n\ndf = dataframe.pivot(index=\"date\", columns=\"Ticker\", values=\"Price\")\n\nIt is important to set the index; otherwise, the pivot will not know how to combine rows and you will get extra rows with NaN values. For the sample data, without the index, it would not know to treat rows 0 and 3 in your base data as the same date.\n" ]
[ 1 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074622270_pandas_python.txt
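To match the expected frame exactly (date back as a column, no leftover "Ticker" axis name), chain rename_axis and reset_index after the pivot; a sketch with the sample data (Price given as numbers here rather than the strings in the question):

import pandas as pd

dataframe = pd.DataFrame({
    "date": pd.to_datetime(["2020-04-01", "2020-04-02", "2020-04-03"] * 2),
    "Ticker": ["A"] * 3 + ["AAPL"] * 3,
    "Price": [8, 10, 12, 100, 200, 50],
})

df = (dataframe.pivot(index="date", columns="Ticker", values="Price")
               .rename_axis(columns=None)
               .reset_index())
print(df)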
Q: Reading Json text response from url with PowerBi Desktop I am new to PowerBi. I would like to get a table on PowerIb, from this JSON API Sample of data: {"data": [{"user_id": 54710, "hp_user_id": 5806514, "username": "Jay_J1", "user_profile_url": "https://h30434.www3.hp.com/t5/user/viewprofilepage/user-id/5806514", "user_blocked": 0, "hp_post_id": 8550808, "post_datetime": "2022-11-28 10:54:00", "post_url": "https://h30434.www3.hp.com/t5/Notebook-Hardware-and-Upgrade-Questions/HP-Envy-360-broken-left-hinge-and-screen-separating/m-p/8550808?search-action-id=587041408161&search-result-uid=8550808", "post_summary": "Like many others have experienced, the hinge on my HP Envy 360 broke last night. All I did was simply open it, but I heard a crack, and now the metal part is stuck at an angle to where I can no l...", "me_too": "", "post_tags": "\"[\"HP ENVY x360 Laptop - 15m-ee0023dx\",\"Microsoft Windows 11\"]\""}, {"user_id": 52629, "hp_user_id": 5800465, "username": "BrookeDorbit", "user_profile_url": "https://h30434.www3.hp.com/t5/user/viewprofilepage/user-id/5800465", "user_blocked": 0, "hp_post_id": 8550124, "post_datetime": "2022-11-27 15:39:00", "post_url": "https://h30434.www3.hp.com/t5/Notebook-Hardware-and-Upgrade-Questions/Hinge-Issue/m-p/8550124?search-action-id=586827468132&search-result-uid=8550124", "post_summary": "I\u2019ve seen many others mention the same issue with their HP envy laptop but I am just as upset. I purchased my laptop in the summer of 2020 and only 2 years later, the hinge is broken. I have never o...", "me_too": "\"[{\"username\":\"Jay_J1\",\"hp_user_id\":\"5806514\",\"post_datetime\":\"2022-11-28\"}]\"", "post_tags": "\"[\"HP ENVY Laptop - 13t-ba000 CTO\"]\""}, ]} I can change the JSON Response if needed. So far, I followed these steps: Click on "New Source" and select "JSON" or "Web" (same issue): Then I get a select between Html and Text which is fair: And if I select Text, I get the text of the JSON, which is fair... My question is how would I get the table associated with this JSON response. As well is it possible to skip the GUI and import the Json text in Python or JS, and deliver it to the GUI? End result, would be a table with the items in the "data" key. Thanks a lot, and best! A: Fundamentally, you are converting a plain JSON to a table. The JSON just happens to be sourced as plaintext from a URL. JSON-to-table is well-explained in the docs - https://learn.microsoft.com/en-us/power-query/connectors/json. Additionally, I highly recommend going through this excellent Power Query primer - https://bengribaudo.com/blog/2017/11/17/4107/power-query-m-primer-part1-introduction-simple-expressions-let.
Reading Json text response from url with PowerBi Desktop
I am new to PowerBi. I would like to get a table on PowerIb, from this JSON API Sample of data: {"data": [{"user_id": 54710, "hp_user_id": 5806514, "username": "Jay_J1", "user_profile_url": "https://h30434.www3.hp.com/t5/user/viewprofilepage/user-id/5806514", "user_blocked": 0, "hp_post_id": 8550808, "post_datetime": "2022-11-28 10:54:00", "post_url": "https://h30434.www3.hp.com/t5/Notebook-Hardware-and-Upgrade-Questions/HP-Envy-360-broken-left-hinge-and-screen-separating/m-p/8550808?search-action-id=587041408161&search-result-uid=8550808", "post_summary": "Like many others have experienced, the hinge on my HP Envy 360 broke last night. All I did was simply open it, but I heard a crack, and now the metal part is stuck at an angle to where I can no l...", "me_too": "", "post_tags": "\"[\"HP ENVY x360 Laptop - 15m-ee0023dx\",\"Microsoft Windows 11\"]\""}, {"user_id": 52629, "hp_user_id": 5800465, "username": "BrookeDorbit", "user_profile_url": "https://h30434.www3.hp.com/t5/user/viewprofilepage/user-id/5800465", "user_blocked": 0, "hp_post_id": 8550124, "post_datetime": "2022-11-27 15:39:00", "post_url": "https://h30434.www3.hp.com/t5/Notebook-Hardware-and-Upgrade-Questions/Hinge-Issue/m-p/8550124?search-action-id=586827468132&search-result-uid=8550124", "post_summary": "I\u2019ve seen many others mention the same issue with their HP envy laptop but I am just as upset. I purchased my laptop in the summer of 2020 and only 2 years later, the hinge is broken. I have never o...", "me_too": "\"[{\"username\":\"Jay_J1\",\"hp_user_id\":\"5806514\",\"post_datetime\":\"2022-11-28\"}]\"", "post_tags": "\"[\"HP ENVY Laptop - 13t-ba000 CTO\"]\""}, ]} I can change the JSON Response if needed. So far, I followed these steps: Click on "New Source" and select "JSON" or "Web" (same issue): Then I get a select between Html and Text which is fair: And if I select Text, I get the text of the JSON, which is fair... My question is how would I get the table associated with this JSON response. As well is it possible to skip the GUI and import the Json text in Python or JS, and deliver it to the GUI? End result, would be a table with the items in the "data" key. Thanks a lot, and best!
[ "Fundamentally, you are converting a plain JSON to a table. The JSON just happens to be sourced as plaintext from a URL.\nJSON-to-table is well-explained in the docs - https://learn.microsoft.com/en-us/power-query/connectors/json.\nAdditionally, I highly recommend going through this excellent Power Query primer - https://bengribaudo.com/blog/2017/11/17/4107/power-query-m-primer-part1-introduction-simple-expressions-let.\n" ]
[ 0 ]
[ "I got an answer from another website and it worked:\nSteps to follow:\n\nfrom \"New Source\", select \"Blank Query\" (at the full bottom)\nInthe ribons, select \"Advanced Editor\"\nWrite \"your code\" in M (not easy...)\n\n= let\n Source = Json.Document(Web.Contents(\"https://hptrial.pythonanywhere.com/rest_api_data\")),\n data = Source[data],\n #\"Converted to Table\" = Table.FromList(data, Splitter.SplitByNothing(), null, null, ExtraValues.Error),\n #\"Expanded Column1\" = Table.ExpandRecordColumn(#\"Converted to Table\", \"Column1\", {\"user_id\", \"hp_user_id\", \"username\", \"user_profile_url\", \"user_blocked\", \"hp_post_id\", \"post_datetime\", \"post_url\", \"post_summary\", \"me_too\", \"post_tags\"}, {\"user_id\", \"hp_user_id\", \"username\", \"user_profile_url\", \"user_blocked\", \"hp_post_id\", \"post_datetime\", \"post_url\", \"post_summary\", \"me_too\", \"post_tags\"})\n\n\nAll done thanks to: ppm\nTx a lot\nAnswer found on: this website\n" ]
[ -1 ]
[ "javascript", "json", "powerbi", "python" ]
stackoverflow_0074621497_javascript_json_powerbi_python.txt
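On the side question, yes: outside the Power BI GUI the same JSON-to-table step is a couple of lines of Python, and Power BI Desktop can also ingest the result of a Python script (Get Data > Python script). A sketch using the endpoint from the workaround above; treat the URL and the selected columns as illustrative:

import pandas as pd
import requests

resp = requests.get("https://hptrial.pythonanywhere.com/rest_api_data")
resp.raise_for_status()
table = pd.json_normalize(resp.json()["data"])  # one row per item in "data"
print(table[["username", "post_datetime", "post_url"]].head())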
Q: need to limit BeautifulSoup href result to first occurrence - or - account for an open parenthesis in href string I want ONLY the <a href NPPES Data Dissemination in the Full Replacement Monthly NPI File section of https://download.cms.gov/nppes/NPI_Files.html. There are other <a href NPPES Data Dissemination files in the Weekly Incremental NPI Files that I do NOT want. Here is the code that gets ALL NPPES Data Dissemination files in the monthly and weekly sections: import subprocess import re from bs4 import BeautifulSoup import requests import wget def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination')) if ul != []: urls.append(a) print('done scraping the url...') return urls def download_and_extract(urls): for texts in urls: text = str(texts) file = text[55:99] print('zip file :', file) zip_link = texts['href'] print('Downloading %s :' %zip_link) slashurl = zip_link.split('/') print(slashurl) wget.download("https://download.cms.gov/nppes/"+ slashurl[1]) r = requests.get('https://download.cms.gov/nppes/NPI_Files.html') soup = BeautifulSoup(r.content, 'html.parser') urls = get_urls(soup) download_and_extract(urls) Tried: Limit=1 does not work as I have it below, as all NPPES Data Dissemination files are still collected def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination'), limit=1) if ul != []: urls.append(a) print('done scraping the url......!!!!') return urls Tried: If I use the open parenthesis 'NPPES Data Dissemination (' as it is only in the Full Replacement Monthly NPI File section, I get errors (below) def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination ('), limit=1) if ul != []: urls.append(a) print('done scraping the url......!!!!') return urls thank you for any assistance you may provide!!!! A: If what you need is only the first link: what happens here is that limit=1 only caps the matches inside each <a> tag, while the outer loop still scans every link. The simple solution to get the first link is to add a break once a match is found, which stops the loop: def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination')) if ul != []: urls.append(a) # break (stop loop) if found break print('done scraping the url......!!!!') return urls Update: looking at the website, you can actually do it with the regex alone (no break needed); just escape the "(", since it is a regex metacharacter: Full Replacement Monthly NPI File -> re.compile(r'NPPES Data Dissemination \(') Full Replacement Monthly NPI Deactivation File -> re.compile('NPPES Data Dissemination - Monthly Deactivation Update') Weekly Incremental NPI Files -> re.compile('NPPES Data Dissemination - Weekly Update')
need to limit BeautifulSoup href result to first occurrence - or - account for an open parenthesis in href string
I want ONLY the <a href NPPES Data Dissemination in the Full Replacement Monthly NPI File section of https://download.cms.gov/nppes/NPI_Files.html. There are other <a href NPPES Data Dissemination files in the Weekly Incremental NPI Files that I do NOT want. Here is the code that gets ALL NPPES Data Dissemination files in the monthly and weekly sections: import subprocess import re from bs4 import BeautifulSoup import requests import wget def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination')) if ul != []: urls.append(a) print('done scraping the url...') return urls def download_and_extract(urls): for texts in urls: text = str(texts) file = text[55:99] print('zip file :', file) zip_link = texts['href'] print('Downloading %s :' %zip_link) slashurl = zip_link.split('/') print(slashurl) wget.download("https://download.cms.gov/nppes/"+ slashurl[1]) r = requests.get('https://download.cms.gov/nppes/NPI_Files.html') soup = BeautifulSoup(r.content, 'html.parser') urls = get_urls(soup) download_and_extract(urls) Tried: Limit=1 does not work as I have it below, as all NPPES Data Dissemination files are still collected def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination'), limit=1) if ul != []: urls.append(a) print('done scraping the url......!!!!') return urls Tried: If I use the open parenthesis 'NPPES Data Dissemination (' as it is only in the Full Replacement Monthly NPI File section, I get errors (below) def get_urls(soup): urls = [] for a in soup.find_all('a', href=True): ul = a.find_all(text=re.compile('NPPES Data Dissemination ('), limit=1) if ul != []: urls.append(a) print('done scraping the url......!!!!') return urls thank you for any assistance you may provide!!!!
[ "If what you need is only the first link\nSo what happen here is, the limit you set is the first regex found in the link\nBut you still loop searching it for all links\nThe simple solution to get the first link is just add break when you found so it will stop the loop\ndef get_urls(soup):\n urls = []\n for a in soup.find_all('a', href=True):\n ul = a.find_all(text=re.compile('NPPES Data Dissemination'))\n if ul != []:\n urls.append(a)\n # break (stop loop) if found\n break\n print('done scraping the url......!!!!')\n return urls\n\nUpdate: when I look at the website\nactually you can update it by using regex only (not using break)\nFull Replacement Monthly NPI File -> re.compile('NPPES Data Dissemination (')\nFull Replacement Monthly NPI Deactivation File -> re.compile('NPPES Data Dissemination - Monthly Deactivation Update')\nWeekly Incremental NPI Files -> re.compile('NPPES Data Dissemination - Weekly Update')\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "href", "limit", "python" ]
stackoverflow_0074621951_beautifulsoup_href_limit_python.txt
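A compact variant of the regex-only fix: escape the "(" (a regex metacharacter, which is what caused the errors in the question) and keep only the links whose text matches. A sketch; the pattern assumes only the monthly full-file link text contains an opening parenthesis, as the question describes:

import re
import requests
from bs4 import BeautifulSoup

r = requests.get("https://download.cms.gov/nppes/NPI_Files.html")
soup = BeautifulSoup(r.content, "html.parser")

pattern = re.compile(r"NPPES Data Dissemination \(")  # note the escaped (
monthly = [a for a in soup.find_all("a", href=True) if a.find(string=pattern)]
for a in monthly:
    print(a["href"])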
Q: is there a way of comparing the similarities between these two sequences in python? I am new to Python and I need your help in getting the similarity between two sequences. Assume they are not of the same length and some may have (-) gap symbols. So here is my code below for getting the similarity in only one sequence. seq1 = "AAAATCCCTAGGGTCAT" def similarity(seq1): base_dic={} for i in range(len(seq1)): if seq1[i] in base_dic.keys(): base_dic[seq1[i]]+=1 else: base_dic[seq1[i]]=1 for key in base_dic.keys(): base_dic[key]=base_dic[key]/len(seq1)*100 return base_dic similarity(seq1) Output: {'A': 35.294117647058826, 'T': 23.52941176470588, 'C': 23.52941176470588, 'G': 17.647058823529413} My question is: how could I modify this code so that it can take two sequences at a time and find the similarities? For example: seq1 = "AAAATCCCTAGAAAGGTCAT" seq2 = "AAGATC---TTTCTACT" Any ideas? Thanks. I am expecting to get the similarity of A, T, G, C but not '-', as gaps should be counted as dissimilar. A: There are 2 parts to this answer. Polishes to the code in question. Standard algorithm to find how similar 2 strings are. Part-1: Polishes to your code in the question. The key takeaway here is to make your code more Pythonic. from collections import Counter seq1 = "AAAATCCCTAGGGTCAT" seq2 = "AAGATC---TTTCTACT" def get_count(s): n = len(s) s = [c for c in s if c in "ATCG"] ctr = Counter(s) ctr = {k: (v / n) * 100 for k, v in ctr.items()} return ctr def calculate_similarity(s1, s2): result1 = get_count(s1) result2 = get_count(s2) print(result1) print(result2) """ Add some custom logic here to compute how similar the strings are (or) similarity of A, T, C & G """ calculate_similarity(seq1, seq2) Part-2: The standard algorithm for the similarity between two strings is given by the Levenshtein distance. The Levenshtein distance is a string metric for measuring the difference between two sequences. Consider a scenario where you can convert sequence A to sequence B by adding (or) deleting (or) replacing a character. For simplicity, let's assume we have to calculate the number of operations to convert sequence A to B. This can be computed by the following logic. def minDistance(word1: str, word2: str) -> int: m, n = len(word1), len(word2) dp = [[0] * (n + 1) for _ in range(m + 1)] dp[0][0] = 0 for i in range(m + 1): dp[i][0] = i for j in range(n + 1): dp[0][j] = j for i in range(1, m + 1): for j in range(1, n + 1): if word1[i - 1] == word2[j - 1]: dp[i][j] = dp[i - 1][j - 1] else: # dp[i-1][j] -> insert # dp[i][j-1] -> remove # dp[i-1][j-1] -> replace dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1 return dp[-1][-1] # The number of operations to convert seq1 to seq2 # This number can help you compare how similar 2 strings are. result = minDistance(seq1, seq2) print(result) References: https://en.wikipedia.org/wiki/Levenshtein_distance
is there a way of comparing the similarities between these two sequences in python?
I am new to Python and I need your help in getting the similarity between two sequences. Assume they are not of the same length and some may have (-) gap symbols. So here is my code below for getting the similarity in only one sequence. seq1 = "AAAATCCCTAGGGTCAT" def similarity(seq1): base_dic={} for i in range(len(seq1)): if seq1[i] in base_dic.keys(): base_dic[seq1[i]]+=1 else: base_dic[seq1[i]]=1 for key in base_dic.keys(): base_dic[key]=base_dic[key]/len(seq1)*100 return base_dic similarity(seq1) Output: {'A': 35.294117647058826, 'T': 23.52941176470588, 'C': 23.52941176470588, 'G': 17.647058823529413} My question is: how could I modify this code so that it can take two sequences at a time and find the similarities? For example: seq1 = "AAAATCCCTAGAAAGGTCAT" seq2 = "AAGATC---TTTCTACT" Any ideas? Thanks. I am expecting to get the similarity of A, T, G, C but not '-', as gaps should be counted as dissimilar.
[ "There are 2 parts to this answer.\n\nPolishes to the code in question.\nStandard algorithm to find how similar 2 strings are.\n\nPart-1: Polishes to your code in the question.\nThe key takeaways here for you are to make your code more pythonic\nfrom collections import Counter\n\nseq1 = \"AAAATCCCTAGGGTCAT\"\nseq2 = \"AAGATC---TTTCTACT\"\n\n\ndef get_count(s):\n H = defaultdict(int)\n n = len(s)\n s = [c for c in s if c in \"ATCG\"]\n ctr = Counter(s)\n ctr = {k: (v / n) * 100 for k, v in ctr.items()}\n return ctr\n\n\ndef calculate_similarity(s1, s2):\n result1 = get_count(seq1)\n result2 = get_count(seq2)\n print(result1)\n print(result2)\n \"\"\"\n Add some custom logic here to compute\n how similar the strings are (or)\n similarity of A, T, C & G\n \"\"\"\n\ncalculate_similarity(seq1, seq2)\n\nPart-2: Standard algorithm to find the similarity b/w to strings is given by Levenshtein distance.\n\nThe Levenshtein distance is a string metric for measuring the difference between two sequences.\nConsider a scenario where you can convert sequence A to sequence B by adding (or) deleting (or) replacing a character. For simplicity, let's assume we have to calculate the number of operations to convert sequence A to B.\nThis can be computed by the following logic.\n\ndef minDistance(word1: str, word2: str) -> int:\n m, n = len(word1), len(word2)\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = 0\n for i in range(m + 1):\n dp[i][0] = i\n for j in range(n + 1):\n dp[0][j] = j\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n # dp[i-1][j] -> insert\n # dp[i][j-1] -> remove\n # dp[i-1][j-1] -> replace\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1\n return dp[-1][-1]\n\n# The number of operations to convert seq1 to seq2\n# This number can help you compare how similar 2 strings are.\nresult = minDistance(seq1, seq2). \n\nprint(result)\n\nReferences:\n\nhttps://en.wikipedia.org/wiki/Levenshtein_distance\n\n" ]
[ 0 ]
[ "To avoid counting “-“, you can add an if clause when you enter the for loop. If the character is “-“, we skip the loop for that character and move to next one.\nif seq1[i] == “-“:\n continue\n\nThis will help solve your problem.\n\n" ]
[ -1 ]
[ "bioinformatics", "python", "python_3.x", "spyder" ]
stackoverflow_0074622122_bioinformatics_python_python_3.x_spyder.txt
Q: Django - Null Integrity error not allowing POST The error that I am getting when trying to POST is: django.db.utils.IntegrityError: null value in column "interest_category_id" of relation "teamStart_project" violates not-null constraint Here are my Serializers: class InterestSerializer(serializers.ModelSerializer): class Meta: model = Interests fields = ('id', 'interest_name') class ProjectsSerializer(serializers.ModelSerializer): interest_category = serializers.StringRelatedField() class Meta: model = Project fields = ( 'project_title', 'project_description', 'interest_category', 'created_at', 'updated_at', ) Here are my models: class Interests(models.Model): interest_name = models.CharField(max_length=50) created_by = models.ForeignKey(User, on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now_add=True) def __str__(self): return self.interest_name class Project(models.Model): project_title = models.CharField(max_length=255) project_description = models.TextField(max_length=1500) interest_category = models.ForeignKey(Interests, on_delete=models.CASCADE) created_by = models.ForeignKey(User, related_name='projects', on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now_add=True) def __str__(self): return "Project name:" + "\n" + self.project_title + "\n" + "|" + "\n" + "Created By:" + "\n" + self.created_by.username I can get rid of the error by removing interest_category = serializers.StringRelatedField() but the issue is, if I do remove this line, the frontend shows the ID of interest_category and not the respective name. For example, this is what the frontend will show: With this line: interest_category = serializers.StringRelatedField() -------------------------------------------------------------------- Project Title: TestItem1 Project Description: TestItem1Desc Interest Category: Django Without the line: --------------------------------------------------------------------- Project Title: TestItem1 Project Description: TestItem1Desc Interest Category: 1 When I have the interest_category = serializers.StringRelatedField(), I am able to get the correct String Related name that I need to be displayed but I get the NullIntegrity error stating that the column of "interest_category_id" violates the not-null constraint. Whereas, if I do not have that line I can POST with no issues or errors but I cannot display the correct String Related name, it will only show the ID of interest_category. How can I get the name of the item, in order to display the item without having to worry about the NullIntegrity error with the interest_category = serializers.StringRelatedField() line? A: Replace interest_category = serializers.StringRelatedField() with interest_category_name = serializers.SerializerMethodField() and add the following method to your ProjectsSerializer def get_interest_category_name(self, instance): return instance.interest_category.interest_name and then add "interest_category_name" to the fields of ProjectsSerializer (DRF resolves a SerializerMethodField by looking for get_ plus the field name). That will solve your problem.
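Building on the answer above, a hedged sketch of a serializer that keeps POSTs working while still exposing the readable name. This is an illustration, not the asker's final code; it assumes DRF's PrimaryKeyRelatedField is acceptable for writes and that serializers and the models are imported as in the question:

class ProjectsSerializer(serializers.ModelSerializer):
    # Writable FK: clients POST the Interests primary key, so the
    # NOT NULL column interest_category_id gets populated
    interest_category = serializers.PrimaryKeyRelatedField(queryset=Interests.objects.all())
    # Read-only, human-readable name for the frontend
    interest_category_name = serializers.SerializerMethodField()

    class Meta:
        model = Project
        fields = ('project_title', 'project_description', 'interest_category',
                  'interest_category_name', 'created_at', 'updated_at')

    def get_interest_category_name(self, instance):
        return instance.interest_category.interest_name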
Django - Null Integrity error not allowing POST
The error that I am getting when trying to POST is: django.db.utils.IntegrityError: null value in column "interest_category_id" of relation "teamStart_project" violates not-null constraint Here are my Serializers: class InterestSerializer(serializers.ModelSerializer): class Meta: model = Interests fields = ('id', 'interest_name') class ProjectsSerializer(serializers.ModelSerializer): interest_category = serializers.StringRelatedField() class Meta: model = Project fields = ( 'project_title', 'project_description', 'interest_category', 'created_at', 'updated_at', ) Here are my models: class Interests(models.Model): interest_name = models.CharField(max_length=50) created_by = models.ForeignKey(User, on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now_add=True) def __str__(self): return self.interest_name class Project(models.Model): project_title = models.CharField(max_length=255) project_description = models.TextField(max_length=1500) interest_category = models.ForeignKey(Interests, on_delete=models.CASCADE) created_by = models.ForeignKey(User, related_name='projects', on_delete=models.CASCADE) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now_add=True) def __str__(self): return "Project name:" + "\n" + self.project_title + "\n" + "|" + "\n" + "Created By:" + "\n" + self.created_by.username I can get rid of the error by removing interest_category = serializers.StringRelatedField() but the issue is, if I do remove this line, the frontend shows the ID of interest_category and not the respective name. For example, this is what the frontend will show: With this line: interest_category = serializers.StringRelatedField() -------------------------------------------------------------------- Project Title: TestItem1 Project Description: TestItem1Desc Interest Category: Django Without the line: --------------------------------------------------------------------- Project Title: TestItem1 Project Description: TestItem1Desc Interest Category: 1 When I have the interest_category = serializers.StringRelatedField(), I am able to get the correct String Related name that I need to be displayed but I get the NullIntegrity error stating that the column of "interest_category_id" violates the not-null constraint. Whereas, if I do not have that line I can POST with no issues or errors but I cannot display the correct String Related name, it will only show the ID of interest_category. How can I get the name of the item, in order to display the item without having to worry about the NullIntegrity error with the interest_category = serializers.StringRelatedField() line?
[ "change that interest_category_name = serializers.StringRelatedField() to\ninterest_category = serializers.SerializerMethodField()\nand add the following function to your ProjectsSerializer\ndef get_interest_category_name(self, instance):\n return instance.interest_category.interest_name\n\nand then add \"interest_category_name\" in your fields of ProjectsSerializer. That will solve your problem.\n" ]
[ 0 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074622177_django_python.txt
Q: Housing Data Set Not Able to Load From 'Hands-On Machine Learning' I have followed other solutions that were posted on stackoverflow about trying to load the housing dataset which mostly included trying to call 'fetch_housing_data()' as well. However, even after I do that, I still get a filenotfound error indicating that there is no dataset called 'datasets/housing'. Here is the code that I have. import numpy as np import pandas as pd import matplotlib.pyplot as plt import os import tarfile from six.moves import urllib DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/" HOUSING_PATH = os.path.join('datasets', 'housing') HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + '/housing.tgz' def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): if not os.path.isdir(housing_path): os.mkdir(housing_path) tgz_path = os.path.join(housing_path, 'housing.tgz') urllib.request.urlretrieve(housing_url, tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path=housing_path) housing_tgz.close() fetch_housing_data() def load_housing_data(housing_path=HOUSING_PATH): csv_path = os.path.join(housing_path, 'housing.csv') return pd.read_csv(csv_path) dataset = load_housing_data() dataset.head() I tried to get the housing dataset from the link provided in the book with the proper function call and expected for the dataset to be retrieved. However, it has still produced an error for me despite the call. A: The traceback I got when running the code looked like: Traceback (most recent call last): File "/home/hayesall/answer.py", line 23, in <module> fetch_housing_data() File "/home/hayesall/answer.py", line 15, in fetch_housing_data os.mkdir(housing_path) FileNotFoundError: [Errno 2] No such file or directory: 'datasets/housing' This occurs because the datasets directory did not exist. If you first run: mkdir datasets Then re-run the code, you see two files under datasets/housing/ datasets/ └── housing ├── housing.csv └── housing.tgz Alternatively, replace the os.mkdir call with os.makedirs to recursively create the directories with nested paths: def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): if not os.path.isdir(housing_path): os.makedirs(housing_path)
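As a hedged alternative to the os.makedirs fix in the answer above, pathlib can create the whole datasets/housing chain in one call. This sketch reuses HOUSING_URL and HOUSING_PATH from the question:

from pathlib import Path
import tarfile
import urllib.request

def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    # parents=True creates 'datasets' and 'datasets/housing' together;
    # exist_ok=True makes repeat calls harmless
    Path(housing_path).mkdir(parents=True, exist_ok=True)
    tgz_path = Path(housing_path) / "housing.tgz"
    urllib.request.urlretrieve(housing_url, str(tgz_path))
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)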
Housing Data Set Not Able to Load From 'Hands-On Machine Learning'
I have followed other solutions that were posted on stackoverflow about trying to load the housing dataset which mostly included trying to call 'fetch_housing_data()' as well. However, even after I do that, I still get a filenotfound error indicating that there is no dataset called 'datasets/housing'. Here is the code that I have. import numpy as np import pandas as pd import matplotlib.pyplot as plt import os import tarfile from six.moves import urllib DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/" HOUSING_PATH = os.path.join('datasets', 'housing') HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + '/housing.tgz' def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): if not os.path.isdir(housing_path): os.mkdir(housing_path) tgz_path = os.path.join(housing_path, 'housing.tgz') urllib.request.urlretrieve(housing_url, tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path=housing_path) housing_tgz.close() fetch_housing_data() def load_housing_data(housing_path=HOUSING_PATH): csv_path = os.path.join(housing_path, 'housing.csv') return pd.read_csv(csv_path) dataset = load_housing_data() dataset.head() I tried to get the housing dataset from the link provided in the book with the proper function call and expected for the dataset to be retrieved. However, it has still produced an error for me despite the call.
[ "The traceback I got when running the code looked like:\nTraceback (most recent call last):\n File \"/home/hayesall/answer.py\", line 23, in <module>\n fetch_housing_data()\n File \"/home/hayesall/answer.py\", line 15, in fetch_housing_data\n os.mkdir(housing_path)\nFileNotFoundError: [Errno 2] No such file or directory: 'datasets/housing'\n\nThis occurs because the datasets directory did not exist.\nIf you first run:\nmkdir datasets\n\nThen re-run the code, you see two files under datasets/housing/\ndatasets/\n└── housing\n ├── housing.csv\n └── housing.tgz\n\n\nAlternatively, replace the os.mkdir call with os.makedirs to recursively create the directories with nested paths:\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n\n" ]
[ 0 ]
[]
[]
[ "machine_learning", "python" ]
stackoverflow_0074622321_machine_learning_python.txt
Q: Why is my class not accepting arguments in python? Context: The full code isn't below, so as to make it easier to read. Therefore some of the code may not make sense as it isn't used. Also, the big picture is that I am attempting to make an object-orientated text based adventure in python. The problem I have is that I'm trying to have a gate object and then create instances of it in order to use in the setup function of my level object. However it is throwing up an error that the gate object does not accept arguments and I was wondering how I would go about fixing this. The type of code throwing this error looks like this: gate1 = gate("north", deep_forest_area) #EXCERPT OF TEXT BASED ADVENTURE OOP PYTHON PROGRAM #defines level class class level(): def __innit__(self): self.name = ("") #level setup function def setup(self, name, directions, description): self.name = name self.directions = directions self.description = description def enter(self): print(self.description) if len(self.directions) >= 1: print("there is a gate to ", end=" ") for d in self.directions: d.print_gate() #defines gate class class gate: def __innit__(self, direction, gate_to): self.gate_to = gate_to self.direction = direction def is_gate(self, text): return self.direction in text def print_gate(self): print("the %s," % self.direction, end= " ") #initialises levels start_area = level() deep_forest_area = level() #defines gates and levels gate1 = gate("north", deep_forest_area) start_area.setup("forest", [gate1], "You are in a dusk lit forest surrounded by trees. The only direction is deeper into the forest") gate1 = gate("south", start_area) deep_forest_area.setup("deep forest", [gate1], "You are in a seemingly endless tunnel of dark oak trees") I tried analysing the code logically in terms of class logic and instances etc, however I couldn't see anything wrong and I feel as if I'm missing out on a piece of information or something. I also tried copying the template code I was working off more heavily but the error still didn't budge, which is weird because I was borderline copying at one point out of frustration. Help would be appreciated. A: The constructor should be called __init__ and not __innit__. Since your classes don't have a constructor, Python provides a default, no-arg constructor and you cannot pass any arguments to it, hence the error.
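To make the diagnosis above concrete, here is a minimal sketch (an illustration, not the asker's full program) showing that the error disappears once the method is spelled __init__:

class level:
    def __init__(self):  # __init__, not __innit__
        self.name = ""

class gate:
    def __init__(self, direction, gate_to):  # Python now routes the arguments here
        self.direction = direction
        self.gate_to = gate_to

deep_forest_area = level()
gate1 = gate("north", deep_forest_area)  # no longer raises "gate() takes no arguments"
print(gate1.direction)  # north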
Why is my class not accepting arguments in python?
Context: The full code isn't below, so as to make it easier to read. Therefore some of the code may not make sense as it isn't used. Also, the big picture is that I am attempting to make an object-orientated text based adventure in python. The problem I have is that I'm trying to have a gate object and then create instances of it in order to use in the setup function of my level object. However it is throwing up an error that the gate object does not accept arguments and I was wondering how I would go about fixing this. The type of code throwing this error looks like this: gate1 = gate("north", deep_forest_area) #EXCERPT OF TEXT BASED ADVENTURE OOP PYTHON PROGRAM #defines level class class level(): def __innit__(self): self.name = ("") #level setup function def setup(self, name, directions, description): self.name = name self.directions = directions self.description = description def enter(self): print(self.description) if len(self.directions) >= 1: print("there is a gate to ", end=" ") for d in self.directions: d.print_gate() #defines gate class class gate: def __innit__(self, direction, gate_to): self.gate_to = gate_to self.direction = direction def is_gate(self, text): return self.direction in text def print_gate(self): print("the %s," % self.direction, end= " ") #initialises levels start_area = level() deep_forest_area = level() #defines gates and levels gate1 = gate("north", deep_forest_area) start_area.setup("forest", [gate1], "You are in a dusk lit forest surrounded by trees. The only direction is deeper into the forest") gate1 = gate("south", start_area) deep_forest_area.setup("deep forest", [gate1], "You are in a seemingly endless tunnel of dark oak trees") I tried analysing the code logically in terms of class logic and instances etc, however I couldn't see anything wrong and I feel as if I'm missing out on a piece of information or something. I also tried copying the template code I was working off more heavily but the error still didn't budge, which is weird because I was borderline copying at one point out of frustration. Help would be appreciated.
[ "The constructor should be called __init__ and not __innit__. Since your classes don't have a constructor, python provides a default, no arg constructor and you cannot pass any arguments to it, hence the error.\n" ]
[ 0 ]
[]
[]
[ "oop", "python" ]
stackoverflow_0074622374_oop_python.txt
Q: Python money denomination loop does not work on decimal I am learning Python and what I need to achieve is to count how many denominations of 1000, 500, 200, 100, 50, 20, 10, 5, 1, 0.25, 0.01 there are based on my input data of 1575.78. This specific code bums me out. def withdraw_money(): denoms = (1000, 500, 200, 100, 50, 20, 10,5,1,.25,0.01) while True: try: withdraw = 1575.770 break except Exception as e: print('Incorrect input: %s' % e) print("Here is the bill breakdown for the amount input") for d in denoms: count = withdraw // d print('P%i = %i' % (d, count)) withdraw -= count * d withdraw_money() My current output is: Here is the bill breakdown for the amount input P1000 = 1 P500 = 1 P200 = 0 P100 = 0 P50 = 1 P20 = 1 P10 = 0 P5 = 1 P1 = 0 P0.25 = 3 P0.01 = 2 which is wrong because the P0.01 = 2 is supposed to be P0.01 = 3. However, this code is correct when running whole numbers like 1500 or 20, but with a large number with decimals it gets the 0.01 denomination count wrong. A: After debugging the code, I found the error. Using round() solves the problem. def withdraw_money(): denoms = (1000, 500, 200, 100, 50, 20, 10,5,1,0.25,0.01) while True: try: withdraw = 1575.77 break except Exception as e: print('Incorrect input: %s' % e) print("Here is the bill breakdown for the amount input") for i in range(len(denoms)): if denoms[i] != 0.01: count = withdraw // denoms[i] else: count = withdraw / denoms[i] print(f'P{denoms[i]} = {count:0.0f}') withdraw = round(withdraw % denoms[i],2) withdraw_money() A: You can use an f-string here like below def withdraw_money(): denoms = (1000, 500, 200, 100, 50, 20, 10, 5, 1, .25, 0.01) while True: try: withdraw = 1575.770 break except Exception as e: print('Incorrect input: %s' % e) print("Here is the bill breakdown for the amount input") for d in denoms: count = withdraw // d print(f'P{d} = {count:0.0f}') withdraw -= count * d withdraw_money() Output: Here is the bill breakdown for the amount input P1000 = 1 P500 = 1 P200 = 0 P100 = 0 P50 = 1 P20 = 1 P10 = 0 P5 = 1 P1 = 0 P0.25 = 3 P0.01 = 1
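The root cause is binary floating point: 0.77 has no exact binary representation, so the running remainder drifts just below a multiple of 0.01 before the last step. A hedged sketch (an illustration, not from either answer) that avoids the drift entirely by working in integer cents:

def withdraw_money(amount):
    # Denominations expressed in integer cents, so // and % stay exact
    denoms_cents = (100000, 50000, 20000, 10000, 5000, 2000, 1000, 500, 100, 25, 1)
    remaining = round(amount * 100)  # 1575.77 -> 157577
    print("Here is the bill breakdown for the amount input")
    for d in denoms_cents:
        count, remaining = divmod(remaining, d)
        print(f'P{d / 100:g} = {count}')

withdraw_money(1575.77)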
Python money denomination loop does not work on decimal
I am learning Python and what I need to achieve is to count how many denominations of 1000, 500, 200, 100, 50, 20, 10, 5, 1, 0.25, 0.01 there are based on my input data of 1575.78. This specific code bums me out. def withdraw_money(): denoms = (1000, 500, 200, 100, 50, 20, 10,5,1,.25,0.01) while True: try: withdraw = 1575.770 break except Exception as e: print('Incorrect input: %s' % e) print("Here is the bill breakdown for the amount input") for d in denoms: count = withdraw // d print('P%i = %i' % (d, count)) withdraw -= count * d withdraw_money() My current output is: Here is the bill breakdown for the amount input P1000 = 1 P500 = 1 P200 = 0 P100 = 0 P50 = 1 P20 = 1 P10 = 0 P5 = 1 P1 = 0 P0.25 = 3 P0.01 = 2 which is wrong because the P0.01 = 2 is supposed to be P0.01 = 3. However, this code is correct when running whole numbers like 1500 or 20, but with a large number with decimals it gets the 0.01 denomination count wrong.
[ "After debugging the code, I found the error. Using round() solve the problem.\ndef withdraw_money():\n denoms = (1000, 500, 200, 100, 50, 20, 10,5,1,0.25,0.01)\n while True:\n try:\n withdraw = 1575.77\n break\n except Exception as e:\n print('Incorrect input: %s' % e)\n \n print(\"Here is the bill breakdown for the amount input\")\n for i in range(len(denoms)):\n if denoms[i] != 0.01: count = withdraw // denoms[i]\n else: count = withdraw / denoms[i]\n print(f'P{denoms[i]} = {count:0.0f}')\n withdraw = round(withdraw % denoms[i],2)\n \n\nwithdraw_money()\n\n", "You can use f string here like below\ndef withdraw_money():\n denoms = (1000, 500, 200, 100, 50, 20, 10, 5, 1, .25, 0.01)\n while True:\n try:\n withdraw = 1575.770\n break\n except Exception as e:\n print('Incorrect input: %s' % e)\n\n print(\"Here is the bill breakdown for the amount input\")\n for d in denoms:\n count = withdraw // d\n print(f'P{d} = {count:0.0f}')\n withdraw -= count * d\n\n\nwithdraw_money()\n\n\nOutput:\nHere is the bill breakdown for the amount input\nP1000 = 1\nP500 = 1\nP200 = 0\nP100 = 0\nP50 = 1\nP20 = 1\nP10 = 0\nP5 = 1\nP1 = 0\nP0.25 = 3\nP0.01 = 1\n\n" ]
[ 2, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074622105_python.txt
Q: Login credentials not working with Gmail SMTP I am attempting to send an email in Python, through Gmail. Here is my code: import smtplib fromaddr = '......................' toaddrs = '......................' msg = 'Spam email Test' username = '.......' password = '.......' server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() server.login(username, password) server.sendmail(fromaddr, toaddrs, msg) server.quit() I get the error: Traceback (most recent call last): File "email_send.py", line 18, in <module> server.login(username, password) File "C:\.....\Python\lib\smtplib.py", line 633, in login raise SMTPAuthenticationError(code, resp) smtplib.SMTPAuthenticationError: (535, b'5.7.8 Username and Password not accepted. Learn more at\n5.7.8 http://support.google.com/mail/bin/answer.py?answer=14257\n5.7.8 {BADCREDENTIALS} s10sm9426107qam.7 - gsmtp') This seems to be a problem with the login. I am certain that my login details are correct, except for one thing. Should username be "blah@gmail.com", or simply "blah"? I tried both, same error. Any idea what's wrong? NOTE: all the periods are instead of password/email/file paths/etc. A: UPDATE: This feature is no longer supported as of May 30th, 2022. See https://support.google.com/accounts/answer/6010255?hl=en&visit_id=637896899107643254-869975220&p=less-secure-apps&rd=1#zippy=%2Cuse-an-app-password ORIGINAL ANSWER (No longer working): I ran into a similar problem and stumbled on this question. I got an SMTP Authentication Error but my user name / pass was correct. Here is what fixed it. I read this: https://support.google.com/accounts/answer/6010255 In a nutshell, google is not allowing you to log in via smtplib because it has flagged this sort of login as "less secure", so what you have to do is go to this link while you're logged in to your google account, and allow the access: https://www.google.com/settings/security/lesssecureapps Once that is set (see my screenshot below), it should work. Login now works: smtpserver = smtplib.SMTP("smtp.gmail.com", 587) smtpserver.ehlo() smtpserver.starttls() smtpserver.ehlo() smtpserver.login('me@gmail.com', 'me_pass') Response after change: (235, '2.7.0 Accepted') Response prior: smtplib.SMTPAuthenticationError: (535, '5.7.8 Username and Password not accepted. Learn more at\n5.7.8 http://support.google.com/mail/bin/answer.py?answer=14257 g66sm2224117qgf.37 - gsmtp') Still not working? If you still get the SMTPAuthenticationError but now the code is 534, it's because the location is unknown. Follow this link: https://accounts.google.com/DisplayUnlockCaptcha Click continue and this should give you 10 minutes for registering your new app. So proceed to doing another login attempt now and it should work. This doesn't seem to work right away; you may be stuck for a while getting this error in smtplib: 235 == 'Authentication successful' 503 == 'Error: already authenticated' The message says to use the browser to sign in: SMTPAuthenticationError: (534, '5.7.9 Please log in with your web browser and then try again. Learn more at\n5.7.9 https://support.google.com/mail/bin/answer.py?answer=78754 qo11sm4014232igb.17 - gsmtp') After enabling 'lesssecureapps', go for a coffee, come back, and try the 'DisplayUnlockCaptcha' link again. From user experience, it may take up to an hour for the change to kick in. Then try the sign-in process again. UPDATE: See my answer here: How to send an email with Gmail as provider using Python? A: I had the same issue.
The Authentication Error can be because of your security settings, the 2-step verification for instance. It won't allow third-party apps to override the authentication. Log in to your Google account, and use these links: Step 1 [Link of Disabling 2-step verification]: https://myaccount.google.com/security?utm_source=OGB&utm_medium=act#signin Step 2: [Link for Allowing less secure apps] https://myaccount.google.com/u/1/lesssecureapps?pli=1&pageId=none It should be all good now. A: If you're using smtp.gmail.com, then you have to do the following: Turn on the less secure apps You'll get the security mail in your Gmail inbox; click 'Yes, it's me' in that. Now run your code again. A: I was also faced with the same error message when I tried to use smtplib. The error message was like this: error (535, b'5.7.8 username and password not accepted. Google has changed access to less secure apps. Before that, there was a field to give access to. Now you can follow these steps: Go to Google Account Page > Security > Signing in to Google section and turn on 2-Step Verification. You need this feature on. When 2SV is on, go to App Password and generate a new app password for mail access. It will generate it for you; then use this password in the gmail_password function in Python. gmail_password='google_generate_it_for_you' A: I had the same issue, and I fixed it by creating an app password for the Email application on Mac. You can find it at My Account -> Security -> Signing in to Google -> App passwords. Below is the link for it. https://myaccount.google.com/apppasswords?utm_source=google-account&utm_medium=web A: Denied The solution of using "Access for the less secure app" in Gmail has been denied (find more here). Update By the way, you can get access to the Gmail account by the solution proposed by Google, called "App password". The solution is simple: 1. Activate two-step verification of the corresponding account. 2. Create an app password. 3. Do exactly the same implementation that you have for sending an email (explained in your question), except replace the password with the generated app password (a sixteen-digit password). For more details you can also follow this post (it's working well for me). A: If you turn on 2-Step Verification, you need to generate a special app password instead of using your common password. https://myaccount.google.com/security#signin A: If you are getting this error (535, b'5.7.8 Username and Password not accepted. Learn more at\n5.7.8 https://support.google.com/mail/?p=BadCredentials o60sm2132303pje.21 - gsmtp'), then simply go into the Security section of your Google account settings, make it a less secure account, and turn on the less secure button. A: I had the same issue; I solved this by allowing "less secure app access". This can be found in the Security tab on your Google Account: A: Nov, 2022 Update: You need to use an app password to allow your app to access your Google account. Sign in with App Passwords: An App Password is a 16-digit passcode that gives a less secure app or device permission to access your Google Account. App Passwords can only be used with accounts that have 2-Step Verification turned on. In addition, Google hasn't allowed your app to access your Google account with username (email address) and password since May 30, 2022. So now, you need a username (email address) and an app password to access your Google account.
Less secure apps & your Google Account: To help keep your account secure, from May 30, 2022, ​​Google no longer supports the use of third-party apps or devices which ask you to sign in to your Google Account using only your username and password. How to generate an app password: First, click on Account from 9 dots: Then, click on App passwords from Security. *Don't forget to turn on 2-Step Verification before generating an app password otherwise you cannot generate an app password: Then, click on Other (Custom name): Then, put your app name, then click on GENERATE: Finally, you could generate the app password xylnudjdiwpojwzm: So, your code with the app password above is as shown below: import smtplib fromaddr = '......................' toaddrs = '......................' msg = 'Spam email Test' username = '.......' password = 'xylnudjdiwpojwzm' # Here server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() server.login(username, password) server.sendmail(fromaddr, toaddrs, msg) server.quit() In addition, settings.py with the app password above in Django is as shown below: # "settings.py" EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = 'myaccount@gmail.com' EMAIL_HOST_PASSWORD = 'xylnudjdiwpojwzm' # Here A: Enable 2FA for your Google account Then set an app password for your app Use that new password A: For email created by G-Suite or Google Workspace, you have to ask your admin to activate the Gmail app and assigned it to your email account. Without this step, trying either above methods still does not work. A: Oct 3 2022 As per all the answers mentioned above. Enabling 2Factor Authentication and creating an App Pasword does the trick with SMTP. NOTE: Less secure apps toggle doesn't work anymore tbh?(Others confirm?)
Login credentials not working with Gmail SMTP
I am attempting to send an email in Python, through Gmail. Here is my code: import smtplib fromaddr = '......................' toaddrs = '......................' msg = 'Spam email Test' username = '.......' password = '.......' server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() server.login(username, password) server.sendmail(fromaddr, toaddrs, msg) server.quit() I get the error: Traceback (most recent call last): File "email_send.py", line 18, in <module> server.login(username, password) File "C:\.....\Python\lib\smtplib.py", line 633, in login raise SMTPAuthenticationError(code, resp) smtplib.SMTPAuthenticationError: (535, b'5.7.8 Username and Password not accepted. Learn more at\n5.7.8 http://support.google.com/mail/bin/answer.py?answer=14257\n5.7.8 {BADCREDENTIALS} s10sm9426107qam.7 - gsmtp') This seems to be a problem with the login. I am certain that my login details are correct, except for one thing. Should username be "blah@gmail.com", or simply "blah"? I tried both, same error. Any idea what's wrong? NOTE: all the periods are instead of password/email/file paths/etc.
[ "UPDATE:\nThis feature is no longer supported as of May 30th, 2022. See https://support.google.com/accounts/answer/6010255?hl=en&visit_id=637896899107643254-869975220&p=less-secure-apps&rd=1#zippy=%2Cuse-an-app-password\nORIGINAL ANSWER (No longer working):\nI ran into a similar problem and stumbled on this question. I got an SMTP Authentication Error but my user name / pass was correct. Here is what fixed it. I read this:\nhttps://support.google.com/accounts/answer/6010255\nIn a nutshell, google is not allowing you to log in via smtplib because it has flagged this sort of login as \"less secure\", so what you have to do is go to this link while you're logged in to your google account, and allow the access:\nhttps://www.google.com/settings/security/lesssecureapps\nOnce that is set (see my screenshot below), it should work.\n\nLogin now works:\nsmtpserver = smtplib.SMTP(\"smtp.gmail.com\", 587)\nsmtpserver.ehlo()\nsmtpserver.starttls()\nsmtpserver.ehlo()\nsmtpserver.login('me@gmail.com', 'me_pass')\n\nResponse after change:\n(235, '2.7.0 Accepted')\n\nResponse prior:\nsmtplib.SMTPAuthenticationError: (535, '5.7.8 Username and Password not accepted. Learn more at\\n5.7.8 http://support.google.com/mail/bin/answer.py?answer=14257 g66sm2224117qgf.37 - gsmtp')\n\nStill not working? If you still get the SMTPAuthenticationError but now the code is 534, its because the location is unknown. Follow this link:\nhttps://accounts.google.com/DisplayUnlockCaptcha\nClick continue and this should give you 10 minutes for registering your new app. So proceed to doing another login attempt now and it should work.\nThis doesn't seem to work right away you may be stuck for a while getting this error in smptlib:\n235 == 'Authentication successful'\n503 == 'Error: already authenticated'\n\nThe message says to use the browser to sign in:\nSMTPAuthenticationError: (534, '5.7.9 Please log in with your web browser and then try again. Learn more at\\n5.7.9 https://support.google.com/mail/bin/answer.py?answer=78754 qo11sm4014232igb.17 - gsmtp')\n\nAfter enabling 'lesssecureapps', go for a coffee, come back, and try the 'DisplayUnlockCaptcha' link again. From user experience, it may take up to an hour for the change to kick in. Then try the sign-in process again.\nUPDATE:: See my answer here:\nHow to send an email with Gmail as provider using Python?\n", "I had the same issue. The Authentication Error can be because of your security settings, the 2-step verification for instance. It wont allow third party apps to override the authentication.\nLog in to your Google account, and use these links:\nStep 1 [Link of Disabling 2-step verification]:\nhttps://myaccount.google.com/security?utm_source=OGB&utm_medium=act#signin\nStep 2: [Link for Allowing less secure apps]\nhttps://myaccount.google.com/u/1/lesssecureapps?pli=1&pageId=none\nIt should be all good now.\n", "If you're using smtp.gmail.com, then you have to do the following:\n\nTurn on the less secure apps\n\nYou'll get the security mail in your gmail inbox, Click 'Yes,it's me' in that.\n\nNow run your code again.\n\n\n", "I am also faced with the same error message when I try to use smtplib. The error message was like this;\nerror (535, b'5.7.8 username and password not accepted.\n\nGoogle has changed access to less secure apps. Before that, there was a field to give access to. Now you can follow these steps;\n\nGo to Google Account Page>Security>Signing in to Google section\nturn on 2-Step Verification. 
You need this feature on.\n\n\nWhen 2SV is on, go to App Password and generate new-app-password for mail access. It will generate it for you, then use this password in gmail_password function in Python.\ngmail_password='google_generate_it_for_you'\n\n\n\n", "I had same issue. And I fix it with creating an app-password for Email application on Mac.\nYou can find it at my account -> Security -> Signing in to Google -> App passwords.\nbelow is the link for it.\nhttps://myaccount.google.com/apppasswords?utm_source=google-account&utm_medium=web\n", "Denied\nThe solution of using \"Access for the less secure app\" in Gmail has been denied (find more here).\nUpdate\nBy the way, you can get access to the gmail account by the solution proposed by Google, called \"App password\". The solution is simple:\n1. Active two-step verification of the corresponding account.\n2. Create an app password. \n3. Exactly do the same implementation that you have for sending an email (explained in your question). \n Except, replace the password with the generated app password (a sixteen digit password).\n\nFor more details you can also follow this post (it's working well for me).\n", "If you turn-on 2-Step Verification, you need generate a special app password instead of using your common password.\nhttps://myaccount.google.com/security#signin\n", "if you are getting error this(535, b'5.7.8 Username and Password not accepted. Learn more at\\n5.7.8 https://support.google.com/mail/?p=BadCredentials o60sm2132303pje.21 - gsmtp')\nthen simply go in you google accountsettings of security section and make a less secure account and turn on the less secure button\n", "I had the same issue , i solved this by allowing \"less secure app access\" . This can be found in Security tab on Google Account: \n", "Nov, 2022 Update:\nYou need to use an app password to allow your app to access your google account.\nSign in with App Passwords:\n\nAn App Password is a 16-digit passcode that gives a less secure app or\ndevice permission to access your Google Account. App Passwords can\nonly be used with accounts that have 2-Step Verification turned on.\n\nIn addition, google hasn't allowed your app to access your google account with username(email address) and password since May 30, 2022. So now, you need username(email address) and an app password to access your google account.\nLess secure apps & your Google Account:\n\nTo help keep your account secure, from May 30, 2022, ​​Google no\nlonger supports the use of third-party apps or devices which ask you\nto sign in to your Google Account using only your username and\npassword.\n\nHow to generate an app password:\nFirst, click on Account from 9 dots:\n\nThen, click on App passwords from Security. *Don't forget to turn on 2-Step Verification before generating an app password otherwise you cannot generate an app password:\n\nThen, click on Other (Custom name):\n\nThen, put your app name, then click on GENERATE:\n\nFinally, you could generate the app password xylnudjdiwpojwzm:\n\nSo, your code with the app password above is as shown below:\nimport smtplib\n\nfromaddr = '......................' \ntoaddrs = '......................' \nmsg = 'Spam email Test' \n\nusername = '.......' 
\npassword = 'xylnudjdiwpojwzm' # Here\n\nserver = smtplib.SMTP('smtp.gmail.com', 587) \nserver.ehlo()\nserver.starttls()\nserver.login(username, password) \nserver.sendmail(fromaddr, toaddrs, msg) \nserver.quit()\n\nIn addition, settings.py with the app password above in Django is as shown below:\n# \"settings.py\"\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\nEMAIL_HOST_USER = 'myaccount@gmail.com'\nEMAIL_HOST_PASSWORD = 'xylnudjdiwpojwzm' # Here\n\n", "\nEnable 2FA for your Google account\nThen set an app password for your app\nUse that new password\n\n", "For email created by G-Suite or Google Workspace, you have to ask your admin to activate the Gmail app and assigned it to your email account. Without this step, trying either above methods still does not work.\n", "Oct 3 2022\nAs per all the answers mentioned above.\nEnabling 2Factor Authentication and creating an App Pasword does the trick with SMTP.\nNOTE: Less secure apps toggle doesn't work anymore tbh?(Others confirm?)\n" ]
[ 311, 44, 21, 15, 12, 10, 7, 7, 4, 1, 0, 0, 0 ]
[ "In my case, I allowed the access, but the problem was that I was using server.login('Name <email>', password). Please, make sure to use only your email here: server.login('youremail@gmail.com', password).\nResponse after change:\n(235, '2.7.0 Accepted')\n\nResponse prior:\nsmtplib.SMTPAuthenticationError: (535, b'5.7.8 Username and Password not accepted. Learn more at\\n5.7.8 https://support.google.com/mail/?p=BadCredentials 130sm13119106qkh.99 - gsmtp')\n\n", "Try turning on less secure apps and make sure that you have smtp.gmail.com or for different search up\n" ]
[ -1, -2 ]
[ "authentication", "gmail", "python", "smtp", "smtp_auth" ]
stackoverflow_0016512592_authentication_gmail_python_smtp_smtp_auth.txt
Q: OpenCV stereo calibration error - (-3:Internal error) CALIB_CHECK_COND - Ill-conditioned matrix for input array 1 in function 'CalibrateExtrinsics' opencv version is 3.4.9 I use this code to calibrate. My stereoscopic camera consists of two GoPro Session cameras (with settings - 1080p 30 fps, medium angle) fixed on an aluminium plane 90 cm away from each other, also used a wifi remote controller for the cameras to be in sync. I shot a 2 minute video covering every angle of both cameras, I used my TV screen as the board (I needed it to be big because the cameras are too far away from each other), and I broke down the video into frames, also I deleted all the frames where the algorithm couldn't find the chessboard (if one of the cameras can't find the pattern that exact frame was deleted from both cameras). When testing the undistortion using that 9x6 chessboard pattern I noticed that every picture looks horrible after undistortion here's the original. I tried everything, with light, with a printed board on paper glued to a flat board, every time the undistorted image looks like this and many times it looks worse than this, and also it's worth mentioning that I resize the images from 1920x1080 to 640x360 before undistorting the image. The main problem is that I get this error every time I run that code from github cv2.error: OpenCV(3.4.9) /io/opencv/modules/calib3d/src/fisheye.cpp:1421: error: (-3:Internal error) CALIB_CHECK_COND - Ill-conditioned matrix for input array 1 in function 'CalibrateExtrinsics' and I have zero idea what to do. I think the bad undistortion problem causes this error, or am I wrong? Please tell me what to do. A: So the error is raised here: https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L1421 when evaluating the vector of singular values w output from Singular Value Decomposition (SVD). Specifically, the ratio of the first singular value and the last singular value (svd.w.at<double>(0) / svd.w.at<double>((int)svd.w.total() - 1)) exceeds a threshold value (in this case, 1e6, based on the variable thresh_cond). It looks like you can configure whether this condition (check_cond) is evaluated using CALIB_CHECK_COND (https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L740), which can be provided in the stereoCalibrate function: https://docs.opencv.org/3.4/db/d58/group__calib3d__fisheye.html#gadbb3a6ca6429528ef302c784df47949b. The stereoCalibrate function is called at this line in the script you're using; you'll just have to change cv2.CALIB_FIX_INTRINSIC to cv2.fisheye.CALIB_CHECK_COND. Hope this helps, or at least gets you some results! Update After changing the flag from cv2.CALIB_FIX_INTRINSIC to cv2.fisheye.CALIB_CHECK_COND, a new error is raised due to a failed assertion: (-215:Assertion failed) abs_max < threshold in function 'stereoCalibrate'. This assertion occurs here: https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L1023. A quick google search yields these two questions from the OpenCV question forum: https://answers.opencv.org/question/213931/assertion-failed-abs_max-threshold-in-function-stereocalibrate/ https://answers.opencv.org/question/67855/opencv-error-assertion-failed-abs_max-threshold-in-stereocalibrate/ Both of which conclude the same thing: in the sets of images they were using, there were some bad stereo pairs that needed to be removed.
Also, in the line from the source code, there is a single comment above the assertion when computing abs_max: //check goodness of stereopair (https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L1013). One of the answers in the OpenCV forums from recently (March 2020) had some logic to check for bad stereo pairs: https://answers.opencv.org/question/213931/assertion-failed-abs_max-threshold-in-function-stereocalibrate/?answer=227486#post-id-227486 In the script you're using, I think it would translate to adding these lines here: diff = cornersL - cornersR lengths = np.linalg.norm(diff[:, :, 1], axis=-1) sum = np.sum(lengths, axis=0) if (sum > 2000.0): print(f"THIS STEREO PAIR IS BROKEN!!! Diff is: {sum}") cornersR = np.flipud(cornersR) Alternatively, you could just continue if that condition is met, and skip those pairs altogether (although I'm not sure if that would be ideal). You may also want to print/save the names of the images (leftName) and (rightName) so you can inspect them - although at that point, I would recommend just adding a breakpoint where the check occurs and stepping through to evaluate in real time. A: The undistorted image should be the same size as those in your calibration, unless you did some modification to the calibration parameters
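If the immediate goal is to get a calibration out while the bad pairs are being tracked down, one option is to call cv2.fisheye.stereoCalibrate without the CALIB_CHECK_COND flag, so ill-conditioned frames no longer abort the run. This is a hedged sketch, not a drop-in fix: objpoints, leftPoints, rightPoints, K1, D1, K2, D2 and img_size are placeholders assumed to come from the calibration script:

import cv2

# CALIB_CHECK_COND deliberately left out of the flag set
flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_FIX_SKEW
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6)
rms, K1, D1, K2, D2, R, T = cv2.fisheye.stereoCalibrate(
    objpoints, leftPoints, rightPoints,  # detected corners from both cameras
    K1, D1, K2, D2, img_size,
    flags=flags, criteria=criteria)
print("RMS reprojection error:", rms)   # a large value still hints at bad pairs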
OpenCV stereo calibration error - (-3:Internal error) CALIB_CHECK_COND - Ill-conditioned matrix for input array 1 in function 'CalibrateExtrinsics'
opencv version is 3.4.9 I use this code to calibrate. My stereoscopic camera consists of two GoPro Session cameras (with settings - 1080p 30 fps, medium angle) fixed on an aluminium plane 90 cm away from each other, also used a wifi remote controller for the cameras to be in sync. I shot a 2 minute video covering every angle of both cameras, I used my TV screen as the board (I needed it to be big because the cameras are too far away from each other), and I broke down the video into frames, also I deleted all the frames where the algorithm couldn't find the chessboard (if one of the cameras can't find the pattern that exact frame was deleted from both cameras). When testing the undistortion using that 9x6 chessboard pattern I noticed that every picture looks horrible after undistortion here's the original. I tried everything, with light, with a printed board on paper glued to a flat board, every time the undistorted image looks like this and many times it looks worse than this, and also it's worth mentioning that I resize the images from 1920x1080 to 640x360 before undistorting the image. The main problem is that I get this error every time I run that code from github cv2.error: OpenCV(3.4.9) /io/opencv/modules/calib3d/src/fisheye.cpp:1421: error: (-3:Internal error) CALIB_CHECK_COND - Ill-conditioned matrix for input array 1 in function 'CalibrateExtrinsics' and I have zero idea what to do. I think the bad undistortion problem causes this error, or am I wrong? Please tell me what to do.
[ "So the error is raised here: https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L1421 when evaluating the vector of singular values w output from Singular Value Decomposition (SVD). Specifically, the ratio of the first singular value and the last singular value (svd.w.at<double>(0) / svd.w.at<double>((int)svd.w.total() - 1)) exceed a threshold value (in this case, 1e6, based on the variable thresh_cond).\nIt looks like you can configure whether this condition (check_cond) is evaluated using CALIB_CHECK_COND (https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L740), which can be provided in the stereoCalibrate function: https://docs.opencv.org/3.4/db/d58/group__calib3d__fisheye.html#gadbb3a6ca6429528ef302c784df47949b. The stereoCalibrate function is called at this line in the script you're using; you'll just have to change cv2.CALIB_FIX_INTRINSIC to cv2.fisheye.CALIB_CHECK_COND.\nHope this helps, or at least gets you some results!\nUpdate\nAfter changing the flag from cv2.CALIB_FIX_INTRINSIC to cv2.fisheye.CALIB_CHECK_COND, a new error is raised due to a failed assertion: (-215:Assertion failed) abs_max < threshold in function 'stereoCalibrate'. This assertion occurs here: https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L1023. A quick google search yields these two questions from the OpenCV question forum:\n\nhttps://answers.opencv.org/question/213931/assertion-failed-abs_max-threshold-in-function-stereocalibrate/\n\nhttps://answers.opencv.org/question/67855/opencv-error-assertion-failed-abs_max-threshold-in-stereocalibrate/\n\n\nBoth of which conclude the same thing: in the sets of images they were using, there were some bad stereo pairs that needed to be removed. Also, in the line from the source code, there is a single comment above the assertion when computing abs_max: //check goodness of stereopair (https://github.com/opencv/opencv/blob/master/modules/calib3d/src/fisheye.cpp#L1013).\nOne of the answers in the OpenCV forums from recently (March 2020), had some logic to check for bad stereo pairs: https://answers.opencv.org/question/213931/assertion-failed-abs_max-threshold-in-function-stereocalibrate/?answer=227486#post-id-227486\nIn the script your using, I think it would translate to adding these lines here:\n diff = cornersL - cornersR\n lengths = np.linalg.norm(diff[:, :, 1], axis=-1)\n sum = np.sum(lengths, axis=0)\n if (sum > 2000.0):\n print(f\"THIS STEREO PAIR IS BROKEN!!! Diff is: {sum}\")\n cornersR = np.flipud(cornersR)\n\nAlternatively, you could just continue if that condition is met, and skip those pairs altogether (although I'm not sure if that would be ideal). You may also want to print/save the names of the images (leftName) and (rightName) so you can inspect them - although at that point, I would recommend just adding a breakpoint where the check occurs and step through to evaluate in real time.\n", "the undistorted image should be in the same size as those in your calibration, unless you did some modification to the calibration parameters\n" ]
[ 4, 0 ]
[]
[]
[ "camera_calibration", "computer_vision", "opencv", "python", "stereoscopy" ]
stackoverflow_0061002436_camera_calibration_computer_vision_opencv_python_stereoscopy.txt
Q: How to use all() in python? I want to check if all elements of a list are not present in a string. ex: l = ["abc","ghi"] s1 = "xyzjkl" s2 = "abcdef" So, when l is compared with s1 it should return True, and when l is compared with s2 it should return False. This is what I tried: all(x for x in l if x not in s1) = True all(x for x in l if x not in s2) = True I am getting True for both cases, but it should be False in the second case. Can someone please help; any solution will help, I just want to have it in a single line. Thanks, A: You need an iterable of True/False values. But you were simply getting the filtered items themselves, so when you call all() on non-empty (truthy) strings you will get True. Instead do: all([x not in s1 for x in l]) all([x not in s2 for x in l]) or just without the list comp, because all() accepts any iterable. all(x not in s1 for x in l) all(x not in s2 for x in l) A: If the goal is: Check that all elements of a list are not present in a string. The pattern should be: all(s not in my_string for s in input_list) l = ["abc","ghi"] s1 = "xyzjkl" s2 = "abcdef" print(all(s not in s1 for s in l)) # True print(all(s not in s2 for s in l)) # False A: I think what you want to do is: all(x not in s1 for x in l) # True all(x not in s2 for x in l) # False
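To see why the original attempt misbehaves, a short illustration using l and s2 from the question:

l = ["abc", "ghi"]
s2 = "abcdef"

# The filtered generator yields the items NOT found in s2...
print([x for x in l if x not in s2])     # ['ghi']
# ...and all() over non-empty strings is always True, hence the bug:
print(all(x for x in l if x not in s2))  # True (misleading)
# Mapping each item to a boolean fixes it:
print(all(x not in s2 for x in l))       # False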
How to use all() in python?
I want to check if all elements of a list are not present in a string. ex: l = ["abc","ghi"] s1 = "xyzjkl" s2 = "abcdef" So, when l is compared with s1 it should return True, and when l is compared with s2 it should return False. This is what I tried: all(x for x in l if x not in s1) = True all(x for x in l if x not in s2) = True I am getting True for both cases, but it should be False in the second case. Can someone please help; any solution will help, I just want to have it in a single line. Thanks,
[ "You need a list of True, False. But you were simply getting the matched items, so when you do an all on Truthy values you will get True. Instead do:\nall([x not in s1 for x in l])\nall([x not in s2 for x in l])\n\nor just without list comp, because all accepts an iterable.\nall(x not in s1 for x in l)\nall(x not in s2 for x in l)\n\n", "If the goal is:\n\nCheck that all elements of a list are not present in a string.\n\nThe pattern should be: all(s not in my_string for s in input_list)\nl = [\"abc\",\"ghi\"]\ns1 = \"xyzjkl\"\ns2 = \"abcdef\"\n\nprint(all(s not in s1 for s in l)) # True\nprint(all(s not in s2 for s in l)) # False\n\n", "I think what you want to do is:\nall(x not in s1 for x in l) # True\nall(x not in s2 for x in l) # False\n\n" ]
[ 2, 1, 1 ]
[]
[]
[ "python" ]
stackoverflow_0074622440_python.txt
Q: eBay Digital Signatures for APIs signature header generation Having read through eBay's guide for including digital signatures to certain of their REST API calls, I am having trouble with generating the signature header. Rather than including all of the documentation here (there is a lot!), I'll provide links to the appropriate pages and some of the documentation. The following page it the starting point provided by eBay: https://developer.ebay.com/develop/guides/digital-signatures-for-apis The next page is where I am lead to from the previous page describing how to create the signature: https://www.ietf.org/archive/id/draft-ietf-httpbis-message-signatures-13.html#name-eddsa-using-curve-edwards25 Which leads me onto the following : https://www.rfc-editor.org/rfc/rfc8032#section-5.1.6 5.1.6. Sign The inputs to the signing procedure is the private key, a 32-octet string, and a message M of arbitrary size. For Ed25519ctx and Ed25519ph, there is additionally a context C of at most 255 octets and a flag F, 0 for Ed25519ctx and 1 for Ed25519ph. 1. Hash the private key, 32 octets, using SHA-512. Let h denote the resulting digest. Construct the secret scalar s from the first half of the digest, and the corresponding public key A, as described in the previous section. Let prefix denote the second half of the hash digest, h[32],...,h[63]. 2. Compute SHA-512(dom2(F, C) || prefix || PH(M)), where M is the message to be signed. Interpret the 64-octet digest as a little- endian integer r. 3. Compute the point [r]B. For efficiency, do this by first reducing r modulo L, the group order of B. Let the string R be the encoding of this point. 4. Compute SHA512(dom2(F, C) || R || A || PH(M)), and interpret the 64-octet digest as a little-endian integer k. 5. Compute S = (r + k * s) mod L. For efficiency, again reduce k modulo L first. 6. Form the signature of the concatenation of R (32 octets) and the little-endian encoding of S (32 octets; the three most significant bits of the final octet are always zero). I have some Python code from the appendix from this same web page (https://www.rfc-editor.org/rfc/rfc8032#section-6): ## First, some preliminaries that will be needed. import hashlib def sha512(s): return hashlib.sha512(s).digest() # Base field Z_p p = 2**255 - 19 def modp_inv(x): return pow(x, p-2, p) # Curve constant d = -121665 * modp_inv(121666) % p # Group order q = 2**252 + 27742317777372353535851937790883648493 def sha512_modq(s): return int.from_bytes(sha512(s), "little") % q ## Then follows functions to perform point operations. # Points are represented as tuples (X, Y, Z, T) of extended # coordinates, with x = X/Z, y = Y/Z, x*y = T/Z def point_add(P, Q): A, B = (P[1]-P[0]) * (Q[1]-Q[0]) % p, (P[1]+P[0]) * (Q[1]+Q[0]) % p; C, D = 2 * P[3] * Q[3] * d % p, 2 * P[2] * Q[2] % p; E, F, G, H = B-A, D-C, D+C, B+A; return (E*F, G*H, F*G, E*H); # Computes Q = s * Q def point_mul(s, P): Q = (0, 1, 1, 0) # Neutral element while s > 0: if s & 1: Q = point_add(Q, P) P = point_add(P, P) s >>= 1 return Q def point_equal(P, Q): # x1 / z1 == x2 / z2 <==> x1 * z2 == x2 * z1 if (P[0] * Q[2] - Q[0] * P[2]) % p != 0: return False if (P[1] * Q[2] - Q[1] * P[2]) % p != 0: return False return True ## Now follows functions for point compression. 
# Square root of -1 modp_sqrt_m1 = pow(2, (p-1) // 4, p) # Compute corresponding x-coordinate, with low bit corresponding to # sign, or return None on failure def recover_x(y, sign): if y >= p: return None x2 = (y*y-1) * modp_inv(d*y*y+1) if x2 == 0: if sign: return None else: return 0 # Compute square root of x2 x = pow(x2, (p+3) // 8, p) if (x*x - x2) % p != 0: x = x * modp_sqrt_m1 % p if (x*x - x2) % p != 0: return None if (x & 1) != sign: x = p - x return x # Base point g_y = 4 * modp_inv(5) % p g_x = recover_x(g_y, 0) G = (g_x, g_y, 1, g_x * g_y % p) def point_compress(P): zinv = modp_inv(P[2]) x = P[0] * zinv % p y = P[1] * zinv % p return int.to_bytes(y | ((x & 1) << 255), 32, "little") def point_decompress(s): if len(s) != 32: raise Exception("Invalid input length for decompression") y = int.from_bytes(s, "little") sign = y >> 255 y &= (1 << 255) - 1 x = recover_x(y, sign) if x is None: return None else: return (x, y, 1, x*y % p) ## These are functions for manipulating the private key. def secret_expand(secret): if len(secret) != 32: raise Exception("Bad size of private key") h = sha512(secret) a = int.from_bytes(h[:32], "little") a &= (1 << 254) - 8 a |= (1 << 254) return (a, h[32:]) def secret_to_public(secret): (a, dummy) = secret_expand(secret) return point_compress(point_mul(a, G)) ## The signature function works as below. def sign(secret, msg): a, prefix = secret_expand(secret) A = point_compress(point_mul(a, G)) r = sha512_modq(prefix + msg) R = point_mul(r, G) Rs = point_compress(R) h = sha512_modq(Rs + A + msg) s = (r + h * a) % q return Rs + int.to_bytes(s, 32, "little") ## And finally the verification function. def verify(public, msg, signature): if len(public) != 32: raise Exception("Bad public key length") if len(signature) != 64: Exception("Bad signature length") A = point_decompress(public) if not A: return False Rs = signature[:32] R = point_decompress(Rs) if not R: return False s = int.from_bytes(signature[32:], "little") if s >= q: return False h = sha512_modq(Rs + public + msg) sB = point_mul(s, G) hA = point_mul(h, A) return point_equal(sB, point_add(R, hA)) Now, the problem that I am having is that this code insists on the "secret" consisting of a 32 byte array: if len(secret) != 32: raise Exception("Bad size of private key") However, the secret is described as being the private key provided by eBay's Key Management API (https://developer.ebay.com/api-docs/developer/key-management/overview.html), which is not a 32 byte array, but a 64 character ASCII string (see https://developer.ebay.com/api-docs/developer/key-management/resources/signing_key/methods/createSigningKey#h2-samples): "privateKey": "MC4CAQAwBQYDK2VwBCIEI******************************************n" When I try to generate a signature with the eBay private key using this Python code, it gives me an error saying it is a "Bad size of private key". If I convert the private key from eBay to a bytearray, it is 64 bytes long. How can I use the Python code to generate the signature header using the private key supplied by eBay? To further complicate things, I am actually using Excel VBA (Visual Basic) to make the API call after using Python to generate the signature (simply because Python is better at this kind of thing!). eBay's PAID FOR technical support has confirmed that the following headers are correct and that there is no "message" as described in https://www.rfc-editor.org/rfc/rfc8032#section-5.1.6, but they have not yet been of any further help other than suggesting that there may be a "bug". 
http.setRequestHeader "signature-input", "sig1=(""x-ebay-signature-key"" ""@method"" ""@path"" ""@authority"");created=1667386210" http.setRequestHeader "x-ebay-signature-key", "<jwe returned by eBay>" http.setRequestHeader "x-ebay-enforce-signature", "true" The remaining header would be as follows once I can generate a valid signature: http.setRequestHeader "signature" "sig1=:<signature>:" Everything I have tried results in the same response: { "errors": [ { "errorId": 215122, "domain": "ACCESS", "category": "REQUEST", "message": "Signature validation failed", "longMessage": "Signature validation failed to fulfill the request." } ] } Here are some example keys like the ones generated by eBay. https://www.ietf.org/archive/id/draft-ietf-httpbis-message-signatures-11.html#appendix-B.1.4 "The following key is an elliptical curve key over the Edwards curve ed25519, referred to in this document as test-key-ed25519. This key is PCKS#8 encoded in PEM format, with no encryption." -----BEGIN PUBLIC KEY----- MCowBQYDK2VwAyEAJrQLj5P/89iXES9+vFgrIy29clF9CC/oPPsw3c5D0bs= -----END PUBLIC KEY----- -----BEGIN PRIVATE KEY----- MC4CAQAwBQYDK2VwBCIEIJ+DYvh6SEqVTm50DFtMDoQikTmiCqirVv9mWG9qfSnF -----END PRIVATE KEY----- This is the format of private key that I believe that I need to convert to a 32-byte array to work with the above Python code. I believe that there is a typo on the linked to web page and it should be "PKCS", not "PCKS". UPDATE: If I run the following command: openssl ec -in test.pem -text Where test.pem is a text file containing: -----BEGIN PRIVATE KEY----- MC4CAQAwBQYDK2VwBCIEIJ+DYvh6SEqVTm50DFtMDoQikTmiCqirVv9mWG9qfSnF -----END PRIVATE KEY----- It displays private and public keys as 32 byte hex dumps, but even when using these values I get the same response as above with the 215122 error. When I verify using the Python "verify" method in the code above with these 32 byte hex dump keys, validation is successful. A: Alright so this is where Im at right now, not using the content-digest as it's simply a GET request so just trying to get the basics working, but none of this seems to work. $public = "xxx"; $private = "yyy"; $jwe = "jwe"; $path = "/sell/fulfillment/v1/order/" . "11-xxxx-yyyy"; $signature_input_txt = '("x-ebay-signature-key" "@method" "@path" "@authority");created=' . time(); // $signature_base = '"content-digest": sha-256=:' . base64_encode($contentDigest) . ":\n"; $signature_base = '"x-ebay-signature-key": ' . $jwe; $signature_base .= '"@method": POST'; $signature_base .= '"@path": ' . $path; $signature_base .= '"@authority": ' . "apiz.ebay.com"; $signature_base .= '"@signature-params": ' . $signature_input_txt; // ensure signature_base is UTF-8 if (!mb_check_encoding($signature_base, 'UTF-8')) { $signature_base = mb_convert_encoding($signature_base, 'UTF-8'); } // dd($signature_base); // base 64 encode our signature_base $signature_base_base64_encoded = base64_encode($signature_base); // format the private key as required $formatted_private_key = "-----BEGIN RSA PRIVATE KEY-----" . PHP_EOL . $private . PHP_EOL . "-----END RSA PRIVATE KEY-----"; // sign openssl_sign($signature_base_base64_encoded, $signed_signature, $formatted_private_key, "sha256WithRSAEncryption"); return [ 'Authorization' => 'Bearer ' . $this->marketplace->getToken('oauth2.access_token', 'production'), 'Accept' => 'application/json', 'Content-Type' => 'application/json', 'Signature-Input' => 'sig1=' . $signature_input_txt, 'Signature' => 'sig1=:' . base64_encode($signed_signature) . 
':', 'x-ebay-signature-key' => $jwe, 'x-ebay-enforce-signature' => true ]; A: I'm going to put this here for anyone struggling to get this working with PHP, adapted from Renegade_Mtl answer (you'd missed the need for a new line for each signature_base and it didn't need to be encoded). /** * @param $method - e.g. POST, GET * @param $path - e.g /sell/finances/v1/seller_funds_summary * @param $host - e.g. api.ebay.com * @param $keyset // public, private and jwt keys generated from https://apiz.ebay.com/developer/key_management/v1/signing_key * @param $timestamp - e.g. time() * @return array of headers */ private function createHeaders(string $method, string $path, string $host, array $tokens, int $time) { $signature_input_txt = '("x-ebay-signature-key" "@method" "@path" "@authority");created=' . $time; // $signature_base = '"content-digest": sha-256=:' . base64_encode($contentDigest) . ":\n"; $signature_base = '"x-ebay-signature-key": ' . $tokens['jwe']."\n"; $signature_base .= '"@method": ' . $method."\n"; $signature_base .= '"@path": ' . $path."\n"; $signature_base .= '"@authority": ' . $host."\n"; $signature_base .= '"@signature-params": ' . $signature_input_txt; // format the private key as required $formatted_private_key = "-----BEGIN RSA PRIVATE KEY-----" . PHP_EOL . $tokens['privateKey'] . PHP_EOL . "-----END RSA PRIVATE KEY-----"; openssl_sign($signature_base, $signed_signature, $formatted_private_key, "sha256WithRSAEncryption"); return [ 'Signature-Input' => 'sig1=' . $signature_input_txt, 'Signature' => 'sig1=:' . base64_encode($signed_signature) . ':', 'x-ebay-signature-key' => $tokens['jwe'], 'x-ebay-enforce-signature' => "true" ]; } We only use GET's but if you also POST then you'd need also the content digest... Hope this helps someone from wasting hours and hours trying to figure it out.
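Pulling the Python thread above together: the "Bad size of private key" check fails because eBay returns the key as base64-encoded PKCS#8, not as the raw 32-byte Ed25519 seed that the RFC 8032 sign() expects. A minimal sketch of the conversion, assuming the unencrypted PKCS#8/Ed25519 encoding shown in the question (helper names are mine, not eBay's):

import base64

def seed_from_pkcs8(private_key_b64):
    # eBay's "privateKey" value decodes to 48 bytes of DER: a fixed
    # 16-byte PKCS#8/Ed25519 header followed by the raw 32-byte seed.
    der = base64.b64decode(private_key_b64)
    return der[-32:]

# Equivalent via the third-party `cryptography` package, which parses
# the DER instead of slicing it:
from cryptography.hazmat.primitives import serialization

def seed_via_cryptography(private_key_b64):
    pem = ("-----BEGIN PRIVATE KEY-----\n" + private_key_b64 +
           "\n-----END PRIVATE KEY-----\n").encode("ascii")
    key = serialization.load_pem_private_key(pem, password=None)
    return key.private_bytes(
        encoding=serialization.Encoding.Raw,
        format=serialization.PrivateFormat.Raw,
        encryption_algorithm=serialization.NoEncryption(),
    )

Either seed can then be passed straight to sign(secret, msg) from the RFC 8032 reference code quoted in the question.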
eBay Digital Signatures for APIs signature header generation
Having read through eBay's guide for including digital signatures to certain of their REST API calls, I am having trouble with generating the signature header. Rather than including all of the documentation here (there is a lot!), I'll provide links to the appropriate pages and some of the documentation. The following page it the starting point provided by eBay: https://developer.ebay.com/develop/guides/digital-signatures-for-apis The next page is where I am lead to from the previous page describing how to create the signature: https://www.ietf.org/archive/id/draft-ietf-httpbis-message-signatures-13.html#name-eddsa-using-curve-edwards25 Which leads me onto the following : https://www.rfc-editor.org/rfc/rfc8032#section-5.1.6 5.1.6. Sign The inputs to the signing procedure is the private key, a 32-octet string, and a message M of arbitrary size. For Ed25519ctx and Ed25519ph, there is additionally a context C of at most 255 octets and a flag F, 0 for Ed25519ctx and 1 for Ed25519ph. 1. Hash the private key, 32 octets, using SHA-512. Let h denote the resulting digest. Construct the secret scalar s from the first half of the digest, and the corresponding public key A, as described in the previous section. Let prefix denote the second half of the hash digest, h[32],...,h[63]. 2. Compute SHA-512(dom2(F, C) || prefix || PH(M)), where M is the message to be signed. Interpret the 64-octet digest as a little- endian integer r. 3. Compute the point [r]B. For efficiency, do this by first reducing r modulo L, the group order of B. Let the string R be the encoding of this point. 4. Compute SHA512(dom2(F, C) || R || A || PH(M)), and interpret the 64-octet digest as a little-endian integer k. 5. Compute S = (r + k * s) mod L. For efficiency, again reduce k modulo L first. 6. Form the signature of the concatenation of R (32 octets) and the little-endian encoding of S (32 octets; the three most significant bits of the final octet are always zero). I have some Python code from the appendix from this same web page (https://www.rfc-editor.org/rfc/rfc8032#section-6): ## First, some preliminaries that will be needed. import hashlib def sha512(s): return hashlib.sha512(s).digest() # Base field Z_p p = 2**255 - 19 def modp_inv(x): return pow(x, p-2, p) # Curve constant d = -121665 * modp_inv(121666) % p # Group order q = 2**252 + 27742317777372353535851937790883648493 def sha512_modq(s): return int.from_bytes(sha512(s), "little") % q ## Then follows functions to perform point operations. # Points are represented as tuples (X, Y, Z, T) of extended # coordinates, with x = X/Z, y = Y/Z, x*y = T/Z def point_add(P, Q): A, B = (P[1]-P[0]) * (Q[1]-Q[0]) % p, (P[1]+P[0]) * (Q[1]+Q[0]) % p; C, D = 2 * P[3] * Q[3] * d % p, 2 * P[2] * Q[2] % p; E, F, G, H = B-A, D-C, D+C, B+A; return (E*F, G*H, F*G, E*H); # Computes Q = s * Q def point_mul(s, P): Q = (0, 1, 1, 0) # Neutral element while s > 0: if s & 1: Q = point_add(Q, P) P = point_add(P, P) s >>= 1 return Q def point_equal(P, Q): # x1 / z1 == x2 / z2 <==> x1 * z2 == x2 * z1 if (P[0] * Q[2] - Q[0] * P[2]) % p != 0: return False if (P[1] * Q[2] - Q[1] * P[2]) % p != 0: return False return True ## Now follows functions for point compression. 
# Square root of -1 modp_sqrt_m1 = pow(2, (p-1) // 4, p) # Compute corresponding x-coordinate, with low bit corresponding to # sign, or return None on failure def recover_x(y, sign): if y >= p: return None x2 = (y*y-1) * modp_inv(d*y*y+1) if x2 == 0: if sign: return None else: return 0 # Compute square root of x2 x = pow(x2, (p+3) // 8, p) if (x*x - x2) % p != 0: x = x * modp_sqrt_m1 % p if (x*x - x2) % p != 0: return None if (x & 1) != sign: x = p - x return x # Base point g_y = 4 * modp_inv(5) % p g_x = recover_x(g_y, 0) G = (g_x, g_y, 1, g_x * g_y % p) def point_compress(P): zinv = modp_inv(P[2]) x = P[0] * zinv % p y = P[1] * zinv % p return int.to_bytes(y | ((x & 1) << 255), 32, "little") def point_decompress(s): if len(s) != 32: raise Exception("Invalid input length for decompression") y = int.from_bytes(s, "little") sign = y >> 255 y &= (1 << 255) - 1 x = recover_x(y, sign) if x is None: return None else: return (x, y, 1, x*y % p) ## These are functions for manipulating the private key. def secret_expand(secret): if len(secret) != 32: raise Exception("Bad size of private key") h = sha512(secret) a = int.from_bytes(h[:32], "little") a &= (1 << 254) - 8 a |= (1 << 254) return (a, h[32:]) def secret_to_public(secret): (a, dummy) = secret_expand(secret) return point_compress(point_mul(a, G)) ## The signature function works as below. def sign(secret, msg): a, prefix = secret_expand(secret) A = point_compress(point_mul(a, G)) r = sha512_modq(prefix + msg) R = point_mul(r, G) Rs = point_compress(R) h = sha512_modq(Rs + A + msg) s = (r + h * a) % q return Rs + int.to_bytes(s, 32, "little") ## And finally the verification function. def verify(public, msg, signature): if len(public) != 32: raise Exception("Bad public key length") if len(signature) != 64: Exception("Bad signature length") A = point_decompress(public) if not A: return False Rs = signature[:32] R = point_decompress(Rs) if not R: return False s = int.from_bytes(signature[32:], "little") if s >= q: return False h = sha512_modq(Rs + public + msg) sB = point_mul(s, G) hA = point_mul(h, A) return point_equal(sB, point_add(R, hA)) Now, the problem that I am having is that this code insists on the "secret" consisting of a 32 byte array: if len(secret) != 32: raise Exception("Bad size of private key") However, the secret is described as being the private key provided by eBay's Key Management API (https://developer.ebay.com/api-docs/developer/key-management/overview.html), which is not a 32 byte array, but a 64 character ASCII string (see https://developer.ebay.com/api-docs/developer/key-management/resources/signing_key/methods/createSigningKey#h2-samples): "privateKey": "MC4CAQAwBQYDK2VwBCIEI******************************************n" When I try to generate a signature with the eBay private key using this Python code, it gives me an error saying it is a "Bad size of private key". If I convert the private key from eBay to a bytearray, it is 64 bytes long. How can I use the Python code to generate the signature header using the private key supplied by eBay? To further complicate things, I am actually using Excel VBA (Visual Basic) to make the API call after using Python to generate the signature (simply because Python is better at this kind of thing!). eBay's PAID FOR technical support has confirmed that the following headers are correct and that there is no "message" as described in https://www.rfc-editor.org/rfc/rfc8032#section-5.1.6, but they have not yet been of any further help other than suggesting that there may be a "bug". 
http.setRequestHeader "signature-input", "sig1=(""x-ebay-signature-key"" ""@method"" ""@path"" ""@authority"");created=1667386210" http.setRequestHeader "x-ebay-signature-key", "<jwe returned by eBay>" http.setRequestHeader "x-ebay-enforce-signature", "true" The remaining header would be as follows once I can generate a valid signature: http.setRequestHeader "signature" "sig1=:<signature>:" Everything I have tried results in the same response: { "errors": [ { "errorId": 215122, "domain": "ACCESS", "category": "REQUEST", "message": "Signature validation failed", "longMessage": "Signature validation failed to fulfill the request." } ] } Here are some example keys like the ones generated by eBay. https://www.ietf.org/archive/id/draft-ietf-httpbis-message-signatures-11.html#appendix-B.1.4 "The following key is an elliptical curve key over the Edwards curve ed25519, referred to in this document as test-key-ed25519. This key is PCKS#8 encoded in PEM format, with no encryption." -----BEGIN PUBLIC KEY----- MCowBQYDK2VwAyEAJrQLj5P/89iXES9+vFgrIy29clF9CC/oPPsw3c5D0bs= -----END PUBLIC KEY----- -----BEGIN PRIVATE KEY----- MC4CAQAwBQYDK2VwBCIEIJ+DYvh6SEqVTm50DFtMDoQikTmiCqirVv9mWG9qfSnF -----END PRIVATE KEY----- This is the format of private key that I believe that I need to convert to a 32-byte array to work with the above Python code. I believe that there is a typo on the linked to web page and it should be "PKCS", not "PCKS". UPDATE: If I run the following command: openssl ec -in test.pem -text Where test.pem is a text file containing: -----BEGIN PRIVATE KEY----- MC4CAQAwBQYDK2VwBCIEIJ+DYvh6SEqVTm50DFtMDoQikTmiCqirVv9mWG9qfSnF -----END PRIVATE KEY----- It displays private and public keys as 32 byte hex dumps, but even when using these values I get the same response as above with the 215122 error. When I verify using the Python "verify" method in the code above with these 32 byte hex dump keys, validation is successful.
[ "Alright so this is where Im at right now, not using the content-digest as it's simply a GET request so just trying to get the basics working, but none of this seems to work.\n $public = \"xxx\";\n $private = \"yyy\";\n $jwe = \"jwe\";\n $path = \"/sell/fulfillment/v1/order/\" . \"11-xxxx-yyyy\";\n $signature_input_txt = '(\"x-ebay-signature-key\" \"@method\" \"@path\" \"@authority\");created=' . time();\n\n // $signature_base = '\"content-digest\": sha-256=:' . base64_encode($contentDigest) . \":\\n\";\n $signature_base = '\"x-ebay-signature-key\": ' . $jwe;\n $signature_base .= '\"@method\": POST';\n $signature_base .= '\"@path\": ' . $path;\n $signature_base .= '\"@authority\": ' . \"apiz.ebay.com\";\n $signature_base .= '\"@signature-params\": ' . $signature_input_txt;\n\n // ensure signature_base is UTF-8\n if (!mb_check_encoding($signature_base, 'UTF-8')) {\n $signature_base = mb_convert_encoding($signature_base, 'UTF-8');\n }\n\n\n // dd($signature_base);\n // base 64 encode our signature_base\n $signature_base_base64_encoded = base64_encode($signature_base);\n\n // format the private key as required\n $formatted_private_key = \"-----BEGIN RSA PRIVATE KEY-----\" . PHP_EOL . $private . PHP_EOL . \"-----END RSA PRIVATE KEY-----\";\n\n // sign\n openssl_sign($signature_base_base64_encoded, $signed_signature, $formatted_private_key, \"sha256WithRSAEncryption\");\n\n return [\n 'Authorization' => 'Bearer ' . $this->marketplace->getToken('oauth2.access_token', 'production'),\n 'Accept' => 'application/json',\n 'Content-Type' => 'application/json',\n 'Signature-Input' => 'sig1=' . $signature_input_txt,\n 'Signature' => 'sig1=:' . base64_encode($signed_signature) . ':',\n 'x-ebay-signature-key' => $jwe,\n 'x-ebay-enforce-signature' => true\n ];\n\n", "I'm going to put this here for anyone struggling to get this working with PHP, adapted from Renegade_Mtl answer (you'd missed the need for a new line for each signature_base and it didn't need to be encoded).\n/**\n * @param $method - e.g. POST, GET\n * @param $path - e.g /sell/finances/v1/seller_funds_summary\n * @param $host - e.g. api.ebay.com\n * @param $keyset // public, private and jwt keys generated from https://apiz.ebay.com/developer/key_management/v1/signing_key\n * @param $timestamp - e.g. time()\n * @return array of headers\n */\nprivate function createHeaders(string $method, string $path, string $host, array $tokens, int $time) {\n $signature_input_txt = '(\"x-ebay-signature-key\" \"@method\" \"@path\" \"@authority\");created=' . $time;\n\n // $signature_base = '\"content-digest\": sha-256=:' . base64_encode($contentDigest) . \":\\n\";\n $signature_base = '\"x-ebay-signature-key\": ' . $tokens['jwe'].\"\\n\";\n $signature_base .= '\"@method\": ' . $method.\"\\n\";\n $signature_base .= '\"@path\": ' . $path.\"\\n\";\n $signature_base .= '\"@authority\": ' . $host.\"\\n\";\n $signature_base .= '\"@signature-params\": ' . $signature_input_txt;\n \n // format the private key as required\n $formatted_private_key = \"-----BEGIN RSA PRIVATE KEY-----\" . PHP_EOL . $tokens['privateKey'] . PHP_EOL . \"-----END RSA PRIVATE KEY-----\";\n\n openssl_sign($signature_base, $signed_signature, $formatted_private_key, \"sha256WithRSAEncryption\");\n return [\n 'Signature-Input' => 'sig1=' . $signature_input_txt,\n 'Signature' => 'sig1=:' . base64_encode($signed_signature) . 
':',\n 'x-ebay-signature-key' => $tokens['jwe'],\n 'x-ebay-enforce-signature' => \"true\"\n ];\n}\n\nWe only use GET's but if you also POST then you'd need also the content digest... Hope this helps someone from wasting hours and hours trying to figure it out.\n" ]
[ 0, 0 ]
[]
[]
[ "digital_signature", "ebay_api", "python", "python_3.x", "rest" ]
stackoverflow_0074234508_digital_signature_ebay_api_python_python_3.x_rest.txt
Q: Python: How to find the second highest number in a list? def second_highest(list): """ (list of int) -> int How do you find the second highest value from the list of integers without using remove, pop, or sort (which I tried) since I need to use the same list later on? There will be no duplication of numbers. list.sort() return list[-2] I tried removing the highest number using max, sorting the list, but since those mutates the list, I can't use them. A: Use the builtin sorted oy mylist, which will not modify mylist(thanks to @Tofystedeth) mylist = [1, 2, 8, 3, 12] print(sorted(mylist, reverse=True)[1]) A: data = [1,2,8,3,12] largest = None second_largest = None for a in data: if not largest or a > largest: if largest: second_largest = largest largest = a print("largest: {}".format(largest)) print("second_largest: {}".format(second_largest)) A: arr = [2, 3, 4, 2, 4, -3, 43, -4, -25, 45, 9] my_list = list(set(arr)) my_list.sort() if len(my_list) == 1: print(my_list[0]) elif len(my_list) >= 2: print(my_list[-2]) A: nums=[1,2,3,3,5,4,5,5,2] #making new set to maintain uniqueness new_list= set(nums) high= max(nums) new_list.remove(high) print(max(new_list)) #This code uses remove but does not do any changes to the given list print(nums) A: ##This will work even if numbers are negative. Logic is : convert the list() to set() and back to list to use the sort() method and then print the -2 position value in the list. This gives you the second-highest value.## "Python 3" if __name__ == '__main__': n = int(input()) arr = list(map(int, input().split())) z = list(set(arr)) z.sort() print(z[-2]) A: array = [2, 3, 4, 2, 4, -3, 43, -4, -25, 45, 9] arr = array.copy() z = max(arr) # if list contains duplicate max elements removing them until We get Second highest while max(arr) == z: arr.remove(max(arr)) print(max(arr)) print(array) A: Here is the code to find the 2nd largest number in the list without using any inbuilt functions. data = [11,22,1,2,5,67,21,32] max1 = data[0] # largest num max2 = data[1] # second largest num for num in data: if num > max1: max2 = max1 # Now this number would be second largest max1 = num # This num is largest number in list now. # Check with second largest elif num > max2: max2 = num # Now this would be second largest. print(max2) A: I think my answer is more simple and more readable for beginners x=[2,3,4,2,5,9,8,4,5,6,7,8,9,2,1,3,4,5] max=-10000000 for i in x: if(i>max): secondmax=max max=i elif(i>secondmax and i!=max): secondmax=i print(secondmax) A: Here is a way to do it using sort. However, it gives you the possibility to reuse the list since you are temporarily storing the list arr in sortedArr. Calling arr by itself would return the original list. Here you can Try it online! # Finding a runner up number in a List of Arrays # Second highest number in a list arr = [2,3,6,6,5] sortedArr = sorted(arr,reverse=True) # Sorting the array in descending order. highest = sortedArr[0] # Assign the highest value in the array to the variable `highest`. secondHighest = 0 # Initializing the variable `secondHighest` to 0. for x in (sortedArr): # Iterating through the sorted array and checking if the value is equal to the highest value. if(x == highest): continue # If it is, it will continue to the next value. else: secondHighest = x # If it is not, it will assign the value to the variable `secondHighest` break # break out of the loop. print(secondHighest) # Printing the value of the variable `secondHighest`. >>> 5
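A single pass with two trackers also solves this without mutating or copying the list, and avoids the initialization pitfalls in some of the loop-based answers above (e.g. second_largest never updating when a value falls between the two current maxima). A minimal sketch:

def second_highest(numbers):
    largest = second = float('-inf')
    for n in numbers:
        if n > largest:
            largest, second = n, largest  # old maximum becomes the runner-up
        elif n > second:
            second = n
    return second

print(second_highest([1, 2, 8, 3, 12]))  # 8, and the input list is untouched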
Python: How to find the second highest number in a list?
def second_highest(list): """ (list of int) -> int How do you find the second highest value from the list of integers without using remove, pop, or sort (which I tried) since I need to use the same list later on? There will be no duplication of numbers. list.sort() return list[-2] I tried removing the highest number using max, sorting the list, but since those mutates the list, I can't use them.
[ "Use the builtin sorted oy mylist, which will not modify mylist(thanks to @Tofystedeth) \nmylist = [1, 2, 8, 3, 12]\nprint(sorted(mylist, reverse=True)[1])\n\n", "data = [1,2,8,3,12]\n\nlargest = None\nsecond_largest = None\n\nfor a in data:\n if not largest or a > largest:\n if largest:\n second_largest = largest\n largest = a\n\nprint(\"largest: {}\".format(largest))\nprint(\"second_largest: {}\".format(second_largest))\n\n", " arr = [2, 3, 4, 2, 4, -3, 43, -4, -25, 45, 9]\n my_list = list(set(arr))\n my_list.sort()\n if len(my_list) == 1:\n print(my_list[0])\n elif len(my_list) >= 2:\n print(my_list[-2])\n\n", "nums=[1,2,3,3,5,4,5,5,2]\n\n#making new set to maintain uniqueness\nnew_list= set(nums)\n\nhigh= max(nums)\nnew_list.remove(high)\nprint(max(new_list))\n\n#This code uses remove but does not do any changes to the given list\nprint(nums)\n\n", "##This will work even if numbers are negative. Logic is : convert the list() to set() and back to list to use the sort() method and then print the -2 position value in the list. This gives you the second-highest value.## \"Python 3\"\nif __name__ == '__main__':\n n = int(input())\n arr = list(map(int, input().split()))\n z = list(set(arr))\n z.sort()\n print(z[-2])\n\n", "array = [2, 3, 4, 2, 4, -3, 43, -4, -25, 45, 9]\n\narr = array.copy()\n\nz = max(arr)\n\n# if list contains duplicate max elements removing them until We get Second highest\n\nwhile max(arr) == z:\n arr.remove(max(arr))\n\nprint(max(arr))\nprint(array)\n\n", "Here is the code to find the 2nd largest number in the list without using any inbuilt functions.\ndata = [11,22,1,2,5,67,21,32]\n\nmax1 = data[0] # largest num\nmax2 = data[1] # second largest num\n\n\nfor num in data:\n if num > max1:\n max2 = max1 # Now this number would be second largest\n max1 = num # This num is largest number in list now.\n \n # Check with second largest\n elif num > max2:\n max2 = num # Now this would be second largest.\n\nprint(max2)\n\n", "I think my answer is more simple and more readable for beginners\nx=[2,3,4,2,5,9,8,4,5,6,7,8,9,2,1,3,4,5]\n\nmax=-10000000\nfor i in x:\n if(i>max):\n secondmax=max\n max=i\n elif(i>secondmax and i!=max):\n secondmax=i\n \n \nprint(secondmax) \n \n\n", "Here is a way to do it using sort. However, it gives you the possibility to reuse the list since you are temporarily storing the list arr in sortedArr. Calling arr by itself would return the original list. Here you can Try it online!\n# Finding a runner up number in a List of Arrays\n# Second highest number in a list\n\narr = [2,3,6,6,5]\nsortedArr = sorted(arr,reverse=True) # Sorting the array in descending order.\nhighest = sortedArr[0] # Assign the highest value in the array to the variable `highest`.\nsecondHighest = 0 # Initializing the variable `secondHighest` to 0.\n\n\nfor x in (sortedArr): # Iterating through the sorted array and checking if the value is equal to the highest value. \n if(x == highest):\n continue # If it is, it will continue to the next value. \n else:\n secondHighest = x # If it is not, it will assign the value to the variable `secondHighest` \n break # break out of the loop.\n\nprint(secondHighest) # Printing the value of the variable `secondHighest`.\n\n>>> 5\n\n" ]
[ 11, 4, 1, 1, 1, 0, 0, 0, 0 ]
[ "\nCopy unique list elements to another list (if Already the list elements are unique, go to step 2) .\nYou should find the maximum in the list and save its index. Then remove it from the list using the remove() function and then find the maximum of the new list (with the original maximum value removed) and that will be your second highest element. You can then use the insert() method to add back the original maximum back into the list.\n\n", "Here is the code to find the maximum and second maximum number in a list or array(python). Tried with all the combination of elements(negative and positive), it is working fine. We can optimize this code as i am not using any inbuilt method.\nBelow is the list combinations this code is tested with:\nla = [1,1,1,1,1],la = [1,1,1,1,1,0],la = [5,4,1,2,3],la = [3,6,2,7],la = [1,2,3,4,5],la = [4,5,1,2,3],la = [5,5,4,3,2,1],la = [-1,1,0,0,0],la = [-1,-2,-3,-4], la = [-1,0],la = [-2,-2,-2,-2,0,1,1,1,1]\ndef findSecmax(la):\n mx = la[0]\n sec = 0 # or sec = min(la) or we can take (- sys.maxsize)\n for i in la:\n if i < sec:\n sec = i\n\n for i in la:\n if i > mx:\n mx = i\n\n for i in la:\n if i > sec and mx > i:\n sec = i\n\n if sec == mx:\n return \"we have same elements in the list. So max is {}\".format(mx)\n else:\n return mx,sec\n\n\nprint(findSecmax(la))\n\n" ]
[ -2, -2 ]
[ "python" ]
stackoverflow_0033486058_python.txt
Q: How I get data from a Input to run a function in Django I'm following a Django tutorial and I'm having trouble getting the data from an input in my HTML. This is the code from the tutorial: views.py def buscar(request): if request.GET["prd"]: producto = request.GET["prd"] articulo = Escucha.objects.filter(user__icontains=producto) return render(request, "Producto/resultados_busqueda.html", {"articulo": articulo, "query": producto}) else: mensaje = "Nos has introducido nada" return HttpResponse(mensaje) HTML: <html lang="en"> <head> <title>Busqueda de producto</title> </head> <body> <form action="/buscar/" method="get"> <input type="text" name="prd"> <input type="submit" value="Buscar"> </form> </body> </html> And this is the code I try to run: views.py def suma(request): if request.get["first"]: first = request.GET["Primer"] second = request.GET["Segundo"] variable = first + second return render(request, "resultado.html", {"variable": variable}) else: mensaje = "lo siento" return HttpResponse(mensaje) HTML (pruebas.HTML) <html lang="en"> <head> <title>Document</title> </head> <body> <form action="/pruebas/" method="get"> <input type="text" name="first"> <input type="text" name="second"> <input type="submit" value="suma"> <p>Resultado: {{ variable }}</p> </form> </body> </html> And the problem I get is: AttributeError at /pruebas/ 'WSGIRequest' object has no attribute 'get' I really don't know what the problem is; to me the two codes look similar. A: And the problem I get is: AttributeError at /pruebas/ 'WSGIRequest' object has no attribute 'get' I really don't know what the problem is; to me the two codes look similar. No, the codes are not similar: if you look closely, it should be request.GET["first"], not request.get["first"]. Also, in the HTML you have named the inputs first and second, so the view should be: def suma(request): if request.GET["first"]: first = request.GET["first"] second = request.GET["second"] variable = first + second return render(request, "resultado.html", {"variable": variable}) else: mensaje = "lo siento" return HttpResponse(mensaje)
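One caveat the corrected view above still carries: request.GET values arrive as strings, so first + second concatenates ("2" + "3" gives "23") rather than adds. A sketch with explicit casting, using .get() so a missing field does not raise (my variant, not from the answer):

def suma(request):
    first = request.GET.get("first")
    second = request.GET.get("second")
    if first and second:
        variable = int(first) + int(second)  # cast before adding
        return render(request, "resultado.html", {"variable": variable})
    mensaje = "lo siento"
    return HttpResponse(mensaje)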
How I get data from a Input to run a function in Django
I'm following a Django tutorial and I'm having trouble getting the data from an input in my HTML. This is the code from the tutorial: views.py def buscar(request): if request.GET["prd"]: producto = request.GET["prd"] articulo = Escucha.objects.filter(user__icontains=producto) return render(request, "Producto/resultados_busqueda.html", {"articulo": articulo, "query": producto}) else: mensaje = "Nos has introducido nada" return HttpResponse(mensaje) HTML: <html lang="en"> <head> <title>Busqueda de producto</title> </head> <body> <form action="/buscar/" method="get"> <input type="text" name="prd"> <input type="submit" value="Buscar"> </form> </body> </html> And this is the code I try to run: views.py def suma(request): if request.get["first"]: first = request.GET["Primer"] second = request.GET["Segundo"] variable = first + second return render(request, "resultado.html", {"variable": variable}) else: mensaje = "lo siento" return HttpResponse(mensaje) HTML (pruebas.HTML) <html lang="en"> <head> <title>Document</title> </head> <body> <form action="/pruebas/" method="get"> <input type="text" name="first"> <input type="text" name="second"> <input type="submit" value="suma"> <p>Resultado: {{ variable }}</p> </form> </body> </html> And the problem I get is: AttributeError at /pruebas/ 'WSGIRequest' object has no attribute 'get' I really don't know what the problem is; to me the two codes look similar.
[ "\nAnd the problem I get is: AttributeError at /pruebas/ 'WSGIRequest' object has no attribute 'get'\n\n\nY really don't know what's the problem, for me the two codes are similar.\n\nNo, the codes are not similar, if you look at it correctly, it should be request.GET[\"first\"] not request.get[\"first\"].\nAnd also in Html you have named the inputs as first and second so the view should be:\ndef suma(request):\n\n if request.GET[\"first\"]:\n first = request.GET[\"first\"]\n second = request.GET[\"second\"]\n\n variable = first + second\n return render(request, \"resultado.html\", {\"variable\": variable})\n else:\n mensaje = \"lo siento\"\n return HttpResponse(mensaje)\n\n" ]
[ 0 ]
[]
[]
[ "django", "django_templates", "django_views", "python" ]
stackoverflow_0074621441_django_django_templates_django_views_python.txt
Q: Django SMTPAuthenticationError I am new in django and developing a web application using django. I have successfully set the Signup functionality using Userena in my web application and can Register as a user with Verification Email. I can show you my SMTP settings in my settings.py file EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_HOST_USER = 'raihncse@gmail.com' DEFAULT_FROM_EMAIL = 'raihncse@gmail.com' SERVER_EMAIL = 'raihncse@gmail.com' EMAIL_HOST_PASSWORD = '**************' everything was fine before i have changed my EMAIL_HOST_PASSWORD Infact, for some reason I have to change the previous password of that SERVER_EMAIL(raihncse@gmail.com). I have already edit the EMAIL_HOST_PASSWORD according to the new SERVER_EMAIL password. but now, if I want to register as a new user, I am facing the following error SMTPAuthenticationError at /accounts/signup/ (534, '5.7.14 <https://accounts.google.com/ContinueSignIn?sarp=1&scc=1&plt=AKgnsbvNq\n5.7.14 S3l1pFXENupDa_SdPphNHrnzeLPUOyf6O0l1s31w7h_UARx11P89AxPeeZ6xBi2KeQRjsw\n5.7.14 nvpxZhPVv771W9ljEDyeWnqpqv3_nakuPo36BEl3IlYj9qVujNB5cm0TYFf9LQAxRjFdda\n5.7.14 xh-y5spA9zIQONDsvRRgN3e0DXoIkgxTO3Mu75IaACi-XlvFtFfPBiQ81gUrBZ_PhZsBmh\n5.7.14 ajsSf-flNEmoSydyOTNdmwdB0__8> Please log in via your web browser and\n5.7.14 then try again.\n5.7.14 Learn more at\n5.7.14 https://support.google.com/mail/bin/answer.py?answer=78754 40sm12125121qgi.47 - gsmtp') A: A relatively recent change in Google's authentication system means you're going to have to "allow less secure app access" to your Google account, in order for this to work. In your error, you are recommended to visit this link: https://support.google.com/mail/answer/78754 On that page: Step #2 asks you to try Displaying an Unlock Captcha Step #3 explains how to allow less secure app access. In summary: Go to Allow less secure apps and choose "Allow" to let less secure apps access your Google account. We don't recommend this option because it may make it easier for someone to gain access to your account. A: Recent security protocol of Google will not allow you to send messages through less secure apps. So the fix is: Turn ON less secure apps. But only enabling this doesn't seem to work now. Google does not allow apps to send messages using your default gmail password. So to send messages you have to follow these two simple steps: Turn ON two factor authentication of your gmail account. Link to turn it on. Then create app password for your gmail account from Google. It will be a 16 digit code. Put it in settings.py or _credentials.py by: EMAIL_HOST_PASSWORD = 'sixt eend igit code' Link to know how to set up your application password for email And BINGO! Problem will be solved. Note: App password can only be created if you have enabled 2 factor authentication for your account. A: For me, I did turn on the less secure app option even then I was getting SMTP error.This is how I solved it. Go to https://accounts.google.com/DisplayUnlockCaptcha and allow access to your Google account by clicking on continue and then try resetting password again in Django. A: This works for me: Turn ON two factors authentication of your Gmail account. Then create an app password for your Gmail account from Google. It will be a 16 digit code. 
Put it in settings.py or env.py by EMAIL_HOST_PASSWORD = 'sixteen-digit code you get from your Gmail' and remember to add them in Django config vars also A: You need turn on support for less secure devices: https://www.google.com/settings/security/lesssecureapps A: If you are integrating with AWS, my suggestion is to use SES(simple Email Service). You can add and verify your email address in SES which wouldn't raise SMTPAuthenticationError. I have followed this - https://medium.com/hackernoon/the-easiest-way-to-send-emails-with-django-using-ses-from-aws-62f3d3d33efd. A: This worked for me: I first created email_info.py in the same folder as settings.py: from .email_info import * EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = 'youremail@gmail.com' EMAIL_HOST_PASSWORD = 'your_pass' EMAIL_PORT = 587 Then in settings.py I added: from .email_info import * EMAIL_USE_TLS = EMAIL_USE_TLS EMAIL_HOST = EMAIL_HOST EMAIL_HOST_USER = EMAIL_HOST_USER EMAIL_HOST_PASSWORD = EMAIL_HOST_PASSWORD EMAIL_PORT = EMAIL_PORT And finally in views.py: from django.conf import settings from django.core.mail import send_mail send_mail(subject, message, from_email, to_list, fail_silently=Tre) Here's a YouTube video that explains this process: https://www.youtube.com/watch?v=51mmqf5a0Ss A: You can also generate a dedicated email password for Django application. 1- Please check this link https://support.google.com/accounts/answer/185839?co=GENIE.Platform%3DDesktop&hl=en 2- Open the 2 Step verification setting from GOOGLE ACCOUNT--> MANAGE YOUR ACCOUNT --> SECURITY ---> 2 STEP VERIFICATION 3- Third, create a dedicated pass for Django, it will be 16 characters. 4- You can find the django email settings below EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = 'your_account@gmail.com' EMAIL_HOST_PASSWORD = '16characters password' EMAIL_PORT = 587 EMAIL_USE_TLS = True A: Make sure the password in EMAIL_HOST_PASSWORD is the password that you can use to login to the gmail account in EMAIL_HOST_USER. To confirm that, go to gmail.com and login to the gmail account in EMAIL_HOST_USER using the password in EMAIL_HOST_PASSWORD. If it works in one go without any form of 2-step authentication, then you're set. Otherwise, allow less secure apps and disable display unlock captcha using the tutorials found in other answers. For me, doing both and most importantly, using the correct password worked for me. A: How to get emails from Django or any library: Notes Note: If your account is using two step authentication it won't work. First open an Incognito window with the account you want to send emails with. Open Less secure apps Then enable it After that navigate to Unlock Captcha Then enable it After that fill the account email and password in the library which you are using In Django: EMAIL_HOST_USER is the email address. Ex: email@example.com EMAIL_HOST_PASSWORD is the email password. Ex: smtpmessages.manager@2012 Run the program First it will not work, To make it work open Gmail on the incognito window. Then there will be an email from google saying that an app needs access to your google account Open the email and click the link and select yes it was me Then retry and it should work Thank You! For more info visit: Check Gmail through other email platforms A: Turn ON two factors authentication of your Gmail account. 
Generate App Password(You will get this option after Two factor authentication) >> select app >> select device (In my case App is Gmail and device is Windows Machine) Copy the 16 digit password Paste this password at EMAIL_HOST_PASSWORD = "(Paste password here)" A: This is not exactly an answer to your question. But considering privacy and security of your google account, use Environment Variables in Advanced system settings. And access using "os.environ.get('Host_Email') and ('Host_Pass)" instead of putting your email and password in your code. https://www.youtube.com/watch?v=IolxqkL7cD8 A: You need to use an app password to allow your app to access your google account. *You can see my answer explaining how to generate an app password. Sign in with App Passwords: An App Password is a 16-digit passcode that gives a less secure app or device permission to access your Google Account. App Passwords can only be used with accounts that have 2-Step Verification turned on. So, your code with an app password of 16-digit passcode is something like below: EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_HOST_USER = 'raihncse@gmail.com' DEFAULT_FROM_EMAIL = 'raihncse@gmail.com' SERVER_EMAIL = 'raihncse@gmail.com' EMAIL_HOST_PASSWORD = 'xylnudjdiwpojwzm' # Here In addition, google hasn't allowed your app to access your google account with username(email address) and password since May 30, 2022. So now, you need username(email address) and an app password to access your google account. Less secure apps & your Google Account: To help keep your account secure, from May 30, 2022, ​​Google no longer supports the use of third-party apps or devices which ask you to sign in to your Google Account using only your username and password.
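Combining the app-password and environment-variable advice above into one settings.py sketch (the names passed to os.environ.get are my placeholders, not Django settings):

import os

EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('DJANGO_EMAIL_USER')
# 16-character app password, not the account password
EMAIL_HOST_PASSWORD = os.environ.get('DJANGO_EMAIL_PASSWORD')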
Django SMTPAuthenticationError
I am new in django and developing a web application using django. I have successfully set the Signup functionality using Userena in my web application and can Register as a user with Verification Email. I can show you my SMTP settings in my settings.py file EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_HOST_USER = 'raihncse@gmail.com' DEFAULT_FROM_EMAIL = 'raihncse@gmail.com' SERVER_EMAIL = 'raihncse@gmail.com' EMAIL_HOST_PASSWORD = '**************' everything was fine before i have changed my EMAIL_HOST_PASSWORD Infact, for some reason I have to change the previous password of that SERVER_EMAIL(raihncse@gmail.com). I have already edit the EMAIL_HOST_PASSWORD according to the new SERVER_EMAIL password. but now, if I want to register as a new user, I am facing the following error SMTPAuthenticationError at /accounts/signup/ (534, '5.7.14 <https://accounts.google.com/ContinueSignIn?sarp=1&scc=1&plt=AKgnsbvNq\n5.7.14 S3l1pFXENupDa_SdPphNHrnzeLPUOyf6O0l1s31w7h_UARx11P89AxPeeZ6xBi2KeQRjsw\n5.7.14 nvpxZhPVv771W9ljEDyeWnqpqv3_nakuPo36BEl3IlYj9qVujNB5cm0TYFf9LQAxRjFdda\n5.7.14 xh-y5spA9zIQONDsvRRgN3e0DXoIkgxTO3Mu75IaACi-XlvFtFfPBiQ81gUrBZ_PhZsBmh\n5.7.14 ajsSf-flNEmoSydyOTNdmwdB0__8> Please log in via your web browser and\n5.7.14 then try again.\n5.7.14 Learn more at\n5.7.14 https://support.google.com/mail/bin/answer.py?answer=78754 40sm12125121qgi.47 - gsmtp')
[ "A relatively recent change in Google's authentication system means you're going to have to \"allow less secure app access\" to your Google account, in order for this to work.\nIn your error, you are recommended to visit this link: https://support.google.com/mail/answer/78754\nOn that page:\nStep #2 asks you to try Displaying an Unlock Captcha\nStep #3 explains how to allow less secure app access. In summary:\n\nGo to Allow less secure apps and choose \"Allow\" to let less secure apps access your Google account. We don't recommend this option because it may make it easier for someone to gain access to your account.\n\n", "Recent security protocol of Google will not allow you to send messages through less secure apps.\nSo the fix is:\n\nTurn ON less secure apps.\n\nBut only enabling this doesn't seem to work now. Google does not allow apps to send messages using your default gmail password. So to send messages you have to follow these two simple steps:\n\n\nTurn ON two factor authentication of your gmail account. Link to turn it on.\n\nThen create app password for your gmail account from Google. It will be a 16 digit code. Put it in settings.py or _credentials.py by: EMAIL_HOST_PASSWORD = 'sixt eend igit code' Link to know how to set up your application password for email\n\n\n\nAnd BINGO! Problem will be solved.\nNote: App password can only be created if you have enabled 2 factor authentication for your account.\n", "For me, I did turn on the less secure app option even then I was getting SMTP error.This is how I solved it.\nGo to https://accounts.google.com/DisplayUnlockCaptcha and allow access to your Google account by clicking on continue and then try resetting password again in Django.\n", "This works for me:\n\nTurn ON two factors authentication of your Gmail account.\n\nThen create an app password for your Gmail account from Google. It will be a 16 digit code. Put it in settings.py or env.py by EMAIL_HOST_PASSWORD = 'sixteen-digit code you get from your Gmail' and remember to add them in Django config vars also\n", "You need turn on support for less secure devices:\nhttps://www.google.com/settings/security/lesssecureapps\n", "If you are integrating with AWS, my suggestion is to use SES(simple Email Service). 
You can add and verify your email address in SES which wouldn't raise SMTPAuthenticationError.\nI have followed this - https://medium.com/hackernoon/the-easiest-way-to-send-emails-with-django-using-ses-from-aws-62f3d3d33efd.\n", "This worked for me:\nI first created email_info.py in the same folder as settings.py:\nfrom .email_info import *\n\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'youremail@gmail.com'\nEMAIL_HOST_PASSWORD = 'your_pass'\nEMAIL_PORT = 587\n\nThen in settings.py I added:\nfrom .email_info import *\n\nEMAIL_USE_TLS = EMAIL_USE_TLS\nEMAIL_HOST = EMAIL_HOST\nEMAIL_HOST_USER = EMAIL_HOST_USER\nEMAIL_HOST_PASSWORD = EMAIL_HOST_PASSWORD\nEMAIL_PORT = EMAIL_PORT\n\nAnd finally in views.py:\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\nsend_mail(subject, message, from_email, to_list, fail_silently=Tre)\n\nHere's a YouTube video that explains this process: https://www.youtube.com/watch?v=51mmqf5a0Ss\n", "You can also generate a dedicated email password for Django application.\n1- Please check this link https://support.google.com/accounts/answer/185839?co=GENIE.Platform%3DDesktop&hl=en\n2- Open the 2 Step verification setting from GOOGLE ACCOUNT--> MANAGE YOUR ACCOUNT --> SECURITY ---> 2 STEP VERIFICATION\n3- Third, create a dedicated pass for Django, it will be 16 characters.\n4- You can find the django email settings below\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_USER = 'your_account@gmail.com'\nEMAIL_HOST_PASSWORD = '16characters password'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n", "Make sure the password in EMAIL_HOST_PASSWORD is the password that you can use to login to the gmail account in EMAIL_HOST_USER. To confirm that, go to gmail.com and login to the gmail account in EMAIL_HOST_USER using the password in EMAIL_HOST_PASSWORD. If it works in one go without any form of 2-step authentication, then you're set. Otherwise, allow less secure apps and disable display unlock captcha using the tutorials found in other answers. For me, doing both and most importantly, using the correct password worked for me.\n", "How to get emails from Django or any library:\nNotes\nNote: If your account is using two step authentication it won't work.\nFirst open an Incognito window with the account you want to send emails with.\nOpen Less secure apps\nThen enable it\nAfter that navigate to Unlock Captcha\nThen enable it\nAfter that fill the account email and password in the library which you are using\nIn Django:\n\nEMAIL_HOST_USER is the email address. Ex: email@example.com\n\nEMAIL_HOST_PASSWORD is the email password. Ex: smtpmessages.manager@2012\n\n\nRun the program\nFirst it will not work, To make it work open Gmail on the incognito window.\nThen there will be an email from google saying that an app needs access to your google account\nOpen the email and click the link and select yes it was me\nThen retry and it should work\nThank You!\nFor more info visit: Check Gmail through other email platforms\n", "\nTurn ON two factors authentication of your Gmail account.\nGenerate App Password(You will get this option after Two factor authentication) >> select app >> select device (In my case App is Gmail and device is Windows Machine)\nCopy the 16 digit password\nPaste this password at EMAIL_HOST_PASSWORD = \"(Paste password here)\"\n\n", "This is not exactly an answer to your question. But considering privacy and security of your google account, use Environment Variables in Advanced system settings. 
And access using \"os.environ.get('Host_Email') and ('Host_Pass)\" instead of putting your email and password in your code.\nhttps://www.youtube.com/watch?v=IolxqkL7cD8\n", "You need to use an app password to allow your app to access your google account. *You can see my answer explaining how to generate an app password.\nSign in with App Passwords:\n\nAn App Password is a 16-digit passcode that gives a less secure app or\ndevice permission to access your Google Account. App Passwords can\nonly be used with accounts that have 2-Step Verification turned on.\n\nSo, your code with an app password of 16-digit passcode is something like below:\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = 'raihncse@gmail.com'\nDEFAULT_FROM_EMAIL = 'raihncse@gmail.com'\nSERVER_EMAIL = 'raihncse@gmail.com'\nEMAIL_HOST_PASSWORD = 'xylnudjdiwpojwzm' # Here\n\nIn addition, google hasn't allowed your app to access your google account with username(email address) and password since May 30, 2022. So now, you need username(email address) and an app password to access your google account.\nLess secure apps & your Google Account:\n\nTo help keep your account secure, from May 30, 2022, ​​Google no\nlonger supports the use of third-party apps or devices which ask you\nto sign in to your Google Account using only your username and\npassword.\n\n" ]
[ 77, 6, 5, 3, 2, 2, 1, 1, 1, 0, 0, 0, 0 ]
[ "I think you need to turn on google less secure apps. Login to your account and go to less secure apps to change your setting. It is not good but you can try your code.\n", "If you already have allowed access to less secure apps and still having problems go to your account >> login and security >> notifications and activity in your devices, and see if there is a device that tried to access and google denied it, allow that access.\n", "Make sure that you have selected \"Turn On\" option in Allow less secure apps page (https://www.google.com/settings/security/lesssecureapps). \nI suggest you to refresh the page once after selecting \"Turn On\" radio button. The radio button selection should not move \"Turn Off\"\nSome how it is getting \"Turn Off\" automatically.\n" ]
[ -1, -1, -2 ]
[ "authentication", "django", "gmail", "python", "smtp_auth" ]
stackoverflow_0026697565_authentication_django_gmail_python_smtp_auth.txt
Q: Python float mysteriously off by anywhere between 0.1 to 0.3 I'm writing a function to convert a weirdly formatted Degrees Minutes Seconds to Degrees Decimal. My code is: def fromDMS(coordinate): lat_dms = coordinate[0:10] lon_dms = coordinate[11:21] lat_sign = lat_dms[0] lat_deg = float(lat_dms[1:3]) lat_min = float(lat_dms[3:5]) lat_sec = float(lat_dms[5:]) lon_sign = lon_dms[0] lon_deg = float(lon_dms[1:4]) lon_min = float(lat_dms[4:6]) lon_sec = float(lat_dms[6:]) lat_deg = (lat_deg + (lat_min/60) + (lat_sec/(60 * 2))) if lat_sign == "-": lat_deg = lat_deg * -1 lon_deg = (lon_deg + (lon_min/60) + (lon_sec/(60 * 2))) if lon_deg == "-": lon_deg = lon_deg * -1 return lat_deg, lon_deg The format in question is this string -365535.000+1745401.000 where "-365535.000" (-36 degrees, 55 minutes, 35 seconds) is the latitude and "+1745401.000" (174 degrees, 55 minutes, and 1 second) is the longitude. Using an online calculator, these values should result in "-36.926389" and "174.916944", but end up as "37.20833333333333" and "174.92499999999998". I've heard that floats can be a little weird sometimes, but not to this extent. A: According to https://www.fcc.gov/media/radio/dms-decimal, the decimal degrees should be -36.926389 and 174.900278. One problem with this code is that it divides the seconds by 60 * 2 (i.e., 120) instead of 60 ** 2 (i.e., 3600). Making this change causes the latitude to be -36.92638888888889. The second problem is that the assignments to lon_min and lon_sec use lat_dms instead of lon_dms. Making this change causes the longitude to be 174.90027777777777. A: lon_sign = lon_dms[0] lon_deg = float(lon_dms[1:4]) lon_min = float(lat_dms[4:6]) lon_sec = float(lat_dms[6:]) On the third and fourth lines, you're converting from lat_dms instead of lon_dms.
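Both fixes combined (60 ** 2 rather than 60 * 2 for the seconds divisor, and lon_dms rather than lat_dms for the longitude slices), plus a sign test against the sign character instead of the already-numeric lon_deg, which the original also got wrong, give a sketch like this:

def fromDMS(coordinate):
    lat_dms = coordinate[0:10]   # the original slices clip a trailing zero; floats parse fine anyway
    lon_dms = coordinate[11:21]
    lat = float(lat_dms[1:3]) + float(lat_dms[3:5]) / 60 + float(lat_dms[5:]) / 60 ** 2
    lon = float(lon_dms[1:4]) + float(lon_dms[4:6]) / 60 + float(lon_dms[6:]) / 60 ** 2
    if lat_dms[0] == "-":
        lat = -lat
    if lon_dms[0] == "-":
        lon = -lon
    return lat, lon

print(fromDMS("-365535.000+1745401.000"))  # (-36.92638888888889, 174.90027777777777)

Note that "+1745401.000" parses as 54 minutes 1 second, so 174.900278 (the first answer's value) is the correct expectation, not 174.916944.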
Python float mysteriously off by anywhere between 0.1 to 0.3
I'm writing a function to convert a weirdly formatted Degrees Minutes Seconds to Degrees Decimal. My code is: def fromDMS(coordinate): lat_dms = coordinate[0:10] lon_dms = coordinate[11:21] lat_sign = lat_dms[0] lat_deg = float(lat_dms[1:3]) lat_min = float(lat_dms[3:5]) lat_sec = float(lat_dms[5:]) lon_sign = lon_dms[0] lon_deg = float(lon_dms[1:4]) lon_min = float(lat_dms[4:6]) lon_sec = float(lat_dms[6:]) lat_deg = (lat_deg + (lat_min/60) + (lat_sec/(60 * 2))) if lat_sign == "-": lat_deg = lat_deg * -1 lon_deg = (lon_deg + (lon_min/60) + (lon_sec/(60 * 2))) if lon_deg == "-": lon_deg = lon_deg * -1 return lat_deg, lon_deg The format in question is this string -365535.000+1745401.000 where "-365535.000" (-36 degrees, 55 minutes, 35 seconds) is the latitude and "+1745401.000" (174 degrees, 55 minutes, and 1 second) is the longitude. Using an online calculator, these values should result in "-36.926389" and "174.916944", but end up as "37.20833333333333" and "174.92499999999998". I've heard that floats can be a little weird sometimes, but not to this extent.
[ "According to https://www.fcc.gov/media/radio/dms-decimal, the decimal degrees should be -36.926389 and 174.900278.\nOne problem with this code is that it divides the seconds by 60 * 2 (i.e., 120) instead of 60 ** 2 (i.e., 3600). Making this change causes the latitude to be -36.92638888888889.\nThe second problem is that the assignments to lon_min and lon_sec use lat_dms instead of lon_dms. Making this change causes the longitude to be 174.90027777777777.\n", "lon_sign = lon_dms[0]\nlon_deg = float(lon_dms[1:4])\nlon_min = float(lat_dms[4:6])\nlon_sec = float(lat_dms[6:])\n\nOn the third and fourth lines, you're converting from lat_dms instead of lon_dms.\n" ]
[ 3, 0 ]
[]
[]
[ "floating_point", "python", "python_3.x" ]
stackoverflow_0074622426_floating_point_python_python_3.x.txt
Q: Modify bound variables of a closure in Python Is there any way to modify the bound value of one of the variables inside a closure? Look at the example to understand it better. def foo(): var_a = 2 var_b = 3 def _closure(x): return var_a + var_b + x return _closure localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 # ...but what magic? Is this even possible? # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4 A: It is quite possible in python 3 thanks to the magic of nonlocal. def foo(): var_a = 2 var_b = 3 def _closure(x, magic = None): nonlocal var_a if magic is not None: var_a = magic return var_a + var_b + x return _closure localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 print(a) # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 localClosure(0, 0) # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4 print(b) A: I don't think there is any way to do that in Python. When the closure is defined, the current state of variables in the enclosing scope is captured and no longer has a directly referenceable name (from outside the closure). If you were to call foo() again, the new closure would have a different set of variables from the enclosing scope. In your simple example, you might be better off using a class: class foo: def __init__(self): self.var_a = 2 self.var_b = 3 def __call__(self, x): return self.var_a + self.var_b + x localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 # ...but what magic? Is this even possible? localClosure.var_a = 0 # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4 If you do use this technique I would no longer use the name localClosure because it is no longer actually a closure. However, it works the same as one. A: I've found an alternate answer answer to Greg's, slightly less verbose because it uses Python 2.1's custom function attributes (which conveniently enough can be accessed from inside their own function). def foo(): var_b = 3 def _closure(x): return _closure.var_a + var_b + x _closure.func_dict['var_a'] = 2 return _closure localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 # ...but what magic? Is this even possible? # apparently, it is localClosure.var_a = 0 # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4 Thought I'd post it for completeness. Cheers anyways. A: We've done the following. I think it's simpler than other solutions here. class State: pass def foo(): st = State() st.var_a = 2 st.var_b = 3 def _closure(x): return st.var_a + st.var_b + x def _set_a(a): st.var_a = a return _closure, _set_a localClosure, localSetA = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 localSetA(0) # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4 print a, b A: I worked around a similar limitation by using one-item lists instead of a plain variable. It's ugly but it works because modifying a list item doesn't get treated as a binding operation by the interpreter. 
For example: def my_function() max_value = [0] def callback (data) if (data.val > max_value[0]): max_value[0] = data.val # more code here # . . . results = some_function (callback) store_max (max_value[0]) A: slightly different from what was asked, but you could do: def f(): a = 1 b = 2 def g(x, a=a, b=b): return a + b + x return g h = f() print(h(0)) print(h(0,2,3)) print(h(0)) and make the closure the default, to be overridden when needed. A: Maybe there's a further approach (even if it seems to be some years too late for my proposal :-) def foo(): def _closure(x): return _closure.var_a + _closure.var_b + x _closure.var_a = 2 _closure.var_b = 3 return _closure localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 print(a) # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 # ...but what magic? Is this even possible? localClosure.var_a = 0 # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4 print(b) From my point of view the class solution proposed is easier to read. But if you try to modiy a free variable inside a decorator this solution might come in handy: In comparison to a class based solution it's easier to work with functools.wraps to preserve the meta data of the decorated function. A: Question Is there any way to modify the bound value of one of the variables inside a closure? TLDR Yes, this is possible starting in Python 3.7.0 alpha 1: localClosure.__closure__[0].cell_contents = 0 Details In Python, a closure remembers the variables from the scope in which it was defined by using a special __closure__ attribute. The __closure__ attribute is a tuple of cell objects representing the variables from the outer scope, and the values of those variables are stored in the cell_contents attribute of each cell. Given the code from the question, this can be seen by running the following: # print the list of cells print(localClosure.__closure__) # (<cell at 0x7f941ca27a00: int object at 0x7f941a621950>, <cell at 0x7f941ca27eb0: int object at 0x7f941a621970>) # print the values in the cells print(', '.join(str(cell.cell_contents) for cell in localClosure.__closure__)) # 2, 3 # print the value in the first cell (var_a) print(localClosure.__closure__[0].cell_contents) # 2 The cell_contents attribute of the cell objects first became writable with bpo-30486 which was first included in Python 3.7.0 alpha 1 Complete working example: def foo(): var_a = 2 var_b = 3 def _closure(x): return var_a + var_b + x return _closure localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 # ...but what magic? Is this even possible? # the magic # this changes the value in the cell representing var_a to be 0 localClosure.__closure__[0].cell_contents = 0 # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 + 1 == 4 A: Why not make var_a and var_b arguments of the function foo? 
def foo(var_a = 2, var_b = 3): def _closure(x): return var_a + var_b + x return _closure localClosure = foo() # uses default arguments 2, 3 print localClosure(1) # 2 + 3 + 1 = 6 localClosure = foo(0, 3) print localClosure(1) # 0 + 3 + 1 = 4 A: def foo(): var_a = 2 var_b = 3 def _closure(x): return var_a + var_b + x return _closure def bar(): var_a = [2] var_b = [3] def _closure(x): return var_a[0] + var_b[0] + x def _magic(y): var_a[0] = y return _closure, _magic localClosureFoo = foo() a = localClosureFoo(1) print a localClosureBar, localClosureBarMAGIC = bar() b = localClosureBar(1) print b localClosureBarMAGIC(0) b = localClosureBar(1) print b
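A small addendum (a sketch, not from the answers above): since the cells in __closure__ appear in the same order as the names in __code__.co_freevars, you can write a cell by variable name instead of by index. This assumes Python 3.7+ for writable cell_contents.

# map free-variable names to their cells, then assign by name (3.7+)
cells = dict(zip(localClosure.__code__.co_freevars, localClosure.__closure__))
cells['var_a'].cell_contents = 0
b = localClosure(1)  # 0 + 3 + 1 == 4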
Modify bound variables of a closure in Python
Is there any way to modify the bound value of one of the variables inside a closure? Look at the example to understand it better. def foo(): var_a = 2 var_b = 3 def _closure(x): return var_a + var_b + x return _closure localClosure = foo() # Local closure is now "return 2 + 3 + x" a = localClosure(1) # 2 + 3 + 1 == 6 # DO SOME MAGIC HERE TO TURN "var_a" of the closure into 0 # ...but what magic? Is this even possible? # Local closure is now "return 0 + 3 + x" b = localClosure(1) # 0 + 3 +1 == 4
[ "It is quite possible in python 3 thanks to the magic of nonlocal.\ndef foo():\n var_a = 2\n var_b = 3\n\n def _closure(x, magic = None):\n nonlocal var_a\n if magic is not None:\n var_a = magic\n\n return var_a + var_b + x\n\n return _closure\n\n\nlocalClosure = foo()\n\n# Local closure is now \"return 2 + 3 + x\"\na = localClosure(1) # 2 + 3 + 1 == 6\nprint(a)\n\n# DO SOME MAGIC HERE TO TURN \"var_a\" of the closure into 0\nlocalClosure(0, 0)\n\n# Local closure is now \"return 0 + 3 + x\"\nb = localClosure(1) # 0 + 3 +1 == 4\nprint(b)\n\n", "I don't think there is any way to do that in Python. When the closure is defined, the current state of variables in the enclosing scope is captured and no longer has a directly referenceable name (from outside the closure). If you were to call foo() again, the new closure would have a different set of variables from the enclosing scope.\nIn your simple example, you might be better off using a class:\nclass foo:\n def __init__(self):\n self.var_a = 2\n self.var_b = 3\n\n def __call__(self, x):\n return self.var_a + self.var_b + x\n\nlocalClosure = foo()\n\n# Local closure is now \"return 2 + 3 + x\"\na = localClosure(1) # 2 + 3 + 1 == 6\n\n# DO SOME MAGIC HERE TO TURN \"var_a\" of the closure into 0\n# ...but what magic? Is this even possible?\nlocalClosure.var_a = 0\n\n# Local closure is now \"return 0 + 3 + x\"\nb = localClosure(1) # 0 + 3 +1 == 4\n\nIf you do use this technique I would no longer use the name localClosure because it is no longer actually a closure. However, it works the same as one.\n", "I've found an alternate answer answer to Greg's, slightly less verbose because it uses Python 2.1's custom function attributes (which conveniently enough can be accessed from inside their own function).\ndef foo():\n var_b = 3\n\n def _closure(x):\n return _closure.var_a + var_b + x\n\n _closure.func_dict['var_a'] = 2\n return _closure\n\n\nlocalClosure = foo()\n\n# Local closure is now \"return 2 + 3 + x\"\na = localClosure(1) # 2 + 3 + 1 == 6\n\n# DO SOME MAGIC HERE TO TURN \"var_a\" of the closure into 0\n# ...but what magic? Is this even possible?\n# apparently, it is\nlocalClosure.var_a = 0\n\n# Local closure is now \"return 0 + 3 + x\"\nb = localClosure(1) # 0 + 3 +1 == 4\n\nThought I'd post it for completeness. Cheers anyways.\n", "We've done the following. I think it's simpler than other solutions here.\nclass State:\n pass\n\ndef foo():\n st = State()\n st.var_a = 2\n st.var_b = 3\n\n def _closure(x):\n return st.var_a + st.var_b + x\n def _set_a(a):\n st.var_a = a\n\n return _closure, _set_a\n\n\nlocalClosure, localSetA = foo()\n\n# Local closure is now \"return 2 + 3 + x\"\na = localClosure(1) # 2 + 3 + 1 == 6\n\n# DO SOME MAGIC HERE TO TURN \"var_a\" of the closure into 0\nlocalSetA(0)\n\n# Local closure is now \"return 0 + 3 + x\"\nb = localClosure(1) # 0 + 3 +1 == 4\n\nprint a, b\n\n", "I worked around a similar limitation by using one-item lists instead of a plain variable. It's ugly but it works because modifying a list item doesn't get treated as a binding operation by the interpreter. \nFor example:\ndef my_function()\n max_value = [0]\n\n def callback (data)\n\n if (data.val > max_value[0]):\n max_value[0] = data.val\n\n # more code here\n # . . . 
\n\n    results = some_function (callback)\n\n    store_max (max_value[0])\n\n", "slightly different from what was asked, but you could do:\ndef f():\n    a = 1\n    b = 2\n    def g(x, a=a, b=b):\n        return a + b + x\n    return g\n\nh = f()\nprint(h(0))\nprint(h(0,2,3))\nprint(h(0))\n\nand make the closure the default, to be overridden when needed. \n", "Maybe there's a further approach (even if it seems to be some years too late for my proposal :-)\ndef foo():\n    def _closure(x):\n        return _closure.var_a + _closure.var_b + x\n    _closure.var_a = 2\n    _closure.var_b = 3\n    return _closure\n\n\nlocalClosure = foo()\n\n# Local closure is now \"return 2 + 3 + x\"\na = localClosure(1) # 2 + 3 + 1 == 6\nprint(a)\n\n# DO SOME MAGIC HERE TO TURN \"var_a\" of the closure into 0\n# ...but what magic? Is this even possible?\nlocalClosure.var_a = 0\n\n# Local closure is now \"return 0 + 3 + x\"\nb = localClosure(1) # 0 + 3 +1 == 4\nprint(b)\n\nFrom my point of view the class solution proposed is easier to read. But if you try to modify a free variable inside a decorator this solution might come in handy: In comparison to a class-based solution it's easier to work with functools.wraps to preserve the metadata of the decorated function.\n", "Question\n\nIs there any way to modify the bound value of one of the variables inside a closure?\n\nTLDR\nYes, this is possible starting in Python 3.7.0 alpha 1:\nlocalClosure.__closure__[0].cell_contents = 0\n\nDetails\nIn Python, a closure remembers the variables from the scope in which it was defined by using a special __closure__ attribute. The __closure__ attribute is a tuple of cell objects representing the variables from the outer scope, and the values of those variables are stored in the cell_contents attribute of each cell.\nGiven the code from the question, this can be seen by running the following:\n# print the list of cells\nprint(localClosure.__closure__)\n# (<cell at 0x7f941ca27a00: int object at 0x7f941a621950>, <cell at 0x7f941ca27eb0: int object at 0x7f941a621970>)\n\n# print the values in the cells\nprint(', '.join(str(cell.cell_contents) for cell in localClosure.__closure__))\n# 2, 3\n\n# print the value in the first cell (var_a)\nprint(localClosure.__closure__[0].cell_contents)\n# 2\n\nThe cell_contents attribute of the cell objects first became writable with bpo-30486 which was first included in Python 3.7.0 alpha 1\nComplete working example:\ndef foo():\n    var_a = 2\n    var_b = 3\n\n    def _closure(x):\n        return var_a + var_b + x\n\n    return _closure\n\n\nlocalClosure = foo()\n\n# Local closure is now \"return 2 + 3 + x\"\na = localClosure(1) # 2 + 3 + 1 == 6\n\n# DO SOME MAGIC HERE TO TURN \"var_a\" of the closure into 0\n# ...but what magic? 
Is this even possible?\n\n# the magic\n# this changes the value in the cell representing var_a to be 0\nlocalClosure.__closure__[0].cell_contents = 0\n\n# Local closure is now \"return 0 + 3 + x\"\nb = localClosure(1) # 0 + 3 + 1 == 4\n\n\n", "Why not make var_a and var_b arguments of the function foo?\ndef foo(var_a = 2, var_b = 3):\n def _closure(x):\n return var_a + var_b + x\n return _closure\n\nlocalClosure = foo() # uses default arguments 2, 3\nprint localClosure(1) # 2 + 3 + 1 = 6\n\nlocalClosure = foo(0, 3)\nprint localClosure(1) # 0 + 3 + 1 = 4\n\n", "def foo():\n var_a = 2\n var_b = 3\n\n def _closure(x):\n return var_a + var_b + x\n\n return _closure\n\ndef bar():\n var_a = [2]\n var_b = [3]\n\n def _closure(x):\n return var_a[0] + var_b[0] + x\n\n\n def _magic(y):\n var_a[0] = y\n\n return _closure, _magic\n\nlocalClosureFoo = foo()\na = localClosureFoo(1)\nprint a\n\n\n\nlocalClosureBar, localClosureBarMAGIC = bar()\nb = localClosureBar(1)\nprint b\nlocalClosureBarMAGIC(0)\nb = localClosureBar(1)\nprint b\n\n" ]
[ 50, 22, 11, 10, 4, 1, 1, 1, 0, 0 ]
[]
[]
[ "closures", "functional_programming", "python" ]
stackoverflow_0000392349_closures_functional_programming_python.txt
Q: stat() got an unexpected keyword argument 'follow_symlinks' Web search found links to bugs. I don't write complicated code in Python; I just want to confirm I understand the syntax: https://docs.python.org/3/library/pathlib.html Path.stat(*, follow_symlinks=True) But when I write Path(filepath).stat(follow_symlinks=False) I'm getting a "stat() got an unexpected keyword argument 'follow_symlinks'" error. lstat() in place of stat(follow_symlinks=False) gets the job done. Python 3.8.5. TIA A: You're reading it correctly. You just missed the footnote. From the page you linked Changed in version 3.10: The follow_symlinks parameter was added. So if you want to use that keyword argument, you need Python 3.10 or newer. Otherwise, as you've already figured out, just use lstat.
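A version-portable variant (a sketch, not from the thread): branch on the interpreter version so the same helper works before and after 3.10.

import sys
from pathlib import Path

def stat_nofollow(p: Path):
    # Path.stat gained follow_symlinks in 3.10; fall back to lstat before that
    if sys.version_info >= (3, 10):
        return p.stat(follow_symlinks=False)
    return p.lstat()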
stat() got an unexpected keyword argument 'follow_symlinks'
Web search found links to bugs. I don't write complicated code in Python; I just want to confirm I understand the syntax: https://docs.python.org/3/library/pathlib.html Path.stat(*, follow_symlinks=True) But when I write Path(filepath).stat(follow_symlinks=False) I'm getting a "stat() got an unexpected keyword argument 'follow_symlinks'" error. lstat() in place of stat(follow_symlinks=False) gets the job done. Python 3.8.5. TIA
[ "You're reading it correctly. You just missed the footnote. From the page you linked\n\nChanged in version 3.10: The follow_symlinks parameter was added.\n\nSo if you want to use that keyword argument, you need Python 3.10 or newer. Otherwise, as you've already figured out, just use lstat.\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074622601_python.txt
Q: Is there a way to set as initial camera in a 3D plot that the upper left corner is (0,0,0)? Plotly I'm trying to set the initial camera of a 3D volume plot so that the upper left corner is the origin (x, y, z = 0). I've read the documentation about the camera controls but cannot figure out how I can accomplish this. The initial view I want is something like this: A: I tried it and this one works for me. If you want the front upper left corner as (0,0,0): camera = dict( eye=dict(x=0, y=-0.5, z=-2.5) ) fig.update_layout(scene_camera=camera, title=name) fig.show() As I understand it, eye is basically the position of the eye (you) looking at the eyepoint (0,0,0), which I believe is the center of the 3D graph (not the coordinate origin). And if you need to flip an axis direction to the opposite, you can put that component of the eye position in the negative; if it is zero you can use a small negative number (in this example I used -0.5, but you can use -0.01 too)
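A self-contained sketch of the same idea (the volume data here is made up; only the scene_camera part comes from the answer above):

import numpy as np
import plotly.graph_objects as go

# dummy volume on a 20x20x20 grid, just to have something to render
X, Y, Z = np.mgrid[0:1:20j, 0:1:20j, 0:1:20j]
values = np.sin(X * Y * Z * 10)

fig = go.Figure(go.Volume(x=X.flatten(), y=Y.flatten(), z=Z.flatten(),
                          value=values.flatten(), opacity=0.1, surface_count=15))
fig.update_layout(scene_camera=dict(eye=dict(x=0, y=-0.5, z=-2.5)))
fig.show()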
Is there a way to set as initial camera in a 3D plot that the upper left corner is (0,0,0)? Plotly
I'm trying to set the initial camera of a 3D volume plot so that the upper left corner is the origin (x, y, z = 0). I've read the documentation about the camera controls but cannot figure out how I can accomplish this. The initial view I want is something like this:
[ "I tried it and this one work on me\nIf you want the front upper left corner as (0,0,0)\ncamera = dict(\n eye=dict(x=0, y=-0.5, z=-2.5)\n)\nfig.update_layout(scene_camera=camera, title=name)\nfig.show()\n\nwhat I understand from this eye is basically the position of the eye(or you) look at eyepoint(0,0,0) which is I believe the center of the 3D graph (not the coordinate)\nAnd if you need to change the axes direction to the opposite, you can try to put it on negative on the eye position, and if it is zero you can put negative small number (in this example I used -0.5, but you can use -0.01 too)\n" ]
[ 1 ]
[]
[]
[ "plotly", "plotly_python", "python" ]
stackoverflow_0074622347_plotly_plotly_python_python.txt
Q: prints the sum of the numbers 1 to n in python GOAL: Write a program that asks the user for a number n and prints the sum of the numbers 1 to n. The program keeps asking for a number until the user enters 0. expected output: enter an integer number (0 to end): 5 1+2+3+4+5 = 15 I am able to solve the second part, which is looping until the user enters 0. The problem I'm having is printing the numbers in a loop. 1+2+3+4+5 = 15 I am thinking maybe if I use a loop within a loop I can accomplish this. This is my current code. I've seen other questions answer this programming question but I want to know how to specifically print the numbers leading up to the number I entered num = int(input( "enter a integer: " )) sum_num =0 if num != 0: for i in range(1, num+1): sum_num += i print(sum_num) else: exit() A: Several issues with your code: Your while loop will never end. Its intended purpose is not clear. You are summing 1 instead of i each time in your loop. Your print statement only occurs at the end. You can include it within your loop. In Python, range(n) excludes n, so use range(n + 1) instead. You do not need to convert integers to string in order to print them. Putting this all together: num = int(input( "enter a integer: " )) sum_num = 0 for i in range(1, num+1): sum_num += i print(sum_num) enter a integer: 5 1 3 6 10 15 A: I think this matches what you want: while True: output = "" num = int(input("enter a integer: ")) if num == 0: exit() for i in range(1, num+1): output += "{}".format(i) if i != num: output += "+" output += " = {}".format(sum(range(num+1))) print (output) This prints out the sum of integers and then the answer, then waits for the next input. Example output: $ python test.py enter a integer: 5 1+2+3+4+5 = 15 enter a integer: 4 1+2+3+4 = 10 enter a integer: 3 1+2+3 = 6 enter a integer: 2 1+2 = 3 enter a integer: 1 1 = 1 enter a integer: 0 = 0 A: num=int(input("Enter the Number " )) sum=0 for i in range(1, num + 1): sum = sum+ i print(sum) Enter the Number 9 45 A: n = int(input("enter the no. : ")) sum = 0 for i in range(1,n+1): if(i<n): print(i,"+", end=" ") else: print(i,end=" ") sum = sum + i print("=",sum) A: What about: def cumsum(n: int) -> int: """Helper function to calculate cumulative sum""" return sum(range(n)) if __name__ == "__main__": """Main entry point""" while n := int(input("Number: ")): print(cumsum(n)) cumsum calculates the cumulative sum __name__ == "__main__" is python-specific boilerplate n := is the "walrus-operator", meaning it assigns to n at the point it's evaluating the input. The while loop terminates at 0 simply because 0 is "falsy". Without the boilerplate code, in 2 lines: while n := int(input("Number: ")): print(sum(range(n))) A: n = int (input ('')) print(n*(n+1)//2)
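A compact variant of the same idea (a sketch, not from the answers): build the "1+2+...+n" string with join and use the closed form n*(n+1)//2 for the total, looping until 0 is entered.

n = int(input("enter an integer number (0 to end): "))
while n != 0:
    # e.g. for n=5 this prints: 1+2+3+4+5 = 15
    print("+".join(str(i) for i in range(1, n + 1)), "=", n * (n + 1) // 2)
    n = int(input("enter an integer number (0 to end): "))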
prints the sum of the numbers 1 to n in python
GOAL: Write a program that asks the user for a number n and prints the sum of the numbers 1 to n. The program keeps asking for a number until the user enters 0. expected output: enter an integer number (0 to end): 5 1+2+3+4+5 = 15 I am able to solve the second part, which is looping until the user enters 0. The problem I'm having is printing the numbers in a loop. 1+2+3+4+5 = 15 I am thinking maybe if I use a loop within a loop I can accomplish this. This is my current code. I've seen other questions answer this programming question but I want to know how to specifically print the numbers leading up to the number I entered num = int(input( "enter a integer: " )) sum_num =0 if num != 0: for i in range(1, num+1): sum_num += i print(sum_num) else: exit()
[ "Several issues with your code:\n\nYour while loop will never end. Its intended purpose is not clear.\nYou are summing 1 instead of i each time in your loop.\nYour print statement only occurs at the end. You can include it within your loop.\nIn Python, range(n) excludes n, so use range(n + 1) instead.\nYou do not need to convert integers to string in order to print them.\n\nPutting this all together:\nnum = int(input( \"enter a integer: \" ))\nsum_num = 0\n\nfor i in range(1, num+1): \n sum_num += i\n print(sum_num)\n\nenter a integer: 5\n1\n3\n6\n10\n15\n\n", "I think this matches what you want: \nwhile True:\n output = \"\"\n num = int(input(\"enter a integer: \"))\n\n if num == 0:\n exit()\n\n for i in range(1, num+1):\n output += \"{}\".format(i)\n if i != num:\n output += \"+\"\n output += \" = {}\".format(sum(range(num+1)))\n print (output)\n\nThis prints out the sum of integers and then the answer, then waits for the next input. Example output:\n$ python test.py\nenter a integer: 5\n1+2+3+4+5 = 15\nenter a integer: 4\n1+2+3+4 = 10\nenter a integer: 3\n1+2+3 = 6\nenter a integer: 2\n1+2 = 3\nenter a integer: 1\n1 = 1\nenter a integer: 0\n = 0\n\n", "num=int(input(\"Enter the Number \" ))\nsum=0\n\nfor i in range(1, num + 1):\n sum = sum+ i\nprint(sum)\n\n\nEnter the Number 9\n45\n\n", "n = int(input(\"enter the no. : \"))\nsum = 0\nfor i in range(1,n+1):\n if(i<n):\n print(i,\"+\", end=\" \")\n else:\n print(i,end=\" \")\n sum = sum + i\nprint(\"=\",sum)\n\n", "What about:\ndef cumsum(n: int) -> int:\n \"\"\"Helper function to calculate cumulative sum\"\"\" \n return sum(range(n)) \n\nif __name__ == \"__main__\":\n \"\"\"Main entry point\"\"\"\n \n while n := int(input(\"Number: \")): \n print(cumsum(n))\n\n\ncumsum calculates the cumulative sum\n__name__ == \"__main__\" is python-specific boilerplate\nn := is the \"walrus-operator\", meaning it assigns to n at the point it's evaluating the input. The while loop terminates at 0 simply because 0 is \"falsy\".\n\nWithout the boilerplate code, in 2 lines:\nwhile n := int(input(\"Number: \")):\n print(sum(range(n)))\n\n", "n = int (input (''))\nprint(n*(n+1)//2)\n\n" ]
[ 3, 1, 0, 0, 0, 0 ]
[ "n*(n+1)/2\n\"zBody zmust zbe zat zleast z30 zcharacters; zyou zentered z9 z...\"\n", "num = int(input()) \ntotal = num \nfor x in range(num): \n total += x \nprint(total) \n\n" ]
[ -2, -2 ]
[ "python", "python_3.x" ]
stackoverflow_0050971279_python_python_3.x.txt
Q: Round (floor) to the nearest member of the geometric sequence (2, 4, 8, 16, 32, 64, 128 . . . )? As the title explains, how could I create a function func (using numpy or math modules) to find the nearest member of the geometric sequence (2, 4, 8, 16, 32, 64, 128 . . . )? For example, func(3) should yield 2, func(20) should yield 16, and func(128) should yield 128. I cannot find any information on this problem. Most rounding problems discuss rounding to the nearest multiple of some number, rather than to the nearest member of a geometric sequence. A: Use the concept of power and log import numpy as np def round_to_geometric(x): return int(2 ** np.floor(np.log2(x))) output: > print(round_to_geometric(3)) > print(round_to_geometric(20)) > print(round_to_geometric(128)) 2 16 128 A: One way to think about this is in terms of the length of a binary representation of an integer: def myround(i): return 2 ** (len(bin(i)) - 3) >>> [myround(i) for i in (3, 4, 5, 6, 7, 8, 20, 25, 128)] [2, 4, 4, 4, 4, 8, 16, 16, 128] Explanation: bin(x) returns a binary string representation of the input. >>> bin(20) '0b10100' The length of this representation depends on the most significant bit in the sequence. For bin(20), the most significant bit is 16 (or 2 ** 4). Subtracting 3 ignores the 0b and the 1s place in the binary string. So len(bin(20)) - 3 = 4
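One more equivalent trick (a sketch, not from the answers): for positive integers, int.bit_length gives the position of the most significant bit directly, so no string handling or float math is needed.

def func(i: int) -> int:
    # largest power of two <= i, for i >= 1
    return 1 << (i.bit_length() - 1)

assert [func(i) for i in (3, 20, 128)] == [2, 16, 128]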
Round (floor) to the nearest member of the geometric sequence (2, 4, 8, 16, 32, 64, 128 . . . )?
As the title explains, how could I create a function func (using numpy or math modules) to find the nearest member of the geometric sequence (2, 4, 8, 16, 32, 64, 128 . . . )? For example, func(3) should yield 2, func(20) should yield 16, and func(128) should yield 128. I cannot find any information on this problem. Most rounding problems discuss rounding to the nearest multiple of some number, rather than to the nearest member of a geometric sequence.
[ "Use the concept of power and log\nimport numpy as np\ndef round_to_geometric(x):\n return int(2 ** np.floor(np.log2(x)))\n\noutput:\n> print(round_to_geometric(3))\n> print(round_to_geometric(20))\n> print(round_to_geometric(128))\n\n2\n16\n128\n\n", "One way to think about this is in terms of the length of a binary representation of an integer:\ndef myround(i):\n return 2 ** (len(bin(i)) - 3)\n\n>>> [myround(i) for i in (3, 4, 5, 6, 7, 8, 20, 25, 128)]\n[2, 4, 4, 4, 4, 8, 16, 16, 128]\n\nExplanation: bin(x) returns a binary string representation of the input.\n>>> bin(20)\n'0b10100'\n\nThe length of this representation depends on the most significant bit in the sequence. For bin(20), the most significant bit is 16 (or 2 ** 4). Subtracting 3 ignores the 0b and the 1s place in the binary string. So len(bin(20)) - 3 = 4\n" ]
[ 4, 2 ]
[]
[]
[ "python" ]
stackoverflow_0074622579_python.txt
Q: how can Python's pd.qcut give the same result as R's statar::xtile? I need to create bins based on one column in a dataframe. One problem is the values of that column are oddly distributed. Consequently, Python's pd.qcut may arbitrarily put observations into different bins, even though they have the same value. In R (or in Stata), I use the xtile function of the statar package. R is able to group all observations with the same value into one bin. library(tidyverse) sample_df <- data.frame(customer_id = seq(1:10), purch_frequency = c(1, 1, 1, 1, 1, 2, 3, 10, 11, 11)) sample_df <- sample_df %>% mutate(freq_bins1=statar::xtile(purch_frequency, 2), freq_bins2=statar::xtile(purch_frequency, 3)) print(sample_df) A corresponding implementation in Python, import pandas as pd data = {'customer_id': range(1,11), 'purch_frequency': [1, 1, 1, 1, 1, 2, 3, 10, 11, 11]} sample_df = pd.DataFrame(data) sample_df['freq_bins1'] = \ (sample_df['purch_frequency'].rank(method = 'first') .transform(lambda x: pd.qcut(x, 2, labels = False))) sample_df['freq_bins2'] = \ (sample_df['purch_frequency'].rank(method = 'first') .transform(lambda x: pd.qcut(x, 3, labels = False))) print(sample_df) As you can see, R and Python give different answers for the last column, freq_bins2. I wondered how I can modify the Python code so that it matches R's result. Thanks! A quick follow-up. The R and Python outputs are now attached below. For R (python's index is "1" less than R's, which is fine): customer_id purch_frequency freq_bins1 freq_bins2 1 1 1 1 1 2 2 1 1 1 3 3 1 1 1 4 4 1 1 1 5 5 1 1 1 6 6 2 2 2 7 7 3 2 2 8 8 10 2 3 9 9 11 2 3 10 10 11 2 3 For Python: customer_id purch_frequency freq_bins1 freq_bins2 0 1 1 0 0 1 2 1 0 0 2 3 1 0 0 3 4 1 0 0 4 5 1 0 1 5 6 2 1 1 6 7 3 1 1 7 8 10 1 2 8 9 11 1 2 9 10 11 1 2 A: Maybe there are better answers, but I have figured out a detour by calling the R function (statar::xtile) from within Python. # You need to first install rpy2 # Activate rpy2 to use R functions/packages in Python import rpy2 import rpy2.robjects as robjects from rpy2.robjects.packages import importr # in particular, the following two lines activate R functions # for pandas' dataframes from rpy2.robjects import pandas2ri pandas2ri.activate() ### This chunk is the same as the original post import pandas as pd data = {'customer_id': range(1,11), 'purch_frequency': [1, 1, 1, 1, 1, 2, 3, 10, 11, 11]} sample_df = pd.DataFrame(data) sample_df['freq_bins1'] = \ (sample_df['purch_frequency'].rank(method = 'first') .transform(lambda x: pd.qcut(x, 2, labels = False))) sample_df['freq_bins2'] = \ (sample_df['purch_frequency'].rank(method = 'first') .transform(lambda x: pd.qcut(x, 3, labels = False))) ### The following is to call R's statar::xtile statar = importr('statar') sample_df['freq_bin3'] = statar.xtile(sample_df['purch_frequency'], 3) print(sample_df) The output looks like the following: customer_id purch_frequency freq_bins1 freq_bins2 freq_bin3 0 1 1 0 0 1 1 2 1 0 0 1 2 3 1 0 0 1 3 4 1 0 0 1 4 5 1 0 1 1 5 6 2 1 1 2 6 7 3 1 1 2 7 8 10 1 2 3 8 9 11 1 2 3 9 10 11 1 2 3
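A pure-pandas detour is also possible (a sketch, not from the answer; xtile_like is a hypothetical helper): cut on quantile edges instead of ranks, so that equal values always land in the same bin. It reproduces the example above, though I have not verified that it matches statar::xtile for every tie pattern.

import numpy as np
import pandas as pd

def xtile_like(s: pd.Series, n: int) -> pd.Series:
    # quantile edges; open outer bins so every value is covered
    edges = np.quantile(s, np.linspace(0, 1, n + 1))
    edges[0], edges[-1] = -np.inf, np.inf
    return pd.cut(s, np.unique(edges), labels=False) + 1

# for the sample data this gives 1,1,1,1,1,2,2,3,3,3 -- same as R's freq_bins2
print(xtile_like(sample_df['purch_frequency'], 3).tolist())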
how can Python's pd.qcut give the same result as R's statar::xtile?
I need to create bins based on one column in a dataframe. One problem is the values of that column are oddly distributed. Consequently, Python's pd.qcut may arbitrarily put observations into different bins, even though they have the same value. In R (or in Stata), I use the xtile function of the statar package. R is able to group all observations with the same value into one bin. library(tidyverse) sample_df <- data.frame(customer_id = seq(1:10), purch_frequency = c(1, 1, 1, 1, 1, 2, 3, 10, 11, 11)) sample_df <- sample_df %>% mutate(freq_bins1=statar::xtile(purch_frequency, 2), freq_bins2=statar::xtile(purch_frequency, 3)) print(sample_df) A corresponding implementation in Python, import pandas as pd data = {'customer_id': range(1,11), 'purch_frequency': [1, 1, 1, 1, 1, 2, 3, 10, 11, 11]} sample_df = pd.DataFrame(data) sample_df['freq_bins1'] = \ (sample_df['purch_frequency'].rank(method = 'first') .transform(lambda x: pd.qcut(x, 2, labels = False))) sample_df['freq_bins2'] = \ (sample_df['purch_frequency'].rank(method = 'first') .transform(lambda x: pd.qcut(x, 3, labels = False))) print(sample_df) As you can see, R and Python give different answers for the last column, freq_bins2. I wondered how I can modify the Python code so that it matches R's result. Thanks! A quick follow-up. The R and Python outputs are now attached below. For R (python's index is "1" less than R's, which is fine): customer_id purch_frequency freq_bins1 freq_bins2 1 1 1 1 1 2 2 1 1 1 3 3 1 1 1 4 4 1 1 1 5 5 1 1 1 6 6 2 2 2 7 7 3 2 2 8 8 10 2 3 9 9 11 2 3 10 10 11 2 3 For Python: customer_id purch_frequency freq_bins1 freq_bins2 0 1 1 0 0 1 2 1 0 0 2 3 1 0 0 3 4 1 0 0 4 5 1 0 1 5 6 2 1 1 6 7 3 1 1 7 8 10 1 2 8 9 11 1 2 9 10 11 1 2
[ "Maybe there are better answers, but I have figured out a detour by calling the R function (statar::xtile) from within Python.\n# You need to first install rpy2\n# Activate rpy2 to use R functions/packages in Python \nimport rpy2\nimport rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\n# in particular, the following two lines activate R functions\n# for pandas' dataframes\nfrom rpy2.robjects import pandas2ri \npandas2ri.activate()\n\n### This chunk is the same as the original post\nimport pandas as pd\ndata = {'customer_id': range(1,11),\n 'purch_frequency': [1, 1, 1, 1, 1, 2, 3, 10, 11, 11]}\nsample_df = pd.DataFrame(data)\n\nsample_df['freq_bins1'] = \\\n (sample_df['purch_frequency'].rank(method = 'first')\n .transform(lambda x: pd.qcut(x, 2, labels = False)))\nsample_df['freq_bins2'] = \\\n (sample_df['purch_frequency'].rank(method = 'first')\n .transform(lambda x: pd.qcut(x, 3, labels = False)))\n\n### The following is to call R's statar::xtile\nstatar = importr('statar')\nsample_df['freq_bin3'] = statar.xtile(sample_df['purch_frequency'], 3)\n\nprint(sample_df)\n\nThe output looks like the following:\n customer_id purch_frequency freq_bins1 freq_bins2 freq_bin3\n0 1 1 0 0 1\n1 2 1 0 0 1\n2 3 1 0 0 1\n3 4 1 0 0 1\n4 5 1 0 1 1\n5 6 2 1 1 2\n6 7 3 1 1 2\n7 8 10 1 2 3\n8 9 11 1 2 3\n9 10 11 1 2 3\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074555385_pandas_python.txt
Q: Rectangle function that takes in two points and makes a rectangle in a global matrix not working I have a matrix of 50 by 50 that represents a grid of 25 by 25. I want all the positives on the grid to get +25 and the negatives keep their absolute value. But my actual problem is that I am creating a function that will create a rectangle based on the given values. Code (Python 3.10.5): def createRect(x,y,x1,y1,item): global matrix for i in range(x1,x,-1): matrix[i][y1]=item for i in range(y1,y-1,-1): matrix[x][i] = item for j in range(1,6): for i in range(y1-1,y-1,-1): matrix[x+j][i] = item matrix = [['. ']*50 for _ in range(50)] x,y,x1,y1 = 25+2,25+5,25+7,25+9 x2,y2,x3,y3 = 25+6,25+2,25+9,25+10 createRect(x,y,x1,y1,'# ') createRect(x2,y2,x3,y3,'- ') for i in range(50): for j in range(50): print(matrix[i][j], end = "") print() I tried using the function on another set of points but it only works for the first one, seeing as it's the only rectangle. A: I am a bit confused about your code there, as to why you fill the matrix backwards (for i in range(x1,x,-1)) and why you would include a hardcoded 6 there (for j in range(1,6)), so I might not have understood what the problem is. But wouldn't the following create the rectangles as per the given points?: def createRect(x, y, x1, y1, item): global matrix for i in range(x, x1+1): for j in range(y, y1+1): matrix[i][j] = item
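If the goal was only the border rather than a filled block (a sketch under that assumption, not from the answer):

def createRectOutline(x, y, x1, y1, item):
    # draw just the four edges between (x, y) and (x1, y1)
    for i in range(x, x1 + 1):
        matrix[i][y] = item
        matrix[i][y1] = item
    for j in range(y, y1 + 1):
        matrix[x][j] = item
        matrix[x1][j] = item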
Rectangle function that takes in two points and makes a rectangle in a global matrix not working
I have a matrix of 50 by 50 that represents a grid of 25 by 25. I want all the positives on the grid to get +25 and the negatives keep their absolute value. But my actual problem is that I am creating a function that will create a rectangle based on the given values. Code (Python 3.10.5): def createRect(x,y,x1,y1,item): global matrix for i in range(x1,x,-1): matrix[i][y1]=item for i in range(y1,y-1,-1): matrix[x][i] = item for j in range(1,6): for i in range(y1-1,y-1,-1): matrix[x+j][i] = item matrix = [['. ']*50 for _ in range(50)] x,y,x1,y1 = 25+2,25+5,25+7,25+9 x2,y2,x3,y3 = 25+6,25+2,25+9,25+10 createRect(x,y,x1,y1,'# ') createRect(x2,y2,x3,y3,'- ') for i in range(50): for j in range(50): print(matrix[i][j], end = "") print() I tried using the function on another set of points but it only works for the first one, seeing as it's the only rectangle.
[ "I am a bit confused about your code there, as why you fill the matrix backwards (for i in range(x1,x,-1)) and why would you include a hardcoded 6 there (for j in range(1,6)) so I might as well not have understood what the problem is. But wouldn't the following create the rectangles as per the given points? :\ndef createRect(x, y, x1, y1, item):\n global matrix\n \n for i in range(x, x1+1):\n for j in range(y, y1+1):\n matrix[i][j] = item\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074622435_python.txt
Q: Keyword argument repeated in python Flask I'm trying to build a restaurant list site using Flask. This is a part of my application.py code. @application.route("/list.html") def list_restaurants(): page = request.args.get("page", 0, type=int) limit = 4 category = request.args.get("category", "all") price = request.args.get("price", "all") area = request.args.get("area", "all") start_idx = limit*page end_idx = limit*(page+1) if category=="all" and price=="all" and area=="all": data = DB.get_restaurants() else: if category != "all" and price=="all" and area=="all": data = DB.get_restaurants_bycategory(category) elif price != "all" and category=="all" and area=="all": data = DB.get_restaurants_byprice(price) elif area != "all" and category=="all" and price=="all": data = DB.get_restaurants_byarea(area) else: data = DB.get_restaurants() tot_count = len(data) if tot_count<=limit: data = dict(list(data.items())[:tot_count]) else: data = dict(list(data.items())[start_idx:end_idx]) data = dict(sorted(data.items(), key=lambda x:x[1]['res_name'], reverse=False)) #print(data) page_count=len(data) return render_template( "list.html", datas=data.items(), total=tot_count, limit=limit, page=page, page_count=math.ceil(tot_count/4), category=category, price=price, area=area) This is the Python code that renders the HTML page where the error is taking place. The HTML page (list.html): <!DOCTYPE html> <head> <meta charset="UTF-8" /> <title>search</title> <script src="https://code.jquery.com/jquery-latest.min.js"></script> <script src="{{ url_for('static', filename='main.js') }}" defer></script> <script src="https://code.jquery.com/jquery-latest.min.js"></script> <style src="{{ url_for('static', filename='index.css') }}"></style> </head> <body> <div class="header" id="logo" onclick="location.href='list.html'"> <img src="/static/YomoJomoLogo.png" width="150px" /> </div> <div class="contents"> <div class="searchbar"> <form> <div class="searchbox"> <a style="color: black;">검색</a> <input type="text" name="search" style="width: 80%; height: 30px;" placeholder="Search by restaurant name or menu name." 
/> <input type="button" name="search" onclick="location.href='search3.html'" name="search" value="search" /> </div> <div class="login"> <div></div> <input class="loginbutton" type="button" onclick="location.href='login.html'" name="login" value="login" /> <input class="regbutton" type="button" onclick="location.href='register_restaurant.html'" name="register" value="register" /> </div> </form> </div> <br /><br /><br /> <nav> <script> $(document).ready(function () { //alert("{{category}}"); $('#category option:contains("{{category}}")').prop('selected', true); }); </script> <div class="menu"> <ul> <li> <a>Category</a> <ul id="category" name="category" onchange="location=this.value"> <li> <a href="{{url_for('list_restaurants', page=i, category='Korean', price='all', area='all')}}" >Korean</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Italian', price='all', area='all')}}" >Italian</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Chinese', price='all', area='all')}}" >Chinese</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Japanese', price='all', area='all')}}" >Japanese</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Cafeteria', price='all', area='all')}}" >Cafeteria</a > </li> </ul> </li> <li> <a>Price</a> <ul id="price" name="price" onchange="location=this.value"> <li> <a href="{{url_for('list_restaurants', page=i, price='below 5', category='all', area='all')}}" >below 5</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='5-10', category='all', area='all')}}" >below 10</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='10-15', category='all', area='all')}}" >below 15</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='15-20', category='all', area='all')}}" >below 20</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='above 20', category='all', area='all')}}" >above 20</a > </li> </ul> </li> <li> <a>Area</a> <ul id="area" name="area" onchange="location=this.value"> <li> <a href="{{url_for('list_restaurants', page=i, area='school', category='all', price='all')}}" >school</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, area='front', category='all', price='all')}}" >front</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, area='back', category='all', price='all')}}" >back</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, area='etc', category='all', price='all')}}" >etc</a > </li> </ul> </li> <li style="float: right;"><a href="search5.html">random</a></li> </ul> </div> </nav> {% if total > 0 %} <p style="text-align: center;"> <br />restaurant list - {{total}}<br /><br /> </p> {% for data in datas %} <div style="float: left; width: 25%;"> <div style="text-align: center;"> <a href="/view_detail/{{data[1].res_name}}/"> <p style="color: black;">{{data[1].res_name}}</p> <img src="/static/image/{{data[1].img_path}}" width="200" /></a ><br /> </div> </div> {% endfor %} <!---pagenation--> <div class="page-wrap" style="clear: both;"> <br /><br /> <div class="page-nation"> <ul> <li> {% for i in range(page_count)%} <a href="{{url_for('list_restaurants', page=i)}}" color="black">{{i+1}}</a> {% endfor %} </li> </ul> <br /><br /> </div> </div> {% else %} <p class="ranking"> Search Result </p> <div style="margin: 20px;"> <p style="text-align: center;">No result.<br /><br /></p> <div style=" float: left; margin-left: 150px; padding: 40px; border-radius: 5%; text-align: center; background-color: 
#f3f3f3; " > Register a new restaurant<br /><br /> <input type="button" onclick="location.href='register_restaurant.html'" name="register" style="height: 30px; background-color: #738b5f; border: none; color: white;" value="register a new restaurant" /> </div> <div style=" float: right; margin-right: 150px; padding: 40px; border-radius: 5%; text-align: center; background-color: #f3f3f3; " > Random recommendation<br /><br /> <input type="button" onclick="location.href='search5.html'" name="register" style="height: 30px; background-color: #738b5f; border: none; color: white;" value="random recommendation" /> </div> {% endif %} </div> </div> </body> This is the error traceback. Traceback (most recent call last) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2464, in __call__ return self.wsgi_app(environ, start_response) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2450, in wsgi_app response = self.handle_exception(e) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1867, in handle_exception reraise(exc_type, exc_value, tb) File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise raise value File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app response = self.full_dispatch_request() File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request rv = self.handle_user_exception(e) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception reraise(exc_type, exc_value, tb) File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise raise value File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/workspace/flask/application.py", line 108, in list_restaurants area=area) File "/usr/local/lib/python3.7/site-packages/flask/templating.py", line 138, in render_template ctx.app.jinja_env.get_or_select_template(template_name_or_list), File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 930, in get_or_select_template return self.get_template(template_name_or_list, parent, globals) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 883, in get_template return self._load_template(name, self.make_globals(globals)) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 857, in _load_template template = self.loader.load(self, name, globals) File "/usr/local/lib/python3.7/site-packages/jinja2/loaders.py", line 127, in load code = environment.compile(source, name, filename) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 636, in compile return self._compile(source, filename) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 601, in _compile return compile(source, filename, "exec") File "/workspace/flask/templates/list.html", line 57 ^ SyntaxError: keyword argument repeated It keeps pointing at the same line, not a specific part of the code. I tried adding blank lines at line 57, and it still reports the syntax error on the same line. The code was working well and suddenly it stopped. I have no idea how to deal with this 'keyword argument repeated' syntax error. Looking for some advice! 
A: <input type="button" name="search" onclick="location.href='search3.html'" name="search" value="search" /> You're using name="..." twice.
Keyword argument repeated in python Flask
I'm trying to build a restaurant list site using Flask. This is a part of my application.py code. @application.route("/list.html") def list_restaurants(): page = request.args.get("page", 0, type=int) limit = 4 category = request.args.get("category", "all") price = request.args.get("price", "all") area = request.args.get("area", "all") start_idx = limit*page end_idx = limit*(page+1) if category=="all" and price=="all" and area=="all": data = DB.get_restaurants() else: if category != "all" and price=="all" and area=="all": data = DB.get_restaurants_bycategory(category) elif price != "all" and category=="all" and area=="all": data = DB.get_restaurants_byprice(price) elif area != "all" and category=="all" and price=="all": data = DB.get_restaurants_byarea(area) else: data = DB.get_restaurants() tot_count = len(data) if tot_count<=limit: data = dict(list(data.items())[:tot_count]) else: data = dict(list(data.items())[start_idx:end_idx]) data = dict(sorted(data.items(), key=lambda x:x[1]['res_name'], reverse=False)) #print(data) page_count=len(data) return render_template( "list.html", datas=data.items(), total=tot_count, limit=limit, page=page, page_count=math.ceil(tot_count/4), category=category, price=price, area=area) This is the Python code that renders the HTML page where the error is taking place. The HTML page (list.html): <!DOCTYPE html> <head> <meta charset="UTF-8" /> <title>search</title> <script src="https://code.jquery.com/jquery-latest.min.js"></script> <script src="{{ url_for('static', filename='main.js') }}" defer></script> <script src="https://code.jquery.com/jquery-latest.min.js"></script> <style src="{{ url_for('static', filename='index.css') }}"></style> </head> <body> <div class="header" id="logo" onclick="location.href='list.html'"> <img src="/static/YomoJomoLogo.png" width="150px" /> </div> <div class="contents"> <div class="searchbar"> <form> <div class="searchbox"> <a style="color: black;">검색</a> <input type="text" name="search" style="width: 80%; height: 30px;" placeholder="Search by restaurant name or menu name." 
/> <input type="button" name="search" onclick="location.href='search3.html'" name="search" value="search" /> </div> <div class="login"> <div></div> <input class="loginbutton" type="button" onclick="location.href='login.html'" name="login" value="login" /> <input class="regbutton" type="button" onclick="location.href='register_restaurant.html'" name="register" value="register" /> </div> </form> </div> <br /><br /><br /> <nav> <script> $(document).ready(function () { //alert("{{category}}"); $('#category option:contains("{{category}}")').prop('selected', true); }); </script> <div class="menu"> <ul> <li> <a>Category</a> <ul id="category" name="category" onchange="location=this.value"> <li> <a href="{{url_for('list_restaurants', page=i, category='Korean', price='all', area='all')}}" >Korean</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Italian', price='all', area='all')}}" >Italian</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Chinese', price='all', area='all')}}" >Chinese</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Japanese', price='all', area='all')}}" >Japanese</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, category='Cafeteria', price='all', area='all')}}" >Cafeteria</a > </li> </ul> </li> <li> <a>Price</a> <ul id="price" name="price" onchange="location=this.value"> <li> <a href="{{url_for('list_restaurants', page=i, price='below 5', category='all', area='all')}}" >below 5</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='5-10', category='all', area='all')}}" >below 10</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='10-15', category='all', area='all')}}" >below 15</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='15-20', category='all', area='all')}}" >below 20</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, price='above 20', category='all', area='all')}}" >above 20</a > </li> </ul> </li> <li> <a>Area</a> <ul id="area" name="area" onchange="location=this.value"> <li> <a href="{{url_for('list_restaurants', page=i, area='school', category='all', price='all')}}" >school</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, area='front', category='all', price='all')}}" >front</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, area='back', category='all', price='all')}}" >back</a > </li> <li> <a href="{{url_for('list_restaurants', page=i, area='etc', category='all', price='all')}}" >etc</a > </li> </ul> </li> <li style="float: right;"><a href="search5.html">random</a></li> </ul> </div> </nav> {% if total > 0 %} <p style="text-align: center;"> <br />restaurant list - {{total}}<br /><br /> </p> {% for data in datas %} <div style="float: left; width: 25%;"> <div style="text-align: center;"> <a href="/view_detail/{{data[1].res_name}}/"> <p style="color: black;">{{data[1].res_name}}</p> <img src="/static/image/{{data[1].img_path}}" width="200" /></a ><br /> </div> </div> {% endfor %} <!---pagenation--> <div class="page-wrap" style="clear: both;"> <br /><br /> <div class="page-nation"> <ul> <li> {% for i in range(page_count)%} <a href="{{url_for('list_restaurants', page=i)}}" color="black">{{i+1}}</a> {% endfor %} </li> </ul> <br /><br /> </div> </div> {% else %} <p class="ranking"> Search Result </p> <div style="margin: 20px;"> <p style="text-align: center;">No result.<br /><br /></p> <div style=" float: left; margin-left: 150px; padding: 40px; border-radius: 5%; text-align: center; background-color: 
#f3f3f3; " > Register a new restaurant<br /><br /> <input type="button" onclick="location.href='register_restaurant.html'" name="register" style="height: 30px; background-color: #738b5f; border: none; color: white;" value="register a new restaurant" /> </div> <div style=" float: right; margin-right: 150px; padding: 40px; border-radius: 5%; text-align: center; background-color: #f3f3f3; " > Random recommendation<br /><br /> <input type="button" onclick="location.href='search5.html'" name="register" style="height: 30px; background-color: #738b5f; border: none; color: white;" value="random recommendation" /> </div> {% endif %} </div> </div> </body> This is the error traceback. Traceback (most recent call last) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2464, in __call__ return self.wsgi_app(environ, start_response) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2450, in wsgi_app response = self.handle_exception(e) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1867, in handle_exception reraise(exc_type, exc_value, tb) File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise raise value File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app response = self.full_dispatch_request() File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request rv = self.handle_user_exception(e) File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception reraise(exc_type, exc_value, tb) File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise raise value File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/workspace/flask/application.py", line 108, in list_restaurants area=area) File "/usr/local/lib/python3.7/site-packages/flask/templating.py", line 138, in render_template ctx.app.jinja_env.get_or_select_template(template_name_or_list), File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 930, in get_or_select_template return self.get_template(template_name_or_list, parent, globals) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 883, in get_template return self._load_template(name, self.make_globals(globals)) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 857, in _load_template template = self.loader.load(self, name, globals) File "/usr/local/lib/python3.7/site-packages/jinja2/loaders.py", line 127, in load code = environment.compile(source, name, filename) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 636, in compile return self._compile(source, filename) File "/usr/local/lib/python3.7/site-packages/jinja2/environment.py", line 601, in _compile return compile(source, filename, "exec") File "/workspace/flask/templates/list.html", line 57 ^ SyntaxError: keyword argument repeated It keeps pointing at the same line, not a specific part of the code. I tried adding blank lines at line 57, and it still reports the syntax error on the same line. The code was working well and suddenly it stopped. I have no idea how to deal with this 'keyword argument repeated' syntax error. Looking for some advice!
[ " <input\n type=\"button\"\n name=\"search\"\n onclick=\"location.href='search3.html'\"\n name=\"search\"\n value=\"search\"\n />\n\nYou're using name=\"...\" twice.\n" ]
[ 0 ]
[]
[]
[ "flask", "python" ]
stackoverflow_0074622657_flask_python.txt
Q: how to write an if-statement in python that incorporates platform.platform I am trying to write a program that prints different things depending on the OS, and I want to write an if-statement to do that. I'm new to Python, but after looking online for a bit I haven't been able to find any solution. import platform print('platform:', platform.platform()) if platform.platform == mac0S: print('this is a mac') A: I'll be completing the previous answers and giving some examples. So first of all you're going to want to check the platform documentation. There you'll find that platform.platform (https://docs.python.org/3/library/platform.html#platform.platform) is a function that returns a single string, but it includes the MacOS version, which doesn't suit your example, so we'll be using platform.system instead: It returns Windows, Linux or Darwin (for OSX). import platform print(platform.system()) if platform.system() == "Darwin": print("This is a Mac") elif platform.system() == "Windows": print("This is a Windows") elif platform.system() == "Linux": print("This is a lean, mean, Linux machine")
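An equivalent check via sys.platform (a sketch, not from the answer): it returns 'darwin', 'win32', or 'linux', so the same branching works without the platform module.

import sys

if sys.platform == "darwin":
    print("this is a mac")
elif sys.platform == "win32":
    print("this is windows")
elif sys.platform.startswith("linux"):  # "linux2" on some old Python 2 builds
    print("this is linux")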
how to write an if-statement in python that incorporates platform.platform
I am trying to write a program that prints different things depending on the OS, and I want to write an if-statement to do that. I'm new to Python, but after looking online for a bit I haven't been able to find any solution. import platform print('platform:', platform.platform()) if platform.platform == mac0S: print('this is a mac')
[ "I'll be completing the previous answers and giving some examples.\nSo first of all you're going to want to check the platform Documentation\nThere you'll find that [platform.platform](https://docs.python.org/3/library/platform.html#platform.platform) is a function that returns a single string, but it includes the MacOS version, which doesn't suit your example, so we'll be using platform.system instead: It returns Windows, Linux or Darwin (for OSX).\nimport platform\n\nprint(platform.system())\n\nif platform.system() == \"Darwin\":\n print(\"This is a Mac\")\nelif platform.system() == \"Windows\":\n print(\"This is a Windows\")\nelif platform.system() == \"Linux\":\n print(\"This is a lean, mean, Linux machine\")\n\nHope this helps!\n" ]
[ 1 ]
[]
[]
[ "if_statement", "python" ]
stackoverflow_0074621783_if_statement_python.txt
Q: How do I get the correct outputs for this chess board? I am trying to make a program of a chess board. When a user inputs an x and y value it will either output "black" or "white". x = int(input("Please enter your (x) first number 1-8::")) y = int(input("Please enter your (y) second number 1-8::")) column = x % 2 row = y % 2 if column %2 == 0 and row %2 == 1: print("") print("white") elif row %2 ==0 and column %2 == 1: print("") print("black") Whenever I input 1 for "x" and 2 for "y" it outputs "black"; great, this is the correct output. But whenever I input some other numbers such as 2 and 2, it gives me a blank output. Whenever I input 1 and 4, it outputs "black" when the correct output should have been "white". How do I make it so that whenever the user inputs two numbers ranging from 1 to 8, it outputs the correct colour tile? I am not trying to make the code more advanced but would appreciate some help! This is the chess board I am basing the colours on. (Do not mind the text on the picture.) A: Instead of looking at x and y separately, just check the sum. If the sum is even, it's black; if the sum is odd, it is white. I added a lookup of the name in a python dict, but you can just do it with if conditions if you prefer. x = int(input("Please enter your (x) first number 1-8::")) y = int(input("Please enter your (y) second number 1-8::")) color_picker = {0: "Black", 1: "White"} if not 0<x<9 or not 0<y<9: print("Input valid number!!") else: print(color_picker[(x+y)%2]) Let me know if it helps. A: if column %2 == 0 and row %2 == 1: ... elif row %2 ==0 and column %2 == 1: ... This covers the case where column is even and row is odd, and the case where row is even and column is odd. But what about the cases where they are both even, or both odd? A: x = int(input("Please enter your (x) first number 1-8::")) y = int(input("Please enter your (y) second number 1-8::")) column = x row = y if (column + row) %2 == 0: print("") print("black") elif (column + row) %2 == 1: print("") print("white") else: print("Input valid number!!")
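A quick way to sanity-check the parity rule against the picture (a sketch; which parity counts as black depends on how the board in the image is oriented):

for y in range(8, 0, -1):              # print row 8 at the top
    row = ""
    for x in range(1, 9):
        row += "B " if (x + y) % 2 == 0 else "W "
    print(row)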
How do I get the correct outputs for this chess board?
I am trying to make a program of a chess board. When a user inputs an x and y value it will either output "black" or "white". x = int(input("Please enter your (x) first number 1-8::")) y = int(input("Please enter your (y) second number 1-8::")) column = x % 2 row = y % 2 if column %2 == 0 and row %2 == 1: print("") print("white") elif row %2 ==0 and column %2 == 1: print("") print("black") Whenever I input 1 for "x" and 2 for "y" it outputs "black"; great, this is the correct output. But whenever I input some other numbers such as 2 and 2, it gives me a blank output. Whenever I input 1 and 4, it outputs "black" when the correct output should have been "white". How do I make it so that whenever the user inputs two numbers ranging from 1 to 8, it outputs the correct colour tile? I am not trying to make the code more advanced but would appreciate some help! This is the chess board I am basing the colours on. (Do not mind the text on the picture.)
[ "Instead of looking at x and y separately, just check the sum.\nIf the sum is even, it's black, if the sum is odd, it is white.\nI added a lookup of the name in a python dict, but you can just do it with if conditions if you prefer.\nx = int(input(\"Please enter your (x) first number 1-8::\"))\ny = int(input(\"Please enter your (y) second number 1-8::\"))\n\ncolor_picker = {0: \"Black\", 1: \"White\"}\n\nif not 0<x<9 or not 0<y<9:\n print(\"Input valid number!!\")\nelse:\n color\n print(color_picker[(x+y)%2])\n\nLet me know if it helps.\n", "if column %2 == 0 and row %2 == 1:\n ...\n\nelif row %2 ==0 and column %2 == 1:\n ...\n\nThis covers the case where column is even and row is odd, and the case where row is even and column is odd.\nBut what about the cases where they are both even, or both odd?\n", "x = int(input(\"Please enter your (x) first number 1-8::\"))\ny = int(input(\"Please enter your (y) second number 1-8::\"))\n\ncolumn = x\nrow = y\n\n\nif (column + row) %2 == 0:\n\n print(\"\")\n print(\"black\")\n\n\nelif (column + row) %2 == 1:\n print(\"\")\n print(\"white\")\n\nelse:\n print(\"Input valid number!!\")\n\n" ]
[ 1, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074622572_python.txt
Q: Python restaurant program I'm a beginner in Python & this is my 3rd program. I'm learning functions. The user should be able to choose a number first from the menu (1. appetizers, 2.mains 3. drinks, 4.view orders, 5.exit) each one has sub-choices (example appetizers include salad, chips...etc). User should be able to return back after choosing an item. I'm stuck on the View Order option: the user needs to be able to see each of the items in their order with the price next to each item, and lastly I want to be able to show the total bill. How can I view all orders from the user? How can I calculate the total? Should I use a loop? def menu(): print("--MCQUAN MENU--") print("1. Appetizers") print("2. Mains") print("3. Drinks") print("4. Desserts") print("5. View order") print("6. Exit") print() def user_option(): choice = int(input("Choose from the menu & enter a number 1-6: ")) if choice == 1: print() print("APP - heres the apps") print("Salad - $5") print("Chips & salsa - $6") print("Soup - $10") elif choice == 2: print("MAINS - here the mains") print("Pasta - $13") print("Burger - $12") print("Pizza - $10") elif choice == 3: print("DRINKS - heres drinks") print("Water - $0") print("Tea - $3") print("Sprite - $2") elif choice == 4: print("DESSERTS - heres desserts") print("Cake - $6") print("Ice cream - $5 ") print("Pie - $7") elif choice == 5: print("VIEW ORDER -- heres ur order so far") elif choice == 6: print("Exiting") else: print("Invalid") choice = int(input("Enter number 1-6")) def app(): cost = 0 total = 0 user_food = input("Choose which u want ") if user_food == "salad": cheese = input("Cheese? (+$0.50) yes/no ") dressing = input("Dressing? (+$1) yes/no ") elif user_food == "chips & salsa": spicy = input("Spicy? yes/no ") cilantro = input("Cilantro? yes/no ") elif user_food == "soup": soup = input("Chicken noodle or mushroom? ") else: print("Alright!") user_option() user_food = input("Choose which u want ") def main(): user_food = input("Choose which u want ") if user_food == "pasta": cheese_2 = input("Cheese? yes/no ") chilli_f = input("Chilli flakes? yes/no ") elif user_food == "burger": onions = input("Onions? yes/no ") pickles = input("Pickles? yes/no ") elif user_food == "Pizza": pizza = input("Cheese or pepperoni? ") else: print("Alright!") user_option() user_food = input("Choose which u want ") def drinks(): user_food = input("Choose which u want ") if user_food == "water": size = input("Small, medium or large? ") user_option() user_food = input("Choose which u want ") def desserts(): user_food = input("Choose which you want") if user_food == "cake": w_c = input("Whipped cream? yes/no ") cherry = input("Cherry? yes/no ") elif user_food == "ice cream": w_c = input("Whipped cream? yes/no") cherry = input("Cherry? yes/no ") elif user_food == "pie": w_c = input("Whipped cream? yes/no") cherry = input("Cherry? yes/no ") else: print("Alright") user_option() user_food = input("Choose which u want ") #main menu() print("Welcome to McQuans!") print() user_option() app() main() drinks() desserts() A: Ideally you do not want to keep your data in prints. We can use data structures like lists or dicts. Here's what I suggest: We use a list for the client order, since we can keep adding to it and we'll be adding things sequentially. We use dicts (python dictionaries) for the menus, so we can keep track of all the items per menu and the price of each item. client_order = [] app_menu = { "Salad": 5, "Chips & salsa": 6, "Soup": 10 } Please check out this Dict tutorial if you are not familiar with them. 
Alright, let's start with this. Once you've got it I'll help you out some more if you need.
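Building on that, a minimal sketch of how View Order and the total could work with those structures (names like add_to_order and view_order are placeholders, not from the original code); each order entry is stored as an (item, price) pair so one loop both prints the bill and sums it:

# Minimal sketch building on the suggested client_order list and menu dict.
client_order = []

app_menu = {
    "salad": 5,
    "chips & salsa": 6,
    "soup": 10,
}

def add_to_order(item, menu):
    # Look the price up in the menu dict and remember the choice.
    if item in menu:
        client_order.append((item, menu[item]))
    else:
        print("Invalid")

def view_order():
    # Loop over the stored items, printing each price, then the total.
    total = 0
    for item, price in client_order:
        print(f"{item} - ${price}")
        total += price
    print(f"Total: ${total}")

add_to_order("salad", app_menu)
add_to_order("soup", app_menu)
view_order()

Calling view_order() from the choice == 5 branch would then replace the placeholder print there.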
Python restaurant program
I'm a beginner in python & this is my 3rd program. I'm learning functions. The user should be able to choose a number first from the menu (1. appetizers, 2.mains 3. drinks, 4.view orders, 5.exit) each one has sub-choices (example appetizers include salad, chips...etc). User should be able to return back after choosing an item. I'm stuck in View Order option, the user needs to be able to see each of the items in their order + prices next to each item & Lastly I want to be able to show total bill. How can i view all orders from user? How can i calculate total? should i use loop? def menu(): print("--MCQUAN MENU--") print("1. Appetizers") print("2. Mains") print("3. Drinks") print("4. Desserts") print("5. View order") print("6. Exit") print() def user_option(): choice = int(input("Choose from the menu & enter a number 1-6: ")) if choice == 1: print() print("APP - heres the apps") print("Salad - $5") print("Chips & salsa - $6") print("Soup - $10") elif choice == 2: print("MAINS - here the mains") print("Pasta - $13") print("Burger - $12") print("Pizza - $10") elif choice == 3: print("DRINKS - heres drinks") print("Water - $0") print("Tea - $3") print("Sprite - $2") elif choice == 4: print("DESSERTS - heres desserts") print("Cake - $6") print("Ice cream - $5 ") print("Pie - $7") elif choice == 5: print("VIEW ORDER -- heres ur order so far") elif choice == 6: print("Exiting") else: print("Invalid") choice = int(input("Enter number 1-6")) def app(): cost = 0 total = 0 user_food = input("Choose which u want ") if user_food == "salad": cheese = input("Cheese? (+$0.50) yes/no ") dressing = input("Dressing? (+$1) yes/no ") elif user_food == "chips & salsa": spicy = input("Spicy? yes/no ") cilantro = input("Cilantro? yes/no ") elif user_food == "soup": soup = input("Chicken noodle or mushroom? ") else: print("Alright!") user_option() user_food = input("Choose which u want ") def main(): user_food = input("Choose which u want ") if user_food == "pasta": cheese_2 = input("Cheese? yes/no ") chilli_f = input("Chilli flakes? yes/no ") elif user_food == "burger": onions = input("Onions? yes/no ") pickles = input("Pickles? yes/no ") elif user_food == "Pizza": pizza = input("Cheese or pepperoni? ") else: print("Alright!") user_option() user_food = input("Choose which u want ") def drinks(): user_food = input("Choose which u want ") if user_food == "water": size = input("Small, medium or large? ") user_option() user_food = input("Choose which u want ") def desserts(): user_food = input("Choose which you want") if user_food == "cake": w_c = input("Whipped cream? yes/no ") cherry = input("Cherry? yes/no ") elif user_food == "ice cream": w_c = input("Whipped cream? yes/no") cherry = input("Cherry? yes/no ") elif user_food == "pie": w_c = input("Whipped cream? yes/no") cherry = input("Cherry? yes/no ") else: print("Alright") user_option() user_food = input("Choose which u want ") #main menu() print("Welcome to McQuans!") print() user_option() app() main() drinks() desserts()`
[ "Ideally you do not want to keep your data in prints.\nWe can use data structures like lists or dicts\nHere's what I suggest:\n\nWe use a list for the client order, since we can keep adding to it and we'll be adding things sequentially.\nWe use dicts (python dictionaries) for the menus, so we can keep track of all the items per menu and the price of each item.\n\nclient_order = []\n\napp_menu = {\n \"Salad\": 5,\n \"Chips & salsa\": 6,\n \"Soup\": 10\n}\n\nPlease check out this Dict tutorial if you are not familiar with them.\nAlright, let's start with this. Once you've got it I'll help you out some more if you need.\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074622744_python.txt
Q: Check if user has Admin -- Discord.py I would like to make a command that requires the user to have Administrator permission to execute the command. An example is when a user first invited bot on the server, members must not be able to the use the so called "permissions" command. However members with the moderator role should have access to it and execute the rest of the command. Would anyone be able to help with this in my command? A: It's still not clear what you want to reserve who you want to command to be avaliable to however, the has_permissions decorator allows you to set what permissions a user can use to access a command. This can be set within the parameters For example, if you just only want a member with Administrator permissions to have access to your command, you can add @commands.has_permissions(administrator = True) after the command decorator. Heres an example, @bot.command() @commands.has_permissions(administrator = True) async def permission(ctx): await ctx.send('You have administrator access...') More information can be found in Discord's documentation: https://discordpy.readthedocs.io/en/latest/ext/commands/api.html EDIT: However, using an if statement within a command can be done with: if ctx.author.guild_permissions.administrator: ... A: While this question is aimed at discord.py - this question came up when I was searching for how to do this with it's sort-of successor library Discord Interactions as discord.py is quite limited - so I'll let people know how to do this with interactions too. So for those wondering, this is how you check if the calling user is a Server Administrator on interactions: import interactions bot = interactions.Client(token=TOKEN) @bot.command(scope=SERVER_IDS) async def my_command(ctx): perms = (await ctx.author.get_guild_permissions(ctx.guild_id)) if interactions.Permissions.ADMINISTRATOR in perms: return await ctx.send("You are an admin") await ctx.send("You are NOT an admin") Additionally, here's a function snippet which can be used within your commands to quickly check if the caller is a server admin: async def is_server_admin(ctx: Union[CommandContext, ComponentContext]) -> bool: """Returns :bool:`True` if the calling user is a Discord Server Administrator""" perms = (await ctx.author.get_guild_permissions(ctx.guild_id)) return interactions.Permissions.ADMINISTRATOR in perms Example usage of that function: @bot.command(scope=SERVER_IDS) async def my_command(ctx): if await is_server_admin(ctx): return await ctx.send("You are an admin") await ctx.send("You are NOT an admin")
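As a follow-up to the decorator approach above, a hedged sketch (assuming discord.py 2.x) of what happens when a non-admin calls the command: the failed check raises commands.MissingPermissions, which a command-local error handler can catch:

# Sketch: the has_permissions check plus an error handler for non-admins.
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())

@bot.command()
@commands.has_permissions(administrator=True)
async def permission(ctx):
    await ctx.send("You have administrator access...")

@permission.error
async def permission_error(ctx, error):
    # Raised automatically when the caller lacks the required permission.
    if isinstance(error, commands.MissingPermissions):
        await ctx.send("You need the Administrator permission for this.")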
Check if user has Admin -- Discord.py
I would like to make a command that requires the user to have the Administrator permission to execute it. For example, when a user first invites the bot to the server, ordinary members must not be able to use the so-called "permissions" command; members with the moderator role, however, should have access to it and be able to execute the rest of the command. Would anyone be able to help with this in my command?
[ "It's still not clear what you want to reserve who you want to command to be avaliable to however, the has_permissions decorator allows you to set what permissions a user can use to access a command. This can be set within the parameters\nFor example, if you just only want a member with Administrator permissions to have access to your command, you can add @commands.has_permissions(administrator = True) after the command decorator. Heres an example,\n@bot.command()\n@commands.has_permissions(administrator = True)\nasync def permission(ctx):\n await ctx.send('You have administrator access...')\n\nMore information can be found in Discord's documentation:\nhttps://discordpy.readthedocs.io/en/latest/ext/commands/api.html\nEDIT:\nHowever, using an if statement within a command can be done with:\nif ctx.author.guild_permissions.administrator:\n...\n\n", "While this question is aimed at discord.py - this question came up when I was searching for how to do this with it's sort-of successor library Discord Interactions as discord.py is quite limited - so I'll let people know how to do this with interactions too.\nSo for those wondering, this is how you check if the calling user is a Server Administrator on interactions:\nimport interactions\nbot = interactions.Client(token=TOKEN)\n\n@bot.command(scope=SERVER_IDS)\nasync def my_command(ctx):\n perms = (await ctx.author.get_guild_permissions(ctx.guild_id))\n if interactions.Permissions.ADMINISTRATOR in perms:\n return await ctx.send(\"You are an admin\")\n \n await ctx.send(\"You are NOT an admin\")\n\nAdditionally, here's a function snippet which can be used within your commands to quickly check if the caller is a server admin:\nasync def is_server_admin(ctx: Union[CommandContext, ComponentContext]) -> bool:\n \"\"\"Returns :bool:`True` if the calling user is a Discord Server Administrator\"\"\"\n perms = (await ctx.author.get_guild_permissions(ctx.guild_id))\n return interactions.Permissions.ADMINISTRATOR in perms\n\nExample usage of that function:\n@bot.command(scope=SERVER_IDS)\nasync def my_command(ctx):\n if await is_server_admin(ctx):\n return await ctx.send(\"You are an admin\")\n await ctx.send(\"You are NOT an admin\")\n\n" ]
[ 4, 0 ]
[]
[]
[ "discord.py", "python" ]
stackoverflow_0066346332_discord.py_python.txt
Q: Pandas REGEX not returning expected results using "extract" I am attempting to use REGEX to extract connection strings from blocks of text in a pandas dataframe. My REGEX works on REGEX101.com (see Screenshot below). Link to my saved test here: https://regex101.com/r/ILnpS0/1 When I try to run the REGEX in a Pandas dataframe, I don’t get any REGEX matches/extracts (but no an error), despite getting matches on REGEX101. Link to my code in a Google Colab notebook: https://colab.research.google.com/drive/1WAMlGkHAOqe38Lzo_K0KHwD_ynVJyIq1?usp=sharing Therefore the issue appears to be how pandas is interpreting my REGEX Can anyone identify why I not getting any REGEX matches using pandas? REGEX Logic My REGEX consists of 3 groups (?=Source = DB2.Database)(.*?)(?=\]\)) Group 1: (?=Source = DB2.Database) is a “Lookbehind” that looks for the text “Source = DB2.Database” i.e the start of my connection string. Group 2: (.?)* looks for any characters and acts as a span between the 1st and 3rd group. Group 3: (?=])) is a look behind assertion that aims to identify the end of the connection string) Additional tests: When I run a simplified version of the REGEX (DB2.Database) I get the match, as expected. This example is also in the notebook linked above. My code (same as in linked Colab Notebook) import pandas as pd myDF = pd.DataFrame({'conn_str':['''{'expression': 'let\n Source = Snowflake.Databases("whitehouse.australia-east.azure.snowflakecomputing.com","USER"),\n WH_DW_Database = Source{[Name="WHOUSE_DW",Kind="Database"]}[Data],\n DWH_Schema = SPARK_DW_Database{[Name="DWH",Kind="Schema"]}[Data],\n D_ACCOUNT_CURR_View = DWH_Schema{[Name="D_ACCOUNT_CURR",Kind="View"]}[Data],\n #"Filtered Rows" = Table.SelectRows(D_ACCOUNT_CURR_View, each ([PAYMENT_TYPE] = "POSTPAID") and ([ACCOUNT_SEGMENT] <> "Consumer") ),\n #"Removed Other Columns" = Table.SelectColumns(#"Filtered Rows",{"DESCRIPTION", "ACCOUNT_NUMBER"})\nin\n #"Removed Other Columns"'}''','''{'expression': 'let\n Source = DB2.Database("69.699.69.69", "WHUDB", [HierarchicalNavigation=true, Implementation="Microsoft", Query="SELECT\n base.HEAD_PARTY_NO,\n base.HEAD_PARTY_NAME,\n usg.BILL_MONTH,\n base.CUSTOMER_NUMBER,\n base.ACCOUNT_NUMBER,\n base.CHARGE_ARRANGEMENT_NUMBER,\n usg.DATA_MB,\n usg.DATA_MB/1024 as Data_GB,\n base.PRODUCT_DESCRIPTION,\nbase.LINE_DESCRIPTION\n\nFROM PRODUCT.MOBILE_ACTIVE_BASE base\nLEFT JOIN PRODUCT.MOBILE_USAGE_SUMMARY usg\n\nON\n base.CHARGE_ARRANGEMENT_NUMBER = usg.CHARGE_ARRANGEMENT_NUMBER\n\nand \nbase.CHARGE_ARRANGEMENT_ID = usg.CHARGE_ARRANGEMENT_ID\n\nWHERE base.PRODUCT_DESCRIPTION LIKE \'%Share%\' \n--AND (base.HEAD_PARTY_NO = 71474425 or base.HEAD_PARTY_NO = 73314303)\nAND usg.BILL_MONTH BETWEEN (current_date - 5 MONTHS) and CURRENT_DATE \nOrder by base.ACCOUNT_NUMBER,Data_MB desc with ur"]),\n #"Added Custom1" = Table.AddColumn(Source, "Line Number", each Text.Middle([CHARGE_ARRANGEMENT_NUMBER],1,14)),\n #"Renamed Columns" = Table.RenameColumns(#"Added Custom1",{{"LINE_DESCRIPTION", "Line Description"}, {"BILL_MONTH", "Bill Month"}}),\n #"Filtered Rows" = Table.SelectRows(#"Renamed Columns", each ([PRODUCT_DESCRIPTION] <> "Sharer Unlimited NZ & Aus mins + Unlimited NZ & Aus texts" and [PRODUCT_DESCRIPTION] <> "Sharer with Data Stretch"))\nin\n #"Filtered Rows"'}''']}) myDF #why isn't this working? 
#this regex works on REGEX 101 : https://regex101.com/r/ILnpS0/1 regex_db =r'(?=Source = DB2.Database)(.*?)(?=\]\))' myDF['SQLDB connection2'] = myDF['conn_str'].str.extract(regex_db ,expand=True) myDF #This is a simplified version of the above REGEX, and works to extracts the text "DB2.Database" #This works fine regex_db2 =r'(DB2.Database)' myDF['SQLDB connection1'] = myDF['conn_str'].str.extract(regex_db2 ,expand=True) myDF Any suggestions on what I am doing wrong? A: Try running your regex in dot all mode, so that .* will match across newlines: regex_db = r'(?=Source = DB2.Database)(.*?)(?=\]\))' myDF["SQLDB connection2"] = myDF["conn_str"].str.extract(regex_db, expand=True, flags=re.S) myDF
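A minimal self-contained check of that fix, with a shortened stand-in for the real cell text (the sample string here is illustrative, not the question's full data): re.S lets .*? cross the embedded newlines:

# Demo: without re.S the extract returns NaN; with it, the match succeeds.
import re
import pandas as pd

df = pd.DataFrame({"conn_str": ['let\n Source = DB2.Database("69.699.69.69", "WHUDB",\n [Query="SELECT 1"]),\n next step']})

regex_db = r'(?=Source = DB2.Database)(.*?)(?=\]\))'
df["SQLDB connection2"] = df["conn_str"].str.extract(regex_db, expand=True, flags=re.S)
print(df["SQLDB connection2"][0])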
Pandas REGEX not returning expected results using "extract"
I am attempting to use REGEX to extract connection strings from blocks of text in a pandas dataframe. My REGEX works on REGEX101.com (see Screenshot below). Link to my saved test here: https://regex101.com/r/ILnpS0/1 When I try to run the REGEX in a Pandas dataframe, I don’t get any REGEX matches/extracts (but no an error), despite getting matches on REGEX101. Link to my code in a Google Colab notebook: https://colab.research.google.com/drive/1WAMlGkHAOqe38Lzo_K0KHwD_ynVJyIq1?usp=sharing Therefore the issue appears to be how pandas is interpreting my REGEX Can anyone identify why I not getting any REGEX matches using pandas? REGEX Logic My REGEX consists of 3 groups (?=Source = DB2.Database)(.*?)(?=\]\)) Group 1: (?=Source = DB2.Database) is a “Lookbehind” that looks for the text “Source = DB2.Database” i.e the start of my connection string. Group 2: (.?)* looks for any characters and acts as a span between the 1st and 3rd group. Group 3: (?=])) is a look behind assertion that aims to identify the end of the connection string) Additional tests: When I run a simplified version of the REGEX (DB2.Database) I get the match, as expected. This example is also in the notebook linked above. My code (same as in linked Colab Notebook) import pandas as pd myDF = pd.DataFrame({'conn_str':['''{'expression': 'let\n Source = Snowflake.Databases("whitehouse.australia-east.azure.snowflakecomputing.com","USER"),\n WH_DW_Database = Source{[Name="WHOUSE_DW",Kind="Database"]}[Data],\n DWH_Schema = SPARK_DW_Database{[Name="DWH",Kind="Schema"]}[Data],\n D_ACCOUNT_CURR_View = DWH_Schema{[Name="D_ACCOUNT_CURR",Kind="View"]}[Data],\n #"Filtered Rows" = Table.SelectRows(D_ACCOUNT_CURR_View, each ([PAYMENT_TYPE] = "POSTPAID") and ([ACCOUNT_SEGMENT] <> "Consumer") ),\n #"Removed Other Columns" = Table.SelectColumns(#"Filtered Rows",{"DESCRIPTION", "ACCOUNT_NUMBER"})\nin\n #"Removed Other Columns"'}''','''{'expression': 'let\n Source = DB2.Database("69.699.69.69", "WHUDB", [HierarchicalNavigation=true, Implementation="Microsoft", Query="SELECT\n base.HEAD_PARTY_NO,\n base.HEAD_PARTY_NAME,\n usg.BILL_MONTH,\n base.CUSTOMER_NUMBER,\n base.ACCOUNT_NUMBER,\n base.CHARGE_ARRANGEMENT_NUMBER,\n usg.DATA_MB,\n usg.DATA_MB/1024 as Data_GB,\n base.PRODUCT_DESCRIPTION,\nbase.LINE_DESCRIPTION\n\nFROM PRODUCT.MOBILE_ACTIVE_BASE base\nLEFT JOIN PRODUCT.MOBILE_USAGE_SUMMARY usg\n\nON\n base.CHARGE_ARRANGEMENT_NUMBER = usg.CHARGE_ARRANGEMENT_NUMBER\n\nand \nbase.CHARGE_ARRANGEMENT_ID = usg.CHARGE_ARRANGEMENT_ID\n\nWHERE base.PRODUCT_DESCRIPTION LIKE \'%Share%\' \n--AND (base.HEAD_PARTY_NO = 71474425 or base.HEAD_PARTY_NO = 73314303)\nAND usg.BILL_MONTH BETWEEN (current_date - 5 MONTHS) and CURRENT_DATE \nOrder by base.ACCOUNT_NUMBER,Data_MB desc with ur"]),\n #"Added Custom1" = Table.AddColumn(Source, "Line Number", each Text.Middle([CHARGE_ARRANGEMENT_NUMBER],1,14)),\n #"Renamed Columns" = Table.RenameColumns(#"Added Custom1",{{"LINE_DESCRIPTION", "Line Description"}, {"BILL_MONTH", "Bill Month"}}),\n #"Filtered Rows" = Table.SelectRows(#"Renamed Columns", each ([PRODUCT_DESCRIPTION] <> "Sharer Unlimited NZ & Aus mins + Unlimited NZ & Aus texts" and [PRODUCT_DESCRIPTION] <> "Sharer with Data Stretch"))\nin\n #"Filtered Rows"'}''']}) myDF #why isn't this working? 
#this regex works on REGEX 101 : https://regex101.com/r/ILnpS0/1 regex_db =r'(?=Source = DB2.Database)(.*?)(?=\]\))' myDF['SQLDB connection2'] = myDF['conn_str'].str.extract(regex_db ,expand=True) myDF #This is a simplified version of the above REGEX, and works to extracts the text "DB2.Database" #This works fine regex_db2 =r'(DB2.Database)' myDF['SQLDB connection1'] = myDF['conn_str'].str.extract(regex_db2 ,expand=True) myDF Any suggestions on what I am doing wrong?
[ "Try running your regex in dot all mode, so that .* will match across newlines:\nregex_db = r'(?=Source = DB2.Database)(.*?)(?=\\]\\))'\nmyDF[\"SQLDB connection2\"] = myDF[\"conn_str\"].str.extract(regex_db, expand=True, flags=re.S)\nmyDF\n\n" ]
[ 1 ]
[]
[]
[ "pandas", "python", "regex", "regex_lookarounds" ]
stackoverflow_0074622819_pandas_python_regex_regex_lookarounds.txt
Q: How to use mariadb python connector in Docker? I want to use the Python mariadb connector in a python container. Yet, until now, I've met only troubles and now I segfault when trying to pass arguments to my SQL queries. here are my dockerfile and a script that highlight the problem. FROM python:3.11-buster RUN apt-get install gcc wget # https://stackoverflow.com/questions/74429209/mariadb-in-docker-mariadb-connector-python-requires-mariadb-connector-c-3-2 RUN wget https://dlm.mariadb.com/2678579/Connectors/c/connector-c-3.3.3/mariadb-connector-c-3.3.3-debian-buster-amd64.tar.gz -O - | tar -zxf - --strip-components=1 -C /usr WORKDIR /appli RUN pip3 install pynetdicom>=2.0 mariadb==1.1.4 COPY . . CMD ["python3", "AEServer.py"] import mariadb conn = mariadb.connect(user='myself', password='mypass', host=ip, database='db') # Successfully created connection cursor = conn.cursor() data = ('1.2.392', 'NAME^SURNAME^', 'BDAY', 'DX', 1) # Placeholder insert_stmt = ("INSERT Examens VALUES ('%s', '%s', '%s', '%s', %i, 'COMING')" % data) segv_stmt = ("INSERT Examens VALUES (?, ?, ?, ?, ?, 'COMING')", data) segv = "INSERT Examens VALUES (?, ?, ?, ?, ?, 'COMING')" cursor.execute(insert_stmt) # GOOD cursor.execute(segv, data) # Segmentation fault First, I tried adding mariadb in my pip install in the dockerfile. It failed, because I didn't have the mariadb/c connector. Then, I tried adding it by installing libmariadb3 and libmariadb-dev, according to this page of the documentation : https://mariadb.com/docs/ent/connect/programming-languages/c/install/ but I failed again, because the package fetched by the manager weren't up to date, so I had a version conflit when pip installing. Then, I tried the following reference : Mariadb in Docker: MariaDB Connector/Python requires MariaDB Connector/C >= 3.2.4, found version 3.1.16, to wget the connector and compiling it myself in the Dockerfile. I succeeded to create my container and I could create mariadb.coonnection and cursor so I thought it was over. Finally when trying to use insert statement with placeholder (either '%s' or '?'), I am met with a segfault. I tried various associations of sources images (3.11:bullseye, 3.11:buster), connectors (version 3.3.3, 3.2.7) and mariadb (version 1.1.0, 1.1.2, 1.1.4), but all of them segfault. What I want is a container with python3, my dependancy pynetdicom, and the ability to interract safely with a mariadb Database. A Dockerfile fulfilling the prevous prerequisites should be enough for me to move forward, but I also wish to know why things did happen this way. Edit : Posted some credentials used in dev, removed them. A: Even if you installed a newer MariaDB Connector/C version, the preinstalled 3.1.13 version from 3.11-buster is still installed. After installation of MariaDB Connector/C 3.3.3 you have 2 versions installed: /usr/lib/x86_64-linux-gnu/libmariadb.so.3 /usr/lib/mariadb/libmariadb.so.3 Now when running your python script, it crashes in /usr/lib/x86_64-linux-gnu/libmariadb.so.3. To force Python to use the newer MariaDB Connector/C library just add ENV LD_PRELOAD=/usr/lib/mariadb/libmariadb.so or ENV LD_LIBRARY_PATH=/usr/lib/mariadb to your Dockerfile.
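For concreteness, a sketch of the question's Dockerfile with that ENV line added (the wget URL is carried over from the question; apt-get update/-y and the quotes around pynetdicom>=2.0 are small assumed fixes, since an unquoted > is treated as a shell redirect in a RUN line):

# Sketch: question's Dockerfile plus the accepted fix (force the newer
# MariaDB Connector/C build via LD_PRELOAD).
FROM python:3.11-buster
RUN apt-get update && apt-get install -y gcc wget
RUN wget https://dlm.mariadb.com/2678579/Connectors/c/connector-c-3.3.3/mariadb-connector-c-3.3.3-debian-buster-amd64.tar.gz -O - | tar -zxf - --strip-components=1 -C /usr
ENV LD_PRELOAD=/usr/lib/mariadb/libmariadb.so
WORKDIR /appli
RUN pip3 install "pynetdicom>=2.0" mariadb==1.1.4
COPY . .
CMD ["python3", "AEServer.py"]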
How to use mariadb python connector in Docker?
I want to use the Python mariadb connector in a python container. Yet, until now, I've met only troubles and now I segfault when trying to pass arguments to my SQL queries. here are my dockerfile and a script that highlight the problem. FROM python:3.11-buster RUN apt-get install gcc wget # https://stackoverflow.com/questions/74429209/mariadb-in-docker-mariadb-connector-python-requires-mariadb-connector-c-3-2 RUN wget https://dlm.mariadb.com/2678579/Connectors/c/connector-c-3.3.3/mariadb-connector-c-3.3.3-debian-buster-amd64.tar.gz -O - | tar -zxf - --strip-components=1 -C /usr WORKDIR /appli RUN pip3 install pynetdicom>=2.0 mariadb==1.1.4 COPY . . CMD ["python3", "AEServer.py"] import mariadb conn = mariadb.connect(user='myself', password='mypass', host=ip, database='db') # Successfully created connection cursor = conn.cursor() data = ('1.2.392', 'NAME^SURNAME^', 'BDAY', 'DX', 1) # Placeholder insert_stmt = ("INSERT Examens VALUES ('%s', '%s', '%s', '%s', %i, 'COMING')" % data) segv_stmt = ("INSERT Examens VALUES (?, ?, ?, ?, ?, 'COMING')", data) segv = "INSERT Examens VALUES (?, ?, ?, ?, ?, 'COMING')" cursor.execute(insert_stmt) # GOOD cursor.execute(segv, data) # Segmentation fault First, I tried adding mariadb in my pip install in the dockerfile. It failed, because I didn't have the mariadb/c connector. Then, I tried adding it by installing libmariadb3 and libmariadb-dev, according to this page of the documentation : https://mariadb.com/docs/ent/connect/programming-languages/c/install/ but I failed again, because the package fetched by the manager weren't up to date, so I had a version conflit when pip installing. Then, I tried the following reference : Mariadb in Docker: MariaDB Connector/Python requires MariaDB Connector/C >= 3.2.4, found version 3.1.16, to wget the connector and compiling it myself in the Dockerfile. I succeeded to create my container and I could create mariadb.coonnection and cursor so I thought it was over. Finally when trying to use insert statement with placeholder (either '%s' or '?'), I am met with a segfault. I tried various associations of sources images (3.11:bullseye, 3.11:buster), connectors (version 3.3.3, 3.2.7) and mariadb (version 1.1.0, 1.1.2, 1.1.4), but all of them segfault. What I want is a container with python3, my dependancy pynetdicom, and the ability to interract safely with a mariadb Database. A Dockerfile fulfilling the prevous prerequisites should be enough for me to move forward, but I also wish to know why things did happen this way. Edit : Posted some credentials used in dev, removed them.
[ "Even if you installed a newer MariaDB Connector/C version, the preinstalled 3.1.13 version from 3.11-buster is still installed.\nAfter installation of MariaDB Connector/C 3.3.3 you have 2 versions installed:\n\n/usr/lib/x86_64-linux-gnu/libmariadb.so.3\n/usr/lib/mariadb/libmariadb.so.3\n\nNow when running your python script, it crashes in /usr/lib/x86_64-linux-gnu/libmariadb.so.3.\nTo force Python to use the newer MariaDB Connector/C library just add\nENV LD_PRELOAD=/usr/lib/mariadb/libmariadb.so or ENV LD_LIBRARY_PATH=/usr/lib/mariadb\nto your Dockerfile.\n" ]
[ 1 ]
[]
[]
[ "docker", "mariadb", "mariadb_connector_c", "python" ]
stackoverflow_0074613306_docker_mariadb_mariadb_connector_c_python.txt
Q: Having trouble connecting with MySQL.connector in python I am learning how to do MySQL in python using the mysql.connector module. However, whenever I try creating a connection, I get the error "mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on 'localhost:3306' (10061 No connection could be made because the target machine actively refused it)". I done the exact same as the example that I am using states, and it still isn't working. Below is the two different type of the example provided: import mysql.connector mydb = mysql.connector.connect( host="localhost", user="yourusername", password="yourpassword" ) print(mydb) and import mysql.connector mydb = mysql.connector.connect( host="localhost", user="myusername", password="mypassword" ) print(mydb) I tried exactly what the example said, and it didn't work. I also tried using a different username and password, but kept on getting the same error. Any idea what I can do to get the expected output which, according to the second example that I am referring to, is: <mysql.connector.connection.MySQLConnection object ar 0x016645F0> ? Thank you in advance A: import mySQL.connector Mybd=mysql.connector.connect( host='myusername', user='myusername', password='mypassword', database='mydatabase') Cursor=mycon.cursor() Cursor.execute('show databases') Data=cursor.fetchall() for row in data: Print(row) mycon.close()
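The snippet in the answer above mixes variable names (Mybd vs. mycon) and capitalizes Print, so it will not run as written; a corrected sketch follows, with placeholder credentials. Note that the original 10061 error means nothing was listening on localhost:3306, so a running MySQL server is still required:

# Corrected sketch: consistent names, lowercase print, placeholder credentials.
import mysql.connector

mydb = mysql.connector.connect(
    host="localhost",
    user="myusername",       # assumption: replace with real credentials
    password="mypassword",
    database="mydatabase",
)
cursor = mydb.cursor()
cursor.execute("SHOW DATABASES")
for row in cursor.fetchall():
    print(row)
mydb.close()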
Having trouble connecting with MySQL.connector in python
I am learning how to do MySQL in python using the mysql.connector module. However, whenever I try creating a connection, I get the error "mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on 'localhost:3306' (10061 No connection could be made because the target machine actively refused it)". I done the exact same as the example that I am using states, and it still isn't working. Below is the two different type of the example provided: import mysql.connector mydb = mysql.connector.connect( host="localhost", user="yourusername", password="yourpassword" ) print(mydb) and import mysql.connector mydb = mysql.connector.connect( host="localhost", user="myusername", password="mypassword" ) print(mydb) I tried exactly what the example said, and it didn't work. I also tried using a different username and password, but kept on getting the same error. Any idea what I can do to get the expected output which, according to the second example that I am referring to, is: <mysql.connector.connection.MySQLConnection object ar 0x016645F0> ? Thank you in advance
[ "import mySQL.connector\nMybd=mysql.connector.connect(\n host='myusername',\n user='myusername', \n password='mypassword', \n database='mydatabase')\nCursor=mycon.cursor()\nCursor.execute('show databases')\nData=cursor.fetchall()\nfor row in data:\n Print(row)\nmycon.close()\n\n" ]
[ 0 ]
[]
[]
[ "mysql", "mysql_connector", "python", "python_sql", "sql" ]
stackoverflow_0074590321_mysql_mysql_connector_python_python_sql_sql.txt
Q: Is there a way to interface with Python's SpaCy using Haskell? I've read a little about Haskell's foreign function interface, FFI, and it seems like it can call Python functions, but can it do something complicated, like parse a document using SpaCy, and then access all that document's properties in Haskell? If so, what would that look like? A: I'm a bit late to the party, but nowadays there a number of out of the box options for setting up SpaCy as an API, as described by alvas which do the trick rather nicely. eg. https://github.com/microsoft/cookiecutter-spacy-fastapi
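A hedged sketch of that API approach: a minimal FastAPI service wrapping spaCy (the endpoint name, payload shape, and the en_core_web_sm model are assumptions), which a Haskell HTTP client such as req or http-conduit could then call and decode as JSON:

# Minimal spaCy-over-HTTP sketch; run with an ASGI server such as uvicorn.
import spacy
from fastapi import FastAPI
from pydantic import BaseModel

nlp = spacy.load("en_core_web_sm")
app = FastAPI()

class Payload(BaseModel):
    text: str

@app.post("/parse")
def parse(payload: Payload):
    doc = nlp(payload.text)
    # Return token and entity properties as plain JSON for the Haskell side.
    return {
        "tokens": [{"text": t.text, "pos": t.pos_, "dep": t.dep_} for t in doc],
        "entities": [{"text": e.text, "label": e.label_} for e in doc.ents],
    }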
Is there a way to interface with Python's SpaCy using Haskell?
I've read a little about Haskell's foreign function interface, FFI, and it seems like it can call Python functions, but can it do something complicated, like parse a document using SpaCy, and then access all that document's properties in Haskell? If so, what would that look like?
[ "I'm a bit late to the party, but nowadays there a number of out of the box options for setting up SpaCy as an API, as described by alvas which do the trick rather nicely. eg. https://github.com/microsoft/cookiecutter-spacy-fastapi\n" ]
[ 1 ]
[]
[]
[ "haskell", "nlp", "python", "spacy" ]
stackoverflow_0048014298_haskell_nlp_python_spacy.txt
Q: Add a space in 2D array when writing a text file I am trying to store a 2D vector into a .DAT file and I would like to add a space at the start of every row. An example of a desired output looks like this: 0.0000000E+00 0.0000000E+00 2.0020020E-03 0.0000000E+00 4.0040040E-03 0.0000000E+00 6.0060060E-03 0.0000000E+00 8.0080080E-03 0.0000000E+00 1.0010010E-02 0.0000000E+00 1.2012012E-02 0.0000000E+00 You can see at the front of 0, 2e-3, 4e-3, etc. there is a space. My code is trying to do that way data = np.column_stack((x, y)) with open('output.dat', 'w') as datfile: for _ in range(N): np.savetxt(datfile, data, delimiter = " ") The current output looks like this: 0.000000000000000000e+00 0.000000000000000000e+00 1.250156269533691795e-04 0.000000000000000000e+00 2.500312539067383591e-04 0.000000000000000000e+00 3.750468808601075386e-04 0.000000000000000000e+00 5.000625078134767181e-04 0.000000000000000000e+00 6.250781347668459519e-04 0.000000000000000000e+00 7.500937617202150772e-04 0.000000000000000000e+00 As you can see, there is no space at the front of every line. Do you have any solutions for this? Thanks! A: Consider using fmt argument to np.savetxt function, please note this one will set also set the number precision the same as in your desired output. Also note the space in the beginning of fmt string: np.savetxt(datfile, data, fmt=" %1.7E %1.7E") More on this in NumPy documentation and Python string module documentation A: Use a loop and f-strings: import numpy as np data = np.zeros((5, 2), dtype=np.float64) with open("out.dat", "w") as fh: for row in data: x, y = row fh.write(f" {x} {y}\n") 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
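A quick self-contained check of the fmt-based answer, using generated x values in place of the question's data (an assumption for illustration); the leading space in the format string is what produces the space at the start of each row:

# Demo: fmt sets both the leading space and the 7-digit E notation.
import numpy as np

x = np.linspace(0.0, 1.2012012e-2, 7)
y = np.zeros_like(x)
data = np.column_stack((x, y))

with open("output.dat", "w") as datfile:
    np.savetxt(datfile, data, fmt=" %1.7E %1.7E")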
Add a space in 2D array when writing a text file
I am trying to store a 2D vector into a .DAT file and I would like to add a space at the start of every row. An example of a desired output looks like this: 0.0000000E+00 0.0000000E+00 2.0020020E-03 0.0000000E+00 4.0040040E-03 0.0000000E+00 6.0060060E-03 0.0000000E+00 8.0080080E-03 0.0000000E+00 1.0010010E-02 0.0000000E+00 1.2012012E-02 0.0000000E+00 You can see at the front of 0, 2e-3, 4e-3, etc. there is a space. My code is trying to do that way data = np.column_stack((x, y)) with open('output.dat', 'w') as datfile: for _ in range(N): np.savetxt(datfile, data, delimiter = " ") The current output looks like this: 0.000000000000000000e+00 0.000000000000000000e+00 1.250156269533691795e-04 0.000000000000000000e+00 2.500312539067383591e-04 0.000000000000000000e+00 3.750468808601075386e-04 0.000000000000000000e+00 5.000625078134767181e-04 0.000000000000000000e+00 6.250781347668459519e-04 0.000000000000000000e+00 7.500937617202150772e-04 0.000000000000000000e+00 As you can see, there is no space at the front of every line. Do you have any solutions for this? Thanks!
[ "Consider using fmt argument to np.savetxt function, please note this one will set also set the number precision the same as in your desired output. Also note the space in the beginning of fmt string:\nnp.savetxt(datfile, data, fmt=\" %1.7E %1.7E\")\nMore on this in NumPy documentation and Python string module documentation\n", "Use a loop and f-strings:\nimport numpy as np\n\ndata = np.zeros((5, 2), dtype=np.float64)\n\nwith open(\"out.dat\", \"w\") as fh:\n for row in data:\n x, y = row\n fh.write(f\" {x} {y}\\n\")\n\n 0.0 0.0\n 0.0 0.0\n 0.0 0.0\n 0.0 0.0\n 0.0 0.0\n\n" ]
[ 1, 0 ]
[]
[]
[ "genfromtxt", "numpy", "numpy_ndarray", "python" ]
stackoverflow_0074622918_genfromtxt_numpy_numpy_ndarray_python.txt
Q: Unexpected token # in JSON at position 0 when opening ipynb file in vscode I have a ipynb file (a jupyter notebook) which I am opening in vscode with python extension. I receive the error in the title Unexpected token # in JSON at position 0 which I dont understand at all, since the file is supposed to be interpreted as a python file. I can change the extension to .py and its opened fine by vscode, but I dont have the decorators to run/debug cells like define here (https://code.visualstudio.com/docs/python/jupyter-support-py). I know the file is correct because I have use it in another vscode installation in another computer and works fine. I have no idea what might be misconfigured in my environment... Any tops would be really helpful. Here is the actual python code I have that its producing the mentioned error my actual environment. issue.ipynb # %% import world as w import world_eg as weg import world_case1 as wc1 import simulator_static as simulation import numpy as np from scipy.optimize import minimize import matplotlib.pyplot as plt``` From the error, I understand that is parsing the file as a JSON file and the first line, which contains the #, fails. A: I had a similar problem and when I opened the notebook with an editor I saw I had merge markings that git had put into the file. e.g. <<<<<<< HEAD ... ======= ... >>>>>>> ... Cleaning up these, allowed jupyter to parse the file and run the notebook. A: This happens when you make a request to the server and parse the response as JSON, but it’s not JSON. JSON should start with a valid JSON value – an object, array, string, number, or false/true/null. The root cause is that the server returned HTML or some other non-JSON string. I've tried your code in my project and nothing wrong. everything looks fine. Check the Jupyter Server network, try to restart vscode and recreate a new juypter file, and see if the problem goes away. [edit] like the above screenshot shows, type # %% will add a new cell. Equally, when you open a .ipynb file, if python extension distinguishes the # %%, button run cell | debug cell will be displayed automatically for you to do further test. you can copy your code without # %% to a new created blank juypter file, then click the button export as and select Python Script to got button Run Cell | Debug Cell . OR reinstall python extension and try again. A: I had the same issue and for me that problem was solved simply by deleting the underscore (_) from the file name. I don't know why but it works. A: .ipynb files aren't actually Python source code files - they're encoded as JSON files. If you create a new notebook, then rename the file extension or open it in some text editor, you'll see the structure of the underlying JSON file. When VS Code tries to interpret your file, it's trying to parse Python source as a JSON object, which will obviously fail, and lead to the otherwise cryptic error of an unexpected token. In other words, it's not possible to convert a Python script into a notebook just by changing the file extension. Manually copying & pasting the code around will work, or you could try googling for some tool, e.g. https://github.com/remykarem/python2jupyter A: I had a similar issue when creating a new file in VS Code which I saved as .ipynb. After closing the file, I was not able to reopen and I received the same error message as above. For me, simply closing and restart VS Code did the trick. Afterwards, the .ipynb-file opened as expected. 
A: notepad encoding format Resaving the document using another encoding format solved my problem. A regular.ipynb file (left image) is saved using Unix(LF) format, but the file that couldn't open was saved using UTF (right image) A: "Unable to open 'XXX.ipynb'" "Unexpected token < in JSON at position XXX" For me, I have similar issues when I am using git and reopen ipynb files in vscode. To fix it, pretty easy! (1) Open and edit the file in json format and ACCEPT the current changes or incoming changes. (2) Save and Close the edited file and reopen the file. Everything works fine! Good luck!
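A small diagnostic sketch that covers all of these cases: since .ipynb files are JSON, parsing one with the standard json module pinpoints the offending position (a merge marker, stray Python source, or an encoding problem); the filename here is a placeholder:

# Parse the notebook as JSON and report exactly where it breaks.
import json

with open("issue.ipynb", "r", encoding="utf-8") as fh:
    try:
        json.load(fh)
        print("valid notebook JSON")
    except json.JSONDecodeError as err:
        print(f"broken at line {err.lineno}, column {err.colno}: {err.msg}")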
Unexpected token # in JSON at position 0 when opening ipynb file in vscode
I have a ipynb file (a jupyter notebook) which I am opening in vscode with python extension. I receive the error in the title Unexpected token # in JSON at position 0 which I dont understand at all, since the file is supposed to be interpreted as a python file. I can change the extension to .py and its opened fine by vscode, but I dont have the decorators to run/debug cells like define here (https://code.visualstudio.com/docs/python/jupyter-support-py). I know the file is correct because I have use it in another vscode installation in another computer and works fine. I have no idea what might be misconfigured in my environment... Any tops would be really helpful. Here is the actual python code I have that its producing the mentioned error my actual environment. issue.ipynb # %% import world as w import world_eg as weg import world_case1 as wc1 import simulator_static as simulation import numpy as np from scipy.optimize import minimize import matplotlib.pyplot as plt``` From the error, I understand that is parsing the file as a JSON file and the first line, which contains the #, fails.
[ "I had a similar problem and when I opened the notebook with an editor I saw I had merge markings that git had put into the file. e.g.\n<<<<<<< HEAD\n...\n=======\n...\n>>>>>>> ...\n\nCleaning up these, allowed jupyter to parse the file and run the notebook.\n", "This happens when you make a request to the server and parse the response as JSON, but it’s not JSON. JSON should start with a valid JSON value – an object, array, string, number, or false/true/null. The root cause is that the server returned HTML or some other non-JSON string.\nI've tried your code in my project and nothing wrong. everything looks fine. Check the Jupyter Server network, try to restart vscode and recreate a new juypter file, and see if the problem goes away.\n\n[edit]\nlike the above screenshot shows, type # %% will add a new cell. Equally, when you open a .ipynb file, if python extension distinguishes the # %%, button run cell | debug cell will be displayed automatically for you to do further test.\n\nyou can copy your code without # %% to a new created blank juypter file, then\nclick the button export as and select Python Script to got button Run Cell | Debug Cell .\nOR reinstall python extension and try again.\n", "I had the same issue and for me that problem was solved simply by deleting the underscore (_) from the file name. I don't know why but it works.\n", ".ipynb files aren't actually Python source code files - they're encoded as JSON files. If you create a new notebook, then rename the file extension or open it in some text editor, you'll see the structure of the underlying JSON file.\nWhen VS Code tries to interpret your file, it's trying to parse Python source as a JSON object, which will obviously fail, and lead to the otherwise cryptic error of an unexpected token.\nIn other words, it's not possible to convert a Python script into a notebook just by changing the file extension. Manually copying & pasting the code around will work, or you could try googling for some tool, e.g. https://github.com/remykarem/python2jupyter\n", "I had a similar issue when creating a new file in VS Code which I saved as .ipynb. After closing the file, I was not able to reopen and I received the same error message as above.\nFor me, simply closing and restart VS Code did the trick. Afterwards, the .ipynb-file opened as expected.\n", "notepad encoding format\nResaving the document using another encoding format solved my problem. A regular.ipynb file (left image) is saved using Unix(LF) format, but the file that couldn't open was saved using UTF (right image)\n", "\n\"Unable to open 'XXX.ipynb'\"\n\"Unexpected token < in JSON at position XXX\"\nFor me, I have similar issues when I am using git and reopen ipynb files in vscode.\nTo fix it, pretty easy!\n(1) Open and edit the file in json format and ACCEPT the current\nchanges or incoming changes.\n(2) Save and Close the edited file and reopen the file. Everything\nworks fine!\nGood luck!\n\n" ]
[ 4, 3, 3, 1, 0, 0, 0 ]
[]
[]
[ "jupyter", "python", "visual_studio_code" ]
stackoverflow_0063238337_jupyter_python_visual_studio_code.txt
Q: Can the transparency of the link be specified in the list? Abstract I am trying to find a way to make the nodes transparent. In the simple case, the transparency of a node is simply specified by the "alpha" option of "nx.draw". However, I thought I could specify the transparency with a list, just as I specified the color with a list, but failed. Do you know the reason for this? import networkx as nx G = nx.Graph() G.add_node('A') G.add_node('B') G.add_node('C') G.add_node('D') G.add_node('E') G.add_edge('A', 'B') G.add_edge('A', 'C') G.add_edge('A', 'D') G.add_edge('B', 'D') G.add_edge('D', 'E') node_alpha = [1, 0.8, 0.6, 0.4, 0.2] nx.draw(G, alpha=node_alpha, with_labels = True) plt.figure() The following error occurred Traceback (most recent call last): File "C:/Program Files/Python36/transparency.py", line 19, in <module> nx.draw(G, alpha=node_alpha, with_labels = True) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\networkx\drawing\nx_pylab.py", line 123, in draw draw_networkx(G, pos=pos, ax=ax, **kwds) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\networkx\drawing\nx_pylab.py", line 336, in draw_networkx draw_networkx_edges(G, pos, arrows=arrows, **edge_kwds) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\networkx\drawing\nx_pylab.py", line 684, in draw_networkx_edges alpha=alpha, File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\collections.py", line 1391, in __init__ **kwargs) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\cbook\deprecation.py", line 411, in wrapper return func(*inner_args, **inner_kwargs) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\collections.py", line 213, in __init__ self.update(kwargs) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\artist.py", line 998, in update ret.append(func(v)) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\collections.py", line 834, in set_alpha super().set_alpha(alpha) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\artist.py", line 930, in set_alpha raise TypeError('alpha must be a float or None') TypeError: alpha must be a float or None We believe that the image will be as follows A: Passing an array or a list of alpha values is supported only in the nx.draw_networkx_nodes: alpha : float or array of floats (default=None) The node transparency. This can be a single alpha value, in which case it will be applied to all the nodes of color. Otherwise, if it is an array, the elements of alpha will be applied to the colors in order (cycling through alpha multiple times if necessary). For nx.draw and nx.draw_networkx_edges alpha is either None or float. There is no good reason why drawing edges should not support an array/list of alpha values, so this looks like a nice potential PR. In the meantime, if the network is substantially small, it's possible to manually draw one edge at a time, specifying the desired alpha for each edge. Rough pseudocode: list_alphas = [0.2, 0.3, ...] for alpha, edge in zip(list_alphas, G.edges()): draw_networkx_edges(G, edgelist=edge, alpha=alpha) If there are groups of edges with the same transparency value, then the loop can be reduced. In general, this won't scale well, so for larger networks it's better to refactor the function (and submit a PR).
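A runnable version of those two ideas combined, using the question's graph with a recent networkx (per the docstring quoted above): draw_networkx_nodes takes the alpha list directly, while edges are drawn one per call. Note that edgelist expects a list of edges and pos must be passed explicitly; the edge_alpha values are illustrative:

# Per-node alpha via a list, per-edge alpha via one draw call per edge.
import matplotlib.pyplot as plt
import networkx as nx

G = nx.Graph([('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'D'), ('D', 'E')])
pos = nx.spring_layout(G, seed=1)

node_alpha = [1, 0.8, 0.6, 0.4, 0.2]
nx.draw_networkx_nodes(G, pos, alpha=node_alpha)
nx.draw_networkx_labels(G, pos)

edge_alpha = [0.2, 0.4, 0.6, 0.8, 1.0]
for alpha, edge in zip(edge_alpha, G.edges()):
    nx.draw_networkx_edges(G, pos, edgelist=[edge], alpha=alpha)

plt.show()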
Can the transparency of the links be specified as a list?
Abstract I am trying to find a way to make the nodes transparent. In the simple case, the transparency of a node is simply specified by the "alpha" option of "nx.draw". However, I thought I could specify the transparency with a list, just as I specified the color with a list, but failed. Do you know the reason for this? import networkx as nx G = nx.Graph() G.add_node('A') G.add_node('B') G.add_node('C') G.add_node('D') G.add_node('E') G.add_edge('A', 'B') G.add_edge('A', 'C') G.add_edge('A', 'D') G.add_edge('B', 'D') G.add_edge('D', 'E') node_alpha = [1, 0.8, 0.6, 0.4, 0.2] nx.draw(G, alpha=node_alpha, with_labels = True) plt.figure() The following error occurred Traceback (most recent call last): File "C:/Program Files/Python36/transparency.py", line 19, in <module> nx.draw(G, alpha=node_alpha, with_labels = True) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\networkx\drawing\nx_pylab.py", line 123, in draw draw_networkx(G, pos=pos, ax=ax, **kwds) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\networkx\drawing\nx_pylab.py", line 336, in draw_networkx draw_networkx_edges(G, pos, arrows=arrows, **edge_kwds) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\networkx\drawing\nx_pylab.py", line 684, in draw_networkx_edges alpha=alpha, File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\collections.py", line 1391, in __init__ **kwargs) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\cbook\deprecation.py", line 411, in wrapper return func(*inner_args, **inner_kwargs) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\collections.py", line 213, in __init__ self.update(kwargs) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\artist.py", line 998, in update ret.append(func(v)) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\collections.py", line 834, in set_alpha super().set_alpha(alpha) File "C:\Users\████\AppData\Roaming\Python\Python36\site-packages\matplotlib\artist.py", line 930, in set_alpha raise TypeError('alpha must be a float or None') TypeError: alpha must be a float or None We believe that the image will be as follows
[ "Passing an array or a list of alpha values is supported only in the nx.draw_networkx_nodes:\n\nalpha : float or array of floats (default=None)\nThe node transparency. This can be a single alpha value,\nin which case it will be applied to all the nodes of color. Otherwise,\nif it is an array, the elements of alpha will be applied to the colors\nin order (cycling through alpha multiple times if necessary).\n\nFor nx.draw and nx.draw_networkx_edges alpha is either None or float.\nThere is no good reason why drawing edges should not support an array/list of alpha values, so this looks like a nice potential PR.\nIn the meantime, if the network is substantially small, it's possible to manually draw one edge at a time, specifying the desired alpha for each edge. Rough pseudocode:\nlist_alphas = [0.2, 0.3, ...]\n\nfor alpha, edge in zip(list_alphas, G.edges()):\n draw_networkx_edges(G, edgelist=edge, alpha=alpha)\n\nIf there are groups of edges with the same transparency value, then the loop can be reduced. In general, this won't scale well, so for larger networks it's better to refactor the function (and submit a PR).\n" ]
[ 0 ]
[]
[]
[ "alpha", "matplotlib", "networkx", "python", "python_3.x" ]
stackoverflow_0074622898_alpha_matplotlib_networkx_python_python_3.x.txt
Q: How to transform Row data into column data using pandas? I exported many reports from my system in xls in the same specific format and need to change them to another format: Basically for every item description I need to insert the corresponding Account series it is in column J using pandas. Data CP N0 N1 ITEM DEBIT CREDIT NET D/C Account: (663) 31/10/2022 595 12 ITEM DESCRIPTION 4859 5.564,40 59.786,28 C Account: (664) 31/10/2022 596 12 ITEM DESCRIPTION 234243 3.475,34 15.492,41 D 31/10/2022 103 14 ITEM DESCRIPTION 456456 0,01 15.492,40 C Account: (678) 31/10/2022 597 12 ITEM DESCRIPTION 2332 6.555,27 71.503,39 C Account: (689) 31/10/2022 608 13 ITEM DESCRIPTION 66546 266.516,00 504.013,87 D 31/10/2022 608 13 ITEM DESCRIPTION 57567 5.578,67 7.656.192,54 D Account: (500) 31/10/2022 608 13 ITEM DESCRIPTION 345345 54.405,00 645.175,00 D I tried to write a script but couldn't fetch a logic to fill the column. Could someone help me? Desired format: Data CP N0 N1 ITEM DEBIT CREDIT NET D/C Account Account: (663) 31/10/2022 595 12 ITEM DESCRIPTION 4859 5.564,40 59.786,28 C Account: (663) Account: (664) 31/10/2022 596 12 ITEM DESCRIPTION 234243 3.475,34 15.492,41 D Account: (664) 31/10/2022 103 14 ITEM DESCRIPTION 456456 0,01 15.492,40 C Account: (664) Account: (678) 31/10/2022 597 12 ITEM DESCRIPTION 2332 6.555,27 71.503,39 C Account: (678) Account: (689) 31/10/2022 608 13 ITEM DESCRIPTION 66546 266.516,00 504.013,87 D Account: (689) 31/10/2022 608 13 ITEM DESCRIPTION 57567 5.578,67 7.656.192,54 D Account: (689) Account: (500) 31/10/2022 608 13 ITEM DESCRIPTION 345345 54.405,00 645.175,00 D Account: (500) A: try this: mask = df['Data'].str.startswith('Account') df['Account'] = df.groupby(mask.cumsum())['Data'].transform('first').mask(mask) print(df) # df data like this: data = [{'Data': 'Account: (663)', 'CP': 'nan', 'N0': 'nan', 'N1': 'nan', 'ITEM': 'nan', 'DEBIT': 'nan', 'CREDIT': 'nan', 'NET': 'nan', 'D/C': 'nan', 'Account': 'nan'}, {'Data': '31/10/2022', 'CP': 595.0, 'N0': 'nan', 'N1': 12.0, 'ITEM': 'ITEM DESCRIPTION 4859', 'DEBIT': '5.564,40', 'CREDIT': 'nan', 'NET': '59.786,28', 'D/C': 'C', 'Account': 'Account: (663)'}, {'Data': 'Account: (664)', 'CP': 'nan', 'N0': 'nan', 'N1': 'nan', 'ITEM': 'nan', 'DEBIT': 'nan', 'CREDIT': 'nan', 'NET': 'nan', 'D/C': 'nan', 'Account': 'nan'}, {'Data': '31/10/2022', 'CP': 596.0, 'N0': 'nan', 'N1': 12.0, 'ITEM': 'ITEM DESCRIPTION 234243', 'DEBIT': '3.475,34', 'CREDIT': 'nan', 'NET': '15.492,41', 'D/C': 'D', 'Account': 'Account: (664)'}, {'Data': '31/10/2022', 'CP': 103.0, 'N0': 'nan', 'N1': 14.0, 'ITEM': 'ITEM DESCRIPTION 456456', 'DEBIT': 'nan', 'CREDIT': '0,01', 'NET': '15.492,40', 'D/C': 'C', 'Account': 'Account: (664)'}, {'Data': 'Account: (678)', 'CP': 'nan', 'N0': 'nan', 'N1': 'nan', 'ITEM': 'nan', 'DEBIT': 'nan', 'CREDIT': 'nan', 'NET': 'nan', 'D/C': 'nan', 'Account': 'nan'}, {'Data': '31/10/2022', 'CP': 597.0, 'N0': 'nan', 'N1': 12.0, 'ITEM': 'ITEM DESCRIPTION 2332', 'DEBIT': '6.555,27', 'CREDIT': 'nan', 'NET': '71.503,39', 'D/C': 'C', 'Account': 'Account: (678)'}, {'Data': 'Account: (689)', 'CP': 'nan', 'N0': 'nan', 'N1': 'nan', 'ITEM': 'nan', 'DEBIT': 'nan', 'CREDIT': 'nan', 'NET': 'nan', 'D/C': 'nan', 'Account': 'nan'}, {'Data': '31/10/2022', 'CP': 608.0, 'N0': 'nan', 'N1': 13.0, 'ITEM': 'ITEM DESCRIPTION 66546', 'DEBIT': '266.516,00', 'CREDIT': 'nan', 'NET': '504.013,87', 'D/C': 'D', 'Account': 'Account: (689)'}, {'Data': '31/10/2022', 'CP': 608.0, 'N0': 'nan', 'N1': 13.0, 'ITEM': 'ITEM DESCRIPTION 57567', 'DEBIT': '5.578,67', 'CREDIT': 
'nan', 'NET': '7.656.192,54', 'D/C': 'D', 'Account': 'Account: (689)'}, {'Data': 'Account: (500)', 'CP': 'nan', 'N0': 'nan', 'N1': 'nan', 'ITEM': 'nan', 'DEBIT': 'nan', 'CREDIT': 'nan', 'NET': 'nan', 'D/C': 'nan', 'Account': 'nan'}, {'Data': '31/10/2022', 'CP': 608.0, 'N0': 'nan', 'N1': 13.0, 'ITEM': 'ITEM DESCRIPTION 345345', 'DEBIT': '54.405,00', 'CREDIT': 'nan', 'NET': '645.175,00', 'D/C': 'D', 'Account': 'Account: (500)'}] df = pd.DataFrame(data).replace('nan', float('nan')) A: This is not the most "pythonic" way of doing this, but because finding the most efficient way is probably not so important here, this should work fine: data_col = df['Data'] def find_last_acct_entry(date): idx = data_col.index(date) ret = idx while not data_col[ret].startswith('Account') and ret >= 1: ret -= 1 return data_col[ret] for idx, row in df.iterrows(): if not row['Data'].startswith('Account'): acct = find_last_acct_entry(row['Data']) df.loc[idx, 'Account'] = acct
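A compact demonstration of the first answer's mask/cumsum idea on a toy frame (the values are illustrative): rows starting with "Account" open a group, transform('first') copies that header down the group, and mask(mask) blanks it on the header rows themselves:

# Toy reproduction of the groupby/transform fill.
import pandas as pd

df = pd.DataFrame({"Data": ["Account: (663)", "31/10/2022",
                            "Account: (664)", "31/10/2022", "31/10/2022"]})

mask = df["Data"].str.startswith("Account")
df["Account"] = df.groupby(mask.cumsum())["Data"].transform("first").mask(mask)
print(df)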
How to transform Row data into column data using pandas?
I exported many reports from my system in xls in the same specific format and need to change them to another format: Basically for every item description I need to insert the corresponding Account series it is in column J using pandas. Data CP N0 N1 ITEM DEBIT CREDIT NET D/C Account: (663) 31/10/2022 595 12 ITEM DESCRIPTION 4859 5.564,40 59.786,28 C Account: (664) 31/10/2022 596 12 ITEM DESCRIPTION 234243 3.475,34 15.492,41 D 31/10/2022 103 14 ITEM DESCRIPTION 456456 0,01 15.492,40 C Account: (678) 31/10/2022 597 12 ITEM DESCRIPTION 2332 6.555,27 71.503,39 C Account: (689) 31/10/2022 608 13 ITEM DESCRIPTION 66546 266.516,00 504.013,87 D 31/10/2022 608 13 ITEM DESCRIPTION 57567 5.578,67 7.656.192,54 D Account: (500) 31/10/2022 608 13 ITEM DESCRIPTION 345345 54.405,00 645.175,00 D I tried to write a script but couldn't fetch a logic to fill the column. Could someone help me? Desired format: Data CP N0 N1 ITEM DEBIT CREDIT NET D/C Account Account: (663) 31/10/2022 595 12 ITEM DESCRIPTION 4859 5.564,40 59.786,28 C Account: (663) Account: (664) 31/10/2022 596 12 ITEM DESCRIPTION 234243 3.475,34 15.492,41 D Account: (664) 31/10/2022 103 14 ITEM DESCRIPTION 456456 0,01 15.492,40 C Account: (664) Account: (678) 31/10/2022 597 12 ITEM DESCRIPTION 2332 6.555,27 71.503,39 C Account: (678) Account: (689) 31/10/2022 608 13 ITEM DESCRIPTION 66546 266.516,00 504.013,87 D Account: (689) 31/10/2022 608 13 ITEM DESCRIPTION 57567 5.578,67 7.656.192,54 D Account: (689) Account: (500) 31/10/2022 608 13 ITEM DESCRIPTION 345345 54.405,00 645.175,00 D Account: (500)
[ "try this:\nmask = df['Data'].str.startswith('Account')\ndf['Account'] = df.groupby(mask.cumsum())['Data'].transform('first').mask(mask)\nprint(df)\n\n# df data like this:\n\ndata = [{'Data': 'Account: (663)',\n 'CP': 'nan',\n 'N0': 'nan',\n 'N1': 'nan',\n 'ITEM': 'nan',\n 'DEBIT': 'nan',\n 'CREDIT': 'nan',\n 'NET': 'nan',\n 'D/C': 'nan',\n 'Account': 'nan'},\n {'Data': '31/10/2022',\n 'CP': 595.0,\n 'N0': 'nan',\n 'N1': 12.0,\n 'ITEM': 'ITEM DESCRIPTION 4859',\n 'DEBIT': '5.564,40',\n 'CREDIT': 'nan',\n 'NET': '59.786,28',\n 'D/C': 'C',\n 'Account': 'Account: (663)'},\n {'Data': 'Account: (664)',\n 'CP': 'nan',\n 'N0': 'nan',\n 'N1': 'nan',\n 'ITEM': 'nan',\n 'DEBIT': 'nan',\n 'CREDIT': 'nan',\n 'NET': 'nan',\n 'D/C': 'nan',\n 'Account': 'nan'},\n {'Data': '31/10/2022',\n 'CP': 596.0,\n 'N0': 'nan',\n 'N1': 12.0,\n 'ITEM': 'ITEM DESCRIPTION 234243',\n 'DEBIT': '3.475,34',\n 'CREDIT': 'nan',\n 'NET': '15.492,41',\n 'D/C': 'D',\n 'Account': 'Account: (664)'},\n {'Data': '31/10/2022',\n 'CP': 103.0,\n 'N0': 'nan',\n 'N1': 14.0,\n 'ITEM': 'ITEM DESCRIPTION 456456',\n 'DEBIT': 'nan',\n 'CREDIT': '0,01',\n 'NET': '15.492,40',\n 'D/C': 'C',\n 'Account': 'Account: (664)'},\n {'Data': 'Account: (678)',\n 'CP': 'nan',\n 'N0': 'nan',\n 'N1': 'nan',\n 'ITEM': 'nan',\n 'DEBIT': 'nan',\n 'CREDIT': 'nan',\n 'NET': 'nan',\n 'D/C': 'nan',\n 'Account': 'nan'},\n {'Data': '31/10/2022',\n 'CP': 597.0,\n 'N0': 'nan',\n 'N1': 12.0,\n 'ITEM': 'ITEM DESCRIPTION 2332',\n 'DEBIT': '6.555,27',\n 'CREDIT': 'nan',\n 'NET': '71.503,39',\n 'D/C': 'C',\n 'Account': 'Account: (678)'},\n {'Data': 'Account: (689)',\n 'CP': 'nan',\n 'N0': 'nan',\n 'N1': 'nan',\n 'ITEM': 'nan',\n 'DEBIT': 'nan',\n 'CREDIT': 'nan',\n 'NET': 'nan',\n 'D/C': 'nan',\n 'Account': 'nan'},\n {'Data': '31/10/2022',\n 'CP': 608.0,\n 'N0': 'nan',\n 'N1': 13.0,\n 'ITEM': 'ITEM DESCRIPTION 66546',\n 'DEBIT': '266.516,00',\n 'CREDIT': 'nan',\n 'NET': '504.013,87',\n 'D/C': 'D',\n 'Account': 'Account: (689)'},\n {'Data': '31/10/2022',\n 'CP': 608.0,\n 'N0': 'nan',\n 'N1': 13.0,\n 'ITEM': 'ITEM DESCRIPTION 57567',\n 'DEBIT': '5.578,67',\n 'CREDIT': 'nan',\n 'NET': '7.656.192,54',\n 'D/C': 'D',\n 'Account': 'Account: (689)'},\n {'Data': 'Account: (500)',\n 'CP': 'nan',\n 'N0': 'nan',\n 'N1': 'nan',\n 'ITEM': 'nan',\n 'DEBIT': 'nan',\n 'CREDIT': 'nan',\n 'NET': 'nan',\n 'D/C': 'nan',\n 'Account': 'nan'},\n {'Data': '31/10/2022',\n 'CP': 608.0,\n 'N0': 'nan',\n 'N1': 13.0,\n 'ITEM': 'ITEM DESCRIPTION 345345',\n 'DEBIT': '54.405,00',\n 'CREDIT': 'nan',\n 'NET': '645.175,00',\n 'D/C': 'D',\n 'Account': 'Account: (500)'}]\n\ndf = pd.DataFrame(data).replace('nan', float('nan'))\n\n", "This is not the most \"pythonic\" way of doing this, but because finding the most efficient way is probably not so important here, this should work fine:\ndata_col = df['Data']\n\ndef find_last_acct_entry(date):\n idx = data_col.index(date)\n ret = idx\n while not data_col[ret].startswith('Account') and ret >= 1:\n ret -= 1\n return data_col[ret]\n\nfor idx, row in df.iterrows():\n if not row['Data'].startswith('Account'):\n acct = find_last_acct_entry(row['Data'])\n df.loc[idx, 'Account'] = acct\n\n" ]
[ 0, 0 ]
[]
[]
[ "etl", "pandas", "python" ]
stackoverflow_0074622662_etl_pandas_python.txt
Q: looping My Selenium Python webdriver code beginner I am a beginner and I wrote my first webdriver selenium python code. My question is how can I loop this code infinitely- I want the webdriver to close and then RE-open to continue the same code over and over. Can I add some kind of loop at the end of my code in order to start it up so i can leave my PC and not have to run it each time manually? **** I want the webdriver to open and close so i can use a diff proxy each time***** Thank you ` from seleniumwire import webdriver import time from selenium.webdriver.common.by import By from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.action_chains import ActionChains options = { 'proxy': { 'https': 'XXXXXXXXX:3402', 'no_proxy': 'XXXXXXXX:3403' } } driver = webdriver.Chrome('your_absolute_path', seleniumwire_options=options) driver.get('https://www.google.com') driver.implicitly_wait(30) driver.find_element(by=By.XPATH, value="/XYZXYZ").send_keys("HI") time.sleep(5) driver.find_element(by=By.XPATH, value="/XYZXYZ").click() driver.close() ` Couldnt find anything online for this-stuck A: You can use While loop: while True: options = { 'proxy': { 'https': 'XXXXXXXXX:3402', 'no_proxy': 'XXXXXXXX:3403' } } driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), seleniumwire_options=options) driver.get('https://www.google.com') driver.implicitly_wait(30) driver.find_element(by=By.XPATH, value="/XYZXYZ").send_keys("HI") time.sleep(5) driver.find_element(by=By.XPATH, value="/XYZXYZ").click() driver.close() # add a condition to quit the loop, like: if <condition>: break
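A hedged sketch of that loop with a fresh proxy per pass, since the goal is a different proxy each time: the proxies list is a placeholder, try/finally guarantees teardown, and driver.quit() (rather than close()) shuts the whole browser down before the next iteration:

# One full open/work/quit cycle per proxy in the pool.
import time
from seleniumwire import webdriver
from selenium.webdriver.common.by import By

proxies = ["XXXXXXXXX:3402", "YYYYYYYYY:3402"]  # assumption: your proxy pool

for proxy in proxies:
    options = {"proxy": {"https": proxy}}
    driver = webdriver.Chrome(seleniumwire_options=options)
    try:
        driver.get("https://www.google.com")
        driver.implicitly_wait(30)
        driver.find_element(by=By.XPATH, value="/XYZXYZ").send_keys("HI")
        time.sleep(5)
        driver.find_element(by=By.XPATH, value="/XYZXYZ").click()
    finally:
        driver.quit()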
Looping my Selenium Python webdriver code (beginner)
I am a beginner and I wrote my first Selenium Python webdriver code. My question is: how can I loop this code infinitely? I want the webdriver to close and then re-open to continue the same code over and over. Can I add some kind of loop at the end of my code in order to start it up, so I can leave my PC and not have to run it each time manually? (I want the webdriver to open and close so I can use a different proxy each time.) Thank you from seleniumwire import webdriver import time from selenium.webdriver.common.by import By from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.action_chains import ActionChains options = { 'proxy': { 'https': 'XXXXXXXXX:3402', 'no_proxy': 'XXXXXXXX:3403' } } driver = webdriver.Chrome('your_absolute_path', seleniumwire_options=options) driver.get('https://www.google.com') driver.implicitly_wait(30) driver.find_element(by=By.XPATH, value="/XYZXYZ").send_keys("HI") time.sleep(5) driver.find_element(by=By.XPATH, value="/XYZXYZ").click() driver.close() I couldn't find anything online for this - stuck.
[ "You can use While loop:\nwhile True:\n options = {\n 'proxy': {\n 'https': 'XXXXXXXXX:3402',\n 'no_proxy': 'XXXXXXXX:3403'\n }\n }\n\n driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), seleniumwire_options=options)\n driver.get('https://www.google.com')\n driver.implicitly_wait(30)\n driver.find_element(by=By.XPATH, value=\"/XYZXYZ\").send_keys(\"HI\")\n time.sleep(5)\n driver.find_element(by=By.XPATH, value=\"/XYZXYZ\").click()\n driver.close()\n\n # add a condition to quit the loop, like:\n if <condition>:\n break\n\n" ]
[ 0 ]
[]
[]
[ "google_chrome", "python", "selenium", "webdriver" ]
stackoverflow_0074622493_google_chrome_python_selenium_webdriver.txt
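A sketch of the loop with a fresh driver (and, hypothetically, a fresh proxy) per iteration; the proxy addresses and XPaths are placeholders carried over from the question, and driver.quit() is used so each browser is fully torn down:

import time
from selenium.webdriver.common.by import By
from seleniumwire import webdriver

proxies = ['XXXXXXXXX:3402', 'YYYYYYYYY:3402']  # placeholder proxy list

for proxy in proxies:  # or `while True:` with a break condition
    options = {'proxy': {'https': proxy}}
    driver = webdriver.Chrome(seleniumwire_options=options)
    try:
        driver.get('https://www.google.com')
        driver.implicitly_wait(30)
        driver.find_element(by=By.XPATH, value="/XYZXYZ").send_keys("HI")
        time.sleep(5)
        driver.find_element(by=By.XPATH, value="/XYZXYZ").click()
    finally:
        driver.quit()  # quit(), not close(), releases the whole browser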
Q: How to find the highest value of a specified group using Python In the example below, how do I find out the highest price of 'mansion' ? Data Description this is a csv dataset contains three columns:h_type,h_price,y_year. Under the first column h_type, there are two different types of house, (mansion and apartment). The row is a list of a transaction. Usage I want to be able to implement a code so when an end user request the price via linebot, it will automatically provide information. since there are tow types of house(mension and apartment), instead of finding a highest price of the whole data, i would like to narrow down to a specified type. bond=pd.read_csv('/content/.., index_col='h_type') if bond.loc['mansion']: #<= how?,bad code here idMax = priceSr.idxmax() if not isnan(idMax): maxSr = df.loc[idMax] if most is None: most = maxSr.copy() else: if float(maxSr['h_price']) > float(most['h_price']): most = maxSr.copy() most = most.to_frame().transpose() print(most, '\n==========') This is the error message 1536 def __nonzero__(self): 1537 raise ValueError( -> 1538 f"The truth value of a {type(self).__name__} is ambiguous. " 1539 "Use a.empty, a.bool(), a.item(), a.any() or a.all()." 1540 ) ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all(). However, when i try this, it works bond=pd.read_csv('/content/...,index_col='h_type') a=bond.loc['mansion'] aMax=a.sort_values(['h_price'],ascending=False).head(1) please give me the advise to modify the bad code up there! thank you! A: The built-in min() and max() have two different signatures that allow you to call them either with an iterable as their first argument or with two or more regular arguments. The signature that accepts a single iterable argument looks something like this: min(iterable, *[, default, key]) -> minimum_value max(iterable, *[, default, key]) -> maximum_value
How to find the highest value of a specified group using Python
In the example below, how do I find out the highest price of 'mansion'? Data Description This is a CSV dataset that contains three columns: h_type, h_price, y_year. Under the first column h_type, there are two different types of house (mansion and apartment). Each row is one transaction. Usage I want to implement code so that when an end user requests the price via linebot, it will automatically provide the information. Since there are two types of house (mansion and apartment), instead of finding the highest price over the whole data, I would like to narrow down to a specified type. bond=pd.read_csv('/content/.., index_col='h_type') if bond.loc['mansion']: #<= how?,bad code here idMax = priceSr.idxmax() if not isnan(idMax): maxSr = df.loc[idMax] if most is None: most = maxSr.copy() else: if float(maxSr['h_price']) > float(most['h_price']): most = maxSr.copy() most = most.to_frame().transpose() print(most, '\n==========') This is the error message 1536 def __nonzero__(self): 1537 raise ValueError( -> 1538 f"The truth value of a {type(self).__name__} is ambiguous. " 1539 "Use a.empty, a.bool(), a.item(), a.any() or a.all()." 1540 ) ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all(). However, when I try this, it works bond=pd.read_csv('/content/...,index_col='h_type') a=bond.loc['mansion'] aMax=a.sort_values(['h_price'],ascending=False).head(1) Please give me advice on how to fix the bad code above! Thank you!
[ "The built-in min() and max() have two different signatures that allow you to call them either with an iterable as their first argument or with two or more regular arguments. The signature that accepts a single iterable argument looks something like this:\nmin(iterable, *[, default, key]) -> minimum_value\nmax(iterable, *[, default, key]) -> maximum_value\n\n" ]
[ 2 ]
[]
[]
[ "python" ]
stackoverflow_0074622979_python.txt
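For the pandas question itself, a groupby does this without any row-by-row loop; a sketch, with a toy bond frame invented in place of the asker's CSV:

import pandas as pd

bond = pd.DataFrame({'h_type': ['mansion', 'apartment', 'mansion'],
                     'h_price': [500, 300, 800],
                     'y_year': [2020, 2020, 2021]})
# Highest price per house type in one step
print(bond.groupby('h_type')['h_price'].max())
# Or the full row of the most expensive mansion
mansions = bond[bond['h_type'] == 'mansion']
print(mansions.loc[mansions['h_price'].idxmax()])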
Q: NameError: name 'Class_Name' is not defined I have an issue which is I keep getting the error of NameError: name 'Class_Name' is not defined. Which I understand. The tricky part is that my code looks something like this: class FirstClass(): second_class: SecondClass def __init__(self): """SOME CODE HERE""" class SecondClass(firsClass: FirstClass): def __init__(self): self.first_class = firstClass So the problem is that no matter how I arrange the code, I will always have one above the other so it will always say that is not defined. How can I solve this? ------------------------SOLUTION---------------------------- I found the solution to this. If you use the import from __future__ import annotations It will read the file and you will be able to call the classes even if they are defined later in the code. A: from pprint import pp class FirstClass(): #for typing, if you want to indicate a class variable second_class: type["SecondClass"] #for typing, if you want to indicate an instance variable second_class_instance: "SecondClass" def __init__(self): """SOME CODE HERE""" self.second_class_instance = self.second_class() #this looks off in the inheritance declaration - dont specify types here #class SecondClass(firsClass: FirstClass): class SecondClass(FirstClass): def __init__(self): #this code will error, watch your cases! #self.first_class = firstClass ... #for doing something with the second_class attribute, later in the runtime #however, no tricks with "SecondClass" quoting allowed, you need #to wait till both FirstClass and SecondClass EXIST, which is why #this line is here, with this indentation FirstClass.second_class = SecondClass first = FirstClass() pp(first) pp(vars(first)) Is doing this a good idea? Probably not, but may there is special, special, justification for tracking a second_class class reference, say to call a class constructor without hardcoding the SecondClass in FirstClass methods. And, in this scheme, I am even less convinced by having SecondClass subclass FirstClass, but... output: % py test_429_declare.py <__main__.FirstClass object at 0x10841f220> {'second_class_instance': <__main__.SecondClass object at 0x10841f250>} and mypy % mypy test_429_declare.py Success: no issues found in 1 source file A: Actually, I found the solution to this. If we use the import from __future__ import annotations it will read the file and it won't have that problem, so we can reference different things in the order that we want. Taking the example: class FirstClass(): second_class: SecondClass def __init__(self): """SOME CODE HERE""" class SecondClass(firsClass: FirstClass): def __init__(self): self.first_class = firstClass This will be resolved as: from __future__ import annotations class FirstClass(): second_class: SecondClass def __init__(self): """SOME CODE HERE""" class SecondClass(firsClass: FirstClass): def __init__(self): self.first_class = firstClass You can find more details in the python documentation: https://docs.python.org/3/library/__future__.html
NameError: name 'Class_Name' is not defined
I have an issue: I keep getting the error NameError: name 'Class_Name' is not defined, which I understand. The tricky part is that my code looks something like this: class FirstClass(): second_class: SecondClass def __init__(self): """SOME CODE HERE""" class SecondClass(firsClass: FirstClass): def __init__(self): self.first_class = firstClass So the problem is that no matter how I arrange the code, one class will always be above the other, so one name will always be reported as not defined. How can I solve this? ------------------------SOLUTION---------------------------- I found the solution to this. If you use the import from __future__ import annotations the interpreter defers evaluating annotations, and you will be able to reference the classes even if they are defined later in the code.
[ "\nfrom pprint import pp\n\nclass FirstClass():\n\n #for typing, if you want to indicate a class variable\n second_class: type[\"SecondClass\"]\n\n #for typing, if you want to indicate an instance variable\n second_class_instance: \"SecondClass\"\n\n def __init__(self):\n \"\"\"SOME CODE HERE\"\"\"\n\n self.second_class_instance = self.second_class()\n\n#this looks off in the inheritance declaration - dont specify types here\n#class SecondClass(firsClass: FirstClass):\n\nclass SecondClass(FirstClass):\n def __init__(self):\n #this code will error, watch your cases!\n #self.first_class = firstClass\n ...\n\n#for doing something with the second_class attribute, later in the runtime\n#however, no tricks with \"SecondClass\" quoting allowed, you need\n#to wait till both FirstClass and SecondClass EXIST, which is why \n#this line is here, with this indentation\nFirstClass.second_class = SecondClass\n\nfirst = FirstClass()\npp(first)\npp(vars(first))\n\n\nIs doing this a good idea? Probably not, but may there is special, special, justification for tracking a second_class class reference, say to call a class constructor without hardcoding the SecondClass in FirstClass methods.\nAnd, in this scheme, I am even less convinced by having SecondClass subclass FirstClass, but...\noutput:\n% py test_429_declare.py\n<__main__.FirstClass object at 0x10841f220>\n{'second_class_instance': <__main__.SecondClass object at 0x10841f250>}\n\nand mypy\n% mypy test_429_declare.py\nSuccess: no issues found in 1 source file\n\n", "Actually, I found the solution to this.\nIf we use the import\nfrom __future__ import annotations\nit will read the file and it won't have that problem, so we can reference different things in the order that we want. Taking the example:\nclass FirstClass():\n second_class: SecondClass\n def __init__(self):\n \"\"\"SOME CODE HERE\"\"\"\n\nclass SecondClass(firsClass: FirstClass):\n def __init__(self):\n self.first_class = firstClass\n\nThis will be resolved as:\nfrom __future__ import annotations\n\nclass FirstClass():\n second_class: SecondClass\n def __init__(self):\n \"\"\"SOME CODE HERE\"\"\"\n\nclass SecondClass(firsClass: FirstClass):\n def __init__(self):\n self.first_class = firstClass\n\nYou can find more details in the python documentation: https://docs.python.org/3/library/__future__.html\n" ]
[ 0, 0 ]
[]
[]
[ "class", "nameerror", "python", "structure" ]
stackoverflow_0074410324_class_nameerror_python_structure.txt
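A runnable sketch of the postponed-annotations fix from the solution above; the class bodies are simplified placeholders:

from __future__ import annotations


class FirstClass:
    # Fine now: the annotation is kept as a string and never evaluated eagerly
    second_class: SecondClass

    def __init__(self):
        # Also fine: this body only runs after both classes exist
        self.second_class = SecondClass()


class SecondClass:
    pass


first = FirstClass()
print(type(first.second_class).__name__)  # SecondClass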
Q: python tracing a segmentation fault I'm developing C extensions from python and I obtain some segfaults (inevitable during the development...). I'm searching for a way to display at which line of code the segfault happens (an idea is like tracing every single line of code), how can I do that? A: If you are on linux, run python under gdb gdb python (gdb) run /path/to/script.py ## wait for segfault ## (gdb) backtrace ## stack trace of the c code A: Here's a way to output the filename and line number of every line of Python your code runs: import sys def trace(frame, event, arg): print("%s, %s:%d" % (event, frame.f_code.co_filename, frame.f_lineno)) return trace def test(): print("Line 8") print("Line 9") sys.settrace(trace) test() Output: call, test.py:7 line, test.py:8 Line 8 line, test.py:9 Line 9 return, test.py:9 (You'd probably want to write the trace output to a file, of course.) A: Segfaults from C extensions are very frequently a result of not incrementing a reference count when you create a new reference to an object. That makes them very hard to track down as the segfault occurs only after the last reference is removed from the object, and even then often only when some other object is being allocated. You don't say how much C extension code you have written so far, but if you're just starting out consider whether you can use either ctypes or Cython. Ctypes may not be flexible enough for your needs, but you should be able to link to just about any C library with Cython and have all the reference counts maintained for you automatically. That isn't always sufficient: if your Python objects and any underlying C objects have different lifetimes you can still get problems, but it does simplify things considerably. A: I came here looking for a solution to the same problem, and none of the other answers helped me. What did help was faulthandler, and you can install it in Python 2.7 just using pip install. faulthandler was introduced to Python only in version 3.3, that was released in September 2012, which was after most other answers here were written. A: There are somewhat undocumented python extensions for gdb. From the Python source grab Tools/gdb/libpython.py (it is not included in a normal install). Put this in sys.path Then: # gdb /gps/python2.7_x64/bin/python coredump ... Core was generated by `/usr/bin/python script.py'. Program terminated with signal 11, Segmentation fault. #0 call_function (oparg=<optimized out>, pp_stack=0x7f9084d15dc0) at Python/ceval.c:4037 ... (gdb) python >import libpython > >end (gdb) bt #0 call_function (oparg=<optimized out>, pp_stack=0x7f9084d15dc0) at Python/ceval.c:4037 #1 PyEval_EvalFrameEx (f=f@entry= Frame 0x7f9084d20ad0, for file /usr/lib/python2.7/site-packages/librabbitmq/__init__.py, line 220, in drain_events (self=<Connection(channels={1: <Channel(channel_id=1, connection=<...>, is_open=True, connect_timeout=4, _default_channel=<....(truncated), throwflag=throwflag@entry=0) at Python/ceval.c:2681 ... (gdb) py-list 218 else: 219 timeout = float(timeout) >220 self._basic_recv(timeout) 221 222 def channel(self, channel_id=None): As you can see we now have visibility into the Python stack corresponding with the CPython call chain. Some caveats: Your version of gdb needs to be greater than 7 and it needs to have been compiled with --with-python gdb embeds python (by linking to libpython), it doesn't run it in a subshell. This means that It may not necessarily match the version of python that is on $PATH. 
You need to download libpython.py from whatever version of the Python source that matches whatever gdb is linked to. You may have to run gdb as root - if so you may need to set up sys.path to match that of the code that you are debugging. If you cannot copy libpython.py into sys.path then you can add it's location to sys.path like this: (gdb) python >import sys >sys.path.append('/path/to/containing/dir/') >import libpython > >end This is somewhat poorly documented in the python dev docs, the fedora wiki and the python wiki If you have an older gdb or just can't get this working there is also a gdbinit in the Python source that you can copy to ~/.gdbinit which add some similar functionality A: Here are 3 more alternatives: 1: Executing a script with faulthandler enabled: python3 -X faulthandler your_script.py 2: Executing a script in debug mode (pdb) python3 -m pdb your_script.py and execute the script with the continue command. The gdb tool provided the most information, but none of them print the last executed line number in my script. 3: I ended up using pytest. For this to work, I wrapped my code in a function prefixed with test_ and execute the script like this: pytest your_script.py A: Mark's answer is awesome. If you happen to be on a machine that has lldb readily available in your path, and not gdb (which was my case), Mark's answer becomes: lldb python (lldb) process launch -- /path/to/script.py ## wait for segfault ## (lldb) bt ## stack trace of the c code I wish I would have stumbled on this answer earlier :)
python tracing a segmentation fault
I'm developing C extensions from Python and I get some segfaults (inevitable during development...). I'm searching for a way to display at which line of code the segfault happens (one idea is to trace every single line of code). How can I do that?
[ "If you are on linux, run python under gdb\ngdb python\n(gdb) run /path/to/script.py\n## wait for segfault ##\n(gdb) backtrace\n## stack trace of the c code\n\n", "Here's a way to output the filename and line number of every line of Python your code runs:\nimport sys\n\ndef trace(frame, event, arg):\n print(\"%s, %s:%d\" % (event, frame.f_code.co_filename, frame.f_lineno))\n return trace\n\ndef test():\n print(\"Line 8\")\n print(\"Line 9\")\n\nsys.settrace(trace)\ntest()\n\nOutput:\ncall, test.py:7\nline, test.py:8\nLine 8\nline, test.py:9\nLine 9\nreturn, test.py:9\n\n(You'd probably want to write the trace output to a file, of course.)\n", "Segfaults from C extensions are very frequently a result of not incrementing a reference count when you create a new reference to an object. That makes them very hard to track down as the segfault occurs only after the last reference is removed from the object, and even then often only when some other object is being allocated.\nYou don't say how much C extension code you have written so far, but if you're just starting out consider whether you can use either ctypes or Cython. Ctypes may not be flexible enough for your needs, but you should be able to link to just about any C library with Cython and have all the reference counts maintained for you automatically.\nThat isn't always sufficient: if your Python objects and any underlying C objects have different lifetimes you can still get problems, but it does simplify things considerably.\n", "I came here looking for a solution to the same problem, and none of the other answers helped me. What did help was faulthandler, and you can install it in Python 2.7 just using pip install.\nfaulthandler was introduced to Python only in version 3.3, that was released in September 2012, which was after most other answers here were written.\n", "There are somewhat undocumented python extensions for gdb.\nFrom the Python source grab Tools/gdb/libpython.py (it is not included in a normal install). \nPut this in sys.path\nThen:\n# gdb /gps/python2.7_x64/bin/python coredump\n...\nCore was generated by `/usr/bin/python script.py'.\nProgram terminated with signal 11, Segmentation fault.\n#0 call_function (oparg=<optimized out>, pp_stack=0x7f9084d15dc0) at Python/ceval.c:4037\n...\n(gdb) python\n>import libpython\n>\n>end\n(gdb) bt\n#0 call_function (oparg=<optimized out>, pp_stack=0x7f9084d15dc0) at Python/ceval.c:4037\n#1 PyEval_EvalFrameEx (f=f@entry=\n Frame 0x7f9084d20ad0, \n for file /usr/lib/python2.7/site-packages/librabbitmq/__init__.py, line 220, \n in drain_events (self=<Connection(channels={1: <Channel(channel_id=1, connection=<...>, is_open=True, connect_timeout=4, _default_channel=<....(truncated), throwflag=throwflag@entry=0) at Python/ceval.c:2681\n...\n(gdb) py-list\n 218 else:\n 219 timeout = float(timeout)\n>220 self._basic_recv(timeout)\n 221\n 222 def channel(self, channel_id=None):\n\nAs you can see we now have visibility into the Python stack corresponding with the CPython call chain.\nSome caveats:\n\nYour version of gdb needs to be greater than 7 and it needs to have been compiled with --with-python\ngdb embeds python (by linking to libpython), it doesn't run it in a subshell. This means that It may not necessarily match the version of python that is on $PATH. \nYou need to download libpython.py from whatever version of the Python source that matches whatever gdb is linked to. 
\nYou may have to run gdb as root - if so you may need to set up sys.path to match that of the code that you are debugging. \n\nIf you cannot copy libpython.py into sys.path then you can add it's location to sys.path like this:\n(gdb) python\n>import sys\n>sys.path.append('/path/to/containing/dir/')\n>import libpython\n>\n>end\n\nThis is somewhat poorly documented in the python dev docs, the fedora wiki and the python wiki\nIf you have an older gdb or just can't get this working there is also a gdbinit in the Python source that you can copy to ~/.gdbinit which add some similar functionality \n", "Here are 3 more alternatives:\n1: Executing a script with faulthandler enabled:\npython3 -X faulthandler your_script.py\n\n2: Executing a script in debug mode (pdb)\npython3 -m pdb your_script.py\n\nand execute the script with the continue command.\nThe gdb tool provided the most information, but none of them print the last executed line number in my script.\n3: I ended up using pytest. For this to work, I wrapped my code in a function prefixed with test_ and execute the script like this:\npytest your_script.py\n\n", "Mark's answer is awesome. If you happen to be on a machine that has lldb readily available in your path, and not gdb (which was my case), Mark's answer becomes:\nlldb python\n(lldb) process launch -- /path/to/script.py\n## wait for segfault ##\n(lldb) bt\n## stack trace of the c code\n\nI wish I would have stumbled on this answer earlier :)\n" ]
[ 98, 47, 19, 15, 6, 1, 1 ]
[]
[]
[ "c", "debugging", "python" ]
stackoverflow_0002663841_c_debugging_python.txt
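A tiny demonstration of the faulthandler suggestion; ctypes.string_at(0) is a deliberate null-pointer read used here only to force a crash:

import ctypes
import faulthandler

faulthandler.enable()  # print a Python-level traceback when the process segfaults

ctypes.string_at(0)  # deliberately dereference NULL to trigger the handler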
Q: How to apply a user defined function between rows in pandas using both rows values? I have two rows of data in a Pandas data frame and want to operate each column separately with a function that includes both values e.g. import pandas as pd df = pd.DataFrame({"x": [1, 2], "z": [2, 6], "i": [3, 12], "j": [4, 20], "y": [5, 30]}) x z i j y 0 1 2 3 4 5 1 2 6 12 20 30 The function is something like the row 2 val minus row 1 val, divided by the latter - for each column separately e.g. (row2-row1)/row2 so I can get the following 0.5 0.667 0.75 0.8 0.833 Based on the following links how to apply a user defined function column wise on grouped data in pandas https://www.geeksforgeeks.org/apply-a-function-to-each-row-or-column-in-dataframe-using-pandas-apply/ https://pythoninoffice.com/pandas-how-to-calculate-difference-between-rows Groupby and apply a defined function - Pandas I tried the following df.apply(lambda x,y: (x + y)/y, axis=0) This does not work as it expects y as an argument df.diff() This works but then it is not exactly the function I want. Does anyone know how to achieve the result I expect? A: df.diff(1).div(df) output x z i j y 0 NaN NaN NaN NaN NaN 1 0.5 0.67 0.75 0.8 0.83 With a short example, I answered. If I'm misunderstanding something, edit your example more long. I'll answer again.
How to apply a user-defined function between rows in pandas using both rows' values?
I have two rows of data in a Pandas data frame and want to operate on each column separately with a function that uses both values, e.g. import pandas as pd df = pd.DataFrame({"x": [1, 2], "z": [2, 6], "i": [3, 12], "j": [4, 20], "y": [5, 30]}) x z i j y 0 1 2 3 4 5 1 2 6 12 20 30 The function is something like the row-2 value minus the row-1 value, divided by the row-2 value - for each column separately, e.g. (row2-row1)/row2, so I can get the following 0.5 0.667 0.75 0.8 0.833 Based on the following links how to apply a user defined function column wise on grouped data in pandas https://www.geeksforgeeks.org/apply-a-function-to-each-row-or-column-in-dataframe-using-pandas-apply/ https://pythoninoffice.com/pandas-how-to-calculate-difference-between-rows Groupby and apply a defined function - Pandas I tried the following df.apply(lambda x,y: (x + y)/y, axis=0) This does not work, as it expects y as an argument df.diff() This works, but it is not exactly the function I want. Does anyone know how to achieve the result I expect?
[ "df.diff(1).div(df)\n\noutput\n x z i j y\n0 NaN NaN NaN NaN NaN\n1 0.5 0.67 0.75 0.8 0.83\n\nWith a short example, I answered. If I'm misunderstanding something, edit your example more long. I'll answer again.\n" ]
[ 0 ]
[]
[]
[ "apply", "dataframe", "pandas", "python" ]
stackoverflow_0074623034_apply_dataframe_pandas_python.txt
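Using the question's own frame, the accepted one-liner checks out; a sketch:

import pandas as pd

df = pd.DataFrame({"x": [1, 2], "z": [2, 6], "i": [3, 12], "j": [4, 20], "y": [5, 30]})
# (row2 - row1) / row2, applied column by column
result = df.diff().div(df)
print(result.iloc[1])  # x 0.5, z 0.667, i 0.75, j 0.8, y 0.833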
Q: Add "collection" of attributes directly to top level of a class I am trying to capture (S3) logs in a structured way. I am capturing the access-related elements with this type of tuple: class _Access(NamedTuple): time: datetime ip: str actor: str request_id: str action: str key: str request_uri: str status: int error_code: str I then have a class that uses this named tuple as follows (edited just down to relevant code): class Logs: def __init__(self, log: str): raw_logs = match(S3_LOG_REGEX, log) if raw_logs is None: raise FormatError(log) logs = raw_logs.groups() timestamp = datetime.strptime(logs[2], "%d/%b/%Y:%H:%M:%S %z") http_status = int(logs[9]) access = _Access( timestamp, logs[3], logs[4], logs[5], logs[6], logs[7], logs[8], http_status, logs[10], ) self.access = access The problem is that it is too verbose when I now want to use it: >>> log_struct = Logs(raw_log) >>> log_struct.access.action # I don't want to have to add `access` As I mention above, I'd rather be able to do something like this: >>> log_struct = Logs(raw_log) >>> log_struct.action But I still want to have this clean named tuple called _Access. How can I make everything from access available at the top level? Specifically, I have this line: self.access = access which is giving me that extra "layer" that I don't want. I'd like to be able to "unpack" it somehow, similar to how we can unpack arguments by passing the star in *args. But I'm not sure how I can unpack the tuple in this case. A: What you really need for your use case is an alternative constructor for your NamedTuple subclass to parse a string of a log entry into respective fields, which can be done by creating a class method that calls the __new__ method with arguments parsed from the input string. Using just the fields of ip and action as a simplified example: from typing import NamedTuple class Logs(NamedTuple): ip: str action: str @classmethod def parse(cls, log: str) -> 'Logs': return cls.__new__(cls, *log.split()) log_struct = Logs.parse('192.168.1.1 GET') print(log_struct) print(log_struct.ip) print(log_struct.action) This outputs: Logs(ip='192.168.1.1', action='GET') 192.168.1.1 GET A: I agree with @blhsing and recommend that solution. This is assuming that there are not extra attributes required to be apply to the named tuple (say storing the raw log value). If you really need the object to remain composed, another way to support accessing the properties of the _Access class would be to override the __getattr__ method [PEP 562] of Logs The __getattr__ function at the module level should accept one argument which is the name of an attribute and return the computed value or raise an AttributeError: def __getattr__(name: str) -> Any: ... If an attribute is not found on a module object through the normal lookup (i.e. object.__getattribute__), then __getattr__ is searched in the module __dict__ before raising an AttributeError. If found, it is called with the attribute name and the result is returned. Looking up a name as a module global will bypass module __getattr__. This is intentional, otherwise calling __getattr__ for builtins will significantly harm performance. E.g. 
from typing import NamedTuple, Any class _Access(NamedTuple): foo: str bar: str class Logs: def __init__(self, log: str) -> None: self.log = log self.access = _Access(*log.split()) def __getattr__(self, name: str) -> Any: return getattr(self.access, name) When you request an attribute of Logs which is not present it will try to access the attribute through the Logs.access attribute. Meaning you can write code like this: logs = Logs("fizz buzz") print(f"{logs.log=}, {logs.foo=}, {logs.bar=}") logs.log='fizz buzz', logs.foo='fizz', logs.bar='buzz' Note that this would not preserve the typing information through to the Logs object in most static analyzers and autocompletes. That to me would be a compelling enough reason not to do this, and continue to use the more verbose way of accessing values as you describe in your question. If you still really need this, and want to remain type safe. Then I would add properties to the Logs class which fetch from the _Access object. class Logs: def __init__(self, log: str) -> None: self.log = log self.access = _Access(*log.split()) @property def foo(self) -> str: return self.access.foo @property def bar(self) -> str: return self.access.bar This avoids the type safty issues, and depending on how much code you write using the Logs instances, still can cut down on other boilerplate dramatically.
Add "collection" of attributes directly to top level of a class
I am trying to capture (S3) logs in a structured way. I am capturing the access-related elements with this type of tuple: class _Access(NamedTuple): time: datetime ip: str actor: str request_id: str action: str key: str request_uri: str status: int error_code: str I then have a class that uses this named tuple as follows (edited just down to relevant code): class Logs: def __init__(self, log: str): raw_logs = match(S3_LOG_REGEX, log) if raw_logs is None: raise FormatError(log) logs = raw_logs.groups() timestamp = datetime.strptime(logs[2], "%d/%b/%Y:%H:%M:%S %z") http_status = int(logs[9]) access = _Access( timestamp, logs[3], logs[4], logs[5], logs[6], logs[7], logs[8], http_status, logs[10], ) self.access = access The problem is that it is too verbose when I now want to use it: >>> log_struct = Logs(raw_log) >>> log_struct.access.action # I don't want to have to add `access` As I mention above, I'd rather be able to do something like this: >>> log_struct = Logs(raw_log) >>> log_struct.action But I still want to have this clean named tuple called _Access. How can I make everything from access available at the top level? Specifically, I have this line: self.access = access which is giving me that extra "layer" that I don't want. I'd like to be able to "unpack" it somehow, similar to how we can unpack arguments by passing the star in *args. But I'm not sure how I can unpack the tuple in this case.
[ "What you really need for your use case is an alternative constructor for your NamedTuple subclass to parse a string of a log entry into respective fields, which can be done by creating a class method that calls the __new__ method with arguments parsed from the input string.\nUsing just the fields of ip and action as a simplified example:\nfrom typing import NamedTuple\n\nclass Logs(NamedTuple):\n ip: str\n action: str\n\n @classmethod\n def parse(cls, log: str) -> 'Logs':\n return cls.__new__(cls, *log.split())\n\nlog_struct = Logs.parse('192.168.1.1 GET')\nprint(log_struct)\nprint(log_struct.ip)\nprint(log_struct.action)\n\nThis outputs:\nLogs(ip='192.168.1.1', action='GET')\n192.168.1.1\nGET\n\n", "I agree with @blhsing and recommend that solution. This is assuming that there are not extra attributes required to be apply to the named tuple (say storing the raw log value).\nIf you really need the object to remain composed, another way to support accessing the properties of the _Access class would be to override the __getattr__ method [PEP 562] of Logs\n\nThe __getattr__ function at the module level should accept one\nargument which is the name of an attribute and return the computed\nvalue or raise an AttributeError:\ndef __getattr__(name: str) -> Any: ...\n\nIf an attribute is not found on a module object through the normal\nlookup (i.e. object.__getattribute__), then __getattr__ is\nsearched in the module __dict__ before raising an AttributeError.\nIf found, it is called with the attribute name and the result is\nreturned. Looking up a name as a module global will bypass module\n__getattr__. This is intentional, otherwise calling __getattr__\nfor builtins will significantly harm performance.\n\nE.g.\nfrom typing import NamedTuple, Any\n\n\nclass _Access(NamedTuple):\n foo: str\n bar: str\n\n\nclass Logs:\n def __init__(self, log: str) -> None:\n self.log = log\n self.access = _Access(*log.split())\n\n def __getattr__(self, name: str) -> Any:\n return getattr(self.access, name)\n\nWhen you request an attribute of Logs which is not present it will try to access the attribute through the Logs.access attribute. Meaning you can write code like this:\nlogs = Logs(\"fizz buzz\")\nprint(f\"{logs.log=}, {logs.foo=}, {logs.bar=}\")\n\nlogs.log='fizz buzz', logs.foo='fizz', logs.bar='buzz'\n\nNote that this would not preserve the typing information through to the Logs object in most static analyzers and autocompletes. That to me would be a compelling enough reason not to do this, and continue to use the more verbose way of accessing values as you describe in your question.\nIf you still really need this, and want to remain type safe. Then I would add properties to the Logs class which fetch from the _Access object.\nclass Logs:\n def __init__(self, log: str) -> None:\n self.log = log\n self.access = _Access(*log.split())\n\n @property\n def foo(self) -> str:\n return self.access.foo\n\n @property\n def bar(self) -> str:\n return self.access.bar\n\nThis avoids the type safty issues, and depending on how much code you write using the Logs instances, still can cut down on other boilerplate dramatically.\n" ]
[ 4, 1 ]
[]
[]
[ "attributes", "class", "python" ]
stackoverflow_0074574306_attributes_class_python.txt
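A variant of the first answer's alternative constructor, using NamedTuple's _make helper instead of calling __new__ directly (same effect, slightly more idiomatic); the two fields are a trimmed-down stand-in for the full _Access tuple:

from typing import NamedTuple


class Logs(NamedTuple):
    ip: str
    action: str

    @classmethod
    def parse(cls, log: str) -> "Logs":
        # _make builds the tuple from any iterable of field values
        return cls._make(log.split())


log_struct = Logs.parse("192.168.1.1 GET")
print(log_struct.action)  # GET -- no intermediate .access layer needed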
Q: How to make sure con1D's output_shape is same as input_shape with time series in keras autoencoder? Conv1D output shape incorrect in keras autoencoder model when running autoencoder fit. I try to use keras autoencoder model to compress and decompress my time-series data. but when I change the layer with Conv1D, the output shape is incorrect. I have some time series data with the shape of (4000, 689), where represent 4000 samples and each sample has 689 features. I want to use Conv1D to compress the data but the Upsampling layer's and last Conv1D layer's output shape(?, 688, 1) is not equal to input shape (, 689, 1). How should I set those layers' parameters? Thanks in advance. x_train = data[0:4000].values x_test = data[4000:].values print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) x_train shape: (4000, 689) x_test shape: (202, 689) I reshaped the x_train, x_test to 3dim, like below. x_tr = x_train.reshape(4000,689,1) x_te = x_test.reshape(202,689,1) print('x_tr shape:', x_tr.shape) print('x_te shape:', x_te.shape) x_tr shape: (4000, 689, 1) x_te shape: (202, 689, 1) input_img = Input(shape=(689,1)) x = Conv1D(16, 3, activation='relu', padding='same')(input_img) print(x) x = MaxPooling1D(2, padding='same')(x) print(x) x = Conv1D(8, 3, activation='relu', padding='same')(x) print(x) x = MaxPooling1D(2, padding='same')(x) print(x) x = Conv1D(8, 3, activation='relu', padding='same')(x) print(x) encoded = MaxPooling1D(2)(x) print(encoded) print('--------------') x = Conv1D(8, 3, activation='relu', padding='same')(encoded) print(x) x = UpSampling1D(2)(x) print(x) x = Conv1D(8, 3, activation='relu', padding='same')(x) print(x) x = UpSampling1D(2)(x) print(x) x = Conv1D(16, 3, activation='relu', padding='same')(x) print(x) x = UpSampling1D(2)(x) print(x) decoded = Conv1D(1, 3, activation='sigmoid', padding='same')(x) print(decoded) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adam', loss='mse') When I imported those models and run cells above in Jupyter, It seems ok. Maybe. But I get the error in next code when running autoencoder.fit. autoencoder.fit(x_tr, x_tr, epochs=50, batch_size=128, shuffle=True, validation_data=(x_te, x_te)) So I print each layer. The each layers' print result below. 
Tensor("conv1d_166/Relu:0", shape=(?, 689, 16), dtype=float32) Tensor("max_pooling1d_71/Squeeze:0", shape=(?, 345, 16), dtype=float32) Tensor("conv1d_167/Relu:0", shape=(?, 345, 8), dtype=float32) Tensor("max_pooling1d_72/Squeeze:0", shape=(?, 173, 8), dtype=float32) Tensor("conv1d_168/Relu:0", shape=(?, 173, 8), dtype=float32) Tensor("max_pooling1d_73/Squeeze:0", shape=(?, 86, 8), dtype=float32) Tensor("conv1d_169/Relu:0", shape=(?, 86, 8), dtype=float32) Tensor("up_sampling1d_67/concat:0", shape=(?, 172, 8), dtype=float32) Tensor("conv1d_170/Relu:0", shape=(?, 172, 8), dtype=float32) Tensor("up_sampling1d_68/concat:0", shape=(?, 344, 8), dtype=float32) Tensor("conv1d_171/Relu:0", shape=(?, 344, 16), dtype=float32) Tensor("up_sampling1d_69/concat:0", shape=(?, 688, 16), dtype=float32) Tensor("conv1d_172/Sigmoid:0", shape=(?, 688, 1), dtype=float32) ValueError bellow: ValueError Traceback (most recent call last) <ipython-input-74-56836006a800> in <module> 3 batch_size=128, 4 shuffle=True, ----> 5 validation_data=(x_te, x_te) 6 ) ~/anaconda3/envs/keras/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs) 950 sample_weight=sample_weight, 951 class_weight=class_weight, --> 952 batch_size=batch_size) 953 # Prepare validation data. 954 do_validation = False ~/anaconda3/envs/keras/lib/python3.6/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size) 787 feed_output_shapes, 788 check_batch_axis=False, # Don't enforce the batch size. --> 789 exception_prefix='target') 790 791 # Generate sample-wise weight values given the `sample_weight` and ~/anaconda3/envs/keras/lib/python3.6/site-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix) 136 ': expected ' + names[i] + ' to have shape ' + 137 str(shape) + ' but got array with shape ' + --> 138 str(data_shape)) 139 return data 140 ValueError: Error when checking target: expected conv1d_172 to have shape (688, 1) but got array with shape (689, 1) Is floor function make this happen? How to fix the error and autoencoder.fit correctly? Thanks in advance. A: Using Convolutional layers, you need to infer your output size based on the input size, kernel size and other parameters. The simplest way to do so is to feed a data sample through the network and see the final vector size after your last convolutional layer. Then, you can define further layers based on that size. Here is an example from my pytorch project: def _infer_flat_size(self): encoder_output = self.encoder(torch.ones(1, *self.input_size)) return int(np.prod(encoder_output.size()[1:])), encoder_output.size()[1:]
How to make sure Conv1D's output_shape is the same as input_shape with time series in a Keras autoencoder?
Conv1D output shape incorrect in keras autoencoder model when running autoencoder fit. I try to use keras autoencoder model to compress and decompress my time-series data. but when I change the layer with Conv1D, the output shape is incorrect. I have some time series data with the shape of (4000, 689), where represent 4000 samples and each sample has 689 features. I want to use Conv1D to compress the data but the Upsampling layer's and last Conv1D layer's output shape(?, 688, 1) is not equal to input shape (, 689, 1). How should I set those layers' parameters? Thanks in advance. x_train = data[0:4000].values x_test = data[4000:].values print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) x_train shape: (4000, 689) x_test shape: (202, 689) I reshaped the x_train, x_test to 3dim, like below. x_tr = x_train.reshape(4000,689,1) x_te = x_test.reshape(202,689,1) print('x_tr shape:', x_tr.shape) print('x_te shape:', x_te.shape) x_tr shape: (4000, 689, 1) x_te shape: (202, 689, 1) input_img = Input(shape=(689,1)) x = Conv1D(16, 3, activation='relu', padding='same')(input_img) print(x) x = MaxPooling1D(2, padding='same')(x) print(x) x = Conv1D(8, 3, activation='relu', padding='same')(x) print(x) x = MaxPooling1D(2, padding='same')(x) print(x) x = Conv1D(8, 3, activation='relu', padding='same')(x) print(x) encoded = MaxPooling1D(2)(x) print(encoded) print('--------------') x = Conv1D(8, 3, activation='relu', padding='same')(encoded) print(x) x = UpSampling1D(2)(x) print(x) x = Conv1D(8, 3, activation='relu', padding='same')(x) print(x) x = UpSampling1D(2)(x) print(x) x = Conv1D(16, 3, activation='relu', padding='same')(x) print(x) x = UpSampling1D(2)(x) print(x) decoded = Conv1D(1, 3, activation='sigmoid', padding='same')(x) print(decoded) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adam', loss='mse') When I imported those models and run cells above in Jupyter, It seems ok. Maybe. But I get the error in next code when running autoencoder.fit. autoencoder.fit(x_tr, x_tr, epochs=50, batch_size=128, shuffle=True, validation_data=(x_te, x_te)) So I print each layer. The each layers' print result below. 
Tensor("conv1d_166/Relu:0", shape=(?, 689, 16), dtype=float32) Tensor("max_pooling1d_71/Squeeze:0", shape=(?, 345, 16), dtype=float32) Tensor("conv1d_167/Relu:0", shape=(?, 345, 8), dtype=float32) Tensor("max_pooling1d_72/Squeeze:0", shape=(?, 173, 8), dtype=float32) Tensor("conv1d_168/Relu:0", shape=(?, 173, 8), dtype=float32) Tensor("max_pooling1d_73/Squeeze:0", shape=(?, 86, 8), dtype=float32) Tensor("conv1d_169/Relu:0", shape=(?, 86, 8), dtype=float32) Tensor("up_sampling1d_67/concat:0", shape=(?, 172, 8), dtype=float32) Tensor("conv1d_170/Relu:0", shape=(?, 172, 8), dtype=float32) Tensor("up_sampling1d_68/concat:0", shape=(?, 344, 8), dtype=float32) Tensor("conv1d_171/Relu:0", shape=(?, 344, 16), dtype=float32) Tensor("up_sampling1d_69/concat:0", shape=(?, 688, 16), dtype=float32) Tensor("conv1d_172/Sigmoid:0", shape=(?, 688, 1), dtype=float32) ValueError bellow: ValueError Traceback (most recent call last) <ipython-input-74-56836006a800> in <module> 3 batch_size=128, 4 shuffle=True, ----> 5 validation_data=(x_te, x_te) 6 ) ~/anaconda3/envs/keras/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs) 950 sample_weight=sample_weight, 951 class_weight=class_weight, --> 952 batch_size=batch_size) 953 # Prepare validation data. 954 do_validation = False ~/anaconda3/envs/keras/lib/python3.6/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size) 787 feed_output_shapes, 788 check_batch_axis=False, # Don't enforce the batch size. --> 789 exception_prefix='target') 790 791 # Generate sample-wise weight values given the `sample_weight` and ~/anaconda3/envs/keras/lib/python3.6/site-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix) 136 ': expected ' + names[i] + ' to have shape ' + 137 str(shape) + ' but got array with shape ' + --> 138 str(data_shape)) 139 return data 140 ValueError: Error when checking target: expected conv1d_172 to have shape (688, 1) but got array with shape (689, 1) Is floor function make this happen? How to fix the error and autoencoder.fit correctly? Thanks in advance.
[ "Using Convolutional layers, you need to infer your output size based on the input size, kernel size and other parameters. The simplest way to do so is to feed a data sample through the network and see the final vector size after your last convolutional layer. Then, you can define further layers based on that size.\nHere is an example from my pytorch project:\ndef _infer_flat_size(self):\n encoder_output = self.encoder(torch.ones(1, *self.input_size))\n return int(np.prod(encoder_output.size()[1:])), encoder_output.size()[1:]\n\n" ]
[ 0 ]
[]
[]
[ "autoencoder", "conv_neural_network", "keras", "python", "time_series" ]
stackoverflow_0055731225_autoencoder_conv_neural_network_keras_python_time_series.txt
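One common fix, assuming the exact architecture above: pad the series so its length is divisible by 2**3 = 8, since three MaxPooling1D/UpSampling1D pairs can only halve and double cleanly; a sketch with random stand-in data:

import numpy as np

x_tr = np.random.rand(4000, 689, 1)   # stand-in for the real training data
pad = (-x_tr.shape[1]) % 8            # 689 -> needs 7 extra samples
x_tr_padded = np.pad(x_tr, ((0, 0), (0, pad), (0, 0)))
print(x_tr_padded.shape)              # (4000, 696, 1)
# Build the model with Input(shape=(696, 1)) and the decoder output
# will come back as (696, 1), matching the padded target.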
Q: Python Tkinter GUI does not open when the script is called by C# program ` var psierror = new ProcessStartInfo(); psierror.FileName = @"C:\Users\Acer\AppData\Local\Programs\Python\Python310\python.exe"; psierror.Arguments = $"\"{exception_case}\" \"{Image}\""; psierror.UseShellExecute = false; psierror.CreateNoWindow = true; psierror.RedirectStandardOutput = true; psierror.RedirectStandardError = true; var errors_again = ""; var results_again = ""; using (var process = Process.Start(psierror)) { errors_again = process.StandardError.ReadToEnd(); results_again = process.StandardOutput.ReadToEnd(); } Console.WriteLine(results_again); } ` This is the C# segment that attempts to call the GUI but it seems like it just skips that program when ran. This code worked for calling a non-GUI python script so I just assumed it would work for this script as well. Any help is appreciated What I want to happen is that the C# program calls the python script which starts the GUI to allow the user to make the selections. Afterwards, the value of the selections are sent to the C# program to be processed. A: It was an issue with my python script and I also changed the CreateNoWindow to false
Python Tkinter GUI does not open when the script is called by a C# program
var psierror = new ProcessStartInfo(); psierror.FileName = @"C:\Users\Acer\AppData\Local\Programs\Python\Python310\python.exe"; psierror.Arguments = $"\"{exception_case}\" \"{Image}\""; psierror.UseShellExecute = false; psierror.CreateNoWindow = true; psierror.RedirectStandardOutput = true; psierror.RedirectStandardError = true; var errors_again = ""; var results_again = ""; using (var process = Process.Start(psierror)) { errors_again = process.StandardError.ReadToEnd(); results_again = process.StandardOutput.ReadToEnd(); } Console.WriteLine(results_again); } This is the C# segment that attempts to call the GUI, but it seems like it just skips that program when run. This code worked for calling a non-GUI Python script, so I assumed it would work for this script as well. Any help is appreciated. What I want to happen is that the C# program calls the Python script, which starts the GUI to allow the user to make the selections. Afterwards, the values of the selections are sent to the C# program to be processed.
[ "It was an issue with my python script and I also changed the CreateNoWindow to false\n" ]
[ 0 ]
[]
[]
[ "c#", "python", "tkinter" ]
stackoverflow_0074622862_c#_python_tkinter.txt
Q: Python Wikipedia Library does not find requested page eventhough it exists So I want to search the wikipedia database for some keywords and then extract the text that the relative pages have to then use for a tf-idf module to later on implement in a text classification program. I am currently looping through a pandas dataframe with all the keywords and then searching the wikipedia database for the respective keywords, but I am getting an error saying the webpage does not exists. Here is my code: import wikipedia import pandas as pd from sklearn.feature_extraction.text import CountVectorizer df_wiki_pages=pd.read_csv(r'C:\\Users\\jason\\Downloads\\Categories.csv',usecols=[0]) df_wiki_pages = df_wiki_pages.dropna() print(df_wiki_pages) wikipages = [] for pages in wikipages: tokenized_texts = [] for index, row in df_wiki_pages.iterrows(): currentRow = row['Categories'] print("Now testing: "+ currentRow) wiki = wikipedia.page('currentRow') It gives me this error once it loops to the keyword "Customer_advocacy": PageError: Page id "customer advocate" does not match any pages. Try another id! I do not understand why it is searching for 'customer advocate' since my query is for 'Customer_advocacy'. I do not understand why it changes the query by itself because the page for 'Customer_advocacy' exists meanwhile the page for 'customer advocate' does not. Am I doing something wrong in my query? A: Try setting the auto_suggest flag to False: wiki = wikipedia.page(currentRow, auto_suggest=False) If we try this on the problematic string, "Customer_advocacy," it seems to work: import wikipedia wiki = wikipedia.page("Customer advocacy", auto_suggest=False) print(wiki) # <WikipediaPage 'Customer advocacy'> Currently, your implementation uses the string 'currentRow' in the call to wikipedia.page. I assume that is a typo.
Python Wikipedia library does not find requested page even though it exists
So I want to search the Wikipedia database for some keywords and then extract the text of the respective pages, to use in a tf-idf module that I will later implement in a text classification program. I am currently looping through a pandas dataframe with all the keywords and then searching the Wikipedia database for the respective keywords, but I am getting an error saying the webpage does not exist. Here is my code: import wikipedia import pandas as pd from sklearn.feature_extraction.text import CountVectorizer df_wiki_pages=pd.read_csv(r'C:\\Users\\jason\\Downloads\\Categories.csv',usecols=[0]) df_wiki_pages = df_wiki_pages.dropna() print(df_wiki_pages) wikipages = [] for pages in wikipages: tokenized_texts = [] for index, row in df_wiki_pages.iterrows(): currentRow = row['Categories'] print("Now testing: "+ currentRow) wiki = wikipedia.page('currentRow') It gives me this error once it loops to the keyword "Customer_advocacy": PageError: Page id "customer advocate" does not match any pages. Try another id! I do not understand why it is searching for 'customer advocate' since my query is for 'Customer_advocacy'. I do not understand why it changes the query by itself, because the page for 'Customer_advocacy' exists while the page for 'customer advocate' does not. Am I doing something wrong in my query?
[ "Try setting the auto_suggest flag to False:\nwiki = wikipedia.page(currentRow, auto_suggest=False)\n\nIf we try this on the problematic string, \"Customer_advocacy,\" it seems to work:\nimport wikipedia\n\nwiki = wikipedia.page(\"Customer advocacy\", auto_suggest=False)\nprint(wiki) # <WikipediaPage 'Customer advocacy'>\n\nCurrently, your implementation uses the string 'currentRow' in the call to wikipedia.page. I assume that is a typo.\n" ]
[ 0 ]
[]
[]
[ "python", "wikipedia", "wikipedia_api" ]
stackoverflow_0074623142_python_wikipedia_wikipedia_api.txt
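Putting the answer's flag into the asker's loop shape; the two sample keywords are invented, and note the variable currentRow is passed, not the string literal 'currentRow':

import pandas as pd
import wikipedia

df_wiki_pages = pd.DataFrame({'Categories': ['Customer advocacy',
                                             'Python (programming language)']})

for index, row in df_wiki_pages.iterrows():
    currentRow = row['Categories']
    print("Now testing: " + currentRow)
    # auto_suggest=False stops the library from rewriting the query
    wiki = wikipedia.page(currentRow, auto_suggest=False)
    print(wiki.title)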
Q: Web Scraping with Python - Because when I use FOR IN it just returns a string Because when I print the item "Vagas", they return all the strings I need, but when I print "on_click" it returns only one string. And from what I've seen, it returns only the last string, ignoring the others. soup = BeautifulSoup(stringue, "html.parser") Vagas = soup.find_all(title="Vaga disponível.") for teste2 in Vagas: on_click = teste2.get('onclick') print(on_click) Return of "Vagas". `<input id="ctl00_ctl00_Content_Content_rpt_turno_4_ctl01_imb_vaga_1" name="ctl00$ctl00$Content$Content$rpt_turno_4$ctl01$imb_vaga_1" onclick="javascript:window.open('Cadastro.aspx?id_agenda=0&amp;id_turno=29/11/2022 3:00:00;29/11/2022 4:00:00&amp;data=29/11/2022&amp;id_turno_exportador=198397&amp;id_turno_agenda=61298&amp;id_transportadora=23213&amp;id_turno_transp=68291&amp;id_Cliente=40300&amp;codigo_terminal=40300&amp;codigo_empresa=1&amp;codigo_exportador=24978&amp;codigo_transportador=23213&amp;codigo_turno=4&amp;turno_transp_vg=68291','_blank','height=850,width=1000,top=(screen.width)?(screen.width-1000)/2 : 0,left=(screen.height)?(screen.height-700)/2 : 0,toolbar=no,location=no,directories=no,status=no,menubar=no,scrollbars=yes,resizable=no');" src="../App_Themes/SisLog/Images/add-document.gif" style="height:20px;border-width:0px;" title="Vaga disponível." type="image"/>, <input id="ctl00_ctl00_Content_Content_rpt_turno_6_ctl01_imb_vaga_1" name="ctl00$ctl00$Content$Content$rpt_turno_6$ctl01$imb_vaga_1" onclick="javascript:window.open('Cadastro.aspx?id_agenda=0&amp;id_turno=29/11/2022 5:00:00;29/11/2022 6:00:00&amp;data=29/11/2022&amp;id_turno_exportador=198397&amp;id_turno_agenda=61298&amp;id_transportadora=23213&amp;id_turno_transp=68291&amp;id_Cliente=40300&amp;codigo_terminal=40300&amp;codigo_empresa=1&amp;codigo_exportador=24978&amp;codigo_transportador=23213&amp;codigo_turno=6&amp;turno_transp_vg=68291','_blank','height=850,width=1000,top=(screen.width)?(screen.width-1000)/2 : 0,left=(screen.height)?(screen.height-700)/2 : 0,toolbar=no,location=no,directories=no,status=no,menubar=no,scrollbars=yes,resizable=no');" src="../App_Themes/SisLog/Images/add-document.gif" style="height:20px;border-width:0px;" title="Vaga disponível." type="image"/>,` Return of "on_click" javascript:window.open('Cadastro.aspx?id_agenda=0&id_turno=29/11/2022 19:00:00;29/11/2022 20:00:00&data=29/11/2022&id_turno_exportador=198397&id_turno_agenda=61298&id_transportadora=23213&id_turno_transp=68291&id_Cliente=40300&codigo_terminal=40300&codigo_empresa=1&codigo_exportador=24978&codigo_transportador=23213&codigo_turno=20&turno_transp_vg=68291','_blank','height=850,width=1000,top=(screen.width)?(screen.width-1000)/2 : 0,left=(screen.height)?(screen.height-700)/2 : 0,toolbar=no,location=no,directories=no,status=no,menubar=no,scrollbars=yes,resizable=no'); A: Put the print statement inside the for loop: for teste2 in Vagas: on_click = teste2.get('onclick') print(on_click)
Web Scraping with Python - why does my for-in loop return only the last string?
Because when I print the item "Vagas", they return all the strings I need, but when I print "on_click" it returns only one string. And from what I've seen, it returns only the last string, ignoring the others. soup = BeautifulSoup(stringue, "html.parser") Vagas = soup.find_all(title="Vaga disponível.") for teste2 in Vagas: on_click = teste2.get('onclick') print(on_click) Return of "Vagas". `<input id="ctl00_ctl00_Content_Content_rpt_turno_4_ctl01_imb_vaga_1" name="ctl00$ctl00$Content$Content$rpt_turno_4$ctl01$imb_vaga_1" onclick="javascript:window.open('Cadastro.aspx?id_agenda=0&amp;id_turno=29/11/2022 3:00:00;29/11/2022 4:00:00&amp;data=29/11/2022&amp;id_turno_exportador=198397&amp;id_turno_agenda=61298&amp;id_transportadora=23213&amp;id_turno_transp=68291&amp;id_Cliente=40300&amp;codigo_terminal=40300&amp;codigo_empresa=1&amp;codigo_exportador=24978&amp;codigo_transportador=23213&amp;codigo_turno=4&amp;turno_transp_vg=68291','_blank','height=850,width=1000,top=(screen.width)?(screen.width-1000)/2 : 0,left=(screen.height)?(screen.height-700)/2 : 0,toolbar=no,location=no,directories=no,status=no,menubar=no,scrollbars=yes,resizable=no');" src="../App_Themes/SisLog/Images/add-document.gif" style="height:20px;border-width:0px;" title="Vaga disponível." type="image"/>, <input id="ctl00_ctl00_Content_Content_rpt_turno_6_ctl01_imb_vaga_1" name="ctl00$ctl00$Content$Content$rpt_turno_6$ctl01$imb_vaga_1" onclick="javascript:window.open('Cadastro.aspx?id_agenda=0&amp;id_turno=29/11/2022 5:00:00;29/11/2022 6:00:00&amp;data=29/11/2022&amp;id_turno_exportador=198397&amp;id_turno_agenda=61298&amp;id_transportadora=23213&amp;id_turno_transp=68291&amp;id_Cliente=40300&amp;codigo_terminal=40300&amp;codigo_empresa=1&amp;codigo_exportador=24978&amp;codigo_transportador=23213&amp;codigo_turno=6&amp;turno_transp_vg=68291','_blank','height=850,width=1000,top=(screen.width)?(screen.width-1000)/2 : 0,left=(screen.height)?(screen.height-700)/2 : 0,toolbar=no,location=no,directories=no,status=no,menubar=no,scrollbars=yes,resizable=no');" src="../App_Themes/SisLog/Images/add-document.gif" style="height:20px;border-width:0px;" title="Vaga disponível." type="image"/>,` Return of "on_click" javascript:window.open('Cadastro.aspx?id_agenda=0&id_turno=29/11/2022 19:00:00;29/11/2022 20:00:00&data=29/11/2022&id_turno_exportador=198397&id_turno_agenda=61298&id_transportadora=23213&id_turno_transp=68291&id_Cliente=40300&codigo_terminal=40300&codigo_empresa=1&codigo_exportador=24978&codigo_transportador=23213&codigo_turno=20&turno_transp_vg=68291','_blank','height=850,width=1000,top=(screen.width)?(screen.width-1000)/2 : 0,left=(screen.height)?(screen.height-700)/2 : 0,toolbar=no,location=no,directories=no,status=no,menubar=no,scrollbars=yes,resizable=no');
[ "Put the print statement inside the for loop:\nfor teste2 in Vagas:\n on_click = teste2.get('onclick')\n print(on_click)\n\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "pandas", "python", "selenium", "web_scraping" ]
stackoverflow_0074620714_beautifulsoup_pandas_python_selenium_web_scraping.txt
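A short follow-up to the answer above: printing inside the loop fixes the symptom, but if the extracted onclick strings are needed later, collecting them in a list avoids overwriting a single variable on every iteration. This is a minimal sketch with stand-in HTML, since the question's full page source is not available; only find_all and get are taken from the original code.

from bs4 import BeautifulSoup

# Stand-in HTML shaped like the question's <input> elements (an assumption)
stringue = """
<input onclick="javascript:window.open('Cadastro.aspx?codigo_turno=4');" title="Vaga disponível." type="image"/>
<input onclick="javascript:window.open('Cadastro.aspx?codigo_turno=6');" title="Vaga disponível." type="image"/>
"""

soup = BeautifulSoup(stringue, "html.parser")

# Build a list instead of reassigning one variable on each pass
on_clicks = [tag.get("onclick") for tag in soup.find_all(title="Vaga disponível.")]

for on_click in on_clicks:
    print(on_click)  # prints every onclick value, not just the last one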
Q: Trying to find Total Sales & Total Cost of cars for 3 different countries Update: Below is the excel file link.
I am brand new to Python and was doing this graph, but I am stuck. I am trying to show a horizontal bar graph with 3 different countries (Spain, Germany, Switzerland) and to show the total sales and total costs. I keep getting an error, but I am not sure if my formula is correct. Could you please take a look? The error I get is below. If someone could please assist!

line 33
    total.plot('CountryMake', ['Total Sales Price', 'Total Cost Price'], kind='barh')
    ^
SyntaxError: invalid syntax

import pandas as pd

sales= pd.read_excel('Simplified Car Sales Data.xlsx')

spa= sales[sales['CountryName'] == 'Spain']

spa_totals = spa.sum(axis=0, numeric_only = True)
spa_count = spa.count(axis=0)

spa_total_sale_price = round(spa_totals) ['SalePrice']
spa_total_cost_price = round(spa_totals) ['CostPrice']

swi= sales[sales['CountryName'] == 'Switzerland']

swi_totals = spa.sum(axis=0, numeric_only = True)
swi_count = spa.count(axis=0)

swi_total_sale_price = round(swi_totals) ['SalePrice']
swi_total_cost_price = round(swi_totals) ['CostPrice']

ger= sales[sales['CountryName'] == 'Germany']

ger_totals = ger.sum(axis=0, numeric_only = True)
ger_count = ger.count(axis=0)

ger_total_sale_price = round(ger_totals) ['SalePrice']
ger_total_cost_price = round(ger_totals) ['CostPrice']

totals = pd.DataFrame([('Spain',spa_tot_sale_price, spa_tot_cost_price),
                       ('Switzerland', swi_tot_sale_price,swi_tot_cost_price),
                       ('Germany', ger_tot_sale_price,ger_tot_cost_price),
                       columns ['CountryName', 'Total Sales Price', 'Total Cost Price']

total.plot('CountryMake', ['Total Sales Price', 'Total Cost Price'], kind='barh')

The error I get is below. If someone could please assist!

File "C:\Users\danac\AppData\Local\Temp\ipykernel_16616\2535154149.py", line 33
    total.plot('CountryMake', ['Total Sales Price', 'Total Cost Price'], kind='barh')
    ^
SyntaxError: invalid syntax

A: I don't think that you defined total in this part of the code. So, you may need to do totals.plot. You also didn't close the brackets.
import pandas as pd
import matplotlib.pyplot as plt

sales= pd.read_excel('Simplified Car Sales Data.xlsx')

spa= sales[sales['CountryName'] == 'Spain']

spa_totals = spa.sum(axis=0, numeric_only = True)
spa_count = spa.count(axis=0)

spa_total_sale_price = round(spa_totals) ['SalePrice']
spa_total_cost_price = round(spa_totals) ['CostPrice']

swi= sales[sales['CountryName'] == 'Switzerland']

swi_totals = spa.sum(axis=0, numeric_only = True)
swi_count = spa.count(axis=0)

swi_total_sale_price = round(swi_totals) ['SalePrice']
swi_total_cost_price = round(swi_totals) ['CostPrice']

ger= sales[sales['CountryName'] == 'Germany']

ger_totals = ger.sum(axis=0, numeric_only = True)
ger_count = ger.count(axis=0)

ger_total_sale_price = round(ger_totals) ['SalePrice']
ger_total_cost_price = round(ger_totals) ['CostPrice']

totals = pd.DataFrame([('Spain',spa_total_sale_price, spa_total_cost_price),
                       ('Switzerland', swi_total_sale_price,swi_total_cost_price),
                       ('Germany', ger_total_sale_price,ger_total_cost_price)],
                      columns=['CountryName', 'Total Sales Price', 'Total Cost Price'])
totals.plot('CountryName', ['Total Sales Price', 'Total Cost Price'], kind='barh')
plt.show()

This should work, although I don't guarantee given that I don't have your data.
P.S.
Try to use IDEs. It will be easier that way
Trying to find Total Sales & Total Cost of cars for 3 different countries
Update: Below is the excel file link.
I am brand new to Python and was doing this graph, but I am stuck. I am trying to show a horizontal bar graph with 3 different countries (Spain, Germany, Switzerland) and to show the total sales and total costs. I keep getting an error, but I am not sure if my formula is correct. Could you please take a look? The error I get is below. If someone could please assist!

line 33
    total.plot('CountryMake', ['Total Sales Price', 'Total Cost Price'], kind='barh')
    ^
SyntaxError: invalid syntax

import pandas as pd

sales= pd.read_excel('Simplified Car Sales Data.xlsx')

spa= sales[sales['CountryName'] == 'Spain']

spa_totals = spa.sum(axis=0, numeric_only = True)
spa_count = spa.count(axis=0)

spa_total_sale_price = round(spa_totals) ['SalePrice']
spa_total_cost_price = round(spa_totals) ['CostPrice']

swi= sales[sales['CountryName'] == 'Switzerland']

swi_totals = spa.sum(axis=0, numeric_only = True)
swi_count = spa.count(axis=0)

swi_total_sale_price = round(swi_totals) ['SalePrice']
swi_total_cost_price = round(swi_totals) ['CostPrice']

ger= sales[sales['CountryName'] == 'Germany']

ger_totals = ger.sum(axis=0, numeric_only = True)
ger_count = ger.count(axis=0)

ger_total_sale_price = round(ger_totals) ['SalePrice']
ger_total_cost_price = round(ger_totals) ['CostPrice']

totals = pd.DataFrame([('Spain',spa_tot_sale_price, spa_tot_cost_price),
                       ('Switzerland', swi_tot_sale_price,swi_tot_cost_price),
                       ('Germany', ger_tot_sale_price,ger_tot_cost_price),
                       columns ['CountryName', 'Total Sales Price', 'Total Cost Price']

total.plot('CountryMake', ['Total Sales Price', 'Total Cost Price'], kind='barh')

The error I get is below. If someone could please assist!

File "C:\Users\danac\AppData\Local\Temp\ipykernel_16616\2535154149.py", line 33
    total.plot('CountryMake', ['Total Sales Price', 'Total Cost Price'], kind='barh')
    ^
SyntaxError: invalid syntax
[ "I don't think that you defined total in this part of the code. So, you may need to do totals.plot. You also didn't close the brackets.\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nsales= pd.read_excel('Simplified Car Sales Data.xlsx')\n\nspa= sales[sales['CountryName'] == 'Spain']\n\nspa_totals = spa.sum(axis=0, numeric_only = True)\nspa_count = spa.count(axis=0)\n\nspa_total_sale_price = round(spa_totals) ['SalePrice']\nspa_total_cost_price = round(spa_totals) ['CostPrice']\n\nswi= sales[sales['CountryName'] == 'Switzerland']\n\nswi_totals = spa.sum(axis=0, numeric_only = True)\nswi_count = spa.count(axis=0)\n\nswi_total_sale_price = round(swi_totals) ['SalePrice']\nswi_total_cost_price = round(swi_totals) ['CostPrice']\n\nger= sales[sales['CountryName'] == 'Germany']\n\nger_totals = ger.sum(axis=0, numeric_only = True)\nger_count = ger.count(axis=0)\n\nger_total_sale_price = round(ger_totals) ['SalePrice']\nger_total_cost_price = round(ger_totals) ['CostPrice']\n\ntotals = pd.DataFrame([('Spain',spa_total_sale_price, spa_total_cost_price),\n ('Switzerland', swi_total_sale_price,swi_total_cost_price),\n ('Germany', ger_total_sale_price,ger_total_cost_price)],\n columns=['CountryName', 'Total Sales Price', 'Total Cost Price'])\ntotals.plot('CountryName', ['Total Sales Price', 'Total Cost Price'], kind='barh')\nplt.show()\n\nThis should work, although I don't guarantee given that I don't have your data.\nP.S.\nTry to use IDEs. It will be easier that way\n" ]
[ 0 ]
[]
[]
[ "jupyter", "python" ]
stackoverflow_0074623090_jupyter_python.txt
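As a design note on the fix above: the three per-country blocks can usually be collapsed into a single groupby, which also avoids the copy-paste slip in the question (swi_totals being computed from spa). A sketch, assuming the spreadsheet has CountryName, SalePrice and CostPrice columns as the question's code implies:

import pandas as pd
import matplotlib.pyplot as plt

sales = pd.read_excel('Simplified Car Sales Data.xlsx')

# Sum sale and cost prices per country in one pass
totals = (sales[sales['CountryName'].isin(['Spain', 'Switzerland', 'Germany'])]
          .groupby('CountryName')[['SalePrice', 'CostPrice']]
          .sum()
          .round()
          .rename(columns={'SalePrice': 'Total Sales Price',
                           'CostPrice': 'Total Cost Price'}))

totals.plot(kind='barh')  # country names become the y-axis labels
plt.show()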
Q: Get the current version of the current package in Python I am building a Python library, and inside one of its modules I need to get the current version of this same library and make decisions based on it. Is this possible in Python? What do you think is the best approach?

A: You should use dir(package) to see all the possible options for that package. If there is __version__, you can use that.

A: I found that the version of my library is being pulled from Github upon every release by setuptools_scm. The developers of setuptools_scm provide some tips on retrieving package version at runtime:
https://github.com/pypa/setuptools_scm#retrieving-package-version-at-runtime
Looks like using importlib.metadata is the way to go.
Get the current version of the current package in Python
I am building a Python library, and inside one of its modules I need to get the current version of this same library and make decisions based on it. Is this possible in Python? What do you think is the best approach?
[ "You should use dir(package) to see all the possible options for that package. If there is __version__, you can use that.\n", "I found that the version of my library is being pulled from Github upon every release by setuptools_scm. The developers of setuptools_scm provide some tips on retrieving package version at runtime:\nhttps://github.com/pypa/setuptools_scm#retrieving-package-version-at-runtime\nLooks like using importlib.metadata is the way to go.\n" ]
[ 0, 0 ]
[]
[]
[ "python", "python_3.x", "version" ]
stackoverflow_0074623054_python_python_3.x_version.txt
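To make the importlib.metadata pointer in the second answer concrete, here is a minimal sketch. It assumes Python 3.8+ and uses "mypackage" as a placeholder for the library's distribution name, which is not given in the original question:

from importlib.metadata import PackageNotFoundError, version

try:
    current = version("mypackage")  # the name the package is installed under
except PackageNotFoundError:
    current = "0.0.0"  # fallback when running from an uninstalled source tree

# Example of branching on the version from inside the library itself
major = int(current.split(".")[0])
if major >= 2:
    print(f"Running {current}: using v2 behaviour")
else:
    print(f"Running {current}: using legacy behaviour")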
Q: Google colab: downloading dataframes as csv or excel or google sheets I am trying to save a dataframe from my Google Colab code to a CSV, Excel file, or Google Sheet so that I can work with it in that form. I have been able to download it; however, the format in the Excel doc is off. I want three columns (year/mean/std), which I have in my dataframe. When I download it, it labels the columns, but the data doesn't fall into place and instead all stays in the first column.
image from google colab
image of csv

A: Because you are using tab separation. You should remove sep='\t' and download again.
Google colab: downloading dataframes as csv or excel or google sheets
I am trying to save a dataframe from my Google Colab code to a CSV, Excel file, or Google Sheet so that I can work with it in that form. I have been able to download it; however, the format in the Excel doc is off. I want three columns (year/mean/std), which I have in my dataframe. When I download it, it labels the columns, but the data doesn't fall into place and instead all stays in the first column.
image from google colab
image of csv
[ "Because you are using tab separation. You should remove sep='\\t' and download again.\n" ]
[ 0 ]
[]
[]
[ "export_to_csv", "google_colaboratory", "python" ]
stackoverflow_0074621626_export_to_csv_google_colaboratory_python.txt
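For reference, a comma-separated file is what Excel splits into columns by default. A minimal sketch of writing and downloading from Colab (the google.colab.files helper is Colab-specific; the sample dataframe mirrors the question's year/mean/std columns):

import pandas as pd
from google.colab import files

df = pd.DataFrame({"year": [2020, 2021], "mean": [1.2, 1.4], "std": [0.3, 0.2]})

# Default separator is ',' -- do not pass sep='\t' if Excel should split the columns
df.to_csv("results.csv", index=False)
files.download("results.csv")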
Q: VS Code / Pylance / Pylint Cannot resolve import The Summary I have a python import that works when run from the VS Code terminal, but that VS Code's editor is giving warnings about. Also, "Go to Definition" doesn't work. The Problem I have created a docker container from the image tensorflow/tensorflow:1.15.2-py3, then attach to it using VS Code's "Remote- Containers" extension. Then I've created the following file in the container. main.py: import tensorflow.compat.v1 as tf print(tf.__version__) This runs fine in the VS Code terminal, but the Editor and the Problems pane both give me an unresolved import 'tensorflow.compat' warning. Also "Go to Definition" doesn't work on tf.__version__. I'm using several extensions but I believe the relevant ones are the Microsoft Python extension (installed in the container), as well as the Remote - Containers extension, and now the Pylance extension (installed in the container). The Things I've Tried I've tried this with the default pylint, and then also after installing pylance with similar results. I've also seen some docs about similar issues, but they were related to setting the correct source folder location for modules that were part of a project. In contrast, my code within my project seems to work fine with imports/go-to-definition. It's external libraries that don't seem to work. Also, for the sake of this minimal example, I've attached to the container as root, so I am guessing it's not an issue of elevated permissions. I've also tried disabling all the extensions except the following, but got the same results: Remote - Containers (local) Remote - WSL (local) Python (on container) Jupyter (on container, required by Python for some reason) All the extensions above are on the latest versions. I've also fiddled around with setting python.autocomplete.extraPaths, but I'm not sure what the right path is. It also seems like the wrong thing to have to add libraries to the path that are installed in the global python installation, especially since I'm not using a virtual environment (it being in a docker container and all). The Question How do I fix VS Code so that it recognizes this import and I can use "Go to Definition" to explore these tensorflow functions/classes/etc? A: tldr; TensorFlow defines some of its modules in a way that pylint & pylance aren't able to recognize. These errors don't necessarily indicate an incorrect setup. To Fix: pylint: The pylint warnings are safely ignored. Intellisense: The best way I know of at the moment to fix Intellisense is to replace the imports with the modules they are aliasing (found by importing alias in a repl as x then running help(x)). Because the target of the alias in my case is an internal name, you probably don't want to check in these changes to source control. Not ideal. Details Regarding the linting: It seems that tensorflow defines its modules in a way that the tools can't understand. Also, it appears that the package is an alias of some kind to another package. For example: import tensorflow.compat.v1 as tf tf.estimator.RunConfig() The above code gives the pylint warning and breaks intellisense. But if you manually import the above in a REPL and run help(tf), it shows you the below package, which you can use instead: import tensorflow_core._api.v1.compat.v1 as tf tf.estimator.RunConfig() This second example does not cause the pylint warning. Also the Intellisense features (Go to definition, Ctrl+Click, etc) work with this second example. 
However, based on the _api, it looks like that second package name is an internal namespace, so I'm guessing it is probably best to only use this internal name for local debugging. Confirmation/Tickets pylint: I've found a ticket about pylint having issues with a couple tensorflow imports that looks related. Intellisense: I've opened a ticket with pylance. A: So for me I was trying to import pandas as pd but I got the error "pd" is not accessedPylance (module) pd SO what I did was reload the extension Python IntelliSense(Pylance) and that solved my issue. A: I had the same problem but with all kinds of packages. My solution was to go to the VSCode settings and search for "python.analysis.extraPaths", and add the path to your site-packages. In my case, I added C:\Code\Python39\Lib\site-packages, and now it's working fine. A: What, usually, solves the pylance issues for me is pointing my Python interpreter to the virtualenv one. Open the command palette Ctrl + Shift + P Type: Python: Select Interpreter It will show a list of all the python Interpreters it actually detects: Select Enter interpreter path Type in the path to your local venv/bin folder or click find to navigate using the file explorer. Your path should look something like: venv/bin/python3.9 A: i changed "import tensorflow as tf" to "from tensorflow import compat as tf" it ll even work for tf.gfile.Gfile()
VS Code / Pylance / Pylint Cannot resolve import
The Summary
I have a python import that works when run from the VS Code terminal, but that VS Code's editor is giving warnings about. Also, "Go to Definition" doesn't work.
The Problem
I have created a docker container from the image tensorflow/tensorflow:1.15.2-py3, then attach to it using VS Code's "Remote- Containers" extension. Then I've created the following file in the container.
main.py:
import tensorflow.compat.v1 as tf
print(tf.__version__)

This runs fine in the VS Code terminal, but the Editor and the Problems pane both give me an unresolved import 'tensorflow.compat' warning. Also "Go to Definition" doesn't work on tf.__version__.
I'm using several extensions but I believe the relevant ones are the Microsoft Python extension (installed in the container), as well as the Remote - Containers extension, and now the Pylance extension (installed in the container).
The Things I've Tried
I've tried this with the default pylint, and then also after installing pylance with similar results. I've also seen some docs about similar issues, but they were related to setting the correct source folder location for modules that were part of a project. In contrast, my code within my project seems to work fine with imports/go-to-definition. It's external libraries that don't seem to work.
Also, for the sake of this minimal example, I've attached to the container as root, so I am guessing it's not an issue of elevated permissions.
I've also tried disabling all the extensions except the following, but got the same results:
Remote - Containers (local)
Remote - WSL (local)
Python (on container)
Jupyter (on container, required by Python for some reason)
All the extensions above are on the latest versions. I've also fiddled around with setting python.autocomplete.extraPaths, but I'm not sure what the right path is. It also seems like the wrong thing to have to add libraries to the path that are installed in the global python installation, especially since I'm not using a virtual environment (it being in a docker container and all).
The Question
How do I fix VS Code so that it recognizes this import and I can use "Go to Definition" to explore these tensorflow functions/classes/etc?
[ "tldr;\nTensorFlow defines some of its modules in a way that pylint & pylance aren't able to recognize. These errors don't necessarily indicate an incorrect setup.\nTo Fix:\n\npylint: The pylint warnings are safely ignored.\nIntellisense: The best way I know of at the moment to fix Intellisense is to replace the imports with the modules they are aliasing (found by importing alias in a repl as x then running help(x)). Because the target of the alias in my case is an internal name, you probably don't want to check in these changes to source control. Not ideal.\n\nDetails\nRegarding the linting: It seems that tensorflow defines its modules in a way that the tools can't understand. Also, it appears that the package is an alias of some kind to another package. For example:\nimport tensorflow.compat.v1 as tf\ntf.estimator.RunConfig()\n\nThe above code gives the pylint warning and breaks intellisense. But if you manually import the above in a REPL and run help(tf), it shows you the below package, which you can use instead:\nimport tensorflow_core._api.v1.compat.v1 as tf\ntf.estimator.RunConfig()\n\nThis second example does not cause the pylint warning. Also the Intellisense features (Go to definition, Ctrl+Click, etc) work with this second example.\nHowever, based on the _api, it looks like that second package name is an internal namespace, so I'm guessing it is probably best to only use this internal name for local debugging.\nConfirmation/Tickets\n\npylint: I've found a ticket about pylint having issues with a couple tensorflow imports that looks related.\nIntellisense: I've opened a ticket with pylance.\n\n", "So for me I was trying to\nimport pandas as pd\n\nbut I got the error\n\"pd\" is not accessedPylance (module) pd\n\nSO what I did was reload the extension Python IntelliSense(Pylance) and that solved my issue.\n", "I had the same problem but with all kinds of packages.\nMy solution was to go to the VSCode settings and search for \"python.analysis.extraPaths\", and add the path to your site-packages.\nIn my case, I added C:\\Code\\Python39\\Lib\\site-packages, and now it's working fine.\n", "What, usually, solves the pylance issues for me is pointing my Python interpreter to the virtualenv one.\nOpen the command palette Ctrl + Shift + P\nType: Python: Select Interpreter\nIt will show a list of all the python Interpreters it actually detects:\n\nSelect Enter interpreter path\nType in the path to your local venv/bin folder or click find to navigate using the file explorer.\nYour path should look something like:\nvenv/bin/python3.9\n", "i changed \"import tensorflow as tf\" to \"from tensorflow import compat as tf\"\nit ll even work for tf.gfile.Gfile()\n" ]
[ 7, 3, 1, 1, 0 ]
[]
[]
[ "pylance", "pylint", "python", "tensorflow", "visual_studio_code" ]
stackoverflow_0065271399_pylance_pylint_python_tensorflow_visual_studio_code.txt
Q: The Python Tools server crashed 5 times in the last 3 minutes. The server will not be restarted The Python server crashes unexpectedly. I am able to run in debug mode, but linting is not working. Could anybody help me out, please?

A: I had a similar issue, which I resolved based on this answer. What I found was opening the folders individually seemed to work fine, but if I opened the workspace it caused the issues.
Deleting the workspace and creating a new one seemed to solve the issue.

A: If anyone is facing this issue in the containers then this did the trick for me:
Run the following the command inside the container:
rm -rf ~/.vscode-server/

Reopen vscode
It removes all the cached data for that particular container.

A: I had turned on "Python: Pylance Lsp Notebooks Enabled" setting. Turning it off did the trick.

A: I was able to resolve this by following the steps to reinstall the Python extension found here: https://github.com/microsoft/vscode-python/issues/13679#issuecomment-683899458
Copying steps here:
Uninstall Python extension (if you have pylance uninstall it first).
Close all instances of VS Code.
Go to, %USERPROFILE%/.vscode/extensions (on windows) or ~/.vscode/extensions on Linux/Mac.
Delete any folder with the name starting with ms-python.python*
Start VS Code, and install Python extension (also pylance if you uninstalled it in step 1).
The Python Tools server crashed 5 times in the last 3 minutes. The server will not be restarted
The Python server crashes unexpectedly. I am able to run in debug mode, but linting is not working. Could anybody help me out, please?
[ "I had a similar issue, which I resolved based on this answer. What I found was opening the folders individually seemed to work fine, but if I opened the workspace it caused the issues.\nDeleting the workspace and creating a new one seemed to solve the issue.\n", "If anyone is facing this issue in the containers then this did the trick for me:\nRun the following the command inside the container:\nrm -rf ~/.vscode-server/\n\nReopen vscode\nIt removes all the cached data for that particular container.\n", "I had turned on \"Python: Pylance Lsp Notebooks Enabled\" setting. Turning it off did the trick.\n", "I was able to resolve this by following the steps to reinstall the Python extension found here: https://github.com/microsoft/vscode-python/issues/13679#issuecomment-683899458\nCopying steps here:\n\n\nUninstall Python extension (if you have pylance uninstall it first).\\\nClose all instances of VS Code.\\\nGo to, %USERPROFILE%/.vscode/extensions (on windows) or ~/.vscode/extensions on Linux/Mac.\\\nDelete any folder with the name starting with ms-python.python*\\\nStart VS Code, and install Python extension (also pylance if you uninstalled it in step 1).\n\n\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "python", "visual_studio_code", "vscode_extensions", "vscode_settings" ]
stackoverflow_0068783077_python_visual_studio_code_vscode_extensions_vscode_settings.txt
Q: How to correctly write a merge sort algorithm with the use of a temporary list I'm writing a merge sort with recursion, but it doesn't print out the correct result; however, when I hand-trace the code, it seems right. Could anyone help me find out why?

def mergeSort1(arr):
    if len(arr)<=1: #base case
        return arr
    else :
        breakN =len(arr)//2
        left = arr[:breakN]
        right = arr[breakN:]
        mergeSort1(left)
        mergeSort1(right)
        i=j=0
        temp = []
        while i<len(left) and j<len(right):
            if left[i] <= right[j]:
                temp.append(left[i])
                i += 1
            else:
                temp.append(right[j])
                j += 1
        while i < len(left): # extend the list in case there's any missing
            temp.append(left[i])
            i += 1
        while j < len(right):
            temp.append(right[j])
            j += 1
        #print(temp)
        return temp

Code to get the result:
arr = [9,7,3,6,2]
mergeSort1(arr)
print(arr)

and the result:
[9, 7, 3, 6, 2]

I then looked at the code from other people and found the problem might lie in temp[], so I added a print(temp) at the end of the else statement (see the code above), and it prints out the following:
[7, 9]
[2, 6]
[3, 6, 2]
[3, 6, 2, 9, 7]

The first and second outputs are what I want; could anyone please help me find out why?

A: Since you are not modifying the input list arr in-place and are returning a new list temp instead, you should assign the returning value of the function to a variable.
Change:
mergeSort1(left)
mergeSort1(right)

to:
left = mergeSort1(left)
right = mergeSort1(right)

And change:
mergeSort1(arr)
print(arr)

to:
print(mergeSort1(arr))

Alternatively, you can modify the input list in-place, in which case you can assign temp back to arr in-place using slice assignment.
Change:
return temp

to:
arr[:] = temp
How to correctly write a merge sort algorithm with the use of a temporary list
I'm writing a merge sort with recursion, but it doesn't print out the correct result; however, when I hand-trace the code, it seems right. Could anyone help me find out why?

def mergeSort1(arr):
    if len(arr)<=1: #base case
        return arr
    else :
        breakN =len(arr)//2
        left = arr[:breakN]
        right = arr[breakN:]
        mergeSort1(left)
        mergeSort1(right)
        i=j=0
        temp = []
        while i<len(left) and j<len(right):
            if left[i] <= right[j]:
                temp.append(left[i])
                i += 1
            else:
                temp.append(right[j])
                j += 1
        while i < len(left): # extend the list in case there's any missing
            temp.append(left[i])
            i += 1
        while j < len(right):
            temp.append(right[j])
            j += 1
        #print(temp)
        return temp

Code to get the result:
arr = [9,7,3,6,2]
mergeSort1(arr)
print(arr)

and the result:
[9, 7, 3, 6, 2]

I then looked at the code from other people and found the problem might lie in temp[], so I added a print(temp) at the end of the else statement (see the code above), and it prints out the following:
[7, 9]
[2, 6]
[3, 6, 2]
[3, 6, 2, 9, 7]

The first and second outputs are what I want; could anyone please help me find out why?
[ "Since you are not modifying the input list arr in-place and are returning a new list temp instead, you should assign the returning value of the function to a variable.\nChange:\nmergeSort1(left)\nmergeSort1(right)\n\nto:\nleft = mergeSort1(left)\nright = mergeSort1(right)\n\nAnd change:\nmergeSort1(arr)\nprint(arr)\n\nto:\nprint(mergeSort1(arr))\n\nAlternatively, you can modify the input list in-place, in which case you can assign temp back to arr in-place using slice assignment.\nChange:\nreturn temp\n\nto:\narr[:] = temp\n\n" ]
[ 0 ]
[]
[]
[ "data_structures", "mergesort", "python", "recursion" ]
stackoverflow_0074623297_data_structures_mergesort_python_recursion.txt
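Tying the answer's two suggestions together, here is a sketch of the in-place variant: the recursive results are captured in variables, and temp is written back with slice assignment so the caller's list really is sorted. It follows the question's structure but is not the only way to write it.

def merge_sort(arr):
    if len(arr) <= 1:  # base case
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])    # capture the returned sorted halves
    right = merge_sort(arr[mid:])
    temp = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            temp.append(left[i])
            i += 1
        else:
            temp.append(right[j])
            j += 1
    temp.extend(left[i:])   # append whichever half still has elements
    temp.extend(right[j:])
    arr[:] = temp           # slice assignment mutates the caller's list
    return arr

arr = [9, 7, 3, 6, 2]
merge_sort(arr)
print(arr)  # [2, 3, 6, 7, 9]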
Q: Solving Canadian Computing Competition: "24" I am trying to solve the problem above (here is the link: https://dmoj.ca/problem/ccc08s4) using Python but am running into difficulties. The problem asks to determine if 4 numbers, when multiplied, subtracted, added, or divided, can yield 24. Parentheses are also allowed to specify precedence. If such a value is not possible, then output the greatest number less than 24.
My solution was to create all permutations of the 4 numbers and then the permutations of all the operations (multiplication, division, subtraction, and addition) in sets of 3, since you can only have 3 operations at one time. Then I would loop over the numbers and apply all the operations, yielding all possibilities, but this would be incorrect since I did not incorporate parentheses. That is where I am struggling; I am not really sure how to incorporate parentheses. I feel like I am overcomplicating the problem, and the only solution I can think of is finding permutations of multiplication, division, subtraction, addition, (, and ), but this would still be incorrect since some equations can have more than one set of parentheses.
I am really new to competitive programming and I feel like I am missing something obvious. I would love some feedback and help.

A: One way is to consider the postfix expression instead of the infix expression.
E.g, for the infix expression (A+B)*(C-D), we'll consider the postfix expression AB+CD-* instead.
It can be observed that there will be no need for parenthesis in postfix expression, which makes them more friendly to machine.
So you just need to generate all valid postfix expression and evaluate them, for example by using the method here: https://www.geeksforgeeks.org/stack-set-4-evaluation-postfix-expression/
Another more implementation-friendly way is to use recursion to generate all possible order of operation.
Solving Canadian Computing Competition: "24"
I am trying to solve the problem above (here is the link: https://dmoj.ca/problem/ccc08s4) using Python but am running into difficulties. The problem asks to determine if 4 numbers, when multiplied, subtracted, added, or divided, can yield 24. Parentheses are also allowed to specify precedence. If such a value is not possible, then output the greatest number less than 24.
My solution was to create all permutations of the 4 numbers and then the permutations of all the operations (multiplication, division, subtraction, and addition) in sets of 3, since you can only have 3 operations at one time. Then I would loop over the numbers and apply all the operations, yielding all possibilities, but this would be incorrect since I did not incorporate parentheses. That is where I am struggling; I am not really sure how to incorporate parentheses. I feel like I am overcomplicating the problem, and the only solution I can think of is finding permutations of multiplication, division, subtraction, addition, (, and ), but this would still be incorrect since some equations can have more than one set of parentheses.
I am really new to competitive programming and I feel like I am missing something obvious. I would love some feedback and help.
[ "One way is to consider the postfix expression instead of the infix expression.\nE.g, for the infix expression (A+B)*(C-D), we'll consider the postfix expression AB+CD-* instead.\nIt can be observed that there will be no need for parenthesis in postfix expression, which makes them more friendly to machine.\nSo you just need to generate all valid postfix expression and evaluate them, for example by using the method here: https://www.geeksforgeeks.org/stack-set-4-evaluation-postfix-expression/\nAnother more implementation-friendly way is to use recursion to generate all possible order of operation.\n" ]
[ 1 ]
[]
[]
[ "algorithm", "python", "python_3.x" ]
stackoverflow_0074621629_algorithm_python_python_3.x.txt
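To illustrate the recursion suggestion at the end of the answer, here is a brute-force sketch: repeatedly pick two of the remaining numbers, combine them with one operation, and recurse on the smaller collection. Every parenthesisation is covered implicitly, with no bracket strings to generate. It uses exact fractions and assumes fractional intermediate values are allowed; check the contest statement for its exact rules and I/O format.

from fractions import Fraction

def reachable(nums):
    """Every value obtainable by combining all of nums with +, -, *, /."""
    if len(nums) == 1:
        return {nums[0]}
    results = set()
    for i in range(len(nums)):
        for j in range(len(nums)):
            if i == j:
                continue
            rest = [nums[k] for k in range(len(nums)) if k not in (i, j)]
            a, b = nums[i], nums[j]
            combos = [a + b, a - b, a * b]
            if b != 0:
                combos.append(a / b)
            for c in combos:
                results |= reachable(rest + [c])
    return results

def best(cards):
    values = reachable([Fraction(c) for c in cards])
    if 24 in values:
        return 24
    return max(v for v in values if v < 24)

print(best([3, 3, 8, 8]))  # 24, via 8 / (3 - 8/3)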
Q: Python Openpyxl - Append many excel files into 1 file I have 10 Excel files (they have the same number of columns and varying numbers of rows). I need to append the data from those 10 files into one single Excel file using the Openpyxl Python library:

Read data from File1, append it to new_file
Read data from File2, append it to new_file
...

Is this possible? Can anyone help me? Thank you

A: There are some missing details in the question, as raised by @moken. Let's make some assumptions that all files have a single sheet named 'Sheet 1' and identical column headers. And the final output will start with file10's content, then file9 etc and we will skip copying the column headers.
For the sake of simplicity, we will use 3 files' content for illustration:

file1.xlsx:
col_1  col_2  col_3
F1-1   F1-2   F1-3

file2.xlsx:
col_1  col_2  col_3
F2-1   F2-2   F2-3
F2-2   F2-3   F2-4

file3.xlsx:
col_1  col_2  col_3
F3-1   F3-2   F3-3
F3-2   F3-3   F3-4
F3-3   F3-4   F3-5

The code is rather straightforward, where we get all rows from the current file and append row by row to the main workbook:

from openpyxl import load_workbook
main_workbook = load_workbook(filename="file3.xlsx")
file_list = ["file2.xlsx","file1.xlsx"]

for file in file_list:
    workbook = load_workbook(filename=file)
    new_rows = list(workbook['Sheet1'].values)
    for idx,row in enumerate(new_rows):
        # skip column header
        if idx == 0: continue
        main_workbook['Sheet1'].append(row)
    workbook.close()
main_workbook.save("merged.xlsx")

The final output would have rows with the following values:

>>> list(main_workbook['Sheet1'].values)
[('col_1', 'col_2', 'col_3'),
 ('F3-1', 'F3-2', 'F3-3'),
 ('F3-2', 'F3-3', 'F3-4'),
 ('F3-3', 'F3-4', 'F3-5'),
 ('F2-1', 'F2-2', 'F2-3'),
 ('F2-2', 'F2-3', 'F2-4'),
 ('F1-1', 'F1-2', 'F1-3')]
Python Openpyxl - Append many excel files into 1 file
I have 10 Excel files (they have the same number of columns and varying numbers of rows). I need to append the data from those 10 files into one single Excel file using the Openpyxl Python library:

Read data from File1, append it to new_file
Read data from File2, append it to new_file
...

Is this possible? Can anyone help me? Thank you
[ "There are some missing details in the question, as raised by @moken. Let's make some assumptions that all files have a single sheet named 'Sheet 1' and identical column headers. And the final output will start with file10's content, then file9 etc and we will skip copying the column headers.\nFor the sake of simplicity, we will use 3 files' content for illustration:\nfile1.xlsx:\n\n\n\n\ncol_1\ncol_2\ncol_3\n\n\n\n\nF1-1\nF1-2\nF1-3\n\n\n\n\nfile2.xlsx:\n\n\n\n\ncol_1\ncol_2\ncol_3\n\n\n\n\nF2-1\nF2-2\nF2-3\n\n\nF2-2\nF2-3\nF2-4\n\n\n\n\nfile3.xlsx:\n\n\n\n\ncol_1\ncol_2\ncol_3\n\n\n\n\nF3-1\nF3-2\nF3-3\n\n\nF3-2\nF3-3\nF3-4\n\n\nF3-3\nF3-4\nF3-5\n\n\n\n\nThe code is rather straightforward, where we get all rows from the current file and append row by row to the main workbook:\nfrom openpyxl import load_workbook\nmain_workbook = load_workbook(filename=\"file3.xlsx\")\nfile_list = [\"file2.xlsx\",\"file1.xlsx\"]\n\nfor file in file_list:\n workbook = load_workbook(filename=file)\n new_rows = list(workbook['Sheet1'].values)\n for idx,row in enumerate(new_rows):\n # skip column header\n if idx == 0: continue\n main_workbook['Sheet1'].append(row)\n workbook.close()\nmain_workbook.save(\"merged.xlsx\")\n\nThe final output would have rows with the following values:\n>>> list(main_workbook['Sheet1'].values)\n[('col_1', 'col_2', 'col_3'),\n ('F3-1', 'F3-2', 'F3-3'),\n ('F3-2', 'F3-3', 'F3-4'),\n ('F3-3', 'F3-4', 'F3-5'),\n ('F2-1', 'F2-2', 'F2-3'),\n ('F2-2', 'F2-3', 'F2-4'),\n ('F1-1', 'F1-2', 'F1-3')]\n\n" ]
[ 1 ]
[]
[]
[ "excel", "openpyxl", "python" ]
stackoverflow_0074621496_excel_openpyxl_python.txt
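Generalising the loop above to the question's ten files, a sketch using glob; the file names and the 'Sheet1' sheet name are assumptions carried over from the answer:

import glob
from openpyxl import load_workbook

# Lexicographic order (file1, file10, file2, ...); sort numerically if order matters
files = sorted(glob.glob("file*.xlsx"))

main_workbook = load_workbook(filename=files[0])
for file in files[1:]:
    workbook = load_workbook(filename=file)
    rows = list(workbook["Sheet1"].values)
    for row in rows[1:]:  # skip each file's header row
        main_workbook["Sheet1"].append(row)
    workbook.close()

main_workbook.save("merged.xlsx")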
Q: Unable to perform join in mongodb using a package and code specified in https://pypi.org/project/mongojoin/ Unable to execute $lookup in MongoDB. I need to perform a join in MongoDB using Python, but the code and package specified in https://pypi.org/project/mongojoin/ are not working. Also, can $lookup be run from the mongo shell, and if yes, how?
I am using the following code:

from mongojoin.mongojoin import MongoJoin, MongoCollection

c = MongoCollection("test", "c", ["name", "add", "prod_id"], {prod_id : "123456"})
p = MongoCollection("test", "p", [ "prod_id", "pname", "pcost",], {prod_id : "123456"})
aggregator = MongoJoin(c, p, ["prod_id"])
result = aggregator.inner()
print(result)

Getting the following error:

Traceback (most recent call last):
  File "E:\nm21\jd1.py", line 1, in <module>
    from mongojoin.mongojoin import MongoJoin, MongoCollection
  File "C:\Python\Python39\lib\site-packages\mongojoin\mongojoin.py", line 10, in <module>
    from processdata import CollectionsProcessedData
ModuleNotFoundError: No module named 'processdata'

A: I have converted the cursors to lists then appended one list with another.
It worked...

A: enter image description here
Download the zip file of mongojoin from GIT and paste the files in same loaction of the file from where you are trying to import the mongojoin packages.
use Imports as Following:
from mongojoin import MongoJoin
from mongojoin import MongoCollection
As for example mongoread_Q is the file from which i am trying join and init.py, mongocollection.py, processdata.py are files from MongoCollection packages extracted in same Location.
Unable to perform join in mongodb using a package and code specified in https://pypi.org/project/mongojoin/
Unable to execute $lookup in MongoDB. I need to perform a join in MongoDB using Python, but the code and package specified in https://pypi.org/project/mongojoin/ are not working. Also, can $lookup be run from the mongo shell, and if yes, how?
I am using the following code:

from mongojoin.mongojoin import MongoJoin, MongoCollection

c = MongoCollection("test", "c", ["name", "add", "prod_id"], {prod_id : "123456"})
p = MongoCollection("test", "p", [ "prod_id", "pname", "pcost",], {prod_id : "123456"})
aggregator = MongoJoin(c, p, ["prod_id"])
result = aggregator.inner()
print(result)

Getting the following error:

Traceback (most recent call last):
  File "E:\nm21\jd1.py", line 1, in <module>
    from mongojoin.mongojoin import MongoJoin, MongoCollection
  File "C:\Python\Python39\lib\site-packages\mongojoin\mongojoin.py", line 10, in <module>
    from processdata import CollectionsProcessedData
ModuleNotFoundError: No module named 'processdata'
[ "I have converted the cursors to lists then appended one list with another.\nIt worked...\n", "enter image description here\nDownload the zip file of mongojoin from GIT and paste the files in same loaction of the file from where you are trying to import the mongojoin packages.\nuse Imports as Following:\n**\n\nfrom mongojoin import MongoJoin\nfrom mongojoin import MongoCollection\n\n**\nAs for example mongoread_Q is the file from which i am trying join and init.py, mongocollection.py , processdata.py are files from MongoCollection packages extracted in same Location.\n" ]
[ 0, 0 ]
[ "Update the below changes to mongo.py library after installing \"pip install process-data & pip install sklearn\"\n\"from process_data.setup.collections import CollectionsProcessedData\"\n\n" ]
[ -1 ]
[ "join", "lookup", "mongodb", "python" ]
stackoverflow_0064872133_join_lookup_mongodb_python.txt
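On the part of the question neither answer addresses: $lookup is an ordinary aggregation stage, so it runs both in the mongo shell (db.c.aggregate([...])) and from Python through PyMongo, with no third-party join package required. A sketch against the "test" database and the "c"/"p" collections named in the question; the field names are taken from the question's code and are otherwise assumptions:

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client["test"]

pipeline = [
    {"$match": {"prod_id": "123456"}},  # filter "c" before joining
    {"$lookup": {
        "from": "p",             # collection to join against
        "localField": "prod_id",
        "foreignField": "prod_id",
        "as": "product",         # matching "p" documents land in this array field
    }},
]

for doc in db["c"].aggregate(pipeline):
    print(doc)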
Q: Check if an array contains values from a list and add list as columns I have a data_frame as below:

Id  Col1
1   [["A", "B", "E", "F"]]
2   [["A", "D", "E"]]

I have a list: ["A", "B", "C"]
I would like to add the elements in the list as columns and check whether they exist in Col1 or not. So my expected output will be like:

Id  Col1                    A  B  C
1   [["A", "B", "E", "F"]]  1  1  0
2   [["A", "D", "E"]]       1  0  0

I tried the code below to check if any one of the values in the list exists in Col1, but I am not sure how to do that for each of the values in the list.

list_exist = data_frame.withColumn("list",F.array([F.lit(i) for i in list]))\
                       .withColumn("list_exist",F.arrays_overlap(F.col("Col1"),F.col("list")))\
                       .drop("list")

I'm new to PySpark so any help is much appreciated. Thanks!

A: This can be achieved using a list comprehension.

ls = ["A", "B", "C"]
...
df = df.select('*', *[F.array_contains('col1', c).cast('int').alias(c) for c in ls])
Check if an array contains values from a list and add list as columns
I have a data_frame as below:

Id  Col1
1   [["A", "B", "E", "F"]]
2   [["A", "D", "E"]]

I have a list: ["A", "B", "C"]
I would like to add the elements in the list as columns and check whether they exist in Col1 or not. So my expected output will be like:

Id  Col1                    A  B  C
1   [["A", "B", "E", "F"]]  1  1  0
2   [["A", "D", "E"]]       1  0  0

I tried the code below to check if any one of the values in the list exists in Col1, but I am not sure how to do that for each of the values in the list.

list_exist = data_frame.withColumn("list",F.array([F.lit(i) for i in list]))\
                       .withColumn("list_exist",F.arrays_overlap(F.col("Col1"),F.col("list")))\
                       .drop("list")

I'm new to PySpark so any help is much appreciated. Thanks!
[ "This can be achieved using a list comprehension.\n\nls = [\"A\", \"B\", \"C\"]\n...\ndf = df.select('*', *[F.array_contains('col1', c).cast('int').alias(c) for c in ls])\n\n" ]
[ 0 ]
[]
[]
[ "apache_spark_sql", "arrays", "pyspark", "python" ]
stackoverflow_0074623329_apache_spark_sql_arrays_pyspark_python.txt
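A self-contained version of the answer's one-liner, with sample rows matching the question (Col1 is assumed to be a flat array of strings):

from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame(
    [(1, ["A", "B", "E", "F"]), (2, ["A", "D", "E"])],
    ["Id", "Col1"],
)

ls = ["A", "B", "C"]

# One indicator column per list element: 1 if it appears in Col1, else 0
df = df.select("*", *[F.array_contains("Col1", c).cast("int").alias(c) for c in ls])
df.show()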
Q: If column header = specific value condition I am trying to create a condition where, if the column headers in my dataframe are equal to

Unnamed: 0  VALUE  VALUE.1  VALUE.2

then I want to drop the first two rows and rename the headers.

Unnamed: 0  VALUE    VALUE.1   VALUE.2
Name        Hobbies  Dislikes  Favorite Color
Ben         NaN      NaN       NaN
Alex        NaN      Running   Red
Mike        NaN      Cartoons  Blue
Mark        NaN      Pizza     Yellow

I know I can do df = df.drop([0,1]), but I need it to be conditional. I tried doing:

if df.columns = {"Unnamed: 0", "VALUE", "VALUE.1", "VALUE.2"}:
    df = df.drop([0,1])
    df = df.rename(columns={"Unnamed: 0": "Name", "VALUE": "Hobbies", "VALUE.1": "Dislikes", "VALUE.2": "Favorite Color"})

but I'm running into a syntax error where I am trying to create a condition with my column names. Any clue how to fix this?

A: Firstly, you only need to drop row 0 cause columns is not a row.
Then the == should be used in the if statement, and it's a list comparison, so add .all()

import pandas as pd

df = pd.DataFrame(columns=["Unnamed: 0", "VALUE", "VALUE.1", "VALUE.2"])
df.loc[0] = ['Name', 'Hobbies', 'Dislikes', 'Favorite Color']
df.loc[1] = ['Ben', None, None, None]

print(df)

if (df.columns == ["Unnamed: 0", "VALUE", "VALUE.1", "VALUE.2"]).all():
    df = df.drop([0])
    df.columns = ['Name', 'Hobbies', 'Dislikes', 'Favorite Color']
print()
print(df)

output:

  Unnamed: 0    VALUE   VALUE.1         VALUE.2
0       Name  Hobbies  Dislikes  Favorite Color
1        Ben     None      None            None

  Name Hobbies Dislikes Favorite Color
1  Ben    None     None           None

A: try this:

cols = pd.Index(['Unnamed:0', 'VALUE', 'VALUE.1', 'VALUE.2'])
if df.columns.equals(cols):
    df = df.set_axis(df.iloc[0], axis=1).iloc[1:]
print(df)
>>>
  Name Hobbies Dislikes Favorite Color
1  Ben   NaN      NaN      NaN           None
2  Alex  NaN      Running  Red           None
3  Mike  NaN      Cartoons Blue          None
4  Mark  NaN      Pizza    Yellow        None

A: There is a ':' at the end of the first line.
and the indentation
apart from that the code shouldn't give any syntax error
If column header = specific value condition
I am trying to create a condition where, if the column headers in my dataframe are equal to

Unnamed: 0  VALUE  VALUE.1  VALUE.2

then I want to drop the first two rows and rename the headers.

Unnamed: 0  VALUE    VALUE.1   VALUE.2
Name        Hobbies  Dislikes  Favorite Color
Ben         NaN      NaN       NaN
Alex        NaN      Running   Red
Mike        NaN      Cartoons  Blue
Mark        NaN      Pizza     Yellow

I know I can do df = df.drop([0,1]), but I need it to be conditional. I tried doing:

if df.columns = {"Unnamed: 0", "VALUE", "VALUE.1", "VALUE.2"}:
    df = df.drop([0,1])
    df = df.rename(columns={"Unnamed: 0": "Name", "VALUE": "Hobbies", "VALUE.1": "Dislikes", "VALUE.2": "Favorite Color"})

but I'm running into a syntax error where I am trying to create a condition with my column names. Any clue how to fix this?
[ "Firstly, you only need to drop row 0 cause columns is not a row.\nThen the == should be used in the if statement, and it's a list comparison, so add .all()\nimport pandas as pd\n\ndf = pd.DataFrame(columns=[\"Unnamed: 0\", \"VALUE\", \"VALUE.1\", \"VALUE.2\"])\ndf.loc[0] = ['Name', 'Hobbies', 'Dislikes', 'Favorite Color']\ndf.loc[1] = ['Ben', None, None, None]\n\nprint(df)\n\nif (df.columns == [\"Unnamed: 0\", \"VALUE\", \"VALUE.1\", \"VALUE.2\"]).all():\n df = df.drop([0])\n df.columns = ['Name', 'Hobbies', 'Dislikes', 'Favorite Color']\nprint()\nprint(df)\n\noutput:\n Unnamed: 0 VALUE VALUE.1 VALUE.2\n0 Name Hobbies Dislikes Favorite Color\n1 Ben None None None\n\n Name Hobbies Dislikes Favorite Color\n1 Ben None None None\n\n", "try this:\ncols = pd.Index(['Unnamed:0', 'VALUE', 'VALUE.1', 'VALUE.2'])\nif df.columns.equals(cols):\n df = df.set_axis(df.iloc[0], axis=1).iloc[1:]\nprint(df)\n>>>\n Name Hobbies Dislikes Favorite Color\n1 Ben NaN NaN NaN None\n2 Alex NaN Running Red None\n3 Mike NaN Cartoons Blue None\n4 Mark NaN Pizza Yellow None\n\n", "There is a ':' at the end of the first line.\nand the indentation\napart from that the code shouldn't give any syntax error\n" ]
[ 2, 2, 0 ]
[]
[]
[ "dataframe", "if_statement", "pandas", "python" ]
stackoverflow_0074623137_dataframe_if_statement_pandas_python.txt
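For completeness, the question's original intent (compare the headers, then drop the first row and rename) can be written with a plain list comparison, which sidesteps the invalid single = in the if. A sketch built on the question's data:

import pandas as pd

df = pd.DataFrame(
    [["Name", "Hobbies", "Dislikes", "Favorite Color"],
     ["Ben", None, None, None],
     ["Alex", None, "Running", "Red"]],
    columns=["Unnamed: 0", "VALUE", "VALUE.1", "VALUE.2"],
)

expected = ["Unnamed: 0", "VALUE", "VALUE.1", "VALUE.2"]
if df.columns.tolist() == expected:  # == compares; a single = is assignment
    df = (df.drop(index=0)
            .rename(columns={"Unnamed: 0": "Name",
                             "VALUE": "Hobbies",
                             "VALUE.1": "Dislikes",
                             "VALUE.2": "Favorite Color"}))
print(df)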