content
stringlengths 85
101k
| title
stringlengths 0
150
| question
stringlengths 15
48k
| answers
list | answers_scores
list | non_answers
list | non_answers_scores
list | tags
list | name
stringlengths 35
137
|
---|---|---|---|---|---|---|---|---|
Q:
Sprite animation works when moving right, but not when moving left
I'm making a simple game with Pygame. I have a character sprite which when moving right has all of the animations, but when moving left, the animations are not shown, instead it shows the default standing still animations as the character moves left across the screen. I have been looking at this code for an hour trying to figure out why this is happening and I can't put my finger on why, the moving left part of the code is exactly the same as the moving right part of the code, just loading different images, but it works when moving right and doesn't work when moving left.
Please note in the code below there are rect_stand, rect_right and rect_left, I'm pretty sure the rect_right and rect_left are not actually doing anything because the draw method only uses rect.stand, but I don't think that's the reason it's failing since before I animated walking left, I had walking right with a rect_right and this was fine, it's only in the left direction where it doesn't load the animations.
class Character(pygame.sprite.Sprite):
sprite_frame = 0
def __init__(self):
self.standing_right_frame1 = pygame.image.load("C:\\...\\character1_standing_facing_right_1.png")
self.standing_right_frame2 = pygame.image.load("C:\\...\\character1_standing_facing_right_2.png")
self.walking_right_frame1 = pygame.image.load("C:\\...\\character1_walking_right_1.png")
self.walking_right_frame2 = pygame.image.load("C:\\...\\character1_walking_right_2.png")
self.walking_right_frame3 = pygame.image.load("C:\\...\\character1_walking_right_3.png")
self.walking_right_frame4 = pygame.image.load("C:\\...\\character1_walking_right_4.png")
self.walking_right_frame5 = pygame.image.load("C:\\...\\character1_walking_right_5.png")
self.walking_right_frame6 = pygame.image.load("C:\\...\\character1_walking_right_6.png")
self.walking_right_frame7 = pygame.image.load("C:\\...\\character1_walking_right_7.png")
self.walking_right_frame8 = pygame.image.load("C:\\...\\character1_walking_right_8.png")
self.walking_right_frame9 = pygame.image.load("C:\\...\\character1_walking_right_9.png")
self.walking_right_frame10 = pygame.image.load("C:\\...\\character1_walking_right_10.png")
self.walking_left_frame1 = pygame.image.load("C:\\...\\character1_walking_left_1.png")
self.walking_left_frame2 = pygame.image.load("C:\\...\\character1_walking_left_2.png")
self.walking_left_frame3 = pygame.image.load("C:\\...\\character1_walking_left_3.png")
self.walking_left_frame4 = pygame.image.load("C:\\...\\character1_walking_left_4.png")
self.walking_left_frame5 = pygame.image.load("C:\\...\\character1_walking_left_5.png")
self.walking_left_frame6 = pygame.image.load("C:\\...\\character1_walking_left_6.png")
self.walking_left_frame7 = pygame.image.load("C:\\...\\character1_walking_left_7.png")
self.walking_left_frame8 = pygame.image.load("C:\\...\\character1_walking_left_8.png")
self.walking_left_frame9 = pygame.image.load("C:\\...\\character1_walking_left_9.png")
self.walking_left_frame10 = pygame.image.load("C:\\...\\character1_walking_left_10.png")
self.standing_right = [self.standing_right_frame1, self.standing_right_frame2]
self.walking_right = [self.walking_right_frame1, self.walking_right_frame2, self.walking_right_frame3, self.walking_right_frame4, self.walking_right_frame5, self.walking_right_frame6, self.walking_right_frame7, self.walking_right_frame8, self.walking_right_frame9, self.walking_right_frame10]
self.walking_left = [self.walking_left_frame1, self.walking_left_frame2, self.walking_left_frame3, self.walking_left_frame4, self.walking_left_frame5, self.walking_left_frame6, self.walking_left_frame7, self.walking_left_frame8, self.walking_left_frame9, self.walking_left_frame10]
self.character_sprite = self.standing_right
self.rect_stand = self.standing_right_frame1.get_rect()
self.rect_right = self.walking_right_frame1.get_rect()
self.rect_left = self.walking_left_frame1.get_rect()
self.rect_stand.center = (200, 300)
def character_standing_still(self):
sprite_index = self.sprite_frame // 15
if sprite_index >= len(self.standing_right):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.standing_right[sprite_index]
self.sprite_frame = self.sprite_frame + 1
def character_walking(self, direction = "Standing"):
if direction == "Left":
sprite_index = self.sprite_frame // 15
self.rect_stand.move_ip(-5, 0)
if sprite_index >= len(self.walking_left):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.walking_left[sprite_index]
self.sprite_frame = self.sprite_frame + 1
elif direction == "Right":
sprite_index = self.sprite_frame // 15
self.rect_stand.move_ip(5, 0)
if sprite_index >= len(self.walking_right):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.walking_right[sprite_index]
self.sprite_frame = self.sprite_frame + 1
else:
self.character_standing_still()
def update(self):
pressed_key = pygame.key.get_pressed()
if self.rect_stand.left > 0:
if pressed_key[K_LEFT]:
self.character_walking("Left")
else:
self.character_standing_still()
else:
self.character_standing_still()
if self.rect_stand.right < screen_width:
if pressed_key[K_RIGHT]:
self.character_walking("Right")
else:
self.character_standing_still()
else:
self.character_standing_still()
def draw(self, display):
self.game_screen = display
self.game_screen.blit(self.character_sprite, self.rect_stand)
pygame.init()
character = Character()
while game_running:
game_clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_running = False
game_screen.fill(color_white)
character.update()
character.draw(game_screen)
pygame.display.flip()
pygame.quit()
A:
Answer: Possibly multiple issues.
Firstly, the K_LEFT code is using self.standing_right for the sprite image. (It's not clear if this is correct, but it looks wrong).
Secondly, imagine no keys are pressed, and follow the update() function in your imagination. It logically reduces down to something like:
def update(self):
if ( not-against-window-edge ):
if ( left is pressed ):
paint left-walking-image
else:
paint right-standing-image # <<-- Always ends up here on no-keys
if ( right is pressed ):
paint right-walking-image
else:
paint right-standing-image # <<-- Always ends up here on no-keys
So if no keys are pressed, it's always showing the right-standing sprite as a last-step. The code needs to remember the previously faced direction.
So changing the second if to elif, remembering the direction, and some re-arranging:
def update(self):
pressed_key = pygame.key.get_pressed()
if pressed_key[K_LEFT]:
self.facing = 'left' # Turn Left
sprite_index = self.sprite_frame // 8
if self.rect_stand.left > 0 or self.rect_right.left > 0 or self.rect_left.left > 0:
self.rect_stand.move_ip(-5, 0)
self.rect_left.move_ip(-5, 0)
if sprite_index >= len(self.walking_left):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.walking_left[sprite_index]
self.sprite_frame = self.sprite_frame + 1
elif pressed_key[K_RIGHT]:
self.facing = 'right'
sprite_index = self.sprite_frame // 8
if self.rect_stand.right < screen_width or self.rect_right.right < screen_width or self.rect_left.right < screen_width:
self.rect_stand.move_ip(5, 0)
self.rect_right.move_ip(5, 0)
if sprite_index >= len(self.walking_right):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.walking_right[sprite_index]
self.sprite_frame = self.sprite_frame + 1
else:
# Neither left/right pressed, show stopped sprite
sprite_index = self.sprite_frame // 15
if ( self.facing == 'left' ):
if sprite_index >= len(self.standing_right): # TODO: left
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.standing_right[sprite_index] # TODO: left
elif ( self.facing == 'right' ):
if sprite_index >= len(self.standing_right):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.standing_right[sprite_index]
self.sprite_frame = self.sprite_frame + 1
Not forgetting to add 'self.direction' to the __init__().
It might be better to simply store a reference to the animation frames currently being used. Say you have:
class Player():
def __init__( self, blah ):
self.left_animation_sprites = [ ... ]
self.right_animation_sprites = [ ... ]
self.current_animation = self.right_animation_sprites
Then when you change directions, it's just a matter of referring to the "other" set of animations.
def turnLeft( self ):
if ( self.direction != 'left' ):
self.direction = 'left'
self.current_animation = self.left_animation_sprites
self.sprite_frame = 0
So now when you're animating in update, it's always looping through self.current_animation. This way your update code doesn't need to know which direction the player is facing.
EDIT
Essentially the updated code still has the same problem, and that is that the update() function will always execute self.character_standing_still() (the one on the last line of the function) when heading Left.
I can't be sure, but until I modified the code, I couldn't get the frame-indexing to work correctly either. But that was probably me. Anyway I removed the uses of a temporary sprite_index in lieu of just using the length of the animation list.
The code is not storing the direction of travel, so can't paint the correct "still" image left/right. It always uses right. This is also a minor part of the problem.
Here's a fixed version (but still doesn't remember the direction):
import pygame
screen_width = 500
screen_height = 500
game_screen = pygame.display.set_mode( ( screen_width, screen_height ) )
color_white = ( 200, 200, 200 )
class Character(pygame.sprite.Sprite):
def __init__(self):
self.sprite_frame = 0
self.standing_right_frame1 = pygame.image.load("images/character1_standing_facing_right_01.png")
self.standing_right_frame2 = pygame.image.load("images/character1_standing_facing_right_01.png")
self.standing_left_frame1 = pygame.image.load("images/character1_standing_facing_left_01.png")
self.standing_left_frame2 = pygame.image.load("images/character1_standing_facing_left_01.png")
self.walking_right_frame1 = pygame.image.load("images/character1_walking_right_01.png")
self.walking_right_frame2 = pygame.image.load("images/character1_walking_right_02.png")
self.walking_right_frame3 = pygame.image.load("images/character1_walking_right_03.png")
self.walking_right_frame4 = pygame.image.load("images/character1_walking_right_04.png")
self.walking_right_frame5 = pygame.image.load("images/character1_walking_right_05.png")
self.walking_right_frame6 = pygame.image.load("images/character1_walking_right_06.png")
self.walking_right_frame7 = pygame.image.load("images/character1_walking_right_07.png")
self.walking_right_frame8 = pygame.image.load("images/character1_walking_right_08.png")
self.walking_right_frame9 = pygame.image.load("images/character1_walking_right_09.png")
self.walking_right_frame10 = pygame.image.load("images/character1_walking_right_10.png")
self.walking_left_frame1 = pygame.image.load("images/character1_walking_left_01.png")
self.walking_left_frame2 = pygame.image.load("images/character1_walking_left_02.png")
self.walking_left_frame3 = pygame.image.load("images/character1_walking_left_03.png")
self.walking_left_frame4 = pygame.image.load("images/character1_walking_left_04.png")
self.walking_left_frame5 = pygame.image.load("images/character1_walking_left_05.png")
self.walking_left_frame6 = pygame.image.load("images/character1_walking_left_06.png")
self.walking_left_frame7 = pygame.image.load("images/character1_walking_left_07.png")
self.walking_left_frame8 = pygame.image.load("images/character1_walking_left_08.png")
self.walking_left_frame9 = pygame.image.load("images/character1_walking_left_09.png")
self.walking_left_frame10 = pygame.image.load("images/character1_walking_left_10.png")
self.standing_right = [self.standing_right_frame1, self.standing_right_frame2]
self.walking_right = [self.walking_right_frame1, self.walking_right_frame2, self.walking_right_frame3, self.walking_right_frame4, self.walking_right_frame5, self.walking_right_frame6, self.walking_right_frame7, self.walking_right_frame8, self.walking_right_frame9, self.walking_right_frame10]
self.walking_left = [self.walking_left_frame1, self.walking_left_frame2, self.walking_left_frame3, self.walking_left_frame4, self.walking_left_frame5, self.walking_left_frame6, self.walking_left_frame7, self.walking_left_frame8, self.walking_left_frame9, self.walking_left_frame10]
self.character_sprite = self.standing_right
self.rect_stand = self.standing_right_frame1.get_rect()
self.rect_right = self.walking_right_frame1.get_rect()
self.rect_left = self.walking_left_frame1.get_rect()
self.rect_stand.center = (200, 300)
def character_standing_still(self):
if self.sprite_frame >= len(self.standing_right):
self.sprite_frame = 0
self.character_sprite = self.standing_right[self.sprite_frame]
self.sprite_frame = self.sprite_frame + 1
def character_walking(self, direction = "Standing"):
if direction == "Left":
print( "left" )
if self.rect_stand.left > 0:
self.rect_stand.move_ip(-5, 0)
if self.sprite_frame >= len(self.walking_left):
self.sprite_frame = 0
self.character_sprite = self.walking_left[self.sprite_frame]
self.sprite_frame = self.sprite_frame + 1
elif direction == "Right":
if self.rect_stand.right < screen_width:
self.rect_stand.move_ip(5, 0)
if self.sprite_frame >= len(self.walking_right):
self.sprite_frame = 0
self.character_sprite = self.walking_right[self.sprite_frame]
self.sprite_frame = self.sprite_frame + 1
else:
self.character_standing_still()
def update(self):
pressed_key = pygame.key.get_pressed()
if pressed_key[pygame.K_LEFT]:
self.character_walking("Left")
elif pressed_key[pygame.K_RIGHT]:
self.character_walking("Right")
else:
self.character_standing_still()
def draw(self, display):
self.game_screen = display
self.game_screen.blit(self.character_sprite, self.rect_stand)
pygame.init()
character = Character()
game_clock = pygame.time.Clock()
game_running = True
while game_running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_running = False
game_screen.fill(color_white)
character.update()
character.draw(game_screen)
pygame.display.flip()
game_clock.tick(30)
pygame.quit()
|
Sprite animation works when moving right, but not when moving left
|
I'm making a simple game with Pygame. I have a character sprite which when moving right has all of the animations, but when moving left, the animations are not shown, instead it shows the default standing still animations as the character moves left across the screen. I have been looking at this code for an hour trying to figure out why this is happening and I can't put my finger on why, the moving left part of the code is exactly the same as the moving right part of the code, just loading different images, but it works when moving right and doesn't work when moving left.
Please note in the code below there are rect_stand, rect_right and rect_left, I'm pretty sure the rect_right and rect_left are not actually doing anything because the draw method only uses rect.stand, but I don't think that's the reason it's failing since before I animated walking left, I had walking right with a rect_right and this was fine, it's only in the left direction where it doesn't load the animations.
class Character(pygame.sprite.Sprite):
sprite_frame = 0
def __init__(self):
self.standing_right_frame1 = pygame.image.load("C:\\...\\character1_standing_facing_right_1.png")
self.standing_right_frame2 = pygame.image.load("C:\\...\\character1_standing_facing_right_2.png")
self.walking_right_frame1 = pygame.image.load("C:\\...\\character1_walking_right_1.png")
self.walking_right_frame2 = pygame.image.load("C:\\...\\character1_walking_right_2.png")
self.walking_right_frame3 = pygame.image.load("C:\\...\\character1_walking_right_3.png")
self.walking_right_frame4 = pygame.image.load("C:\\...\\character1_walking_right_4.png")
self.walking_right_frame5 = pygame.image.load("C:\\...\\character1_walking_right_5.png")
self.walking_right_frame6 = pygame.image.load("C:\\...\\character1_walking_right_6.png")
self.walking_right_frame7 = pygame.image.load("C:\\...\\character1_walking_right_7.png")
self.walking_right_frame8 = pygame.image.load("C:\\...\\character1_walking_right_8.png")
self.walking_right_frame9 = pygame.image.load("C:\\...\\character1_walking_right_9.png")
self.walking_right_frame10 = pygame.image.load("C:\\...\\character1_walking_right_10.png")
self.walking_left_frame1 = pygame.image.load("C:\\...\\character1_walking_left_1.png")
self.walking_left_frame2 = pygame.image.load("C:\\...\\character1_walking_left_2.png")
self.walking_left_frame3 = pygame.image.load("C:\\...\\character1_walking_left_3.png")
self.walking_left_frame4 = pygame.image.load("C:\\...\\character1_walking_left_4.png")
self.walking_left_frame5 = pygame.image.load("C:\\...\\character1_walking_left_5.png")
self.walking_left_frame6 = pygame.image.load("C:\\...\\character1_walking_left_6.png")
self.walking_left_frame7 = pygame.image.load("C:\\...\\character1_walking_left_7.png")
self.walking_left_frame8 = pygame.image.load("C:\\...\\character1_walking_left_8.png")
self.walking_left_frame9 = pygame.image.load("C:\\...\\character1_walking_left_9.png")
self.walking_left_frame10 = pygame.image.load("C:\\...\\character1_walking_left_10.png")
self.standing_right = [self.standing_right_frame1, self.standing_right_frame2]
self.walking_right = [self.walking_right_frame1, self.walking_right_frame2, self.walking_right_frame3, self.walking_right_frame4, self.walking_right_frame5, self.walking_right_frame6, self.walking_right_frame7, self.walking_right_frame8, self.walking_right_frame9, self.walking_right_frame10]
self.walking_left = [self.walking_left_frame1, self.walking_left_frame2, self.walking_left_frame3, self.walking_left_frame4, self.walking_left_frame5, self.walking_left_frame6, self.walking_left_frame7, self.walking_left_frame8, self.walking_left_frame9, self.walking_left_frame10]
self.character_sprite = self.standing_right
self.rect_stand = self.standing_right_frame1.get_rect()
self.rect_right = self.walking_right_frame1.get_rect()
self.rect_left = self.walking_left_frame1.get_rect()
self.rect_stand.center = (200, 300)
def character_standing_still(self):
sprite_index = self.sprite_frame // 15
if sprite_index >= len(self.standing_right):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.standing_right[sprite_index]
self.sprite_frame = self.sprite_frame + 1
def character_walking(self, direction = "Standing"):
if direction == "Left":
sprite_index = self.sprite_frame // 15
self.rect_stand.move_ip(-5, 0)
if sprite_index >= len(self.walking_left):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.walking_left[sprite_index]
self.sprite_frame = self.sprite_frame + 1
elif direction == "Right":
sprite_index = self.sprite_frame // 15
self.rect_stand.move_ip(5, 0)
if sprite_index >= len(self.walking_right):
self.sprite_frame = 0
sprite_index = 0
self.character_sprite = self.walking_right[sprite_index]
self.sprite_frame = self.sprite_frame + 1
else:
self.character_standing_still()
def update(self):
pressed_key = pygame.key.get_pressed()
if self.rect_stand.left > 0:
if pressed_key[K_LEFT]:
self.character_walking("Left")
else:
self.character_standing_still()
else:
self.character_standing_still()
if self.rect_stand.right < screen_width:
if pressed_key[K_RIGHT]:
self.character_walking("Right")
else:
self.character_standing_still()
else:
self.character_standing_still()
def draw(self, display):
self.game_screen = display
self.game_screen.blit(self.character_sprite, self.rect_stand)
pygame.init()
character = Character()
while game_running:
game_clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_running = False
game_screen.fill(color_white)
character.update()
character.draw(game_screen)
pygame.display.flip()
pygame.quit()
|
[
"Answer: Possibly multiple issues.\nFirstly, the K_LEFT code is using self.standing_right for the sprite image. (It's not clear if this is correct, but it looks wrong).\nSecondly, imagine no keys are pressed, and follow the update() function in your imagination. It logically reduces down to something like:\ndef update(self):\n if ( not-against-window-edge ):\n\n if ( left is pressed ):\n paint left-walking-image\n else:\n paint right-standing-image # <<-- Always ends up here on no-keys\n\n if ( right is pressed ):\n paint right-walking-image\n else:\n paint right-standing-image # <<-- Always ends up here on no-keys\n\nSo if no keys are pressed, it's always showing the right-standing sprite as a last-step. The code needs to remember the previously faced direction.\nSo changing the second if to elif, remembering the direction, and some re-arranging:\ndef update(self):\n pressed_key = pygame.key.get_pressed()\n\n if pressed_key[K_LEFT]:\n self.facing = 'left' # Turn Left\n sprite_index = self.sprite_frame // 8\n if self.rect_stand.left > 0 or self.rect_right.left > 0 or self.rect_left.left > 0:\n self.rect_stand.move_ip(-5, 0)\n self.rect_left.move_ip(-5, 0)\n if sprite_index >= len(self.walking_left):\n self.sprite_frame = 0\n sprite_index = 0\n self.character_sprite = self.walking_left[sprite_index]\n self.sprite_frame = self.sprite_frame + 1\n\n elif pressed_key[K_RIGHT]: \n self.facing = 'right'\n sprite_index = self.sprite_frame // 8\n if self.rect_stand.right < screen_width or self.rect_right.right < screen_width or self.rect_left.right < screen_width:\n self.rect_stand.move_ip(5, 0)\n self.rect_right.move_ip(5, 0)\n if sprite_index >= len(self.walking_right):\n self.sprite_frame = 0\n sprite_index = 0\n self.character_sprite = self.walking_right[sprite_index]\n self.sprite_frame = self.sprite_frame + 1\n\n else: \n # Neither left/right pressed, show stopped sprite\n sprite_index = self.sprite_frame // 15\n if ( self.facing == 'left' ):\n if sprite_index >= 
len(self.standing_right): # TODO: left\n self.sprite_frame = 0\n sprite_index = 0\n self.character_sprite = self.standing_right[sprite_index] # TODO: left\n elif ( self.facing == 'right' ):\n if sprite_index >= len(self.standing_right):\n self.sprite_frame = 0\n sprite_index = 0\n self.character_sprite = self.standing_right[sprite_index]\n self.sprite_frame = self.sprite_frame + 1\n\nNot forgetting to add 'self.direction' to the __init__().\nIt might be better to simply store a reference to the animation frames currently being used. Say you have:\nclass Player():\n def __init__( self, blah ):\n self.left_animation_sprites = [ ... ]\n self.right_animation_sprites = [ ... ]\n self.current_animation = self.right_animation_sprites\n\nThen when you change directions, it's just a matter of referring to the \"other\" set of animations.\n def turnLeft( self ):\n if ( self.direction != 'left' ):\n self.direction = 'left' \n self.current_animation = self.left_animation_sprites\n self.sprite_frame = 0\n\nSo now when you're animating in update, it's always looping through self.current_animation. This way your update code doesn't need to know which direction the player is facing.\nEDIT\nEssentially the updated code still has the same problem, and that is that the update() function will always execute self.character_standing_still() (the one on the last line of the function) when heading Left.\nI can't be sure, but until I modified the code, I couldn't get the frame-indexing to work correctly either. But that was probably me. Anyway I removed the uses of a temporary sprite_index in lieu of just using the length of the animation list.\nThe code is not storing the direction of travel, so can't paint the correct \"still\" image left/right. It always uses right. 
This is also a minor part of the problem.\nHere's a fixed version (but still doesn't remember the direction):\nimport pygame\n\nscreen_width = 500\nscreen_height = 500\ngame_screen = pygame.display.set_mode( ( screen_width, screen_height ) )\n\ncolor_white = ( 200, 200, 200 )\n\nclass Character(pygame.sprite.Sprite):\n\n\n def __init__(self):\n self.sprite_frame = 0\n self.standing_right_frame1 = pygame.image.load(\"images/character1_standing_facing_right_01.png\")\n self.standing_right_frame2 = pygame.image.load(\"images/character1_standing_facing_right_01.png\")\n self.standing_left_frame1 = pygame.image.load(\"images/character1_standing_facing_left_01.png\")\n self.standing_left_frame2 = pygame.image.load(\"images/character1_standing_facing_left_01.png\")\n self.walking_right_frame1 = pygame.image.load(\"images/character1_walking_right_01.png\")\n self.walking_right_frame2 = pygame.image.load(\"images/character1_walking_right_02.png\")\n self.walking_right_frame3 = pygame.image.load(\"images/character1_walking_right_03.png\")\n self.walking_right_frame4 = pygame.image.load(\"images/character1_walking_right_04.png\")\n self.walking_right_frame5 = pygame.image.load(\"images/character1_walking_right_05.png\")\n self.walking_right_frame6 = pygame.image.load(\"images/character1_walking_right_06.png\")\n self.walking_right_frame7 = pygame.image.load(\"images/character1_walking_right_07.png\")\n self.walking_right_frame8 = pygame.image.load(\"images/character1_walking_right_08.png\")\n self.walking_right_frame9 = pygame.image.load(\"images/character1_walking_right_09.png\")\n self.walking_right_frame10 = pygame.image.load(\"images/character1_walking_right_10.png\")\n self.walking_left_frame1 = pygame.image.load(\"images/character1_walking_left_01.png\")\n self.walking_left_frame2 = pygame.image.load(\"images/character1_walking_left_02.png\")\n self.walking_left_frame3 = pygame.image.load(\"images/character1_walking_left_03.png\")\n self.walking_left_frame4 = 
pygame.image.load(\"images/character1_walking_left_04.png\")\n self.walking_left_frame5 = pygame.image.load(\"images/character1_walking_left_05.png\")\n self.walking_left_frame6 = pygame.image.load(\"images/character1_walking_left_06.png\")\n self.walking_left_frame7 = pygame.image.load(\"images/character1_walking_left_07.png\")\n self.walking_left_frame8 = pygame.image.load(\"images/character1_walking_left_08.png\")\n self.walking_left_frame9 = pygame.image.load(\"images/character1_walking_left_09.png\")\n self.walking_left_frame10 = pygame.image.load(\"images/character1_walking_left_10.png\")\n\n self.standing_right = [self.standing_right_frame1, self.standing_right_frame2]\n self.walking_right = [self.walking_right_frame1, self.walking_right_frame2, self.walking_right_frame3, self.walking_right_frame4, self.walking_right_frame5, self.walking_right_frame6, self.walking_right_frame7, self.walking_right_frame8, self.walking_right_frame9, self.walking_right_frame10]\n self.walking_left = [self.walking_left_frame1, self.walking_left_frame2, self.walking_left_frame3, self.walking_left_frame4, self.walking_left_frame5, self.walking_left_frame6, self.walking_left_frame7, self.walking_left_frame8, self.walking_left_frame9, self.walking_left_frame10]\n self.character_sprite = self.standing_right\n self.rect_stand = self.standing_right_frame1.get_rect()\n self.rect_right = self.walking_right_frame1.get_rect()\n self.rect_left = self.walking_left_frame1.get_rect()\n self.rect_stand.center = (200, 300)\n\n def character_standing_still(self):\n if self.sprite_frame >= len(self.standing_right):\n self.sprite_frame = 0\n self.character_sprite = self.standing_right[self.sprite_frame]\n self.sprite_frame = self.sprite_frame + 1\n\n\n def character_walking(self, direction = \"Standing\"):\n\n if direction == \"Left\":\n print( \"left\" )\n if self.rect_stand.left > 0:\n self.rect_stand.move_ip(-5, 0)\n if self.sprite_frame >= len(self.walking_left):\n self.sprite_frame = 0\n 
self.character_sprite = self.walking_left[self.sprite_frame]\n self.sprite_frame = self.sprite_frame + 1\n\n elif direction == \"Right\":\n if self.rect_stand.right < screen_width:\n self.rect_stand.move_ip(5, 0)\n if self.sprite_frame >= len(self.walking_right):\n self.sprite_frame = 0\n self.character_sprite = self.walking_right[self.sprite_frame]\n self.sprite_frame = self.sprite_frame + 1\n else:\n self.character_standing_still()\n\n\n def update(self):\n pressed_key = pygame.key.get_pressed()\n if pressed_key[pygame.K_LEFT]:\n self.character_walking(\"Left\")\n elif pressed_key[pygame.K_RIGHT]:\n self.character_walking(\"Right\")\n else:\n self.character_standing_still()\n\n\n def draw(self, display):\n self.game_screen = display\n self.game_screen.blit(self.character_sprite, self.rect_stand) \n\n\npygame.init()\ncharacter = Character()\n\ngame_clock = pygame.time.Clock()\ngame_running = True\nwhile game_running:\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_running = False\n\n game_screen.fill(color_white)\n character.update()\n character.draw(game_screen)\n pygame.display.flip()\n game_clock.tick(30)\n\npygame.quit()\n\n"
] |
[
0
] |
[] |
[] |
[
"animation",
"pygame",
"python",
"sprite"
] |
stackoverflow_0074594570_animation_pygame_python_sprite.txt
|
Q:
Compare two dates not considering Year and giving incorrect answer
I have two dates I'm trying to compare in this format: a = '10.2022' (october 2022) and b = '02.2023' (February 2023)
When I enter a > b I expect to have False
Here is my code:
import datetime
a = '10-2022' # String date
b = '02-2023' # String date
date_format = '%m.%Y'
a = datetime.datetime.strptime(master_list[0][1], '%m-%Y').strftime(date_format)
b = datetime.datetime.strptime(master_list[0][2], '%m-%Y').strftime(date_format)
So I have a = '10.2022 ' and b = '02.2023'.
a > b returns True which is not correct because Oct 2022 is obviously before Feb 2023. Not sure what's not working
A:
import datetime
date_format = '%m.%Y'
a = datetime.datetime.strptime('10-2022', '%m-%Y')
b = datetime.datetime.strptime('02-2023', '%m-%Y')
print(type(a))
print(a)
print(f'a is greater than b when both are datetime: {a>b}')
print()
# now convert back to format you want...
a_as_str = a.strftime(date_format)
b_as_str = b.strftime(date_format)
print(type(a_as_str))
print(a_as_str)
print(f'a is greater than b when both are strings: {a_as_str>b_as_str}')
Output:
<class 'datetime.datetime'>
2022-10-01 00:00:00
a is greater than b when both are datetime: False
<class 'str'>
10.2022
a is greater than b when both are strings: True
|
Compare two dates not considering Year and giving incorrect answer
|
I have two dates I'm trying to compare in this format: a = '10.2022' (october 2022) and b = '02.2023' (February 2023)
When I enter a > b I expect to have False
Here is my code:
import datetime
a = '10-2022' # String date
b = '02-2023' # String date
date_format = '%m.%Y'
a = datetime.datetime.strptime(master_list[0][1], '%m-%Y').strftime(date_format)
b = datetime.datetime.strptime(master_list[0][2], '%m-%Y').strftime(date_format)
So I have a = '10.2022 ' and b = '02.2023'.
a > b returns True which is not correct because Oct 2022 is obviously before Feb 2023. Not sure what's not working
|
[
"import datetime\n\ndate_format = '%m.%Y'\n\na = datetime.datetime.strptime('10-2022', '%m-%Y') \nb = datetime.datetime.strptime('02-2023', '%m-%Y') \n\nprint(type(a))\nprint(a)\nprint(f'a is greater than b when both are datetime: {a>b}')\nprint()\n\n# now convert back to format you want...\na_as_str = a.strftime(date_format)\nb_as_str = b.strftime(date_format)\n\nprint(type(a_as_str))\nprint(a_as_str)\nprint(f'a is greater than b when both are strings: {a_as_str>b_as_str}')\n\nOutput:\n<class 'datetime.datetime'>\n2022-10-01 00:00:00\na is greater than b when both are datetime: False\n\n<class 'str'>\n10.2022\na is greater than b when both are strings: True\n\n"
] |
[
0
] |
[
"You are comparing 2 numbers. 10,2022 (float) and 02,2023 (float) literally.\nTry using unix to compare time ^^\n"
] |
[
-3
] |
[
"python"
] |
stackoverflow_0074592476_python.txt
|
Q:
In Pytube, is there any way to get the highest audio quality stream?
I was trying to make a really simple python code to get me the stream with the highest quality audio, so I first tried something like this
def get_highest_audio(url):
yt = YouTube(url)
best_audio_stream = yt.streams.filter(only_audio=True).all()[1]
return best_audio_stream
Which did return a stream, but it wasn't the stream with the highest quality audio, so I tried to find a function in the pytube library.
While there was a get_highest_resolution() function, there was not a get_highest_audio_resolution() function.
So I tried to get the audio resolution by hand, but there wasn't a function that got the audio resolution of a stream.
Is there any way to create this function?
A:
You can just use:
yt.streams.get_audio_only()
this gets the highest bitrate audio stream. It defaults to mp4.
A:
Why not remove the [1] and it will display all audio formats. From there you can select the highest one?
A:
Try This: My Github URL
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created Syst: macOS Monterey 12.6 (21G115) Kernel: Darwin 21.6.0
# Created Plat: Python 3.10.8 ('v3.10.8:aaaf517424', 'Oct 11 2022 10:14:40')
# Created By : Jeromie Kirchoff (JayRizzo)
# Created Date: Tue Jul 13 20:52:32 2021 CST
# Last ModDate: Sat Nov 26 19:56:32 2022 CST
# =============================================================================
"""This Module Has Been Build For Downloading The Highest Quality Video & Audio Of A YouTube Video."""
# =============================================================================
from os import path as ospath
from pytube import YouTube
CURRENT_HOME = ospath.expanduser('~')
VIDEO_FILE_PATH = ospath.join(CURRENT_HOME, 'Videos', 'MusicVideos')
AUDIO_FILE_PATH = ospath.join(CURRENT_HOME, 'Music', 'Downloaded')
def getYTVid(URL):
"""Get Highest Quality Video from YT URL."""
YT = YouTube(URL)
try:
print(f"Downloading Video: {YT.title}")
YTVIDEO_FILE_PATH = YT.streams.filter(only_audio=False, progressive=True, file_extension='mp4').order_by('resolution').desc().first().download(VIDEO_FILE_PATH)
print(f"Download Video Completed: {YTVIDEO_FILE_PATH}\n")
except Exception as e:
print(f"Error: {e}")
def getYTAudio(URL):
"""Get Highest Quality Audio from YT URL."""
YT = YouTube(URL)
try:
YTAUDIO_FILE_PATH = YT.streams.filter(only_audio=True, file_extension='mp4').order_by('abr').desc().first().download(AUDIO_FILE_PATH)
print(f"Download Video Completed: {YTAUDIO_FILE_PATH}\n")
except Exception as e:
print(f"Error: {e}")
if __name__ == '__main__':
print(f"Video Path: {VIDEO_FILE_PATH}")
print(f"Audio Path: {AUDIO_FILE_PATH}")
print("Downloading Audio: Tom MacDonald - I Wish.mp4")
getYTAudio('https://www.youtube.com/watch?v=8wNUjCcaGrM') # Tom MacDonald - I Wish.mp4
print("Downloading Audio: Tom MacDonald - \"Cancelled\".mp4")
getYTAudio('https://www.youtube.com/watch?v=EHBMbZdCpSk') # Tom MacDonald - "Cancelled".mp4
print("Downloading Audio: Tom MacDonald - Dont Look Down (uncensored).mp4")
getYTAudio('https://www.youtube.com/watch?v=Ex3zq_ADrNU') # Tom MacDonald - Dont Look Down (uncensored).mp4
print("Downloading Vid: Tom MacDonald - I Wish.mp4")
getYTVid('https://www.youtube.com/watch?v=8wNUjCcaGrM') # Tom MacDonald - I Wish.mp4
print("Downloading Vid: Tom MacDonald - \"Cancelled\".mp4")
getYTVid('https://www.youtube.com/watch?v=EHBMbZdCpSk') # Tom MacDonald - "Cancelled".mp4
print("Downloading Vid: Tom MacDonald - Dont Look Down (uncensored).mp4")
getYTVid('https://www.youtube.com/watch?v=Ex3zq_ADrNU') # Tom MacDonald - Dont Look Down (uncensored).mp4
|
In Pytube, is there any way to get the highest audio quality stream?
|
I was trying to make a really simple python code to get me the stream with the highest quality audio, so I first tried something like this
def get_highest_audio(url):
yt = YouTube(url)
best_audio_stream = yt.streams.filter(only_audio=True).all()[1]
return best_audio_stream
Which did return a stream, but it wasn't the stream with the highest quality audio, so I tried to find a function in the pytube library.
While there was a get_highest_resolution() function, there was not a get_highest_audio_resolution() function.
So I tried to get the audio resolution by hand, but there wasn't a function that got the audio resolution of a stream.
Is there any way to create this function?
|
[
"You can just use:\nyt.streams.get_audio_only()\n\nthis gets the highest bitrate audio stream. It defaults to mp4.\n",
"Why not remove the [1] and it will display all audio formats. From there you can select the highest one? \n",
"Try This: My Github URL\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# =============================================================================\n# Created Syst: macOS Monterey 12.6 (21G115) Kernel: Darwin 21.6.0\n# Created Plat: Python 3.10.8 ('v3.10.8:aaaf517424', 'Oct 11 2022 10:14:40')\n# Created By : Jeromie Kirchoff (JayRizzo)\n# Created Date: Tue Jul 13 20:52:32 2021 CST\n# Last ModDate: Sat Nov 26 19:56:32 2022 CST\n# =============================================================================\n\"\"\"This Module Has Been Build For Downloading The Highest Quality Video & Audio Of A YouTube Video.\"\"\"\n# =============================================================================\nfrom os import path as ospath\nfrom pytube import YouTube\nCURRENT_HOME = ospath.expanduser('~')\nVIDEO_FILE_PATH = ospath.join(CURRENT_HOME, 'Videos', 'MusicVideos')\nAUDIO_FILE_PATH = ospath.join(CURRENT_HOME, 'Music', 'Downloaded')\n\ndef getYTVid(URL):\n \"\"\"Get Highest Quality Video from YT URL.\"\"\"\n YT = YouTube(URL)\n try:\n print(f\"Downloading Video: {YT.title}\")\n YTVIDEO_FILE_PATH = YT.streams.filter(only_audio=False, progressive=True, file_extension='mp4').order_by('resolution').desc().first().download(VIDEO_FILE_PATH)\n print(f\"Download Video Completed: {YTVIDEO_FILE_PATH}\\n\")\n except Exception as e:\n print(f\"Error: {e}\")\n\ndef getYTAudio(URL):\n \"\"\"Get Highest Quality Audio from YT URL.\"\"\"\n YT = YouTube(URL)\n try:\n YTAUDIO_FILE_PATH = YT.streams.filter(only_audio=True, file_extension='mp4').order_by('abr').desc().first().download(AUDIO_FILE_PATH)\n print(f\"Download Video Completed: {YTAUDIO_FILE_PATH}\\n\")\n except Exception as e:\n print(f\"Error: {e}\")\n\nif __name__ == '__main__':\n print(f\"Video Path: {VIDEO_FILE_PATH}\")\n print(f\"Audio Path: {AUDIO_FILE_PATH}\")\n print(\"Downloading Audio: Tom MacDonald - I Wish.mp4\")\n getYTAudio('https://www.youtube.com/watch?v=8wNUjCcaGrM') # Tom MacDonald - I Wish.mp4\n 
print(\"Downloading Audio: Tom MacDonald - \\\"Cancelled\\\".mp4\")\n getYTAudio('https://www.youtube.com/watch?v=EHBMbZdCpSk') # Tom MacDonald - \"Cancelled\".mp4\n print(\"Downloading Audio: Tom MacDonald - Dont Look Down (uncensored).mp4\")\n getYTAudio('https://www.youtube.com/watch?v=Ex3zq_ADrNU') # Tom MacDonald - Dont Look Down (uncensored).mp4\n print(\"Downloading Vid: Tom MacDonald - I Wish.mp4\")\n getYTVid('https://www.youtube.com/watch?v=8wNUjCcaGrM') # Tom MacDonald - I Wish.mp4\n print(\"Downloading Vid: Tom MacDonald - \\\"Cancelled\\\".mp4\")\n getYTVid('https://www.youtube.com/watch?v=EHBMbZdCpSk') # Tom MacDonald - \"Cancelled\".mp4\n print(\"Downloading Vid: Tom MacDonald - Dont Look Down (uncensored).mp4\")\n getYTVid('https://www.youtube.com/watch?v=Ex3zq_ADrNU') # Tom MacDonald - Dont Look Down (uncensored).mp4\n\n"
] |
[
6,
0,
0
] |
[] |
[] |
[
"python",
"pytube",
"youtube"
] |
stackoverflow_0061419900_python_pytube_youtube.txt
|
Q:
if the string is "yash" then how can we get the output in the form of yyaasshh
Given a string, return a string where for every char in the original, there are two chars.
double_char('The') → 'TThhee'
double_char('AAbb') → 'AAAAbbbb'
double_char('Hi-There') → 'HHii--TThheerree'
what is the code for this
A:
You can loop through each character and then duplicate each letter and append it to a new variable
def double_char(str):
result = ""
for i in range(len(str)):
result = result + str[i] + str[i]
return result
|
if the string is "yash" then how can we get the output in the form of yyaasshh
|
Given a string, return a string where for every char in the original, there are two chars.
double_char('The') → 'TThhee'
double_char('AAbb') → 'AAAAbbbb'
double_char('Hi-There') → 'HHii--TThheerree'
what is the code for this
|
[
"You can loop through each character and then duplicate each letter and append it to a new variable\ndef double_char(str):\n result = \"\"\n for i in range(len(str)):\n result = result + str[i] + str[i]\n return result\n\n"
] |
[
1
] |
[] |
[] |
[
"python",
"python_3.x"
] |
stackoverflow_0074595294_python_python_3.x.txt
|
Q:
"Int object is not callable"
num = int(input("enter num "))
if num <=0:
output= abs(num)
print(output)
else:
output =abs(num)
print(output)
TypeError Traceback (most recent call last)
<ipython-input-54-aba2c4ff0eb3> in <module>
3 INPUT: -1 OUTPUT: 1"""
4
----> 5 num = int(input("enter num "))
6
7
TypeError: 'int' object is not callable
A:
num = int(input("enter num "))
output = abs(num)
print(output)
|
"Int object is not callable"
|
num = int(input("enter num "))
if num <=0:
output= abs(num)
print(output)
else:
output =abs(num)
print(output)
TypeError Traceback (most recent call last)
<ipython-input-54-aba2c4ff0eb3> in <module>
3 INPUT: -1 OUTPUT: 1"""
4
----> 5 num = int(input("enter num "))
6
7
TypeError: 'int' object is not callable
|
[
"num = int(input(\"enter num \")) \n\noutput = abs(num) \nprint(output) \n\n"
] |
[
1
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074595361_python.txt
|
Q:
How to subtract 1 column from 4 columns and produce 4 new, resulting columns?
I have a dataframe as shown. I need 4 new columns [['PriceSpread_ATL', 'PriceSpread_CHI', 'PriceSpread_LA', 'PriceSpread_NY']] that are the price spreads for each market. For 'PriceSpreadATL', each cell in the column 'FarmPrice' must be subtracted from the corresponding cell in the column 'AtlantaRetail' and divided by the cell in 'FarmPrice' (ex.: (4.12 - 2.05)/2.05; (4.12 - 1.49)/1.49; (3.37 - 1.35)/1.35; (3.12 - 1.20)/ 1.20; and so on). Similarly, for 'PriceSpreadCHI', each cell in the column 'FarmPrice' must be subtracted from the corresponding cell in the column 'ChicagoRetail' and divided by the cell in 'FarmPrice', and so on for 'PriceSpread_LA' and 'PriceSpread_NY'. All the new price spread columns [['PriceSpread_ATL', 'PriceSpread_CHI', 'PriceSpread_LA', 'PriceSpread_NY']] should be appended column-wise to the dataframe. How do I carry out such an operation?
A:
You can try approaching it by iterating through a list of the title you want.
d = {'farm_p' : [2.05, 1.49, 1.35], 'A_retail': [3.39, 2.39, 5.0], 'L_retail': [4.12, 4.12, 4.0]}
df = pd.DataFrame(data = d)
# Generate list for titles
header = ['A_retail', 'L_retail']
for head in header:
# Create column & append result by row with output
new_head = 'PriceSpread_' + head
df[new_head] = (df[head] - df['farm_p']) / df['farm_p']
This should return you the additional columns you are looking for:
A:
you can do the following, first the general idea, then an example.
df["new_col"] = (df["some_col"] - df["another_col"]) / df["whatever_col"]
In your case let's do the following.
df["NewLosAngelesRetail"] = (df["LosAngelesRetail"] - df["FarmPrice"]) / df["FarmPrice"]
Now you can do the same with other columns, here is a link with a similar question Sum DataFrame cols.
Be careful you have NaNs Fillnans docs
|
How to subtract 1 column from 4 columns and produce 4 new, resulting columns?
|
I have a dataframe as shown. I need 4 new columns [['PriceSpread_ATL', 'PriceSpread_CHI', 'PriceSpread_LA', 'PriceSpread_NY']] that are the price spreads for each market. For 'PriceSpreadATL', each cell in the column 'FarmPrice' must be subtracted from the corresponding cell in the column 'AtlantaRetail' and divided by the cell in 'FarmPrice' (ex.: (4.12 - 2.05)/2.05; (4.12 - 1.49)/1.49; (3.37 - 1.35)/1.35; (3.12 - 1.20)/ 1.20; and so on). Similarly, for 'PriceSpreadCHI', each cell in the column 'FarmPrice' must be subtracted from the corresponding cell in the column 'ChicagoRetail' and divided by the cell in 'FarmPrice', and so on for 'PriceSpread_LA' and 'PriceSpread_NY'. All the new price spread columns [['PriceSpread_ATL', 'PriceSpread_CHI', 'PriceSpread_LA', 'PriceSpread_NY']] should be appended column-wise to the dataframe. How do I carry out such an operation?
|
[
"You can try approaching it by iterating through a list of the title you want.\nd = {'farm_p' : [2.05, 1.49, 1.35], 'A_retail': [3.39, 2.39, 5.0], 'L_retail': [4.12, 4.12, 4.0]}\ndf = pd.DataFrame(data = d)\n\n# Generate list for titles\nheader = ['A_retail', 'L_retail']\n\nfor head in header:\n # Create column & append result by row with output\n new_head = 'PriceSpread_' + head\n df[new_head] = (df[head] - df['farm_p']) / df['farm_p']\n\nThis should return you the additional columns you are looking for:\n\n",
"you can do the following, first the general idea, then an example.\ndf[\"new_col\"] = (df[\"some_col\"] - df[\"another_col\"]) / df[\"whatever_col\"]\n\nIn your case let's do the following.\ndf[\"NewLosAngelesRetail\"] = (df[\"LosAngelesRetail\"] - df[\"FarmPrice\"]) / df[\"FarmPrice\"]\n\nNow you can do the same with other columns, here is a link with a similar question Sum DataFrame cols.\nBe careful you have NaNs Fillnans docs\n"
] |
[
0,
0
] |
[] |
[] |
[
"group_by",
"python"
] |
stackoverflow_0074595375_group_by_python.txt
|
Q:
Does iterating through a file in S3 using boto3's StreamingBody.iter_lines() count as a GET request for each line?
I'm working on something where I am trying to access some data stored in a large CSV file in S3 via boto3. I'm considering iterating through the data line by line for memory sake, using:
s3_client = boto3.client("s3")
iterator = s3_client.get_object(Bucket='my-bucket', Key='my-key')['Body'].iter_lines()
for line in iterator:
foo(line)
This works on the toy data I've been testing with, but the full dataset is over 70 million rows. My question is if using this method to iterate through the data will actually send off 70 million separate requests (and incurs charges by AWS per 70 million times more than were I to read the entire file in at once)? Or does it count a single request?
I don't have a great understanding of the underlying mechanism boto3 is using to access the data in a StreamingBody class, and have had difficulty finding information on it, including in the boto3 documentation.
A:
No, it loads a fixed chunk size(in bytes) for each request, if there is another line in the loaded content cache, the __next__ call to the generator returns it, otherwise, it will make another request until there is no remaining content. See https://github.com/boto/botocore/blob/dfda41c08e3ed5354dce9f958b6db06e6cce99ed/botocore/response.py#L135-L148
So how many requests it makes depends on the file size and the chunk size, not related to the size of each line, it should work well for your case.
|
Does iterating through a file in S3 using boto3's StreamingBody.iter_lines() count as a GET request for each line?
|
I'm working on something where I am trying to access some data stored in a large CSV file in S3 via boto3. I'm considering iterating through the data line by line for memory sake, using:
s3_client = boto3.client("s3")
iterator = s3_client.get_object(Bucket='my-bucket', Key='my-key')['Body'].iter_lines()
for line in iterator:
foo(line)
This works on the toy data I've been testing with, but the full dataset is over 70 million rows. My question is if using this method to iterate through the data will actually send off 70 million separate requests (and incurs charges by AWS per 70 million times more than were I to read the entire file in at once)? Or does it count a single request?
I don't have a great understanding of the underlying mechanism boto3 is using to access the data in a StreamingBody class, and have had difficulty finding information on it, including in the boto3 documentation.
|
[
"No, it loads a fixed chunk size(in bytes) for each request, if there is another line in the loaded content cache, the __next__ call to the generator returns it, otherwise, it will make another request until there is no remaining content. See https://github.com/boto/botocore/blob/dfda41c08e3ed5354dce9f958b6db06e6cce99ed/botocore/response.py#L135-L148\nSo how many requests it makes depends on the file size and the chunk size, not related to the size of each line, it should work well for your case.\n"
] |
[
1
] |
[] |
[] |
[
"amazon_s3",
"boto3",
"python"
] |
stackoverflow_0060422708_amazon_s3_boto3_python.txt
|
Q:
what is this color map? cmap=mglearn.cm3
I try to run the following code but it gives the following error in recognition of the mglearn color map.
grr = pd.scatter_matrix( ...., cmap=mglearn.cm3)
ErrorName: name 'mglearn' is not defined
I should add that pd is the Anaconda Pandas package imported as pd, but it does not recognize the color map mglearn.cm3
Any suggestions?
A:
Open Anaconda prompt and execute pip install mglearn
After that just import mglearn
import pandas as pd
import mglearn
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
grr = pd.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',s=60, alpha=0.8, hist_kwds={'bins': 10},cmap=mglearn.cm3)
A:
Concerns the code for the book - Introduction to Machine Learning with Python
At the top of the Code for the book (notebooks, etc.) there is a package folder named - mglearn. It contains, as its fist .py file - init.py.
It therefore allowed me to simply copy/paste the folder - mglearn, into my
C:\Users\Ernesto\Anaconda3 folder and afterwards type - import mglearn - in my Spyder 3.5 editor.
Then the line:
grr = pd.scatter_matrix(iris_dataframe,
c=y_train,
figsize=(15, 15),
marker='o',
hist_kwds={'bins': 20},
s=60,
alpha=0.8,
cmap=mglearn.cm3)
prints the scatter_matrix just as in the book.
A:
conda install pip(because conda install mglearn will give a error)
pip install mglearn
grr = pd.plotting.scatter_matrix( ...., cmap=mglearn.cm3)
if you still can't see the output then you might have missed %matplotlib inline
A:
You need to install mglearn package. You can do that by running the below command in your Command Prompt or in the terminal (Anaconda/ any other IDE that has support for Terminal).
pip install mglearn
For more details on mglearn package visit: enter link description here
A:
if you are working in jupyter notebook just use cmap='PuBu' in scatter_matrix()
|
what is this color map? cmap=mglearn.cm3
|
I try to run the following code but it gives the following error in recognition of the mglearn color map.
grr = pd.scatter_matrix( ...., cmap=mglearn.cm3)
ErrorName: name 'mglearn' is not defined
I should add that pd is the Anaconda Pandas package imported as pd, but it does not recognize the color map mglearn.cm3
Any suggestions?
|
[
"Open Anaconda prompt and execute pip install mglearn\nAfter that just import mglearn\nimport pandas as pd\nimport mglearn\niris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)\ngrr = pd.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',s=60, alpha=0.8, hist_kwds={'bins': 10},cmap=mglearn.cm3)\n\n",
"Concerns the code for the book - Introduction to Machine Learning with Python\n\nAt the top of the Code for the book (notebooks, etc.) there is a package folder named - mglearn. It contains, as its fist .py file - init.py.\nIt therefore allowed me to simply copy/paste the folder - mglearn, into my\nC:\\Users\\Ernesto\\Anaconda3 folder and afterwards type - import mglearn - in my Spyder 3.5 editor.\nThen the line:\ngrr = pd.scatter_matrix(iris_dataframe,\n c=y_train, \n figsize=(15, 15),\n marker='o',\n hist_kwds={'bins': 20},\n s=60, \n alpha=0.8, \n cmap=mglearn.cm3)\n\nprints the scatter_matrix just as in the book.\n\n",
"\nconda install pip(because conda install mglearn will give a error)\npip install mglearn\ngrr = pd.plotting.scatter_matrix( ...., cmap=mglearn.cm3)\nif you still can't see the output then you might have missed %matplotlib inline\n\n",
"You need to install mglearn package. You can do that by running the below command in your Command Prompt or in the terminal (Anaconda/ any other IDE that has support for Terminal).\npip install mglearn\nFor more details on mglearn package visit: enter link description here\n",
"if you are working in jupyter notebook just use cmap='PuBu' in scatter_matrix()\n"
] |
[
4,
2,
0,
0,
0
] |
[] |
[] |
[
"anaconda",
"pandas",
"python"
] |
stackoverflow_0040878325_anaconda_pandas_python.txt
|
Q:
How To Open and Display .JSON Files Inside Jupyter Notebook
I have an issue within Jupyter that I cannot find online anywhere and was hoping I could get some help.
Essentially, I want to open .JSON files from multiple folders with different names. For example.
data/weather/date=2022-11-20/data.JSON
data/weather/date=2022-11-21/data.JSON
data/weather/date=2022-11-22/data.JSON
data/weather/date=2022-11-23/data.JSON
I want to be able to output the info inside the data.JSON onto my Jupyter Notebook, but how do I do that as the folder names are all different.
Thank you in advance.
What I tried so far
for path,dirs,files in os.walk('data/weather'): for file in files: if fnmatch.fnmatch(file,'*.json'): data = os.path.join(path,file) print(data)
OUTPUT:
data/weather/date=2022-11-20/data.JSON
data/weather/date=2022-11-21/data.JSON
data/weather/date=2022-11-22/data.JSON
data/weather/date=2022-11-23/data.JSON
But i dont want it to output the directory, I want to actually open the .JSON and display its content
A:
This solution uses the os library to go thru different directories
import os
import json
for root, dirs, files in os.walk('data/weather'):
for file in files:
if file.endswith('.JSON'):
with open(os.path.join(root, file), 'r') as f:
data = json.load(f)
print(data)
|
How To Open and Display .JSON Files Inside Jupyter Notebook
|
I have an issue within Jupyter that I cannot find online anywhere and was hoping I could get some help.
Essentially, I want to open .JSON files from multiple folders with different names. For example.
data/weather/date=2022-11-20/data.JSON
data/weather/date=2022-11-21/data.JSON
data/weather/date=2022-11-22/data.JSON
data/weather/date=2022-11-23/data.JSON
I want to be able to output the info inside the data.JSON onto my Jupyter Notebook, but how do I do that as the folder names are all different.
Thank you in advance.
What I tried so far
for path,dirs,files in os.walk('data/weather'): for file in files: if fnmatch.fnmatch(file,'*.json'): data = os.path.join(path,file) print(data)
OUTPUT:
data/weather/date=2022-11-20/data.JSON
data/weather/date=2022-11-21/data.JSON
data/weather/date=2022-11-22/data.JSON
data/weather/date=2022-11-23/data.JSON
But i dont want it to output the directory, I want to actually open the .JSON and display its content
|
[
"This solution uses the os library to go thru different directories\nimport os\nimport json\n\nfor root, dirs, files in os.walk('data/weather'):\n for file in files:\n if file.endswith('.JSON'):\n with open(os.path.join(root, file), 'r') as f:\n data = json.load(f)\n print(data)\n\n"
] |
[
1
] |
[] |
[] |
[
"jupyter_notebook",
"numpy",
"pandas",
"python"
] |
stackoverflow_0074595487_jupyter_notebook_numpy_pandas_python.txt
|
Q:
Create a generic function to join multiple datasets in pyspark
Hi I am creating a generic function or class to add n numbers of datasets but I am unable to find the proper logic to do that, I put all codes below and highlight the section in which I want some help. if you find any problem in understanding my code then please ping me.
import pyspark
# importing sparksession from pyspark.sql module
from pyspark.sql import SparkSession
# creating sparksession and giving an app name
spark = SparkSession.builder.appName('sparkdf').getOrCreate()
data_fact = [["1", "sravan", "company 1","100"],
["2", "ojaswi", "company 1","200"],
["3", "rohith", "company 2","300"],
["4", "sridevi", "company 1","400"],
["5", "bobby", "company 1","500"]]
# specify column names
columns = ['ID', 'NAME', 'Company','Amount']
# creating a dataframe from the lists of data
df_fact = spark.createDataFrame(data_fact, columns)
Department_table = [["1", "45000", "IT"],
["2", "145000", "Manager"],
["6", "45000", "HR"],
["5", "34000", "Sales"]]
# specify column names
columns1 = ['ID', 'salary', 'department']
df_Department = spark.createDataFrame(Department_table, columns1)
Leave_Table = [["1", "Sick Leave"],
["2", "Casual leave"],
["3", "Casual leave"],
["4", "Earned Leave"],
["4", "Sick Leave"] ]
# specify column names
columns2 = ['ID', 'Leave_type']
df_Leave = spark.createDataFrame(Leave_Table, columns2)
Phone_Table = [["1", "Apple"],
["2", "Samsung"],
["3", "MI"],
["4", "Vivo"],
["4", "Apple"] ]
# specify column names
columns3 = ['ID', 'Phone_type']
df_Phone = spark.createDataFrame(Phone_Table, columns3)
Df_join = df_fact.join(df_Department,df_fact.ID ==df_Department.ID,"inner")\
.join(df_Phone,df_fact.ID ==df_Phone.ID,"inner")\
.join(df_Leave,df_fact.ID ==df_Leave.ID,"inner")\
.select(df_fact.Amount,df_Department.ID,df_Department.salary,df_Department.department,df_Phone.Phone_type,df_Leave.Leave_type)
display(Df_join)
basically, I want to generalise this stuff for n numbers of datasets
Df_join = df_fact.join(df_Department,df_fact.ID ==df_Department.ID,"inner")\
.join(df_Phone,df_fact.ID ==df_Phone.ID,"inner")\
.join(df_Leave,df_fact.ID ==df_Leave.ID,"inner")\
.select(df_fact.Amount,df_Department.ID,df_Department.salary,df_Department.department,df_Phone.Phone_type,df_Leave.Leave_type) ```
A:
Since you're using inner join in all dataframe, if you want to prevent the bulky code, you can use the .reduce() in functools to do the joining and select the column that you want:
df = reduce(lambda x, y: x.join(y, on='id', how='inner'), [df_fact, df_Department, df_Leave, df_Phone])
df.show(10, False)
+---+------+---------+------+------+----------+------------+----------+
|ID |NAME |Company |Amount|salary|department|Leave_type |Phone_type|
+---+------+---------+------+------+----------+------------+----------+
|1 |sravan|company 1|100 |45000 |IT |Sick Leave |Apple |
|2 |ojaswi|company 1|200 |145000|Manager |Casual leave|Samsung |
+---+------+---------+------+------+----------+------------+----------+
https://docs.python.org/3/library/functools.html#functools.reduce
Edit 1:
If you need to indicate different key in different joining, given that you have already renamed the columns:
df = reduce(lambda x, y: x.join(y, on=list(set(x.columns)&set(y.columns)), how='inner'), [df_fact, df_Department, df_Leave, df_Phone])
df.show(10, False)
+---+------+---------+------+------+----------+------------+----------+
|ID |NAME |Company |Amount|salary|department|Leave_type |Phone_type|
+---+------+---------+------+------+----------+------------+----------+
|1 |sravan|company 1|100 |45000 |IT |Sick Leave |Apple |
|2 |ojaswi|company 1|200 |145000|Manager |Casual leave|Samsung |
+---+------+---------+------+------+----------+------------+----------+
|
Create a generic function to join multiple datasets in pyspark
|
Hi I am creating a generic function or class to add n numbers of datasets but I am unable to find the proper logic to do that, I put all codes below and highlight the section in which I want some help. if you find any problem in understanding my code then please ping me.
import pyspark
# importing sparksession from pyspark.sql module
from pyspark.sql import SparkSession
# creating sparksession and giving an app name
spark = SparkSession.builder.appName('sparkdf').getOrCreate()
data_fact = [["1", "sravan", "company 1","100"],
["2", "ojaswi", "company 1","200"],
["3", "rohith", "company 2","300"],
["4", "sridevi", "company 1","400"],
["5", "bobby", "company 1","500"]]
# specify column names
columns = ['ID', 'NAME', 'Company','Amount']
# creating a dataframe from the lists of data
df_fact = spark.createDataFrame(data_fact, columns)
Department_table = [["1", "45000", "IT"],
["2", "145000", "Manager"],
["6", "45000", "HR"],
["5", "34000", "Sales"]]
# specify column names
columns1 = ['ID', 'salary', 'department']
df_Department = spark.createDataFrame(Department_table, columns1)
Leave_Table = [["1", "Sick Leave"],
["2", "Casual leave"],
["3", "Casual leave"],
["4", "Earned Leave"],
["4", "Sick Leave"] ]
# specify column names
columns2 = ['ID', 'Leave_type']
df_Leave = spark.createDataFrame(Leave_Table, columns2)
Phone_Table = [["1", "Apple"],
["2", "Samsung"],
["3", "MI"],
["4", "Vivo"],
["4", "Apple"] ]
# specify column names
columns3 = ['ID', 'Phone_type']
df_Phone = spark.createDataFrame(Phone_Table, columns3)
Df_join = df_fact.join(df_Department,df_fact.ID ==df_Department.ID,"inner")\
.join(df_Phone,df_fact.ID ==df_Phone.ID,"inner")\
.join(df_Leave,df_fact.ID ==df_Leave.ID,"inner")\
.select(df_fact.Amount,df_Department.ID,df_Department.salary,df_Department.department,df_Phone.Phone_type,df_Leave.Leave_type)
display(Df_join)
basically, I want to generalise this stuff for n numbers of datasets
Df_join = df_fact.join(df_Department,df_fact.ID ==df_Department.ID,"inner")\
.join(df_Phone,df_fact.ID ==df_Phone.ID,"inner")\
.join(df_Leave,df_fact.ID ==df_Leave.ID,"inner")\
.select(df_fact.Amount,df_Department.ID,df_Department.salary,df_Department.department,df_Phone.Phone_type,df_Leave.Leave_type) ```
|
[
"Since you're using inner join in all dataframe, if you want to prevent the bulky code, you can use the .reduce() in functools to do the joining and select the column that you want:\ndf = reduce(lambda x, y: x.join(y, on='id', how='inner'), [df_fact, df_Department, df_Leave, df_Phone])\ndf.show(10, False)\n+---+------+---------+------+------+----------+------------+----------+\n|ID |NAME |Company |Amount|salary|department|Leave_type |Phone_type|\n+---+------+---------+------+------+----------+------------+----------+\n|1 |sravan|company 1|100 |45000 |IT |Sick Leave |Apple |\n|2 |ojaswi|company 1|200 |145000|Manager |Casual leave|Samsung |\n+---+------+---------+------+------+----------+------------+----------+\n\nhttps://docs.python.org/3/library/functools.html#functools.reduce\n\nEdit 1:\nIf you need to indicate different key in different joining, given that you have already renamed the columns:\ndf = reduce(lambda x, y: x.join(y, on=list(set(x.columns)&set(y.columns)), how='inner'), [df_fact, df_Department, df_Leave, df_Phone])\ndf.show(10, False)\n+---+------+---------+------+------+----------+------------+----------+\n|ID |NAME |Company |Amount|salary|department|Leave_type |Phone_type|\n+---+------+---------+------+------+----------+------------+----------+\n|1 |sravan|company 1|100 |45000 |IT |Sick Leave |Apple |\n|2 |ojaswi|company 1|200 |145000|Manager |Casual leave|Samsung |\n+---+------+---------+------+------+----------+------------+----------+\n\n"
] |
[
3
] |
[] |
[] |
[
"apache_spark",
"dataframe",
"pyspark",
"python"
] |
stackoverflow_0074595610_apache_spark_dataframe_pyspark_python.txt
|
Q:
Can not read csv with pandas in azure functions with python
I have created an Azure Blob Storage Trigger in Azure function in python.
A CSV file adds in blob storage and I try to read it with pandas.
import logging
import pandas as pd
import azure.functions as func
def main(myblob: func.InputStream):
logging.info(f"Python blob trigger function processed blob \n"
f"Name: {myblob.name}\n"
f"Blob Size: {myblob.length} bytes")
df_new = pd.read_csv(myblob)
print(df_new.head())
If I pass myblob to pd.read_csv, then I get UnsupportedOperation: read1
Python blob trigger function processed blob
Name: samples-workitems/Data_26112022_080027.csv
Blob Size: None bytes
[2022-11-27T16:19:25.650Z] Executed 'Functions.BlobTrigger1' (Failed, Id=2df388f5-a8dc-4554-80fa-f809cfaeedfe, Duration=1472ms)
[2022-11-27T16:19:25.655Z] System.Private.CoreLib: Exception while executing function: Functions.BlobTrigger1. System.Private.CoreLib: Result: Failure
Exception: UnsupportedOperation: read1
If I pass myblob.read(),
df_new = pd.read_csv(myblob.read())
it gives TypeError: Expected file path name or file-like object, got <class 'bytes'> type
Python blob trigger function processed blob
Name: samples-workitems/Data_26112022_080027.csv
Blob Size: None bytes
[2022-11-27T16:09:56.513Z] Executed 'Functions.BlobTrigger1' (Failed, Id=e3825c28-7538-4e30-bad2-2526f9811697, Duration=1468ms)
[2022-11-27T16:09:56.518Z] System.Private.CoreLib: Exception while executing function: Functions.BlobTrigger1. System.Private.CoreLib: Result: Failure
Exception: TypeError: Expected file path name or file-like object, got <class 'bytes'> type
From Azure functions Docs:
InputStream is File-like object representing an input blob.
From Pandas read_csv Docs:
read_csv takes filepath_or_bufferstr, path object or file-like object
So technically I should be able to read this object. What piece of the puzzle am I missing here?
A:
If you refer to this article, it says that this piece of code will work. But this is recommended for smaller files as the entire files goes into memory. Not recommended to be used for larger files.
import logging
import pandas as pd
import azure.functions as func
from io import BytesIO
def main(myblob: func.InputStream):
logging.info(f"Python blob trigger function processed blob \n"
f"Name: {myblob.name}\n"
f"Blob Size: {myblob.length} bytes")
df_new = pd.read_csv(BytesIO(myblob.read()))
print(df_new.head())
|
Can not read csv with pandas in azure functions with python
|
I have created an Azure Blob Storage Trigger in Azure function in python.
A CSV file adds in blob storage and I try to read it with pandas.
import logging
import pandas as pd
import azure.functions as func
def main(myblob: func.InputStream):
logging.info(f"Python blob trigger function processed blob \n"
f"Name: {myblob.name}\n"
f"Blob Size: {myblob.length} bytes")
df_new = pd.read_csv(myblob)
print(df_new.head())
If I pass myblob to pd.read_csv, then I get UnsupportedOperation: read1
Python blob trigger function processed blob
Name: samples-workitems/Data_26112022_080027.csv
Blob Size: None bytes
[2022-11-27T16:19:25.650Z] Executed 'Functions.BlobTrigger1' (Failed, Id=2df388f5-a8dc-4554-80fa-f809cfaeedfe, Duration=1472ms)
[2022-11-27T16:19:25.655Z] System.Private.CoreLib: Exception while executing function: Functions.BlobTrigger1. System.Private.CoreLib: Result: Failure
Exception: UnsupportedOperation: read1
If I pass myblob.read(),
df_new = pd.read_csv(myblob.read())
it gives TypeError: Expected file path name or file-like object, got <class 'bytes'> type
Python blob trigger function processed blob
Name: samples-workitems/Data_26112022_080027.csv
Blob Size: None bytes
[2022-11-27T16:09:56.513Z] Executed 'Functions.BlobTrigger1' (Failed, Id=e3825c28-7538-4e30-bad2-2526f9811697, Duration=1468ms)
[2022-11-27T16:09:56.518Z] System.Private.CoreLib: Exception while executing function: Functions.BlobTrigger1. System.Private.CoreLib: Result: Failure
Exception: TypeError: Expected file path name or file-like object, got <class 'bytes'> type
From Azure functions Docs:
InputStream is File-like object representing an input blob.
From Pandas read_csv Docs:
read_csv takes filepath_or_bufferstr, path object or file-like object
So technically I should read this object. What piece of puzzle am I missing here?
|
[
"If you refer to this article, it says that this piece of code will work. But this is recommended for smaller files as the entire files goes into memory. Not recommended to be used for larger files.\nimport logging\nimport pandas as pd\n\nimport azure.functions as func\nfrom io import BytesIO\n\ndef main(myblob: func.InputStream):\n logging.info(f\"Python blob trigger function processed blob \\n\"\n f\"Name: {myblob.name}\\n\"\n f\"Blob Size: {myblob.length} bytes\")\n df_new = pd.read_csv(BytesIO(myblob.read()))\n print(df_new.head())\n\n"
] |
[
1
] |
[] |
[] |
[
"azure",
"pandas",
"python"
] |
stackoverflow_0074591834_azure_pandas_python.txt
|
Q:
TypeError: cannot perform reduce with flexible type
I have been using the scikit-learn library. I'm trying to use the Gaussian Naive Bayes Module under the scikit-learn library but I'm running into the following error. TypeError: cannot perform reduce with flexible type
Below is the code snippet.
training = GaussianNB()
training = training.fit(trainData, target)
prediction = training.predict(testData)
This is target
['ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML']
This is trainData
[['-214' '-153' '-58' ..., '36' '191' '-37']
['-139' '-73' '-1' ..., '11' '76' '-14']
['-76' '-49' '-307' ..., '41' '228' '-41']
...,
['-32' '-49' '49' ..., '-26' '133' '-32']
['-124' '-79' '-37' ..., '39' '298' '-3']
['-135' '-186' '-70' ..., '-12' '790' '-10']]
Below is the stack trace
Traceback (most recent call last):
File "prediction.py", line 90, in <module>
gaussianNaiveBayes()
File "prediction.py", line 76, in gaussianNaiveBayes
training = training.fit(trainData, target)
File "/Library/Python/2.7/site-packages/sklearn/naive_bayes.py", line 163, in fit
self.theta_[i, :] = np.mean(Xi, axis=0)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy/ core/fromnumeric.py", line 2716, in mean
out=out, keepdims=keepdims)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy/core/_methods.py", line 62, in _mean
ret = um.add.reduce(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
TypeError: cannot perform reduce with flexible type
A:
It looks like your 'trainData' is a list of strings:
['-214' '-153' '-58' ..., '36' '191' '-37']
Change your 'trainData' to a numeric type.
import numpy as np
np.array(['1','2','3']).astype(np.float)
A:
When your are trying to apply prod on string type of value like:
['-214' '-153' '-58' ..., '36' '191' '-37']
you will get the error.
Solution:
Append only integer value like [1,2,3], and you will get your expected output.
If the value is in string format before appending then, in the array you can convert the type into int type and store it in a list.
A:
My best advice facing that error. Typically you have to check the type compatibility of your data. Take few minutes to check it, print it and you should find an incompatibility.
|
TypeError: cannot perform reduce with flexible type
|
I have been using the scikit-learn library. I'm trying to use the Gaussian Naive Bayes Module under the scikit-learn library but I'm running into the following error. TypeError: cannot perform reduce with flexible type
Below is the code snippet.
training = GaussianNB()
training = training.fit(trainData, target)
prediction = training.predict(testData)
This is target
['ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'ALL', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML', 'AML']
This is trainData
[['-214' '-153' '-58' ..., '36' '191' '-37']
['-139' '-73' '-1' ..., '11' '76' '-14']
['-76' '-49' '-307' ..., '41' '228' '-41']
...,
['-32' '-49' '49' ..., '-26' '133' '-32']
['-124' '-79' '-37' ..., '39' '298' '-3']
['-135' '-186' '-70' ..., '-12' '790' '-10']]
Below is the stack trace
Traceback (most recent call last):
File "prediction.py", line 90, in <module>
gaussianNaiveBayes()
File "prediction.py", line 76, in gaussianNaiveBayes
training = training.fit(trainData, target)
File "/Library/Python/2.7/site-packages/sklearn/naive_bayes.py", line 163, in fit
self.theta_[i, :] = np.mean(Xi, axis=0)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy/ core/fromnumeric.py", line 2716, in mean
out=out, keepdims=keepdims)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/numpy/core/_methods.py", line 62, in _mean
ret = um.add.reduce(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
TypeError: cannot perform reduce with flexible type
|
[
"It looks like your 'trainData' is a list of strings:\n['-214' '-153' '-58' ..., '36' '191' '-37']\n\nChange your 'trainData' to a numeric type.\n import numpy as np\n np.array(['1','2','3']).astype(np.float)\n\n",
"When your are trying to apply prod on string type of value like:\n['-214' '-153' '-58' ..., '36' '191' '-37']\n\nyou will get the error.\nSolution:\nAppend only integer value like [1,2,3], and you will get your expected output.\nIf the value is in string format before appending then, in the array you can convert the type into int type and store it in a list.\n",
"My best advice facing that error. Typically you have to check the type compatibility of your data. Take few minutes to check it, print it and you should find an incompatibility.\n"
] |
[
169,
4,
0
] |
[] |
[] |
[
"python",
"python_2.7",
"scikit_learn"
] |
stackoverflow_0028393103_python_python_2.7_scikit_learn.txt
|
Q:
Pygame animation list index continuously goes out of range
I followed a clear code tutorial to make a platformer and ended up finishing it, however one thing always continously messed up. That being the animation, at times the game just would not run and would only run in debug mode due to the animation list being out of index which makes no sense to me since every item in the list is accounted for. Below is my player.py which has all of the functions and such that the video mentions. If more information is needed I will gladly provide it. (the animation part is specifically erroring at self.image = self.animations['idle'][self.frame_index])
import pygame
from support import import_folder
class Player(pygame.sprite.Sprite):
def __init__(self,pos,surface,create_jump_particles):
super().__init__()
self.import_character_assets()
self.frame_index = 0
self.animation_speed = 0.15
self.image = self.animations['idle'][self.frame_index]
self.rect = self.image.get_rect(topleft = pos)
# dust particles
self.import_dust_run_particles()
self.dust_frame_index = 0
self.dust_animation_speed = 0.15
self.display_surface = surface
self.create_jump_particles = create_jump_particles
# player movement
self.direction = pygame.math.Vector2(0,0)
self.speed = 8
self.gravity = 0.8
self.jump_speed = -16
# player status
self.status = 'idle'
self.facing_right = True
self.on_ground = False
self.on_ceiling = False
self.on_left = False
self.on_right = False
def import_character_assets(self):
character_path = '../graphics/character/'
self.animations = {'idle':[],'run':[],'jump':[],'fall':[]}
for animation in self.animations.keys():
full_path = character_path + animation
self.animations[animation] = import_folder(full_path)
def import_dust_run_particles(self):
self.dust_run_particles = import_folder('../graphics/character/dust_particles/run')
def animate(self):
animation = self.animations[self.status]
# loop over frame index
self.frame_index += self.animation_speed
if self.frame_index >= len(animation):
self.frame_index = 0
image = animation[int(self.frame_index)]
if self.facing_right:
self.image = image
else:
flipped_image = pygame.transform.flip(image,True,False)
self.image = flipped_image
# set the rect
if self.on_ground and self.on_right:
self.rect = self.image.get_rect(bottomright = self.rect.bottomright)
elif self.on_ground and self.on_left:
self.rect = self.image.get_rect(bottomleft = self.rect.bottomleft)
elif self.on_ground:
self.rect = self.image.get_rect(midbottom = self.rect.midbottom)
elif self.on_ceiling and self.on_right:
self.rect = self.image.get_rect(topright = self.rect.topright)
elif self.on_ceiling and self.on_left:
self.rect = self.image.get_rect(topleft = self.rect.topleft)
elif self.on_ceiling:
self.rect = self.image.get_rect(midtop = self.rect.midtop)
def run_dust_animation(self):
if self.status == 'run' and self.on_ground:
self.dust_frame_index += self.dust_animation_speed
if self.dust_frame_index >= len(self.dust_run_particles):
self.dust_frame_index = 0
dust_particle = self.dust_run_particles[int(self.dust_frame_index)]
if self.facing_right:
pos = self.rect.bottomleft - pygame.math.Vector2(6,10)
self.display_surface.blit(dust_particle,pos)
else:
pos = self.rect.bottomright - pygame.math.Vector2(6,10)
flipped_dust_particle = pygame.transform.flip(dust_particle,True,False)
self.display_surface.blit(flipped_dust_particle,pos)
def get_input(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT]:
self.direction.x = 1
self.facing_right = True
elif keys[pygame.K_LEFT]:
self.direction.x = -1
self.facing_right = False
else:
self.direction.x = 0
if keys[pygame.K_SPACE] and self.on_ground:
self.jump()
self.create_jump_particles(self.rect.midbottom)
def get_status(self):
if self.direction.y < 0:
self.status = 'jump'
elif self.direction.y > 1:
self.status = 'fall'
else:
if self.direction.x != 0:
self.status = 'run'
else:
self.status = 'idle'
def apply_gravity(self):
self.direction.y += self.gravity
self.rect.y += self.direction.y
def jump(self):
self.direction.y = self.jump_speed
def update(self):
self.get_input()
self.get_status()
self.animate()
self.run_dust_animation()
I've tried everything, I've even gone to his website and downloaded his prototype and tried adding my own files to it and it still returns the same error. Whats supposed to be happening is that my character does an animation depending on which state they are in.
A:
So it's likely that the code is not loading any of the animation frames for the 'idle' character animation.
The initialisation code first loads all the assets:
class Player(pygame.sprite.Sprite):
def __init__(self,pos,surface,create_jump_particles):
super().__init__()
self.import_character_assets()
Where self.import_character_assets() is:
def import_character_assets(self):
character_path = '../graphics/character/'
self.animations = {'idle':[],'run':[],'jump':[],'fall':[]}
for animation in self.animations.keys():
full_path = character_path + animation
self.animations[animation] = import_folder(full_path)
Which is iterating through 'idle', 'run', 'jump', 'fall', loading images from '../graphics/character/idle', '../graphics/character/run', etc. into a dictionary-of-lists.
We don't have the code for import_folder(), but it's obviously intended to load some set of images.
After importing, the self.animations dictionary is expected to have at least a single image in self.animations['idle']. This is because the __init__() is defaulting to 'idle', and index 0:
self.frame_index = 0
self.animation_speed = 0.15
self.image = self.animations['idle'][self.frame_index] # <<-- HERE, 0
So we can conclude that because we know, self.animations['idle'] is out of bounds, even on [0], no images were loaded.
Check that there are images available for this. Or maybe the filenames are not what's expected (e.g.: .jpg, not .png), or some other file-system error (e.g.: read only, zero byte files, ... ). Maybe the load-path is incorrect?
It might be a good idea to check the load, and write an error:
def import_character_assets(self):
character_path = '../graphics/character/'
self.animations = {'idle':[],'run':[],'jump':[],'fall':[]}
for animation in self.animations.keys():
full_path = character_path + animation
self.animations[animation] = import_folder(full_path)
# check we loaded something
if ( len( self.animations[animation] ) == 0 ):
print( "Failed to load any animations for \"" + animation + "\" type" )
|
Pygame animation list index continuously goes out of range
|
I followed a clear code tutorial to make a platformer and ended up finishing it, however one thing always continously messed up. That being the animation, at times the game just would not run and would only run in debug mode due to the animation list being out of index which makes no sense to me since every item in the list is accounted for. Below is my player.py which has all of the functions and such that the video mentions. If more information is needed I will gladly provide it. (the animation part is specifically erroring at self.image = self.animations['idle'][self.frame_index])
import pygame
from support import import_folder
class Player(pygame.sprite.Sprite):
def __init__(self,pos,surface,create_jump_particles):
super().__init__()
self.import_character_assets()
self.frame_index = 0
self.animation_speed = 0.15
self.image = self.animations['idle'][self.frame_index]
self.rect = self.image.get_rect(topleft = pos)
# dust particles
self.import_dust_run_particles()
self.dust_frame_index = 0
self.dust_animation_speed = 0.15
self.display_surface = surface
self.create_jump_particles = create_jump_particles
# player movement
self.direction = pygame.math.Vector2(0,0)
self.speed = 8
self.gravity = 0.8
self.jump_speed = -16
# player status
self.status = 'idle'
self.facing_right = True
self.on_ground = False
self.on_ceiling = False
self.on_left = False
self.on_right = False
def import_character_assets(self):
character_path = '../graphics/character/'
self.animations = {'idle':[],'run':[],'jump':[],'fall':[]}
for animation in self.animations.keys():
full_path = character_path + animation
self.animations[animation] = import_folder(full_path)
def import_dust_run_particles(self):
self.dust_run_particles = import_folder('../graphics/character/dust_particles/run')
def animate(self):
animation = self.animations[self.status]
# loop over frame index
self.frame_index += self.animation_speed
if self.frame_index >= len(animation):
self.frame_index = 0
image = animation[int(self.frame_index)]
if self.facing_right:
self.image = image
else:
flipped_image = pygame.transform.flip(image,True,False)
self.image = flipped_image
# set the rect
if self.on_ground and self.on_right:
self.rect = self.image.get_rect(bottomright = self.rect.bottomright)
elif self.on_ground and self.on_left:
self.rect = self.image.get_rect(bottomleft = self.rect.bottomleft)
elif self.on_ground:
self.rect = self.image.get_rect(midbottom = self.rect.midbottom)
elif self.on_ceiling and self.on_right:
self.rect = self.image.get_rect(topright = self.rect.topright)
elif self.on_ceiling and self.on_left:
self.rect = self.image.get_rect(topleft = self.rect.topleft)
elif self.on_ceiling:
self.rect = self.image.get_rect(midtop = self.rect.midtop)
def run_dust_animation(self):
if self.status == 'run' and self.on_ground:
self.dust_frame_index += self.dust_animation_speed
if self.dust_frame_index >= len(self.dust_run_particles):
self.dust_frame_index = 0
dust_particle = self.dust_run_particles[int(self.dust_frame_index)]
if self.facing_right:
pos = self.rect.bottomleft - pygame.math.Vector2(6,10)
self.display_surface.blit(dust_particle,pos)
else:
pos = self.rect.bottomright - pygame.math.Vector2(6,10)
flipped_dust_particle = pygame.transform.flip(dust_particle,True,False)
self.display_surface.blit(flipped_dust_particle,pos)
def get_input(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT]:
self.direction.x = 1
self.facing_right = True
elif keys[pygame.K_LEFT]:
self.direction.x = -1
self.facing_right = False
else:
self.direction.x = 0
if keys[pygame.K_SPACE] and self.on_ground:
self.jump()
self.create_jump_particles(self.rect.midbottom)
def get_status(self):
if self.direction.y < 0:
self.status = 'jump'
elif self.direction.y > 1:
self.status = 'fall'
else:
if self.direction.x != 0:
self.status = 'run'
else:
self.status = 'idle'
def apply_gravity(self):
self.direction.y += self.gravity
self.rect.y += self.direction.y
def jump(self):
self.direction.y = self.jump_speed
def update(self):
self.get_input()
self.get_status()
self.animate()
self.run_dust_animation()
I've tried everything, I've even gone to his website and downloaded his prototype and tried adding my own files to it and it still returns the same error. Whats supposed to be happening is that my character does an animation depending on which state they are in.
|
[
"So it's likely that the code is not loading any of the animation frames for the 'idle' character animation.\nThe initialisation code first loads all the assets:\nclass Player(pygame.sprite.Sprite):\n def __init__(self,pos,surface,create_jump_particles):\n super().__init__()\n self.import_character_assets()\n\nWhere self.import_character_assets() is:\ndef import_character_assets(self):\n character_path = '../graphics/character/'\n self.animations = {'idle':[],'run':[],'jump':[],'fall':[]}\n\n for animation in self.animations.keys():\n full_path = character_path + animation\n self.animations[animation] = import_folder(full_path)\n\nWhich is iterating through 'idle', 'run', 'jump', 'fall', loading images from '../graphics/character/idle', '../graphics/character/run', etc. into a dictionary-of-lists.\nWe don't have the code for import_folder(), but it's obviously intended to load some set of images.\nAfter importing, the self.animations dictionary is expected to have at least a single image in self.animations['idle']. This is because the __init__() is defaulting to 'idle', and index 0:\n self.frame_index = 0\n self.animation_speed = 0.15\n self.image = self.animations['idle'][self.frame_index] # <<-- HERE, 0\n\nSo we can conclude that because we know, self.animations['idle'] is out of bounds, even on [0], no images were loaded.\nCheck that there are images available for this. Or maybe the filenames are not what's expected (e.g.: .jpg, not .png), or some other file-system error (e.g.: read only, zero byte files, ... ). 
Maybe the load-path is incorrect?\nIt might be a good idea to check the load, and write an error:\ndef import_character_assets(self):\n character_path = '../graphics/character/'\n self.animations = {'idle':[],'run':[],'jump':[],'fall':[]}\n\n for animation in self.animations.keys():\n full_path = character_path + animation\n self.animations[animation] = import_folder(full_path)\n # check we loaded something\n if ( len( self.animations[animation] ) == 0 ):\n print( \"Failed to load any animations for \\\"\" + animation + \"\\\" type\" )\n\n"
] |
[
0
] |
[] |
[] |
[
"animation",
"pygame",
"python",
"python_3.x"
] |
stackoverflow_0074595379_animation_pygame_python_python_3.x.txt
|
Q:
Add another level of headers to multiindex dataframe
I have the following dataframe:
dic = {'US':{'Quality':{'points':"-2 n", 'difference':'equal', 'stat': 'same'}, 'Prices':{'points':"-7 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"3 n", 'difference':'positive', 'stat': 'below'}},
'UK': {'Quality':{'points':"3 n", 'difference':'equal', 'stat': 'above'}, 'Prices':{'points':"-13 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"2 n", 'difference':'negative', 'stat': 'same'}}}
d1 = defaultdict(dict)
for k, v in dic.items():
for k1, v1 in v.items():
for k2, v2 in v1.items():
d1[(k, k2)].update({k1: v2})
df = pd.DataFrame(d1)
df.columns = df.columns.rename("Skateboard", level=0)
df.columns = df.columns.rename("Q3", level=1)
df.insert(loc=0, column=('', 'Mode'), value="Website")
Currently, it looks like this:
How do I add another level of headers to the multiindex dataframe to make it look like the image below?
Update:
dic = {'US':{'Quality':{'points':"-2 n", 'difference':'equal', 'stat': 'same'}, 'Prices':{'points':"-7 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"3 n", 'difference':'positive', 'stat': 'below'}},
'UK': {'Quality':{'points':"3 n", 'difference':'equal', 'stat': 'above'}, 'Prices':{'points':"-13 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"2 n", 'difference':'negative', 'stat': 'same'}}}
d1 = defaultdict(dict)
for k, v in dic.items():
for k1, v1 in v.items():
for k2, v2 in v1.items():
d1[(k, k2)].update({k1: v2})
df = pd.DataFrame(d1)
df.columns = df.columns.rename("Skateboard", level=0)
df.columns = df.columns.rename("Metric", level=1)
df1 = df.xs('points', axis=1, level=1, drop_level=False)
df2 = df.drop('points', axis=1, level=1)
df3 = (pd.concat([df1, df2], keys=['GM', ''], axis=1)
.swaplevel(0, 1, axis=1)
.sort_index(axis=1))
df3.columns = df3.columns.rename("Q3", level=1)
df3.insert(loc=0, column=('','', 'Mode'), value="Website")
df3
Now the data frame looks like:
How do I move the header GM to be first for the column for both US and UK (see the second image for the final output)?
A:
Example
data = {('A', 'a'): {0: 8, 1: 3, 2: 4},
('A', 'b'): {0: 5, 1: 7, 2: 8},
('A', 'c'): {0: 1, 1: 7, 2: 6},
('B', 'a'): {0: 7, 1: 1, 2: 0},
('B', 'b'): {0: 1, 1: 1, 2: 7},
('B', 'c'): {0: 7, 1: 7, 2: 4}}
df = pd.DataFrame(data)
df
A B
a b c a b c
0 8 5 1 7 1 7
1 3 7 7 1 1 7
2 4 8 6 0 7 4
Code
make new level and add c to a column, add d except a
df with a (df1)
df1 = df.xs('a', axis=1, level=1, drop_level=False)
output(df1):
A B
a a
0 8 7
1 3 1
2 4 0
df except a (df2)
df2 = df.drop('a', axis=1, level=1)
output(df2):
A B
b c b c
0 5 1 1 7
1 7 7 1 7
2 8 6 7 4
concat df1 & df2 with key
pd.concat([df1, df2], keys=['c', 'd'], axis=1)
output:
c d
A B A B
a a b c b c
0 8 7 5 1 1 7
1 3 1 7 7 1 7
2 4 0 8 6 7 4
swaplevel and sort
(pd.concat([df1, df2], keys=['c', 'd'], axis=1)
.swaplevel(0, 1, axis=1)
.sort_index(axis=1))
result:
A B
c d c d
a b c a b c
0 8 5 1 7 1 7
1 3 7 7 1 1 7
2 4 8 6 0 7 4
we can add level to columns
or use simple way
df3 = pd.concat([df], keys=[''], names=['Q3'], axis=1).swaplevel(0, 1, axis=1)
df3.columns = df3.columns.map(lambda x: (x[0], 'c', x[2]) if x[2] == 'a' else x)
df3
A B
Q3 c c
a b c a b c
0 8 5 1 7 1 7
1 3 7 7 1 1 7
2 4 8 6 0 7 4
|
Add another level of headers to multiindex dataframe
|
I have the following dataframe:
dic = {'US':{'Quality':{'points':"-2 n", 'difference':'equal', 'stat': 'same'}, 'Prices':{'points':"-7 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"3 n", 'difference':'positive', 'stat': 'below'}},
'UK': {'Quality':{'points':"3 n", 'difference':'equal', 'stat': 'above'}, 'Prices':{'points':"-13 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"2 n", 'difference':'negative', 'stat': 'same'}}}
d1 = defaultdict(dict)
for k, v in dic.items():
for k1, v1 in v.items():
for k2, v2 in v1.items():
d1[(k, k2)].update({k1: v2})
df = pd.DataFrame(d1)
df.columns = df.columns.rename("Skateboard", level=0)
df.columns = df.columns.rename("Q3", level=1)
df.insert(loc=0, column=('', 'Mode'), value="Website")
Currently, it looks like this:
How do I add another level of headers to the multiindex dataframe to make it look like the image below?
Update:
dic = {'US':{'Quality':{'points':"-2 n", 'difference':'equal', 'stat': 'same'}, 'Prices':{'points':"-7 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"3 n", 'difference':'positive', 'stat': 'below'}},
'UK': {'Quality':{'points':"3 n", 'difference':'equal', 'stat': 'above'}, 'Prices':{'points':"-13 n", 'difference':'negative', 'stat': 'below'}, 'Satisfaction':{'points':"2 n", 'difference':'negative', 'stat': 'same'}}}
d1 = defaultdict(dict)
for k, v in dic.items():
for k1, v1 in v.items():
for k2, v2 in v1.items():
d1[(k, k2)].update({k1: v2})
df = pd.DataFrame(d1)
df.columns = df.columns.rename("Skateboard", level=0)
df.columns = df.columns.rename("Metric", level=1)
df1 = df.xs('points', axis=1, level=1, drop_level=False)
df2 = df.drop('points', axis=1, level=1)
df3 = (pd.concat([df1, df2], keys=['GM', ''], axis=1)
.swaplevel(0, 1, axis=1)
.sort_index(axis=1))
df3.columns = df3.columns.rename("Q3", level=1)
df3.insert(loc=0, column=('','', 'Mode'), value="Website")
df3
Now the data frame looks like:
How do I move the header GM to be first for the column for both US and UK (see the second image for the final output)?
|
[
"Example\ndata = {('A', 'a'): {0: 8, 1: 3, 2: 4},\n ('A', 'b'): {0: 5, 1: 7, 2: 8},\n ('A', 'c'): {0: 1, 1: 7, 2: 6},\n ('B', 'a'): {0: 7, 1: 1, 2: 0},\n ('B', 'b'): {0: 1, 1: 1, 2: 7},\n ('B', 'c'): {0: 7, 1: 7, 2: 4}}\ndf = pd.DataFrame(data)\n\ndf\n A B\n a b c a b c\n0 8 5 1 7 1 7\n1 3 7 7 1 1 7\n2 4 8 6 0 7 4\n\nCode\nmake new level and add c to a column, add d except a\n\ndf with a (df1)\ndf1 = df.xs('a', axis=1, level=1, drop_level=False)\n\noutput(df1):\n A B\n a a\n0 8 7\n1 3 1\n2 4 0\n\n\ndf except a (df2)\ndf2 = df.drop('a', axis=1, level=1)\n\noutput(df2):\n A B\n b c b c\n0 5 1 1 7\n1 7 7 1 7\n2 8 6 7 4\n\n\nconcat df1 & df2 with key\npd.concat([df1, df2], keys=['c', 'd'], axis=1)\n\noutput:\n c d\n A B A B\n a a b c b c\n0 8 7 5 1 1 7\n1 3 1 7 7 1 7\n2 4 0 8 6 7 4\n\n\nswaplevel and sort\n(pd.concat([df1, df2], keys=['c', 'd'], axis=1)\n .swaplevel(0, 1, axis=1)\n .sort_index(axis=1))\n\nresult:\n A B\n c d c d\n a b c a b c\n0 8 5 1 7 1 7\n1 3 7 7 1 1 7\n2 4 8 6 0 7 4\n\n\nwe can add level to columns\n\nor use simple way\ndf3 = pd.concat([df], keys=[''], names=['Q3'], axis=1).swaplevel(0, 1, axis=1)\ndf3.columns = df3.columns.map(lambda x: (x[0], 'c', x[2]) if x[2] == 'a' else x)\n\ndf3\n A B\nQ3 c c \n a b c a b c\n0 8 5 1 7 1 7\n1 3 7 7 1 1 7\n2 4 8 6 0 7 4\n\n"
] |
[
1
] |
[] |
[] |
[
"dataframe",
"multi_index",
"pandas",
"python"
] |
stackoverflow_0074595521_dataframe_multi_index_pandas_python.txt
|
Q:
Sum of all the values in a list of dictionaries
I have a warehouses dictionary (shown below) and I need to get the sum of 'tons'. The values can be at various depths in the dictionary.
warehouses = {
"Warehouse Lisboa": [
{ "name": "apples", "tons": 4},
{ "name": "oranges", "tons": 10},
{ "name": "lemons", "tons": 50}
],
"Warehouse Cascais": {
"Branch 1": [
{ "name": "apples", "tons": 10},
{ "name": "oranges", "tons": 24}
],
"Branch 2": [
{ "name": "apples", "tons": 16},
{ "name": "oranges", "tons": 8}
]
},
"Warehouse Oeiras": {
"Branch 1": {
"Sub Branch 1":{
"Sub sub Branch 1": [
{ "name": "lemons", "tons": 10}
]
}
},
"Branch 2": [
{ "name": "apples", "tons": 3}
]
}
}
I tried the following but it returned - TypeError: unsupported operand type(s) for +: 'int' and 'list':
def stock_fruits(warehouses):
return sum(warehouses.values())
How do I get the sum of all the 'tons' values in the dictionary?
A:
Consider using a depth-first search approach:
from typing import Union
def stock_fruits(curr: Union[dict, list]) -> int:
if isinstance(curr, dict):
return sum(stock_fruits(value) for value in curr.values())
return sum(entry["tons"] for entry in curr)
warehouses = {
"Warehouse Lisboa": [
{"name": "apples", "tons": 4},
{"name": "oranges", "tons": 10},
{"name": "lemons", "tons": 50}
],
"Warehouse Cascais": {
"Branch 1": [
{"name": "apples", "tons": 10},
{"name": "oranges", "tons": 24}
],
"Branch 2": [
{"name": "apples", "tons": 16},
{"name": "oranges", "tons": 8}
]
},
"Warehouse Oeiras": {
"Branch 1": {
"Sub Branch 1": {
"Sub sub Branch 1": [
{"name": "lemons", "tons": 10}
]
}
},
"Branch 2": [
{"name": "apples", "tons": 3}
]
}
}
print(f"{stock_fruits(warehouses) = }")
Output:
stock_fruits(warehouses) = 135
A:
You can use this function to compute the sum:
def stock_fruits(warehouses):
fruit_sum = 0
queue = deque(list(warehouses.values()))
while queue:
node = queue.popleft()
if isinstance(node, List):
fruit_sum += sum([item.get('tons', 0) for item in node])
else:
queue.extend(list(node.values()))
return fruit_sum
|
Sum of all the values in a list of dictionaries
|
I have a warehouses dictionary (shown below) and I need to get the sum of 'tons'. The values can be at various depths in the dictionary.
warehouses = {
"Warehouse Lisboa": [
{ "name": "apples", "tons": 4},
{ "name": "oranges", "tons": 10},
{ "name": "lemons", "tons": 50}
],
"Warehouse Cascais": {
"Branch 1": [
{ "name": "apples", "tons": 10},
{ "name": "oranges", "tons": 24}
],
"Branch 2": [
{ "name": "apples", "tons": 16},
{ "name": "oranges", "tons": 8}
]
},
"Warehouse Oeiras": {
"Branch 1": {
"Sub Branch 1":{
"Sub sub Branch 1": [
{ "name": "lemons", "tons": 10}
]
}
},
"Branch 2": [
{ "name": "apples", "tons": 3}
]
}
}
I tried the following but it returned - TypeError: unsupported operand type(s) for +: 'int' and 'list':
def stock_fruits(warehouses):
return sum(warehouses.values())
How do I get the sum of all the 'tons' values in the dictionary?
|
[
"Consider using a depth-first search approach:\nfrom typing import Union\n\ndef stock_fruits(curr: Union[dict, list]) -> int:\n if isinstance(curr, dict):\n return sum(stock_fruits(value) for value in curr.values())\n return sum(entry[\"tons\"] for entry in curr)\n\nwarehouses = {\n \"Warehouse Lisboa\": [\n {\"name\": \"apples\", \"tons\": 4},\n {\"name\": \"oranges\", \"tons\": 10},\n {\"name\": \"lemons\", \"tons\": 50}\n ],\n \"Warehouse Cascais\": {\n \"Branch 1\": [\n {\"name\": \"apples\", \"tons\": 10},\n {\"name\": \"oranges\", \"tons\": 24}\n ],\n \"Branch 2\": [\n {\"name\": \"apples\", \"tons\": 16},\n {\"name\": \"oranges\", \"tons\": 8}\n ]\n },\n \"Warehouse Oeiras\": {\n \"Branch 1\": {\n \"Sub Branch 1\": {\n \"Sub sub Branch 1\": [\n {\"name\": \"lemons\", \"tons\": 10}\n ]\n }\n },\n \"Branch 2\": [\n {\"name\": \"apples\", \"tons\": 3}\n ]\n }\n}\nprint(f\"{stock_fruits(warehouses) = }\")\n\nOutput:\nstock_fruits(warehouses) = 135\n\n",
"You can use this function to compute the sum:\n def stock_fruits(warehouses):\n fruit_sum = 0\n\n queue = deque(list(warehouses.values()))\n while queue:\n node = queue.popleft()\n if isinstance(node, List):\n fruit_sum += sum([item.get('tons', 0) for item in node])\n else:\n queue.extend(list(node.values()))\n\n return fruit_sum\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"dictionary",
"python",
"sum"
] |
stackoverflow_0074595259_dictionary_python_sum.txt
|
Q:
ufunc 'add' did not contain loop with signature matching type dtype ('S32') ('S32') ('S32')
I'm trying to run someone's script for some simulations I've made to try plotting some histograms, but when I do I always get the error message mentioned above. I have no idea what's gone wrong.
Here's the complete traceback error I get:
File "AVAnalyse.py", line 205, in <module>
f.write(line[0] + ' ' + line[1] + ' ' + line[2] + ' ' + line[3])
TypeError: ufunc 'add' did not contain a loop with signature matching types dtype('S32') dtype('S32') dtype('S32')
This is the code I am trying to run:
name_out = "histogram_" + donor + "_" + acceptor + ".dat"
f = open(name_out, 'w')
f.write('distance d.probability efficiency e.probability')
for line in dist_hist:
f.write(line[0] + ' ' + line[1] + ' ' + line[2] + ' ' + line[3])
f.close()
print "data saved in " + "histogram_" + donor + "_" + acceptor + ".dat"
What am I doing wrong?
A:
It seems like line[0], line[1], line[2], line[3] are elements of dist_hist. dict_hist is a numpy.ndarray. The elements of dict_hist has a numeric type (like np.float64) (based on calculations from your attached file). You're trying to add elements of different types: np.float64 and str. If you want to avoid this TypeError, you can change type of line[0], line[1], line[2], line[3] to str.
Your snippet of code should be like this:
name_out = "histogram_"+donor+"_"+acceptor+".dat"
f = open(name_out,'w')
f.write('distance d.probability efficiency e.probability')
for line in dist_hist:
f.write(str(line[0])+' '+str(line[1])+' '+str(line[2])+' '+str(line[3]))
f.close()
print "data saved in " +"histogram_"+donor+"_"+acceptor+".dat"
EDIT:
You should replace this snippet of code:
name_out = "histogram_"+donor+"_"+acceptor+".dat"
f = open(name_out,'w')
f.write('distance d.probability efficiency e.probability')
for line in dist_hist:
f.write(line[0]+' '+line[1]+' '+line[2]+' '+line[3])
f.close()
to this one:
name_out = "histogram_" + donor + "_" + acceptor + ".dat"
f = open(name_out,'w')
f.write('distance d.probability efficiency e.probability\n')
for line in dist_hist:
f.write(str(line[0]) + ' ' + str(line[1]) + ' ' + str(line[2]) + ' ' + str(line[3]) + '\n')
f.close()
Before that, strings were written to file in one line. Because of that your data variable point to empty array since we start to read from 2nd line (which was empty).
A:
My problem was solved by @Eduard Ilyasov's solution. The TLDR is to wrap the part where the error came from in str(thing) (when printing for me).
A:
My best advice when you're facing an error like that. Typically you have to check the type compatibility of your data. Take few minutes to check it, print it and you should find an incompatibility. Often, that's mixed of string and numerical values.
|
ufunc 'add' did not contain loop with signature matching type dtype ('S32') ('S32') ('S32')
|
I'm trying to run someone's script for some simulations I've made to try plotting some histograms, but when I do I always get the error message mentioned above. I have no idea what's gone wrong.
Here's the complete traceback error I get:
File "AVAnalyse.py", line 205, in <module>
f.write(line[0] + ' ' + line[1] + ' ' + line[2] + ' ' + line[3])
TypeError: ufunc 'add' did not contain a loop with signature matching types dtype('S32') dtype('S32') dtype('S32')
This is the code I am trying to run:
name_out = "histogram_" + donor + "_" + acceptor + ".dat"
f = open(name_out, 'w')
f.write('distance d.probability efficiency e.probability')
for line in dist_hist:
f.write(line[0] + ' ' + line[1] + ' ' + line[2] + ' ' + line[3])
f.close()
print "data saved in " + "histogram_" + donor + "_" + acceptor + ".dat"
What am I doing wrong?
|
[
"It seems like line[0], line[1], line[2], line[3] are elements of dist_hist. dict_hist is a numpy.ndarray. The elements of dict_hist has a numeric type (like np.float64) (based on calculations from your attached file). You're trying to add elements of different types: np.float64 and str. If you want to avoid this TypeError, you can change type of line[0], line[1], line[2], line[3] to str.\nYour snippet of code should be like this:\nname_out = \"histogram_\"+donor+\"_\"+acceptor+\".dat\" \nf = open(name_out,'w')\nf.write('distance d.probability efficiency e.probability')\nfor line in dist_hist:\n f.write(str(line[0])+' '+str(line[1])+' '+str(line[2])+' '+str(line[3]))\nf.close()\n\nprint \"data saved in \" +\"histogram_\"+donor+\"_\"+acceptor+\".dat\"\n\nEDIT:\nYou should replace this snippet of code:\nname_out = \"histogram_\"+donor+\"_\"+acceptor+\".dat\" \nf = open(name_out,'w')\nf.write('distance d.probability efficiency e.probability')\nfor line in dist_hist:\n f.write(line[0]+' '+line[1]+' '+line[2]+' '+line[3])\nf.close()\n\nto this one:\nname_out = \"histogram_\" + donor + \"_\" + acceptor + \".dat\" \nf = open(name_out,'w')\nf.write('distance d.probability efficiency e.probability\\n')\nfor line in dist_hist:\n f.write(str(line[0]) + ' ' + str(line[1]) + ' ' + str(line[2]) + ' ' + str(line[3]) + '\\n')\nf.close()\n\nBefore that, strings were written to file in one line. Because of that your data variable point to empty array since we start to read from 2nd line (which was empty).\n",
"My problem was solved by @Eduard Ilyasov's solution. The TLDR is to wrap the part where the error came from in str(thing) (when printing for me).\n",
"My best advice when you're facing an error like that. Typically you have to check the type compatibility of your data. Take few minutes to check it, print it and you should find an incompatibility. Often, that's mixed of string and numerical values.\n"
] |
[
32,
0,
0
] |
[] |
[] |
[
"numpy",
"python"
] |
stackoverflow_0041859824_numpy_python.txt
|
Q:
'ChebConv_Coma' object has no attribute 'weight'
my code
import torch
from torch_scatter import scatter_add
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.conv.cheb_conv import ChebConv
from torch_geometric.utils import remove_self_loops
from utils import normal
class ChebConv_Coma(ChebConv):
def __init__(self, in_channels, out_channels, K, normalization=None, bias=True):
super(ChebConv_Coma, self).__init__(in_channels, out_channels, K, normalization, bias)
def reset_parameters(self):
normal(self.weight, 0, 0.1)
normal(self.bias, 0, 0.1)
I get the message:
File "/home/jack/pytorch_coma/layers.py", line 14, in reset_parameters
normal(self.weight, 0, 0.1)
File "/home/jack/.conda/envs/COMA/lib/python3.7/site-packages/torch/nn/modules/module.py", line 948, in __getattr__
type(self).__name__, name))
AttributeError: 'ChebConv_Coma' object has no attribute 'weight'
I don't know what to do next.Is there anything wrong with the ChebConv?
A:
Have you solved the problem?
I've checked the Cheb_conv.py and have a guess: the Parent Class message_passing has no attribute called weight, and instead since it's an implementation of a Graph Network(the discrete model), the corresponding self.weight should be self.lins[k].weight (the linear transform matrix), so to replace self.weight with self.lins[k].weight may help with the issue.
Here is the reference link about GNN(message passing) with pytorch:
https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html#creating-message-passing-networks
|
'ChebConv_Coma' object has no attribute 'weight'
|
my code
import torch
from torch_scatter import scatter_add
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.conv.cheb_conv import ChebConv
from torch_geometric.utils import remove_self_loops
from utils import normal
class ChebConv_Coma(ChebConv):
def __init__(self, in_channels, out_channels, K, normalization=None, bias=True):
super(ChebConv_Coma, self).__init__(in_channels, out_channels, K, normalization, bias)
def reset_parameters(self):
normal(self.weight, 0, 0.1)
normal(self.bias, 0, 0.1)
I get the message:
File "/home/jack/pytorch_coma/layers.py", line 14, in reset_parameters
normal(self.weight, 0, 0.1)
File "/home/jack/.conda/envs/COMA/lib/python3.7/site-packages/torch/nn/modules/module.py", line 948, in __getattr__
type(self).__name__, name))
AttributeError: 'ChebConv_Coma' object has no attribute 'weight'
I don't know what to do next.Is there anything wrong with the ChebConv?
|
[
"Have you solved the problem?\nI've checked the Cheb_conv.py and have a guess: the Parent Class message_passing has no attribute called weight, and instead since it's an implementation of a Graph Network(the discrete model), the corresponding self.weight should be self.lins[k].weight (the linear transform matrix), so to replace self.weight with self.lins[k].weight may help with the issue.\nHere is the reference link about GNN(message passing) with pytorch:\nhttps://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html#creating-message-passing-networks\n"
] |
[
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0071015710_python.txt
|
Q:
How can i use data which has bigger size than RAM memory?
I want to search string S is in category. And category file is saved in folder.
I want to load category file and search S, but size of category file exceed size of RAM memory. So i cant load category file. In this case, how can i know string S is in category or not?
A:
The easiest way would be to process your big file one line at a time:
#!/usr/bin/env python3
N = 0
with open('BigFile.txt', 'r') as f:
while True:
line = f.readline()
if not line:
print('No cats found')
break
if 'cat' in line:
print(f'Found cat, on line {N}')
break
N += 1
Create a test file with 3 million lines like this, with the word "cat" on the final line like this:
jot -w "%08d" 3000000 > BigFile.txt
echo cat >> BigFile.txt
Alternatively, you can use mmap() and let your operating system do the work:
#!/usr/bin/env python3
import mmap
import re
# Open file and mmap it into memory to search in it for "cat"
with open('BigBoy', 'r') as f:
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
match = re.search(b'cat', m)
if match:
print(f'Found {match.re.pattern} at offset {match.start()}')
Example:
First, make a file too big for RAM, e.g. 64GB with the word "cat" at the end:
dd if=/dev/zero of=BigBoy bs=1G count=64
echo cat >> BigBoy
Now search in file bigger than RAM:
./SearchInMmapFile.py
Found b'cat' at offset 68719476736
|
How can i use data which has bigger size than RAM memory?
|
I want to search string S is in category. And category file is saved in folder.
I want to load category file and search S, but size of category file exceed size of RAM memory. So i cant load category file. In this case, how can i know string S is in category or not?
|
[
"The easiest way would be to process your big file one line at a time:\n#!/usr/bin/env python3\n\nN = 0\nwith open('BigFile.txt', 'r') as f:\n while True:\n line = f.readline()\n if not line:\n print('No cats found')\n break\n if 'cat' in line:\n print(f'Found cat, on line {N}')\n break\n N += 1\n\nCreate a test file with 3 million lines like this, with the word \"cat\" on the final line like this:\njot -w \"%08d\" 3000000 > BigFile.txt\necho cat >> BigFile.txt\n\n\nAlternatively, you can use mmap() and let your operating system do the work:\n#!/usr/bin/env python3\n\nimport mmap\nimport re\n\n# Open file and mmap it into memory to search in it for \"cat\"\nwith open('BigBoy', 'r') as f:\n with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:\n match = re.search(b'cat', m)\n if match:\n print(f'Found {match.re.pattern} at offset {match.start()}')\n\n\nExample:\nFirst, make a file too big for RAM, e.g. 64GB with the word \"cat\" at the end:\ndd if=/dev/zero of=BigBoy bs=1G count=64\necho cat >> BigBoy\n\nNow search in file bigger than RAM:\n./SearchInMmapFile.py \nFound b'cat' at offset 68719476736\n\n"
] |
[
0
] |
[] |
[] |
[
"memory",
"python"
] |
stackoverflow_0074595230_memory_python.txt
|
Q:
I believe I have a Function Argument Issue
For the most part all of my code seems to be working fine. The code is a text-based game. When I collect all of the items it fuctions correctly without errors. But if I go to directly to Caslte Black without collecting all the items it finishes with the proper message, but I get the following error:
Traceback (most recent call last):
File "C:\Users....................", line 38, in <module>
city_item = cities[current_city][0]
KeyError: 'Castle Black'
Here is my code.
print("Welcome to the GOT: Winter is Coming Text Based Game")
player_name = input("What is your name Lord Commander? ")
print("\nIntroduction:")
print(" Sir " + player_name + ", as Lord Commander of the Kingsguard")
print(" you are to visit 10 cities across Westeros and Essos, and you MUST")
print(" collect the item from each city to win the game and defeat the Night King.")
print(" Once you collect all the items you automatically win the game defeating the Night King.")
print(" If you confront the Night King without all 10 items, you will perish and")
print(" all of Westeros and Essos will be doomed!")
print("\nGame Play Directions:")
print(" To move in a direction you will be prompted to chose either North, South, East or West")
print(" To collect an item, you will be prompted to enter 'Y' for 'YES' or 'N' for 'NO'.")
print("\nYou are now ready to begin your quest Lord Commander " + player_name + "!\n\n\n")
cities = {"King's Landing": [None, ["North", "South", "East", "West"]],
"Casterly Rock": ["The Oathkeeper Sword", ["South", "East"]],
"Highgarden": ["A Golden Rose", ["North"]],
"Sunspear": ["A Viper", ["North", "East"]],
"Great Pyramid Meereen": ["Drogon the Dragon", ["West"]],
"Dragonstone": ["Dragon Glass", ["North", "West"]],
"Pyke": ["The Iron Fleet", ["East"]],
"The Twins": ["A Letter of Passage", ["North", "South", "East", "West"]],
"The Eyrie": ["A Falcon", ["South", "West"]],
"The Dreadfort": ["Lord Bolton's Army", ["West"]],
"Winterfell": ["Ghost the Dyer Wolf", ["South", "East", "West"]]
}
inventory = []
current_city = "King's Landing"
while True:
if current_city == "Castle Black":
print("You have been defeated by the Night King! The Realm is doomed!")
print("Lord Commander, you are currently in", current_city, ".")
city_item = cities[current_city][0]
print("The current room has", city_item)
if city_item != None:
option = input("Do you want collect " + city_item + "? (Y/N): ") .upper()
if option in ['Y', 'YES']:
inventory.append(city_item)
cities[current_city][0] = None
print("Collected items: ", inventory)
if len(inventory) == 10:
print("\nCONGRATULATIONS!")
print("You have collected all the items and have defeated the Night King!\n")
break
direction = input("Which direction do you want to go? (North, South, East, West): ")
while direction not in cities[current_city][1]:
print("You cannot go that way from " + current_city + ". Please try another direction.")
direction = input("Which direction do you want to go? (North, South, East, West): ")
if current_city == "King's Landing":
if direction == "North":
next_city = "The Twins"
elif direction == "South":
next_city = "Sunspear"
elif direction == "East":
next_city = "Dragonstone"
else:
next_city = "Casterly Rock"
elif current_city == "The Twins":
if direction == "North":
next_city = "Winterfell"
elif direction == "South":
next_city = "King's Landing"
elif direction == "East":
next_city = "The Eyrie"
else:
next_city = "Pyke"
elif current_city == "Sunspear":
if direction == "North":
next_city = "King's Landing"
else:
next_city = "Great Pyramid Meereen"
elif current_city == "Great Pyramid Meereen":
next_city = "Sunspear"
elif current_city == "Casterly Rock":
if direction == "South":
next_city = "Highgarden"
else:
next_city = "King's Landing"
elif current_city == "Highgarden":
next_city = "Casterly Rock"
elif current_city == "Dragonstone":
if direction == "North":
next_city = "The Eyrie"
else:
next_city = "King's Landing"
elif current_city == "The Eyrie":
if direction == "South":
next_city = "Dragonstone"
else:
next_city = "The Twins"
elif current_city == "Pyke":
next_city = "The Twins"
elif current_city == "Winterfell":
if direction == "South":
next_city = "The Twins"
elif direction == "East":
next_city = "The Dreadfort"
else:
next_city = "Castle Black"
elif current_city == "The Dreadfort":
next_city = "Winterfell"
current_city = next_city
print("My Lord, you have moved to", current_city, ".\n")
print("\nThank you for saving the Realm!")
When I change the argument from 0 to 1 and start the game over
city_item = cities[current_city][1]
I get the following error:
Traceback (most recent call last):
File "C:\Users\............................", line 42, in <module>
option = input("Do you want collect " + city_item + "? (Y/N): ") .upper()
TypeError: can only concatenate str (not "list") to str
I am unsure which way I should go from here.
A:
After the first condition in the loop, add a break or wrap the rest in an else. You're trying to access cities["Castle Black"], hence the KeyError.
|
I believe I have a Function Argument Issue
|
For the most part all of my code seems to be working fine. The code is a text-based game. When I collect all of the items it fuctions correctly without errors. But if I go to directly to Caslte Black without collecting all the items it finishes with the proper message, but I get the following error:
Traceback (most recent call last):
File "C:\Users....................", line 38, in <module>
city_item = cities[current_city][0]
KeyError: 'Castle Black'
Here is my code.
print("Welcome to the GOT: Winter is Coming Text Based Game")
player_name = input("What is your name Lord Commander? ")
print("\nIntroduction:")
print(" Sir " + player_name + ", as Lord Commander of the Kingsguard")
print(" you are to visit 10 cities across Westeros and Essos, and you MUST")
print(" collect the item from each city to win the game and defeat the Night King.")
print(" Once you collect all the items you automatically win the game defeating the Night King.")
print(" If you confront the Night King without all 10 items, you will perish and")
print(" all of Westeros and Essos will be doomed!")
print("\nGame Play Directions:")
print(" To move in a direction you will be prompted to chose either North, South, East or West")
print(" To collect an item, you will be prompted to enter 'Y' for 'YES' or 'N' for 'NO'.")
print("\nYou are now ready to begin your quest Lord Commander " + player_name + "!\n\n\n")
cities = {"King's Landing": [None, ["North", "South", "East", "West"]],
"Casterly Rock": ["The Oathkeeper Sword", ["South", "East"]],
"Highgarden": ["A Golden Rose", ["North"]],
"Sunspear": ["A Viper", ["North", "East"]],
"Great Pyramid Meereen": ["Drogon the Dragon", ["West"]],
"Dragonstone": ["Dragon Glass", ["North", "West"]],
"Pyke": ["The Iron Fleet", ["East"]],
"The Twins": ["A Letter of Passage", ["North", "South", "East", "West"]],
"The Eyrie": ["A Falcon", ["South", "West"]],
"The Dreadfort": ["Lord Bolton's Army", ["West"]],
"Winterfell": ["Ghost the Dyer Wolf", ["South", "East", "West"]]
}
inventory = []
current_city = "King's Landing"
while True:
if current_city == "Castle Black":
print("You have been defeated by the Night King! The Realm is doomed!")
print("Lord Commander, you are currently in", current_city, ".")
city_item = cities[current_city][0]
print("The current room has", city_item)
if city_item != None:
option = input("Do you want collect " + city_item + "? (Y/N): ") .upper()
if option in ['Y', 'YES']:
inventory.append(city_item)
cities[current_city][0] = None
print("Collected items: ", inventory)
if len(inventory) == 10:
print("\nCONGRATULATIONS!")
print("You have collected all the items and have defeated the Night King!\n")
break
direction = input("Which direction do you want to go? (North, South, East, West): ")
while direction not in cities[current_city][1]:
print("You cannot go that way from " + current_city + ". Please try another direction.")
direction = input("Which direction do you want to go? (North, South, East, West): ")
if current_city == "King's Landing":
if direction == "North":
next_city = "The Twins"
elif direction == "South":
next_city = "Sunspear"
elif direction == "East":
next_city = "Dragonstone"
else:
next_city = "Casterly Rock"
elif current_city == "The Twins":
if direction == "North":
next_city = "Winterfell"
elif direction == "South":
next_city = "King's Landing"
elif direction == "East":
next_city = "The Eyrie"
else:
next_city = "Pyke"
elif current_city == "Sunspear":
if direction == "North":
next_city = "King's Landing"
else:
next_city = "Great Pyramid Meereen"
elif current_city == "Great Pyramid Meereen":
next_city = "Sunspear"
elif current_city == "Casterly Rock":
if direction == "South":
next_city = "Highgarden"
else:
next_city = "King's Landing"
elif current_city == "Highgarden":
next_city = "Casterly Rock"
elif current_city == "Dragonstone":
if direction == "North":
next_city = "The Eyrie"
else:
next_city = "King's Landing"
elif current_city == "The Eyrie":
if direction == "South":
next_city = "Dragonstone"
else:
next_city = "The Twins"
elif current_city == "Pyke":
next_city = "The Twins"
elif current_city == "Winterfell":
if direction == "South":
next_city = "The Twins"
elif direction == "East":
next_city = "The Dreadfort"
else:
next_city = "Castle Black"
elif current_city == "The Dreadfort":
next_city = "Winterfell"
current_city = next_city
print("My Lord, you have moved to", current_city, ".\n")
print("\nThank you for saving the Realm!")
When I change the argument from 0 to 1 and start the game over
city_item = cities[current_city][1]
I get the following error:
Traceback (most recent call last):
File "C:\Users\............................", line 42, in <module>
option = input("Do you want collect " + city_item + "? (Y/N): ") .upper()
TypeError: can only concatenate str (not "list") to str
I am unsure which way I should go from here.
|
[
"After the first condition in the loop, add a break or wrap the rest in an else. You're trying to access cities[\"Castle Black\"], hence the KeyError.\n"
] |
[
1
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074595719_python.txt
|
Q:
Is there a way to iterate through certain types of objects on a canvas in python tkinter?
For example, I've got a bunch of text objects of varying font families on a canvas, can I make some kind of call to iterate through these text objects and alter them?
A:
You can get a list of all items on a canvas with the find_all() method and then just list them:
def get_canvas_items(canvas):
item_list = canvas.find_all()
for item in item_list:
item_type = canvas.type(item) # e.g. "text", "line", etc.
item_keys = canvas.itemconfig(item).keys() # item options
# Do stuff...
Have a look at effbot: The Tkinter Canvas Widget for additional info on canvas.
|
Is there a way to iterate through certain types of objects on a canvas in python tkinter?
|
For example, I've got a bunch of text objects of varying font families on a canvas, can I make some kind of call to iterate through these text objects and alter them?
|
[
"You can get a list of all items on a canvas with the find_all() method and then just list them:\ndef get_canvas_items(canvas):\n item_list = canvas.find_all()\n for item in item_list:\n item_type = canvas.type(item) # e.g. \"text\", \"line\", etc.\n item_keys = canvas.itemconfig(item).keys() # item options\n # Do stuff...\n\nHave a look at effbot: The Tkinter Canvas Widget for additional info on canvas.\n"
] |
[
2
] |
[] |
[] |
[
"python",
"tkinter"
] |
stackoverflow_0074595086_python_tkinter.txt
|
Q:
Merge Sort in reverse
i want to sort the array in descending order using mergeSort
this is the code for my MergeSort implementation in ascending order
def MergeSort(B):
if len(B) <= 1:
return
mid = len(B)//2
half1 = B[:mid]
half2 = B[mid:]
reverseSort(half1)
reverseSort(half2)
merge(half1,half2,B)
def merge(half1, half2, B):
i=0; j1=0; j2=0
while j1<len(half1) and j2<len(half2):
if half1[j1] < half2[j2]:
B[i] = half1[j1]
j1 += 1; i += 1
else:
B[i] = half2[j2]
j2 += 1; i += 1
while j1 > len(half1):
B[i] = half1[j1]
j1 += 1; i += 1
while j2 > len(half2):
B[i] = half2[j2]
j2 += 1; i += 1
i have tried changing the following if statement to this
if half1[j1] < half2[j2]:
but the output is incorrect
essentially i want the following
B=["cat", "ca", "can", "cat", "cat"]
MergeSortReverse(B)
print(B) #["cat","cat","cat","can","ca"]
|
Merge Sort in reverse
|
i want to sort the array in descending order using mergeSort
this is the code for my MergeSort implementation in ascending order
def MergeSort(B):
if len(B) <= 1:
return
mid = len(B)//2
half1 = B[:mid]
half2 = B[mid:]
reverseSort(half1)
reverseSort(half2)
merge(half1,half2,B)
def merge(half1, half2, B):
i=0; j1=0; j2=0
while j1<len(half1) and j2<len(half2):
if half1[j1] < half2[j2]:
B[i] = half1[j1]
j1 += 1; i += 1
else:
B[i] = half2[j2]
j2 += 1; i += 1
while j1 > len(half1):
B[i] = half1[j1]
j1 += 1; i += 1
while j2 > len(half2):
B[i] = half2[j2]
j2 += 1; i += 1
i have tried changing the following if statement to this
if half1[j1] < half2[j2]:
but the output is incorrect
essentially i want the following
B=["cat", "ca", "can", "cat", "cat"]
MergeSortReverse(B)
print(B) #["cat","cat","cat","can","ca"]
|
[] |
[] |
[
"def merge_sort_descending(arr):\n if len(arr) > 1:\n mid = len(arr) // 2\n left = arr[:mid]\n right = arr[mid:]\n merge_sort_descending(left)\n merge_sort_descending(right)\n i = j = k = 0\n while i < len(left) and j < len(right):\n if left[i] > right[j]:\n arr[k] = left[i]\n i += 1\n else:\n arr[k] = right[j]\n j += 1\n k += 1\n while i < len(left):\n arr[k] = left[i]\n i += 1\n k += 1\n while j < len(right):\n arr[k] = right[j]\n j += 1\n k += 1\n\nB=[\"cat\", \"ca\", \"can\", \"cat\", \"cat\"]\n['cat', 'cat', 'cat', 'can', 'ca']\n\n"
] |
[
-1
] |
[
"python"
] |
stackoverflow_0074595754_python.txt
|
Q:
How to Create Different Colors for Tiles
I am relatively new to pygame, and I am trying to create a matching card game. I am currently stuck on how to replace the colors of the tiles. I currently have a list of colors for my tiles, but I do not know how I would implement each of these colors into my tiles. For example, tile 1 would be blue, tile 2 would be green, so on and so forth.
Code for implementing board/tiles
creation of board/random tiles
I tried using a for loop to access each color, and then assign it to a variable, but I realized that this would not be the right way to go about it. I've also tried to create a for loop inside of a list, in my sqr_color, variable but this does not work either since I would have to access each color again inside the list. I also know that my self.assigned is working, since I can access specific colors.
A:
I would create a data structure to hold the various pieces of information about a tile. It could be something as simple as a list:
tile1 = [ ( 255, 0, 0 ), 10, 10, 100, 100 ]
tile2 = [ ( 0, 255, 0 ), 120, 10, 100, 100 ]
Where each tile is a python list of [ colour, x, y, width, height ].
Alternatively, you could create a class for each tile, and store the same thing:
class Tile():
def __init__( self, colour, x, y, width=100, height=100 ):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
And then
tile1 = Tile( ( 255, 0, 0 ), 10, 10 )
tile2 = Tile( ( 0, 255, 0 ), 120, 10 )
An easy way of processing all these tiles, is to put them into a list themselves:
all_tiles = [ tile1, tile2, ... ]
So building on this theme, to create a set of tiles where the colours come from a list:
all_tiles = []
color_index = 0
for i in range( 10 ):
next_color = self.color[color_index]
new_tile = Tile( next_color, 10+(i*110), 10 ) # create a tile
all_tiles.append( new_tile ) # add to tile-list
if ( color_index >= len( self.color ) ):
color_index = 0 # re-start from first colour again
else:
color_index += 1 # move to the next colour in list
So now you would have a set of 10 tiles in the list all_tiles, where the tile colours come from self.colors. Note that if we run out of colours (the len() check), it starts from the first colour in the list again.
|
How to Create Different Colors for Tiles
|
I am relatively new to pygame, and I am trying to create a matching card game. I am currently stuck on how to replace the colors of the tiles. I currently have a list of colors for my tiles, but I do not know how I would implement each of these colors into my tiles. For example, tile 1 would be blue, tile 2 would be green, so on and so forth.
Code for implementing board/tiles
creation of board/random tiles
I tried using a for loop to access each color, and then assign it to a variable, but I realized that this would not be the right way to go about it. I've also tried to create a for loop inside of a list, in my sqr_color, variable but this does not work either since I would have to access each color again inside the list. I also know that my self.assigned is working, since I can access specific colors.
|
[
"I would create a data structure to hold the various pieces of information about a tile. It could be something as simple as a list:\ntile1 = [ ( 255, 0, 0 ), 10, 10, 100, 100 ]\ntile2 = [ ( 0, 255, 0 ), 120, 10, 100, 100 ]\n\nWhere each tile is a python list of [ colour, x, y, width, height ].\nAlternatively, you could create a class for each tile, and store the same thing:\nclass Tile():\n def __init__( self, colour, x, y, width=100, height=100 ):\n self.colour = colour\n self.x = x\n self.y = y \n self.width = width\n self.height = height\n\nAnd then\ntile1 = Tile( ( 255, 0, 0 ), 10, 10 )\ntile2 = Tile( ( 0, 255, 0 ), 120, 10 )\n\nAn easy way of processing all these tiles, is to put them into a list themselves:\nall_tiles = [ tile1, tile2, ... ]\n\nSo building on this theme, to create a set of tiles where the colours come from a list:\nall_tiles = []\ncolor_index = 0\nfor i in range( 10 ):\n next_color = self.color[color_index]\n new_tile = Tile( next_color, 10+(i*110), 10 ) # create a tile\n all_tiles.append( new_tile ) # add to tile-list\n\n if ( color_index >= len( self.color ) ):\n color_index = 0 # re-start from first colour again\n else:\n color_index += 1 # move to the next colour in list\n\nSo now you would have a set of 10 tiles in the list all_tiles, where the tile colours come from self.colors. Note that if we run out of colours (the len() check), it starts from the first colour in the list again.\n"
] |
[
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074595313_python.txt
|
Q:
How to get sorted list inside a dictionary with json.dumps()
I have the following problem: having a python dictionary like the following:
{"qqq": [{"bbb": "111"}, {"aaa": "333"}], "zzz": {"bbb": [5, 2, 1, 9]}}
I would like to obtain an ordered json object such as:
'{"qqq": [{"aaa": "333"}, {"bbb": "111"}], "zzz": {"bbb": [1, 2, 5, 9]}}'
At the moment I use the following:
class ListEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, list):
return sorted(o)
return json.JSONEncoder.default(self, o)
print json.dumps(c, sort_keys=True, cls=ListEncoder)
But the two list inside my object are not sorted, and I get:
'{"qqq": [{"bbb": "111"}, {"aaa": "333"}], "zzz": {"bbb": [5, 2, 1, 9]}}'
probably because the custom JSONEncoder skips a type that already knows how to manage (list).
UPDATE
Martijn solution below works perfectly for the example above, but unfortunately I have to manage more complicated dictionaries, with a bigger depth: for example the following two
a = {
'aaa': 'aaa',
'op': 'ccc',
'oppa': {
'ggg': [{'fff': 'ev'}],
'flt': {
'nnn': [
{
'mmm': [{'a_b_d': [6]},{'a_b_c': [6,7]}]
},
{
'iii': [3, 2, 4, 5]
}
]
}
},
'rrr': {},
'ttt': ['aaa-bbb-ccc']
}
b = {
'aaa': 'aaa',
'op': 'ccc',
'oppa': {
'ggg': [{'fff': 'ev'}],
'flt': {
'nnn': [
{
'iii': [2, 3, 4, 5]
},
{
'mmm': [{'a_b_c': [6,7]},{'a_b_d': [6]}]
}
]
}
},
'rrr': {},
'ttt': ['aaa-bbb-ccc']
}
They would be the same if the lists inside them were sorted.
But they aren't with the class above, and I get 2 different json strings:
{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [3, 2, 4, 5]}, {"mmm": [{"a_b_d": [6]}, {"a_b_c": [6, 7]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}
{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [2, 3, 4, 5]}, {"mmm": [{"a_b_c": [6, 7]}, {"a_b_d": [6]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}
Any idea to fix this?
A:
default isn't called for lists; that method is only for types the encoder doesn't know how to handle. Override the encode method instead:
class SortedListEncoder(json.JSONEncoder):
def encode(self, obj):
def sort_lists(item):
if isinstance(item, list):
return sorted(sort_lists(i) for i in item)
elif isinstance(item, dict):
return {k: sort_lists(v) for k, v in item.items()}
else:
return item
return super(SortedListEncoder, self).encode(sort_lists(obj))
This essentially just sorts all lists (recursively) before encoding; this could have been done before passing it to json.dumps() but this way it is part of the responsibility of the encoder, just like sorting the keys is.
Demo:
>>> json.dumps(c, sort_keys=True, cls=SortedListEncoder)
'{"qqq": [{"aaa": "333"}, {"bbb": "111"}], "zzz": {"bbb": [1, 2, 5, 9]}}'
>>> json.dumps(a, sort_keys=True, cls=SortedListEncoder)
'{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [2, 3, 4, 5]}, {"mmm": [{"a_b_c": [6, 7]}, {"a_b_d": [6]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}'
>>> json.dumps(b, sort_keys=True, cls=SortedListEncoder)
'{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [2, 3, 4, 5]}, {"mmm": [{"a_b_c": [6, 7]}, {"a_b_d": [6]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}'
A:
The updated question should probably have been a new question but my solution for the update was to extend the accepted answer to add a more complex key to the list sort:
class SortedListEncoder(json.JSONEncoder):
def encode(self, obj):
def get_key(item):
if isinstance(item, dict):
return get_key(sorted(item.keys()))
else:
return str(item)
def sort_lists(item):
if isinstance(item, list):
return sorted((sort_lists(i) for i in item), key=lambda nm: get_key(nm))
elif isinstance(item, dict):
return {k: sort_lists(v) for k, v in item.items()}
else:
return item
return super(SortedListEncoder, self).encode(sort_lists(obj))
which allows dicts to be compared based on the sorted list of keys.
This is not a complete ordering of the object but it returns the same ordering for both of your test cases (and mine):
{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [2, 3, 4, 5]}, {"mmm": [{"a_b_c": [6, 7]}, {"a_b_d": [6]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}
It can't cover the ordering of a list containing dicts that have the same "first" key but different values ie:
a=[{"bb": ["aa", "dd"]}, {"bb": ["cc", "dd"]}]
b=[{"bb": ["dd", "cc"]}, {"bb": ["dd", "aa"]}]
produces sorted sublists but leaves the dictionary order unaltered:
[{"bb": ["aa", "dd"]}, {"bb": ["cc", "dd"]}]
[{"bb": ["cc", "dd"]}, {"bb": ["aa", "dd"]}]
|
How to get sorted list inside a dictionary with json.dumps()
|
I have the following problem: having a python dictionary like the following:
{"qqq": [{"bbb": "111"}, {"aaa": "333"}], "zzz": {"bbb": [5, 2, 1, 9]}}
I would like to obtain an ordered json object such as:
'{"qqq": [{"aaa": "333"}, {"bbb": "111"}], "zzz": {"bbb": [1, 2, 5, 9]}}'
At the moment I use the following:
class ListEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, list):
return sorted(o)
return json.JSONEncoder.default(self, o)
print json.dumps(c, sort_keys=True, cls=ListEncoder)
But the two list inside my object are not sorted, and I get:
'{"qqq": [{"bbb": "111"}, {"aaa": "333"}], "zzz": {"bbb": [5, 2, 1, 9]}}'
probably because the custom JSONEncoder skips a type that already knows how to manage (list).
UPDATE
Martijn solution below works perfectly for the example above, but unfortunately I have to manage more complicated dictionaries, with a bigger depth: for example the following two
a = {
'aaa': 'aaa',
'op': 'ccc',
'oppa': {
'ggg': [{'fff': 'ev'}],
'flt': {
'nnn': [
{
'mmm': [{'a_b_d': [6]},{'a_b_c': [6,7]}]
},
{
'iii': [3, 2, 4, 5]
}
]
}
},
'rrr': {},
'ttt': ['aaa-bbb-ccc']
}
b = {
'aaa': 'aaa',
'op': 'ccc',
'oppa': {
'ggg': [{'fff': 'ev'}],
'flt': {
'nnn': [
{
'iii': [2, 3, 4, 5]
},
{
'mmm': [{'a_b_c': [6,7]},{'a_b_d': [6]}]
}
]
}
},
'rrr': {},
'ttt': ['aaa-bbb-ccc']
}
They would be the same if the lists inside them were sorted.
But they aren't with the class above, and I get 2 different json strings:
{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [3, 2, 4, 5]}, {"mmm": [{"a_b_d": [6]}, {"a_b_c": [6, 7]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}
{"aaa": "aaa", "op": "ccc", "oppa": {"flt": {"nnn": [{"iii": [2, 3, 4, 5]}, {"mmm": [{"a_b_c": [6, 7]}, {"a_b_d": [6]}]}]}, "ggg": [{"fff": "ev"}]}, "rrr": {}, "ttt": ["aaa-bbb-ccc"]}
Any idea to fix this?
|
[
"default isn't called for lists; that method is only for types the encoder doesn't know how to handle. Override the encode method instead:\nclass SortedListEncoder(json.JSONEncoder):\n def encode(self, obj):\n def sort_lists(item):\n if isinstance(item, list):\n return sorted(sort_lists(i) for i in item)\n elif isinstance(item, dict):\n return {k: sort_lists(v) for k, v in item.items()}\n else:\n return item\n return super(SortedListEncoder, self).encode(sort_lists(obj))\n\nThis essentially just sorts all lists (recursively) before encoding; this could have been done before passing it to json.dumps() but this way it is part of the responsibility of the encoder, just like sorting the keys is.\nDemo:\n>>> json.dumps(c, sort_keys=True, cls=SortedListEncoder)\n'{\"qqq\": [{\"aaa\": \"333\"}, {\"bbb\": \"111\"}], \"zzz\": {\"bbb\": [1, 2, 5, 9]}}'\n>>> json.dumps(a, sort_keys=True, cls=SortedListEncoder)\n'{\"aaa\": \"aaa\", \"op\": \"ccc\", \"oppa\": {\"flt\": {\"nnn\": [{\"iii\": [2, 3, 4, 5]}, {\"mmm\": [{\"a_b_c\": [6, 7]}, {\"a_b_d\": [6]}]}]}, \"ggg\": [{\"fff\": \"ev\"}]}, \"rrr\": {}, \"ttt\": [\"aaa-bbb-ccc\"]}'\n>>> json.dumps(b, sort_keys=True, cls=SortedListEncoder)\n'{\"aaa\": \"aaa\", \"op\": \"ccc\", \"oppa\": {\"flt\": {\"nnn\": [{\"iii\": [2, 3, 4, 5]}, {\"mmm\": [{\"a_b_c\": [6, 7]}, {\"a_b_d\": [6]}]}]}, \"ggg\": [{\"fff\": \"ev\"}]}, \"rrr\": {}, \"ttt\": [\"aaa-bbb-ccc\"]}'\n\n",
"The updated question should probably have been a new question but my solution for the update was to extend the accepted answer to add a more complex key to the list sort:\nclass SortedListEncoder(json.JSONEncoder):\n def encode(self, obj):\n def get_key(item):\n if isinstance(item, dict):\n return get_key(sorted(item.keys()))\n else:\n return str(item)\n def sort_lists(item):\n if isinstance(item, list):\n return sorted((sort_lists(i) for i in item), key=lambda nm: get_key(nm))\n elif isinstance(item, dict):\n return {k: sort_lists(v) for k, v in item.items()}\n else:\n return item\n return super(SortedListEncoder, self).encode(sort_lists(obj))\n\nwhich allows dicts to be compared based on the sorted list of keys.\nThis is not a complete ordering of the object but it returns the same ordering for both of your test cases (and mine):\n{\"aaa\": \"aaa\", \"op\": \"ccc\", \"oppa\": {\"flt\": {\"nnn\": [{\"iii\": [2, 3, 4, 5]}, {\"mmm\": [{\"a_b_c\": [6, 7]}, {\"a_b_d\": [6]}]}]}, \"ggg\": [{\"fff\": \"ev\"}]}, \"rrr\": {}, \"ttt\": [\"aaa-bbb-ccc\"]}\n\nIt cant cover the ordering of a list containing dicts that have the same \"first\" key but different values ie:\na=[{\"bb\": [\"aa\", \"dd\"]}, {\"bb\": [\"cc\", \"dd\"]}]\nb=[{\"bb\": [\"dd\", \"cc\"]}, {\"bb\": [\"dd\", \"aa\"]}]\n\nproduces sorted sublists but leaves the dictionary order unaltered:\n[{\"bb\": [\"aa\", \"dd\"]}, {\"bb\": [\"cc\", \"dd\"]}]\n[{\"bb\": [\"cc\", \"dd\"]}, {\"bb\": [\"aa\", \"dd\"]}]\n\n"
] |
[
8,
0
] |
[
"I leave this here because i ran into the same issue.\nYou can use this function to sort your nested data structures:\ndef sort_data(data):\n if isinstance(data, dict):\n output = OrderedDict()\n for key, value in data.items():\n output[key] = sort_data(value)\n return output\n elif isinstance(data, list):\n calculated = [sort_data(x) for x in data]\n return sorted(calculated, key=str)\n elif isinstance(data, (int, bool, str, float, type(None))):\n return data\n else:\n raise Exception(\"Unkown type: {} for {}\".format(type(data), data))\n\nExample:\ndata = {\"b\":[ \"zzz\", \"yyy\", \"xxx\"],\n \"d\": [42, 54, 675, \"aaa\"],\n \"c\": {\"a\": [\"bbb\", \"ccc\", \"aaa\"]},\n }\n\nsorted_data = sort_data(data)\nprint(json.dumps(sorted_data, indent=2, sort_keys=True))\n\n# prints:\n#{\n# \"b\": [\n# \"xxx\",\n# \"yyy\",\n# \"zzz\"\n# ],\n# \"c\": {\n# \"a\": [\n# \"aaa\",\n# \"bbb\",\n# \"ccc\"\n# ]\n# },\n# \"d\": [\n# 42,\n# 54,\n# 675,\n# \"aaa\"\n# ]\n#}\n\n\n"
] |
[
-1
] |
[
"dictionary",
"json",
"list",
"python"
] |
stackoverflow_0024076832_dictionary_json_list_python.txt
|
Q:
How do I protect Python code from being read by users?
I am developing a piece of software in Python that will be distributed to my employer's customers. My employer wants to limit the usage of the software with a time-restricted license file.
If we distribute the .py files or even .pyc files it will be easy to (decompile and) remove the code that checks the license file.
Another aspect is that my employer does not want the code to be read by our customers, fearing that the code may be stolen or at least the "novel ideas".
Is there a good way to handle this problem?
A:
"Is there a good way to handle this problem?" No. Nothing can be protected against reverse engineering. Even the firmware on DVD machines has been reverse engineered and the AACS Encryption key exposed. And that's in spite of the DMCA making that a criminal offense.
Since no technical method can stop your customers from reading your code, you have to apply ordinary commercial methods.
Licenses. Contracts. Terms and Conditions. This still works even when people can read the code. Note that some of your Python-based components may require that you pay fees before you sell software using those components. Also, some open-source licenses prohibit you from concealing the source or origins of that component.
Offer significant value. If your stuff is so good -- at a price that is hard to refuse -- there's no incentive to waste time and money reverse engineering anything. Reverse engineering is expensive. Make your product slightly less expensive.
Offer upgrades and enhancements that make any reverse engineering a bad idea. When the next release breaks their reverse engineering, there's no point. This can be carried to absurd extremes, but you should offer new features that make the next release more valuable than reverse engineering.
Offer customization at rates so attractive that they'd rather pay you to build and support the enhancements.
Use a license key which expires. This is cruel, and will give you a bad reputation, but it certainly makes your software stop working.
Offer it as a web service. SaaS involves no downloads to customers.
A:
Python, being a byte-code-compiled interpreted language, is very difficult to lock down. Even if you use a exe-packager like py2exe, the layout of the executable is well-known, and the Python byte-codes are well understood.
Usually in cases like this, you have to make a tradeoff. How important is it really to protect the code? Are there real secrets in there (such as a key for symmetric encryption of bank transfers), or are you just being paranoid? Choose the language that lets you develop the best product quickest, and be realistic about how valuable your novel ideas are.
If you decide you really need to enforce the license check securely, write it as a small C extension so that the license check code can be extra-hard (but not impossible!) to reverse engineer, and leave the bulk of your code in Python.
A:
Python is not the tool you need
You must use the right tool to do the right thing, and Python was not designed to be obfuscated. It's the contrary; everything is open or easy to reveal or modify in Python because that's the language's philosophy.
If you want something you can't see through, look for another tool. This is not a bad thing, it is important that several different tools exist for different usages.
Obfuscation is really hard
Even compiled programs can be reverse-engineered so don't think that you can fully protect any code. You can analyze obfuscated PHP, break the flash encryption key, etc. Newer versions of Windows are cracked every time.
Having a legal requirement is a good way to go
You cannot prevent somebody from misusing your code, but you can easily discover if someone does. Therefore, it's just a casual legal issue.
Code protection is overrated
Nowadays, business models tend to go for selling services instead of products. You cannot copy a service, pirate nor steal it. Maybe it's time to consider to go with the flow...
A:
Compile python and distribute binaries!
Sensible idea:
Use Cython, Nuitka, Shed Skin or something similar to compile python to C code, then distribute your app as python binary libraries (pyd) instead.
That way, no Python (byte) code is left and you've done any reasonable amount of obscurification anyone (i.e. your employer) could expect from regular Code, I think. (.NET or Java less safe than this case, as that bytecode is not obfuscated and can relatively easily be decompiled into reasonable source.)
Cython is getting more and more compatible with CPython, so I think it should work. (I'm actually considering this for our product.. We're already building some thirdparty libs as pyd/dlls, so shipping our own python code as binaries is not a overly big step for us.)
See This Blog Post (not by me) for a tutorial on how to do it. (thx @hithwen)
Crazy idea:
You could probably get Cython to store the C-files separately for each module, then just concatenate them all and build them with heavy inlining. That way, your Python module is pretty monolithic and difficult to chip at with common tools.
Beyond crazy:
You might be able to build a single executable if you can link to (and optimize with) the python runtime and all libraries (dlls) statically. That way, it'd sure be difficult to intercept calls to/from python and whatever framework libraries you use. This cannot be done if you're using LGPL code though.
A:
I understand that you want your customers to use the power of python but do not want expose the source code.
Here are my suggestions:
(a) Write the critical pieces of the code as C or C++ libraries and then use SIP or swig to expose the C/C++ APIs to Python namespace.
(b) Use cython instead of Python
(c) In both (a) and (b), it should be possible to distribute the libraries as licensed binary with a Python interface.
A:
Have you had a look at pyminifier? It does Minify, obfuscate, and compress Python code. The example code looks pretty nasty for casual reverse engineering.
$ pyminifier --nonlatin --replacement-length=50 /tmp/tumult.py
#!/usr/bin/env python3
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲמּ=ImportError
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱=print
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ=False
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ澨=object
try:
import demiurgic
except ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲמּ:
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱("Warning: You're not demiurgic. Actually, I think that's normal.")
try:
import mystificate
except ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲמּ:
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱("Warning: Dark voodoo may be unreliable.")
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲﺬ=ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ
class ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ(ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ澨):
def __init__(self,*args,**kwargs):
pass
def ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ클(self,dactyl):
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ퐐=demiurgic.palpitation(dactyl)
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ=mystificate.dark_voodoo(ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ퐐)
return ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ
def ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ(self,whatever):
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱(whatever)
if __name__=="__main__":
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱("Forming...")
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲﺃ=ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ("epicaricacy","perseverate")
ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲﺃ.ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ("Codswallop")
# Created by pyminifier (https://github.com/liftoff/pyminifier)
A:
Is your employer aware that he can "steal" back any ideas that other people get from your code? I mean, if they can read your work, so can you theirs. Maybe looking at how you can benefit from the situation would yield a better return of your investment than fearing how much you could lose.
[EDIT] Answer to Nick's comment:
Nothing gained and nothing lost. The customer has what he wants (and paid for it since he did the change himself). Since he doesn't release the change, it's as if it didn't happen for everyone else.
Now if the customer sells the software, they have to change the copyright notice (which is illegal, so you can sue and will win -> simple case).
If they don't change the copyright notice, the 2nd level customers will notice that the software comes from you original and wonder what is going on. Chances are that they will contact you and so you will learn about the reselling of your work.
Again we have two cases: The original customer sold only a few copies. That means they didn't make much money anyway, so why bother. Or they sold in volume. That means better chances for you to learn about what they do and do something about it.
But in the end, most companies try to comply to the law (once their reputation is ruined, it's much harder to do business). So they will not steal your work but work with you to improve it. So if you include the source (with a license that protects you from simple reselling), chances are that they will simply push back changes they made since that will make sure the change is in the next version and they don't have to maintain it. That's win-win: You get changes and they can make the change themselves if they really, desperately need it even if you're unwilling to include it in the official release.
A:
Use Cython. It will compile your modules to high-performant C files, which can then be compiled to native binary libraries. This is basically un-reversable, compared to .pyc bytecode!
I've written a detailed article on how to set up Cython for a Python project, check it out:
Protecting Python Sources With Cython
A:
Do not rely on obfuscation. As You have correctly concluded, it offers very limited protection.
UPDATE: Here is a link to paper which reverse engineered obfuscated python code in Dropbox. The approach - opcode remapping is a good barrier, but clearly it can be defeated.
Instead, as many posters have mentioned make it:
Not worth reverse engineering time (Your software is so good, it makes sense to pay)
Make them sign a contract and do a license audit if feasible.
Alternatively, as the kick-ass Python IDE WingIDE does: Give away the code. That's right, give the code away and have people come back for upgrades and support.
A:
Shipping .pyc files has its problems - they are not compatible with any other python version than the python version they were created with, which means you must know which python version is running on the systems the product will run on. That's a very limiting factor.
A:
In some circumstances, it may be possible to move (all, or at least a key part) of the software into a web service that your organization hosts.
That way, the license checks can be performed in the safety of your own server room.
A:
Though there's no perfect solution, the following can be done:
Move some critical piece of startup code into a native library.
Enforce the license check in the native library.
If the call to the native code were to be removed, the program wouldn't start anyway. If it's not removed then the license will be enforced.
Though this is not a cross-platform or a pure-Python solution, it will work.
A:
I was surprised in not seeing pyconcrete in any answer. Maybe because it's newer than the question?
It could be exactly what you need(ed).
Instead of obfuscating the code, it encrypts it and decrypts at load time.
From pypi page:
Protect python script work flow
your_script.py import pyconcrete
pyconcrete will hook import module
when your script do import MODULE,
pyconcrete import hook will try to find MODULE.pye first and then
decrypt MODULE.pye via _pyconcrete.pyd and execute decrypted data (as
.pyc content)
encrypt & decrypt secret key record in _pyconcrete.pyd
(like DLL or SO) the secret key would be hide in binary code, can’t
see it directly in HEX view
A:
I think there is one more method to protect your Python code; part of the Obfuscation method. I believe there was a game like Mount and Blade or something that changed and recompiled their own python interpreter (the original interpreter which i believe is open source) and just changed the OP codes in the OP code table to be different then the standard python OP codes.
So the python source is unmodified but the file extensions of the *.pyc files are different and the op codes don't match to the public python.exe interpreter. If you checked the games data files all the data was in Python source format.
All sorts of nasty tricks can be done to mess with immature hackers this way. Stopping a bunch of inexperienced hackers is easy. It's the professional hackers that you will not likely beat. But most companies don't keep pro hackers on staff long I imagine (likely because things get hacked). But immature hackers are all over the place (read as curious IT staff).
You could for example, in a modified interpreter, allow it to check for certain comments or doc strings in your source. You could have special OP codes for such lines of code. For example:
OP 234 is for source line "# Copyright I wrote this"
or compile that line into op codes that are equivalent to "if False:" if "# Copyright" is missing. Basically disabling a whole block of code for what appears to be some obscure reason.
One use case where recompiling a modified interpreter may be feasible is where you didn't write the app, the app is big, but you are paid to protect it, such as when you're a dedicated server admin for a financial app.
I find it a little contradictory to leave the source or opcodes open for eyeballs, but use SSL for network traffic. SSL is not 100% safe either. But it's used to stop MOST eyes from reading it. A wee bit precaution is sensible.
Also, if enough people deem that Python source and opcodes are too visible, it's likely someone will eventually develop at least a simple protection tool for it. So the more people asking "how to protect Python app" only promotes that development.
A:
The reliable only way to protect code is to run it on a server you control and provide your clients with a client which interfaces with that server.
A:
Depending in who the client is, a simple protection mechanism, combined with a sensible license agreement will be far more effective than any complex licensing/encryption/obfuscation system.
The best solution would be selling the code as a service, say by hosting the service, or offering support - although that isn't always practical.
Shipping the code as .pyc files will prevent your protection being foiled by a few #s, but it's hardly effective anti-piracy protection (as if there is such a technology), and at the end of the day, it shouldn't achieve anything that a decent license agreement with the company will.
Concentrate on making your code as nice to use as possible - having happy customers will make your company far more money than preventing some theoretical piracy..
A:
Another attempt to make your code harder to steal is to use jython and then use java obfuscator.
This should work pretty well as jythonc translates python code to java and then java is compiled to bytecode. So once you obfuscate the classes it will be really hard to understand what is going on after decompilation, not to mention recovering the actual code.
The only problem with jython is that you can't use python modules written in c.
A:
You should take a look at how the guys at getdropbox.com do it for their client software, including Linux. It's quite tricky to crack and requires some quite creative disassembly to get past the protection mechanisms.
A:
What about signing your code with standard encryption schemes by hashing and signing important files and checking it with public key methods?
In this way you can issue license file with a public key for each customer.
Additional you can use an python obfuscator like this one (just googled it).
A:
The best you can do with Python is to obscure things.
Strip out all docstrings
Distribute only the .pyc compiled files.
freeze it
Obscure your constants inside a class/module so that help(config) doesn't show everything
You may be able to add some additional obscurity by encrypting part of it and decrypting it on the fly and passing it to eval(). But no matter what you do someone can break it.
None of this will stop a determined attacker from disassembling the bytecode or digging through your api with help, dir, etc.
A:
Idea of having time restricted license and check for it in locally installed program will not work. Even with perfect obfuscation, license check can be removed. However if you check license on remote system and run significant part of the program on your closed remote system, you will be able to protect your IP.
Preventing competitors from using the source code as their own or write their inspired version of the same code, one way to protect is to add signatures to your program logic (some secrets to be able to prove that code was stolen from you) and obfuscate the python source code so, it's hard to read and utilize.
Good obfuscation adds basically the same protection to your code, that compiling it to executable (and stripping binary) does. Figuring out how obfuscated complex code works might be even harder than actually writing your own implementation.
This will not help preventing hacking of your program. Even with obfuscation code license stuff will be cracked and program may be modified to have slightly different behaviour (in the same way that compiling code to binary does not help protection of native programs).
In addition to symbol obfuscation might be good idea to unrefactor the code, which makes everything even more confusing if e.g. call graphs points to many different places even if actually those different places does eventually the same thing.
Logical signature inside obfuscated code (e.g. you may create table of values which are used by program logic, but also used as signature), which can be used to determine that code is originated from you. If someone decides to use your obfuscated code module as part of their own product (even after reobfuscating it to make it seem different) you can show, that code is stolen with your secret signature.
A:
I have looked at software protection in general for my own projects and the general philosophy is that complete protection is impossible. The only thing that you can hope to achieve is to add protection to a level that would cost your customer more to bypass than it would to purchase another license.
With that said I was just checking google for python obsfucation and not turning up a lot of anything. In a .Net solution, obsfucation would be a first approach to your problem on a windows platform, but I am not sure if anyone has solutions on Linux that work with Mono.
The next thing would be to write your code in a compiled language, or if you really want to go all the way, then in assembler. A stripped out executable would be a lot harder to decompile than an interpreted language.
It all comes down to tradeoffs. On one end you have ease of software development in python, in which it is also very hard to hide secrets. On the other end you have software written in assembler which is much harder to write, but is much easier to hide secrets.
Your boss has to choose a point somewhere along that continuum that supports his requirements. And then he has to give you the tools and time so you can build what he wants. However my bet is that he will object to real development costs versus potential monetary losses.
A:
It is possible to have the py2exe byte-code in a crypted resource for a C launcher that loads and executes it in memory. Some ideas here and here.
Some have also thought of a self modifying program to make reverse engineering expensive.
You can also find tutorials for preventing debuggers, make the disassembler fail, set false debugger breakpoints and protect your code with checksums. Search for ["crypted code" execute "in memory"] for more links.
But as others already said, if your code is worth it, reverse engineers will succeed in the end.
A:
Use the same way to protect binary file of c/c++, that is, obfuscate each function body in executable or library binary file, insert an instruction "jump" at the begin of each function entry, jump to special function to restore obfuscated code. Byte-code is binary code of Python script, so
First compile python script to code object
Then iterate each code object, obfuscate co_code of each code object as the following
0 JUMP_ABSOLUTE n = 3 + len(bytecode)
3
...
... Here it's obfuscated bytecode
...
n LOAD_GLOBAL ? (__pyarmor__)
n+3 CALL_FUNCTION 0
n+6 POP_TOP
n+7 JUMP_ABSOLUTE 0
Save obfuscated code object as .pyc or .pyo file
Those obfuscated file (.pyc or .pyo) can be used by normal python interpreter, when those code object is called first time
First op is JUMP_ABSOLUTE, it will jump to offset n
At offset n, the instruction is to call a PyCFunction. This function will restore those obfuscated bytecode between offset 3 and n, and put the original byte-code at offset 0. The obfuscated code can be got by the following code
char *obfucated_bytecode;
Py_ssize_t len;
PyFrameObject* frame = PyEval_GetFrame();
PyCodeObject *f_code = frame->f_code;
PyObject *co_code = f_code->co_code;
PyBytes_AsStringAndSize(co_code, &obfucated_bytecode, &len)
After this function returns, the last instruction is to jump to
offset 0. The really byte-code now is executed.
There is a tool Pyarmor to obfuscate python scripts by this way.
A:
There is a comprehensive answer on concealing the python source code, which can be find here.
Possible techniques discussed are:
- use compiled bytecode (python -m compileall)
- executable creators (or installers like PyInstaller)
- software as an service (the best solution to conceal your code in my opinion)
- python source code obfuscators
A:
Neither Cython nor Nuitka is the answer, because when running a solution that is compiled with Nuitka or Cython into .pyd or .exe files, a cache directory is generated and all .pyc files are copied into the cache directory, so an attacker can simply decompile the .pyc files and see your code or change it.
A:
using cxfreeze ( py2exe for linux ) will do the job.
http://cx-freeze.sourceforge.net/
it is available in ubuntu repositories
A:
If we focus on software licensing, I would recommend to take a look at another Stack Overflow answer I wrote here to get some inspiration of how a license key verification system can be constructed.
There is an open-source library on GitHub that can help you with the license verification bit.
You can install it by pip install licensing and then add the following code:
pubKey = "<RSAKeyValue><Modulus>sGbvxwdlDbqFXOMlVUnAF5ew0t0WpPW7rFpI5jHQOFkht/326dvh7t74RYeMpjy357NljouhpTLA3a6idnn4j6c3jmPWBkjZndGsPL4Bqm+fwE48nKpGPjkj4q/yzT4tHXBTyvaBjA8bVoCTnu+LiC4XEaLZRThGzIn5KQXKCigg6tQRy0GXE13XYFVz/x1mjFbT9/7dS8p85n8BuwlY5JvuBIQkKhuCNFfrUxBWyu87CFnXWjIupCD2VO/GbxaCvzrRjLZjAngLCMtZbYBALksqGPgTUN7ZM24XbPWyLtKPaXF2i4XRR9u6eTj5BfnLbKAU5PIVfjIS+vNYYogteQ==</Modulus><Exponent>AQAB</Exponent></RSAKeyValue>"
res = Key.activate(token="WyIyNTU1IiwiRjdZZTB4RmtuTVcrQlNqcSszbmFMMHB3aWFJTlBsWW1Mbm9raVFyRyJd",\
rsa_pub_key=pubKey,\
product_id=3349, key="ICVLD-VVSZR-ZTICT-YKGXL", machine_code=Helpers.GetMachineCode())
if res[0] == None not Helpers.IsOnRightMachine(res[0]):
print("An error occured: {0}".format(res[1]))
else:
print("Success")
You can read more about the way the RSA public key, etc are configured here.
A:
I documented how to obfuscate the python by converting it to .so file, and converting it to a python wheel file:
https://github.com/UM-NLP/python-obfuscation
|
How do I protect Python code from being read by users?
|
I am developing a piece of software in Python that will be distributed to my employer's customers. My employer wants to limit the usage of the software with a time-restricted license file.
If we distribute the .py files or even .pyc files it will be easy to (decompile and) remove the code that checks the license file.
Another aspect is that my employer does not want the code to be read by our customers, fearing that the code may be stolen or at least the "novel ideas".
Is there a good way to handle this problem?
|
[
"\"Is there a good way to handle this problem?\" No. Nothing can be protected against reverse engineering. Even the firmware on DVD machines has been reverse engineered and the AACS Encryption key exposed. And that's in spite of the DMCA making that a criminal offense.\nSince no technical method can stop your customers from reading your code, you have to apply ordinary commercial methods.\n\nLicenses. Contracts. Terms and Conditions. This still works even when people can read the code. Note that some of your Python-based components may require that you pay fees before you sell software using those components. Also, some open-source licenses prohibit you from concealing the source or origins of that component.\n\nOffer significant value. If your stuff is so good -- at a price that is hard to refuse -- there's no incentive to waste time and money reverse engineering anything. Reverse engineering is expensive. Make your product slightly less expensive.\n\nOffer upgrades and enhancements that make any reverse engineering a bad idea. When the next release breaks their reverse engineering, there's no point. This can be carried to absurd extremes, but you should offer new features that make the next release more valuable than reverse engineering.\n\nOffer customization at rates so attractive that they'd rather pay you to build and support the enhancements.\n\nUse a license key which expires. This is cruel, and will give you a bad reputation, but it certainly makes your software stop working.\n\nOffer it as a web service. SaaS involves no downloads to customers.\n\n\n",
"Python, being a byte-code-compiled interpreted language, is very difficult to lock down. Even if you use a exe-packager like py2exe, the layout of the executable is well-known, and the Python byte-codes are well understood.\nUsually in cases like this, you have to make a tradeoff. How important is it really to protect the code? Are there real secrets in there (such as a key for symmetric encryption of bank transfers), or are you just being paranoid? Choose the language that lets you develop the best product quickest, and be realistic about how valuable your novel ideas are.\nIf you decide you really need to enforce the license check securely, write it as a small C extension so that the license check code can be extra-hard (but not impossible!) to reverse engineer, and leave the bulk of your code in Python.\n",
"Python is not the tool you need\nYou must use the right tool to do the right thing, and Python was not designed to be obfuscated. It's the contrary; everything is open or easy to reveal or modify in Python because that's the language's philosophy.\nIf you want something you can't see through, look for another tool. This is not a bad thing, it is important that several different tools exist for different usages.\nObfuscation is really hard\nEven compiled programs can be reverse-engineered so don't think that you can fully protect any code. You can analyze obfuscated PHP, break the flash encryption key, etc. Newer versions of Windows are cracked every time.\nHaving a legal requirement is a good way to go\nYou cannot prevent somebody from misusing your code, but you can easily discover if someone does. Therefore, it's just a casual legal issue.\nCode protection is overrated\nNowadays, business models tend to go for selling services instead of products. You cannot copy a service, pirate nor steal it. Maybe it's time to consider to go with the flow...\n",
"Compile python and distribute binaries!\nSensible idea: \nUse Cython, Nuitka, Shed Skin or something similar to compile python to C code, then distribute your app as python binary libraries (pyd) instead.\nThat way, no Python (byte) code is left and you've done any reasonable amount of obscurification anyone (i.e. your employer) could expect from regular Code, I think. (.NET or Java less safe than this case, as that bytecode is not obfuscated and can relatively easily be decompiled into reasonable source.)\nCython is getting more and more compatible with CPython, so I think it should work. (I'm actually considering this for our product.. We're already building some thirdparty libs as pyd/dlls, so shipping our own python code as binaries is not a overly big step for us.)\nSee This Blog Post (not by me) for a tutorial on how to do it. (thx @hithwen)\nCrazy idea:\nYou could probably get Cython to store the C-files separately for each module, then just concatenate them all and build them with heavy inlining. That way, your Python module is pretty monolithic and difficult to chip at with common tools.\nBeyond crazy:\nYou might be able to build a single executable if you can link to (and optimize with) the python runtime and all libraries (dlls) statically. That way, it'd sure be difficult to intercept calls to/from python and whatever framework libraries you use. This cannot be done if you're using LGPL code though.\n",
"I understand that you want your customers to use the power of python but do not want expose the source code.\nHere are my suggestions:\n(a) Write the critical pieces of the code as C or C++ libraries and then use SIP or swig to expose the C/C++ APIs to Python namespace.\n(b) Use cython instead of Python\n(c) In both (a) and (b), it should be possible to distribute the libraries as licensed binary with a Python interface.\n",
"Have you had a look at pyminifier? It does Minify, obfuscate, and compress Python code. The example code looks pretty nasty for casual reverse engineering.\n$ pyminifier --nonlatin --replacement-length=50 /tmp/tumult.py\n#!/usr/bin/env python3\nﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲמּ=ImportError\nﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱=print\nﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ=False\nﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ澨=object\ntry:\n import demiurgic\nexcept ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲמּ:\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱(\"Warning: You're not demiurgic. Actually, I think that's normal.\")\ntry:\n import mystificate\nexcept ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲמּ:\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱(\"Warning: Dark voodoo may be unreliable.\")\nﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲﺬ=ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ\nclass ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ(ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ澨):\n def __init__(self,*args,**kwargs):\n pass\n def ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ클(self,dactyl):\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ퐐=demiurgic.palpitation(dactyl)\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ=mystificate.dark_voodoo(ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ퐐)\n return ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ\n def ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ(self,whatever):\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱(whatever)\nif __name__==\"__main__\":\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ燱(\"Forming...\")\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲﺃ=ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ(\"epicaricacy\",\"perseverate\")\n ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲﺃ.ﺭ异ﭞﰣﺁں뻛嬭ﱌꝪﴹ뙫퉊ﳦﲣפּܟﺶﶨࠔﶻ䉊ﰸﭳᣲ(\"Codswallop\")\n# Created by pyminifier (https://github.com/liftoff/pyminifier)\n\n",
"Is your employer aware that he can \"steal\" back any ideas that other people get from your code? I mean, if they can read your work, so can you theirs. Maybe looking at how you can benefit from the situation would yield a better return of your investment than fearing how much you could lose.\n[EDIT] Answer to Nick's comment:\nNothing gained and nothing lost. The customer has what he wants (and paid for it since he did the change himself). Since he doesn't release the change, it's as if it didn't happen for everyone else.\nNow if the customer sells the software, they have to change the copyright notice (which is illegal, so you can sue and will win -> simple case).\nIf they don't change the copyright notice, the 2nd level customers will notice that the software comes from you original and wonder what is going on. Chances are that they will contact you and so you will learn about the reselling of your work.\nAgain we have two cases: The original customer sold only a few copies. That means they didn't make much money anyway, so why bother. Or they sold in volume. That means better chances for you to learn about what they do and do something about it.\nBut in the end, most companies try to comply to the law (once their reputation is ruined, it's much harder to do business). So they will not steal your work but work with you to improve it. So if you include the source (with a license that protects you from simple reselling), chances are that they will simply push back changes they made since that will make sure the change is in the next version and they don't have to maintain it. That's win-win: You get changes and they can make the change themselves if they really, desperately need it even if you're unwilling to include it in the official release.\n",
"Use Cython. It will compile your modules to high-performant C files, which can then be compiled to native binary libraries. This is basically un-reversable, compared to .pyc bytecode!\nI've written a detailed article on how to set up Cython for a Python project, check it out:\nProtecting Python Sources With Cython\n",
"Do not rely on obfuscation. As You have correctly concluded, it offers very limited protection.\nUPDATE: Here is a link to paper which reverse engineered obfuscated python code in Dropbox. The approach - opcode remapping is a good barrier, but clearly it can be defeated.\nInstead, as many posters have mentioned make it:\n\nNot worth reverse engineering time (Your software is so good, it makes sense to pay)\nMake them sign a contract and do a license audit if feasible. \n\nAlternatively, as the kick-ass Python IDE WingIDE does: Give away the code. That's right, give the code away and have people come back for upgrades and support.\n",
"Shipping .pyc files has its problems - they are not compatible with any other python version than the python version they were created with, which means you must know which python version is running on the systems the product will run on. That's a very limiting factor.\n",
"In some circumstances, it may be possible to move (all, or at least a key part) of the software into a web service that your organization hosts.\nThat way, the license checks can be performed in the safety of your own server room.\n",
"Though there's no perfect solution, the following can be done:\n\nMove some critical piece of startup code into a native library.\nEnforce the license check in the native library.\n\nIf the call to the native code were to be removed, the program wouldn't start anyway. If it's not removed then the license will be enforced.\nThough this is not a cross-platform or a pure-Python solution, it will work.\n",
"I was surprised in not seeing pyconcrete in any answer. Maybe because it's newer than the question?\nIt could be exactly what you need(ed).\nInstead of obfuscating the code, it encrypts it and decrypts at load time.\nFrom pypi page:\n\nProtect python script work flow \n\nyour_script.py import pyconcrete\npyconcrete will hook import module \nwhen your script do import MODULE,\n pyconcrete import hook will try to find MODULE.pye first and then\n decrypt MODULE.pye via _pyconcrete.pyd and execute decrypted data (as\n .pyc content) \nencrypt & decrypt secret key record in _pyconcrete.pyd\n (like DLL or SO) the secret key would be hide in binary code, can’t\n see it directly in HEX view\n\n\n",
"I think there is one more method to protect your Python code; part of the Obfuscation method. I believe there was a game like Mount and Blade or something that changed and recompiled their own python interpreter (the original interpreter which i believe is open source) and just changed the OP codes in the OP code table to be different then the standard python OP codes.\nSo the python source is unmodified but the file extensions of the *.pyc files are different and the op codes don't match to the public python.exe interpreter. If you checked the games data files all the data was in Python source format.\nAll sorts of nasty tricks can be done to mess with immature hackers this way. Stopping a bunch of inexperienced hackers is easy. It's the professional hackers that you will not likely beat. But most companies don't keep pro hackers on staff long I imagine (likely because things get hacked). But immature hackers are all over the place (read as curious IT staff).\nYou could for example, in a modified interpreter, allow it to check for certain comments or doc strings in your source. You could have special OP codes for such lines of code. For example:\nOP 234 is for source line \"# Copyright I wrote this\"\nor compile that line into op codes that are equivalent to \"if False:\" if \"# Copyright\" is missing. Basically disabling a whole block of code for what appears to be some obscure reason.\nOne use case where recompiling a modified interpreter may be feasible is where you didn't write the app, the app is big, but you are paid to protect it, such as when you're a dedicated server admin for a financial app.\nI find it a little contradictory to leave the source or opcodes open for eyeballs, but use SSL for network traffic. SSL is not 100% safe either. But it's used to stop MOST eyes from reading it. 
A wee bit precaution is sensible.\nAlso, if enough people deem that Python source and opcodes are too visible, it's likely someone will eventually develop at least a simple protection tool for it. So the more people asking \"how to protect Python app\" only promotes that development.\n",
"The reliable only way to protect code is to run it on a server you control and provide your clients with a client which interfaces with that server.\n",
"Depending in who the client is, a simple protection mechanism, combined with a sensible license agreement will be far more effective than any complex licensing/encryption/obfuscation system.\nThe best solution would be selling the code as a service, say by hosting the service, or offering support - although that isn't always practical.\nShipping the code as .pyc files will prevent your protection being foiled by a few #s, but it's hardly effective anti-piracy protection (as if there is such a technology), and at the end of the day, it shouldn't achieve anything that a decent license agreement with the company will.\nConcentrate on making your code as nice to use as possible - having happy customers will make your company far more money than preventing some theoretical piracy..\n",
"Another attempt to make your code harder to steal is to use jython and then use java obfuscator. \nThis should work pretty well as jythonc translate python code to java and then java is compiled to bytecode. So ounce you obfuscate the classes it will be really hard to understand what is going on after decompilation, not to mention recovering the actual code. \nThe only problem with jython is that you can't use python modules written in c.\n",
"You should take a look at how the guys at getdropbox.com do it for their client software, including Linux. It's quite tricky to crack and requires some quite creative disassembly to get past the protection mechanisms.\n",
"What about signing your code with standard encryption schemes by hashing and signing important files and checking it with public key methods?\nIn this way you can issue license file with a public key for each customer.\nAdditional you can use an python obfuscator like this one (just googled it).\n",
"The best you can do with Python is to obscure things.\n\nStrip out all docstrings\nDistribute only the .pyc compiled files.\nfreeze it\nObscure your constants inside a class/module so that help(config) doesn't show everything\n\nYou may be able to add some additional obscurity by encrypting part of it and decrypting it on the fly and passing it to eval(). But no matter what you do someone can break it.\nNone of this will stop a determined attacker from disassembling the bytecode or digging through your api with help, dir, etc.\n",
"Idea of having time restricted license and check for it in locally installed program will not work. Even with perfect obfuscation, license check can be removed. However if you check license on remote system and run significant part of the program on your closed remote system, you will be able to protect your IP.\nPreventing competitors from using the source code as their own or write their inspired version of the same code, one way to protect is to add signatures to your program logic (some secrets to be able to prove that code was stolen from you) and obfuscate the python source code so, it's hard to read and utilize. \nGood obfuscation adds basically the same protection to your code, that compiling it to executable (and stripping binary) does. Figuring out how obfuscated complex code works might be even harder than actually writing your own implementation. \nThis will not help preventing hacking of your program. Even with obfuscation code license stuff will be cracked and program may be modified to have slightly different behaviour (in the same way that compiling code to binary does not help protection of native programs). \nIn addition to symbol obfuscation might be good idea to unrefactor the code, which makes everything even more confusing if e.g. call graphs points to many different places even if actually those different places does eventually the same thing. \nLogical signature inside obfuscated code (e.g. you may create table of values which are used by program logic, but also used as signature), which can be used to determine that code is originated from you. If someone decides to use your obfuscated code module as part of their own product (even after reobfuscating it to make it seem different) you can show, that code is stolen with your secret signature.\n",
"I have looked at software protection in general for my own projects and the general philosophy is that complete protection is impossible. The only thing that you can hope to achieve is to add protection to a level that would cost your customer more to bypass than it would to purchase another license.\nWith that said I was just checking google for python obsfucation and not turning up a lot of anything. In a .Net solution, obsfucation would be a first approach to your problem on a windows platform, but I am not sure if anyone has solutions on Linux that work with Mono. \nThe next thing would be to write your code in a compiled language, or if you really want to go all the way, then in assembler. A stripped out executable would be a lot harder to decompile than an interpreted language.\nIt all comes down to tradeoffs. On one end you have ease of software development in python, in which it is also very hard to hide secrets. On the other end you have software written in assembler which is much harder to write, but is much easier to hide secrets.\nYour boss has to choose a point somewhere along that continuum that supports his requirements. And then he has to give you the tools and time so you can build what he wants. However my bet is that he will object to real development costs versus potential monetary losses.\n",
"It is possible to have the py2exe byte-code in a crypted resource for a C launcher that loads and executes it in memory. Some ideas here and here.\nSome have also thought of a self modifying program to make reverse engineering expensive.\nYou can also find tutorials for preventing debuggers, make the disassembler fail, set false debugger breakpoints and protect your code with checksums. Search for [\"crypted code\" execute \"in memory\"] for more links.\nBut as others already said, if your code is worth it, reverse engineers will succeed in the end.\n",
"Use the same way to protect binary file of c/c++, that is, obfuscate each function body in executable or library binary file, insert an instruction \"jump\" at the begin of each function entry, jump to special function to restore obfuscated code. Byte-code is binary code of Python script, so \n\nFirst compile python script to code object\nThen iterate each code object, obfuscate co_code of each code object as the following\n\n\n 0 JUMP_ABSOLUTE n = 3 + len(bytecode)\n\n 3\n ...\n ... Here it's obfuscated bytecode\n ...\n\n n LOAD_GLOBAL ? (__pyarmor__)\n n+3 CALL_FUNCTION 0\n n+6 POP_TOP\n n+7 JUMP_ABSOLUTE 0\n\n\nSave obfuscated code object as .pyc or .pyo file\n\nThose obfuscated file (.pyc or .pyo) can be used by normal python interpreter, when those code object is called first time\n\nFirst op is JUMP_ABSOLUTE, it will jump to offset n\nAt offset n, the instruction is to call a PyCFunction. This function will restore those obfuscated bytecode between offset 3 and n, and put the original byte-code at offset 0. The obfuscated code can be got by the following code\n\n char *obfucated_bytecode;\n Py_ssize_t len;\n PyFrameObject* frame = PyEval_GetFrame();\n PyCodeObject *f_code = frame->f_code;\n PyObject *co_code = f_code->co_code; \n PyBytes_AsStringAndSize(co_code, &obfucated_bytecode, &len)\n\nAfter this function returns, the last instruction is to jump to\noffset 0. The really byte-code now is executed.\n\nThere is a tool Pyarmor to obfuscate python scripts by this way.\n",
"There is a comprehensive answer on concealing the python source code, which can be find here.\nPossible techniques discussed are:\n- use compiled bytecode (python -m compileall)\n- executable creators (or installers like PyInstaller)\n- software as an service (the best solution to conceal your code in my opinion)\n- python source code obfuscators\n",
"Neiher Cython nor Nuitka were not the answer, because when running the solution that is compiled with Nuitka or Cython into .pyd or .exe files a cache directory is generated and all .pyc files are copied into the cache directory, so an attacker simply can decompile .pyc files and see your code or change it.\n",
"using cxfreeze ( py2exe for linux ) will do the job.\nhttp://cx-freeze.sourceforge.net/\nit is available in ubuntu repositories\n",
"If we focus on software licensing, I would recommend to take a look at another Stack Overflow answer I wrote here to get some inspiration of how a license key verification system can be constructed.\nThere is an open-source library on GitHub that can help you with the license verification bit.\nYou can install it by pip install licensing and then add the following code:\npubKey = \"<RSAKeyValue><Modulus>sGbvxwdlDbqFXOMlVUnAF5ew0t0WpPW7rFpI5jHQOFkht/326dvh7t74RYeMpjy357NljouhpTLA3a6idnn4j6c3jmPWBkjZndGsPL4Bqm+fwE48nKpGPjkj4q/yzT4tHXBTyvaBjA8bVoCTnu+LiC4XEaLZRThGzIn5KQXKCigg6tQRy0GXE13XYFVz/x1mjFbT9/7dS8p85n8BuwlY5JvuBIQkKhuCNFfrUxBWyu87CFnXWjIupCD2VO/GbxaCvzrRjLZjAngLCMtZbYBALksqGPgTUN7ZM24XbPWyLtKPaXF2i4XRR9u6eTj5BfnLbKAU5PIVfjIS+vNYYogteQ==</Modulus><Exponent>AQAB</Exponent></RSAKeyValue>\"\n\nres = Key.activate(token=\"WyIyNTU1IiwiRjdZZTB4RmtuTVcrQlNqcSszbmFMMHB3aWFJTlBsWW1Mbm9raVFyRyJd\",\\\n rsa_pub_key=pubKey,\\\n product_id=3349, key=\"ICVLD-VVSZR-ZTICT-YKGXL\", machine_code=Helpers.GetMachineCode())\n\nif res[0] == None not Helpers.IsOnRightMachine(res[0]):\n print(\"An error occured: {0}\".format(res[1]))\nelse:\n print(\"Success\")\n\nYou can read more about the way the RSA public key, etc are configured here.\n",
"I documented how to obfuscate the python by converting it to .so file, and converting it to a python wheel file:\nhttps://github.com/UM-NLP/python-obfuscation\n"
] |
[
500,
413,
321,
169,
61,
46,
35,
30,
24,
20,
18,
17,
14,
13,
12,
10,
8,
6,
6,
6,
5,
4,
3,
2,
2,
2,
1,
1,
0
] |
[] |
[] |
[
"copy_protection",
"licensing",
"obfuscation",
"python"
] |
stackoverflow_0000261638_copy_protection_licensing_obfuscation_python.txt
|
Q:
Multiply all elements in each row of an array by numbers in a 1D array
I have a torch tensor (x) of shape [16,3,32,32], 16 images, 3 colour channels 32x32. I'm doing diffusion and need to apply the following formula to the images
return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * error
Error has the same dimensions as x. This works fine when sqrt_alpha_hat and sqrt_one_minus_alpha_hat are integers, the tensors are all multiplied by the number and then added up. I want to multiply each image by a different value. So my sqrt_alpha_hat and sqrt_one_minus_alpha_hat are 1D arrays of size 32, one number for each image. Keep in mind this array is in CUDA so some np functions won't work.
I tried using np.fill to create a massive array with format:
[[[1 ... 1], ... [1 ... 1] (32 columns)
... (32 rows)
[1 ... 1], ... [1 ... 1]],
... (3 colour channels)
[[1 ... 1], ... [1 ... 1]
...
[1 ... 1], ... [1 ... 1]]]
... (16 images)
[[[16 ... 16], ... [16 ... 16]
...
[16 ... 16], ... [16 ... 16]],
...
[[16 ... 16], ... [16 ... 16]
...
[16 ... 16], ... [16 ... 16]]]
but that didn't work. There surely must be a simpler way to do this.
A:
Used
sqrt_alpha_hat_table = torch.stack([torch.full(x.shape[1:], sqrt_alpha_hat[i]) for i in range(x.shape[0])]).to(device)
A:
The "correct" way to do this (vectorized rather than loop-based, and without allocating lots of memory for repeating row vectors) is with expand(). I'll assume that you meant either that sqrt_alpha_hat is of size [16], or that there are 32 images, or that you made a semantic error somewhere else in your description.
# transform from size [n_images] to size [n_images,1,1,1]
sqrt_alpha_hat = sqrt_alpha_hat.unsqueeze(1).unsqueeze(1).unsqueeze(1)
# broadcast (view tensor view, rather than copying values) across new
dimensions to size [n_images,3,32,32]
sqrt_alpha_hat = sqrt_alpha_hat.expand(n_images,3,32,32)
# same for sqrt_one_minus_alpha_hat
...
# now you can multiply and add easily because the dimensions of all arrays match
return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * error
As a general rule, probably best to stick to torch functions rather than np functions when using pytorch tensors to avoid issues such as CUDA incompatibility etc.
|
Multiply all elements in each row of an array by numbers in a 1D array
|
I have a torch tensor (x) of shape [16,3,32,32], 16 images, 3 colour channels 32x32. I'm doing diffusion and need to apply the following formula to the images
return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * error
Error has the same dimensions as x. This works fine when sqrt_alpha_hat and sqrt_one_minus_alpha_hat are integers, the tensors are all multiplied by the number and then added up. I want to multiply each image by a different value. So my sqrt_alpha_hat and sqrt_one_minus_alpha_hat are 1D arrays of size 32, one number for each image. Keep in mind this array is in CUDA so some np functions won't work.
I tried using np.fill to create a massive array with format:
[[[1 ... 1], ... [1 ... 1] (32 columns)
... (32 rows)
[1 ... 1], ... [1 ... 1]],
... (3 colour channels)
[[1 ... 1], ... [1 ... 1]
...
[1 ... 1], ... [1 ... 1]]]
... (16 images)
[[[16 ... 16], ... [16 ... 16]
...
[16 ... 16], ... [16 ... 16]],
...
[[16 ... 16], ... [16 ... 16]
...
[16 ... 16], ... [16 ... 16]]]
but that didn't work. There surely must be a simpler way to do this.
|
[
"Used\nsqrt_alpha_hat_table = torch.stack([torch.full(x.shape[1:], sqrt_alpha_hat[i]) for i in range(x.shape[0])]).to(device)\n\n",
"The \"correct\" way to do this (vectorized rather than loop-based, and without allocating lots of memory for repeating row vectors) is with expand(). I'll assume that you meant either that sqrt_alpha_hat is of size [16], or that there are 32 images, or that you made a semantic error somewhere else in your description.\n# transform from size [n_images] to size [n_images,1,1,1]\nsqrt_alpha_hat = sqrt_alpha_hat.unsqueeze(1).unsqueeze(1).unsqueeze(1)\n\n# broadcast (view tensor view, rather than copying values) across new \ndimensions to size [n_images,3,32,32] \nsqrt_alpha_hat = sqrt_alpha_hat.expand(n_images,3,32,32)\n\n# same for sqrt_one_minus_alpha_hat\n... \n\n# now you can multiply and add easily because the dimensions of all arrays match\nreturn sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * error\n\nAs a general rule, probably best to stick to torch functions rather than np functions when using pytorch tensors to avoid issues such as CUDA incompatibility etc.\n"
] |
[
0,
0
] |
[] |
[] |
[
"python",
"pytorch"
] |
stackoverflow_0074593838_python_pytorch.txt
|
Q:
How to efficiently list all files in an Azure blob using python?
I need to list all files in an Azure blob using python. Currently I use the code below. this worked well when there were few files. But now I have a large number of files and the script runs more than an hour. The time-consuming part is the for loop. How can this be done faster?
import os, uuid
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
import pandas as pd
connect_str = "************"
blob_service_client = BlobServiceCliaent.from_connection_string(connect_str)
blob_service_client.get_account_information()
c = blob_service_client.list_containers()
container_client = blob_service_client.get_container_client("blobName")
l = []
for blob in container_client.list_blobs():
l.append(blob.name)
A:
I could able to achieve this using list_blobs method of BlockBlobService. After reproducing from my end, I have observed that the list_blobs method of BlobServiceClient returns all the properties of blob which is taking more time to proocess whereas BlockBlobService returns objects. Below is the code that was working for me.
import os
from azure.storage.blob import BlockBlobService
import datetime
ACCOUNT_NAME = "<YOUR_ACCOUNT_NAME>"
CONTAINER_NAME = "<YOUR_CONTAINER_NAME>"
SAS_TOKEN='<YOUR_SAS_TOKEN>'
block_blob_service = BlockBlobService(account_name=ACCOUNT_NAME,account_key=None,sas_token=SAS_TOKEN)
# Lists All Blobs
l =[]
print("\nList blobs in the container")
generator = block_blob_service.list_blobs(CONTAINER_NAME)
for blob in generator:
print("a"+str(datetime.datetime.now()))
blobname=blob
l.append(blob.name)
print(l)
print("b"+str(datetime.datetime.now()))
OUTPUT:
|
How to efficiently list all files in an Azure blob using python?
|
I need to list all files in an Azure blob using python. Currently I use the code below. this worked well when there were few files. But now I have a large number of files and the script runs more than an hour. The time-consuming part is the for loop. How can this be done faster?
import os, uuid
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
import pandas as pd
connect_str = "************"
blob_service_client = BlobServiceCliaent.from_connection_string(connect_str)
blob_service_client.get_account_information()
c = blob_service_client.list_containers()
container_client = blob_service_client.get_container_client("blobName")
l = []
for blob in container_client.list_blobs():
l.append(blob.name)
|
[
"I could able to achieve this using list_blobs method of BlockBlobService. After reproducing from my end, I have observed that the list_blobs method of BlobServiceClient returns all the properties of blob which is taking more time to proocess whereas BlockBlobService returns objects. Below is the code that was working for me.\nimport os\nfrom azure.storage.blob import BlockBlobService\nimport datetime\n\nACCOUNT_NAME = \"<YOUR_ACCOUNT_NAME>\"\nCONTAINER_NAME = \"<YOUR_CONTAINER_NAME>\"\nSAS_TOKEN='<YOUR_SAS_TOKEN>'\n\nblock_blob_service = BlockBlobService(account_name=ACCOUNT_NAME,account_key=None,sas_token=SAS_TOKEN)\n\n# Lists All Blobs\nl =[]\nprint(\"\\nList blobs in the container\")\ngenerator = block_blob_service.list_blobs(CONTAINER_NAME)\nfor blob in generator:\n print(\"a\"+str(datetime.datetime.now()))\n blobname=blob\n l.append(blob.name)\n \nprint(l)\n \nprint(\"b\"+str(datetime.datetime.now()))\n\nOUTPUT:\n\n"
] |
[
0
] |
[] |
[] |
[
"azure",
"python"
] |
stackoverflow_0074565291_azure_python.txt
|
Q:
webscraping on websocket streaming using Python 3.x
I've been webscraping for a long time and recently decided to scrape a video stream via websocket streaming. I fully understand websockets and how they work, but I don't fully understand the streaming part.
I'm trying to scrape a stream where I get base64 data using Python 3.10, and when I try to decode it I find that it can't be read (exactly because it's data from the video stream).
The stream I'm trying to extract is from a company that provides some weather data and I need to get that data without needing to use Selenium or some other library for testing.
Is there any effective way to do this? Maybe some well performing library, or some way to "read" the data from the stream somehow?
Here is an impression that I took from the data obtained by the websocket:
Even after trying to decode the obtained base64 to utf-8, the result is the same as the image above.
A:
I can recommend this package: https://github.com/websocket-client/websocket-client
It is pretty simple and stable and it works flawlessly. Also it supports asyncio.
def on_message(ws, message):
...
def on_open(ws):
...
def on_close(ws, close_status_code, close_msg):
...
def on_error(ws, error):
...
ws = websocket.WebSocketApp(
"wss://<address>",
on_open=on_open,
on_message=on_message,
on_error=on_error,
on_close=on_close,
)
ws.run_forever()
Usually when scraping WS you need to initiate the proccess by sending some command (you can track it by Dev Tools also, this package will be marked as green up arrow). Then you can reproduce it by using ws.send("<message>")
|
webscraping on websocket streaming using Python 3.x
|
I've been webscraping for a long time and recently decided to scrape a video stream via websocket streaming. I fully understand websockets and how they work, but I don't fully understand the streaming part.
I'm trying to scrape a stream where I get base64 data using Python 3.10, and when I try to decode it I find that it can't be read (exactly because it's data from the video stream).
The stream I'm trying to extract is from a company that provides some weather data and I need to get that data without needing to use Selenium or some other library for testing.
Is there any effective way to do this? Maybe some well performing library, or some way to "read" the data from the stream somehow?
Here is an impression that I took from the data obtained by the websocket:
Even after trying to decode the obtained base64 to utf-8, the result is the same as the image above.
|
[
"I can recommend this package: https://github.com/websocket-client/websocket-client\nIt is pretty simple and stable and it works flawlessly. Also it supports asyncio.\ndef on_message(ws, message):\n ...\n\ndef on_open(ws):\n ...\n\ndef on_close(ws, close_status_code, close_msg):\n ...\n\ndef on_error(ws, error):\n ...\n\nws = websocket.WebSocketApp(\n \"wss://<address>\",\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close,\n)\nws.run_forever()\n\nUsually when scraping WS you need to initiate the proccess by sending some command (you can track it by Dev Tools also, this package will be marked as green up arrow). Then you can reproduce it by using ws.send(\"<message>\")\n"
] |
[
4
] |
[] |
[] |
[
"python",
"web_scraping",
"websocket"
] |
stackoverflow_0074595506_python_web_scraping_websocket.txt
|
Q:
how to exit a function when an event is set?
I have an infinite loop thread that sets an event when a sensor is high/true
event = threading.Event()
def eventSetter():
while True:
if sensor:
event.set()
else:
event.clear()
th1 = threading.thread(target=eventSetter)
th1.start
and I have a function capture that takes 5 sec to execute
def capture():
time.sleep(2) #sleep represents a task that takes 2 sec to finish
time.sleep(1)
time.sleep(2)
return
now I want to exit the function capture in the middle of its task whenever the event is set
for example if task 1 takes 5sec to finish and the event occurs at time 2sec, the task should not continue at time 2sec and the function should exit
I tried checking for the event every line but i don't know how to exit in the middle of its task thus it waits for the task to finish before return applies also I didn't like the look of multiple if/return
def capture():
time.sleep(2) #sleep represents a task that takes sec to finish
if event.is_set():
return
time.sleep(1)
if event.is_set():
return
time.sleep(2)
if event.is_set():
return
A:
Do you want breaking loop ?
if you want break any time or condition you need to break `key.
for example
event = threading.Event()
def eventSetter():
while True:
if sensor:
event.set()
break
#loop stoped.
else:
event.clear()
return True #or none.
A:
You're going to have to check the event state every chance you get in order to break out of capture as soon as possible. The variation you provided is fine, but here are a couple other ways to do the same thing:
def capture():
# nested checks are fine with only a few tasks but can get awkward
# when the indentation gets too deep to be easily readable
time.sleep(2) # task 1
if not event.is_set()
time.sleep(1) # task 2
if not event.is_set():
time.sleep(2) # task 3
def capture():
# defining a list of functions to be executed is great when there
# are many subtasks but can be overkill if there are just a few
from functools import partial
subtasks = [
partial(time.sleep, 2), # first task
partial(time.sleep, 1), # second task
partial(time.sleep, 2) # third task
]
for subtask in subtasks:
if event.is_set():
return
subtask()
|
how to exit a function when an event is set?
|
I have an infinite loop thread that sets an event when a sensor is high/true
event = threading.Event()
def eventSetter():
while True:
if sensor:
event.set()
else:
event.clear()
th1 = threading.thread(target=eventSetter)
th1.start
and I have a function capture that takes 5 sec to execute
def capture():
time.sleep(2) #sleep represents a task that takes 2 sec to finish
time.sleep(1)
time.sleep(2)
return
now I want to exit the function capture in the middle of its task whenever the event is set
for example if task 1 takes 5sec to finish and the event occurs at time 2sec, the task should not continue at time 2sec and the function should exit
I tried checking for the event every line but i don't know how to exit in the middle of its task thus it waits for the task to finish before return applies also I didn't like the look of multiple if/return
def capture():
time.sleep(2) #sleep represents a task that takes sec to finish
if event.is_set():
return
time.sleep(1)
if event.is_set():
return
time.sleep(2)
if event.is_set():
return
|
[
"Do you want breaking loop ?\nif you want break any time or condition you need to break `key.\nfor example\nevent = threading.Event()\ndef eventSetter():\n while True:\n if sensor:\n event.set()\n break\n #loop stoped.\n else:\n event.clear()\n\n return True #or none.\n\n",
"You're going to have to check the event state every chance you get in order to break out of capture as soon as possible. The variation you provided is fine, but here are a couple other ways to do the same thing:\ndef capture():\n # nested checks are fine with only a few tasks but can get awkward\n # when the indentation gets too deep to be easily readable\n time.sleep(2) # task 1\n if not event.is_set()\n time.sleep(1) # task 2\n if not event.is_set():\n time.sleep(2) # task 3\n\ndef capture():\n # defining a list of functions to be executed is great when there\n # are many subtasks but can be overkill if there are just a few\n from functools import partial\n subtasks = [\n partial(time.sleep, 2), # first task\n partial(time.sleep, 1), # second task\n partial(time.sleep, 2) # third task\n ]\n\n for subtask in subtasks:\n if event.is_set():\n return\n subtask()\n\n"
] |
[
0,
0
] |
[] |
[] |
[
"events",
"python"
] |
stackoverflow_0074595714_events_python.txt
|
Q:
how to upload image correctly. Django
I am building a Django application (run in local) and I am having headaches about uploading files/pictures. I have read tons of questions/answers everywhere as well as followed the official doc, but somehow I still have problems.
In my models.py:
FuncionarioPathFoto = models.FileField(
"Foto",
upload_to = "images/",
db_column= "FuncionarioPathFoto",
null= False,
blank = False
)
In my views (I'm using inline forms, so the code is big):
def create_funcionario(request):
if request.method == "GET":
form = FuncionariosForm
form_funcionarioadicional_factory = inlineformset_factory(Funcionarios, FuncionarioAdicional, form=FuncionarioAdicionalForm, extra=1)
form_funcionarioaux_factory = inlineformset_factory(Funcionarios, FuncionarioAux, form=FuncionarioAuxForm, extra=1)
form_funcionarioarquivo_factory = inlineformset_factory(Funcionarios, FuncionarioArquivo, form=FuncionarioArquivoForm, extra=1)
form_funcionarioadicional = form_funcionarioadicional_factory()
form_funcionarioaux = form_funcionarioaux_factory()
form_funcionarioarquivo = form_funcionarioarquivo_factory()
context = {
'form': form,
'form_funcionarioadicional': form_funcionarioadicional,
'form_funcionarioaux': form_funcionarioaux,
'form_funcionarioarquivo': form_funcionarioarquivo,
}
return render(request, '../templates/funcionarios/form_funcionarios.html', context)
elif request.method == "POST":
form = FuncionariosForm(request.POST)
form_funcionarioadicional_factory = inlineformset_factory(Funcionarios, FuncionarioAdicional, form=FuncionarioAdicionalForm)
form_funcionarioaux_factory = inlineformset_factory(Funcionarios, FuncionarioAux, form=FuncionarioAuxForm)
form_funcionarioarquivo_factory = inlineformset_factory(Funcionarios, FuncionarioArquivo, form=FuncionarioArquivoForm)
form_funcionarioadicional = form_funcionarioadicional_factory(request.POST)
form_funcionarioaux = form_funcionarioaux_factory(request.POST)
form_funcionarioarquivo = form_funcionarioarquivo_factory(request.POST)
if form.is_valid() and form_funcionarioadicional.is_valid() and form_funcionarioaux.is_valid() and form_funcionarioarquivo.is_valid():
funcionario = form.save()
form_funcionarioadicional.instance = funcionario
form_funcionarioaux.instance = funcionario
form_funcionarioarquivo.instance = funcionario
form_funcionarioadicional.save()
form_funcionarioaux.save()
form_funcionarioarquivo.save()
messages.success(request, "Funcionário adicionado com sucesso!")
return redirect(reverse('lista_funcionarios'))
else:
context = {
'form': form,
'form_funcionarioadicional': form_funcionarioadicional,
'form_funcionarioaux': form_funcionarioaux,
'form_funcionarioarquivo': form_funcionarioarquivo,
}
return render(request, '../templates/funcionarios/form_funcionarios.html', context)
I put this in my urls, and settings:
urls:
if settings.DEBUG:
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT,
)
settings:
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
And I already tried to add <form method="POST" enctype="multipart/form-data">
but I was unsuccessful, when submitting my form, that field keeps giving error.
I tried uploading the file via /admin, and it went to the directory correctly with no errors.
what can i try to do to solve it?
A:
Let me explain the process of uploading files with Django with my own method.
If a file is sent to the server, it is kept in request.FILES as temp. You can see it by saying print(request.FILES) .
First, read the temp data and then load it into the relevant directory with the open function in python.
For example
img = request.FILES['img'].read()
print(type(img))
open('mypath/name.png','wb').write(img) # Note if output not bytes change to open mode 'w'
A:
You need to pass request.FILES in form in views.py
form = FuncionariosForm(request.POST,request.FILES)
and also need to pass enctype="multipart/form-data" in html form
<form action="/action_page_binary.asp" method="post" enctype="multipart/form-data">
<label for="fname">First name:</label>
<input type="text" id="fname" name="fname"><br><br>
<label for="lname">Last name:</label>
<input type="text" id="lname" name="lname"><br><br>
<input type="submit" value="Submit">
</form>
|
how to upload image correctly. Django
|
I am building a Django application (run in local) and I am having headaches about uploading files/pictures. I have read tons of questions/answers everywhere as well as followed the official doc, but somehow I still have problems.
In my models.py:
FuncionarioPathFoto = models.FileField(
"Foto",
upload_to = "images/",
db_column= "FuncionarioPathFoto",
null= False,
blank = False
)
In my views (I'm using inline forms, so the code is big):
def create_funcionario(request):
if request.method == "GET":
form = FuncionariosForm
form_funcionarioadicional_factory = inlineformset_factory(Funcionarios, FuncionarioAdicional, form=FuncionarioAdicionalForm, extra=1)
form_funcionarioaux_factory = inlineformset_factory(Funcionarios, FuncionarioAux, form=FuncionarioAuxForm, extra=1)
form_funcionarioarquivo_factory = inlineformset_factory(Funcionarios, FuncionarioArquivo, form=FuncionarioArquivoForm, extra=1)
form_funcionarioadicional = form_funcionarioadicional_factory()
form_funcionarioaux = form_funcionarioaux_factory()
form_funcionarioarquivo = form_funcionarioarquivo_factory()
context = {
'form': form,
'form_funcionarioadicional': form_funcionarioadicional,
'form_funcionarioaux': form_funcionarioaux,
'form_funcionarioarquivo': form_funcionarioarquivo,
}
return render(request, '../templates/funcionarios/form_funcionarios.html', context)
elif request.method == "POST":
form = FuncionariosForm(request.POST)
form_funcionarioadicional_factory = inlineformset_factory(Funcionarios, FuncionarioAdicional, form=FuncionarioAdicionalForm)
form_funcionarioaux_factory = inlineformset_factory(Funcionarios, FuncionarioAux, form=FuncionarioAuxForm)
form_funcionarioarquivo_factory = inlineformset_factory(Funcionarios, FuncionarioArquivo, form=FuncionarioArquivoForm)
form_funcionarioadicional = form_funcionarioadicional_factory(request.POST)
form_funcionarioaux = form_funcionarioaux_factory(request.POST)
form_funcionarioarquivo = form_funcionarioarquivo_factory(request.POST)
if form.is_valid() and form_funcionarioadicional.is_valid() and form_funcionarioaux.is_valid() and form_funcionarioarquivo.is_valid():
funcionario = form.save()
form_funcionarioadicional.instance = funcionario
form_funcionarioaux.instance = funcionario
form_funcionarioarquivo.instance = funcionario
form_funcionarioadicional.save()
form_funcionarioaux.save()
form_funcionarioarquivo.save()
messages.success(request, "Funcionário adicionado com sucesso!")
return redirect(reverse('lista_funcionarios'))
else:
context = {
'form': form,
'form_funcionarioadicional': form_funcionarioadicional,
'form_funcionarioaux': form_funcionarioaux,
'form_funcionarioarquivo': form_funcionarioarquivo,
}
return render(request, '../templates/funcionarios/form_funcionarios.html', context)
I put this in my urls, and settings:
urls:
if settings.DEBUG:
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT,
)
settings:
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
And I already tried to add <form method="POST" enctype="multipart/form-data">
but I was unsuccessful, when submitting my form, that field keeps giving error.
I tried uploading the file via /admin, and it went to the directory correctly with no errors.
what can i try to do to solve it?
|
[
"Let me explain the process of uploading files with Django with my own method.\nIf a file is sent to the server, it is kept in request.FILES as temp. You can see it by saying print(request.FILES) .\nFirst, read the temp data and then load it into the relevant directory with the open function in python.\nFor example\nimg = request.FILES['img'].read()\n\nprint(type(img))\n\nopen('mypath/name.png','wb').write(img) # Note if output not bytes change to open mode 'w'\n\n",
"You need to pass request.FILES in form in views.py\nform = FuncionariosForm(request.POST,request.FILES)\n\nand also need to pass enctype=\"multipart/form-data\" in html form\n<form action=\"/action_page_binary.asp\" method=\"post\" enctype=\"multipart/form-data\">\n <label for=\"fname\">First name:</label>\n <input type=\"text\" id=\"fname\" name=\"fname\"><br><br>\n <label for=\"lname\">Last name:</label>\n <input type=\"text\" id=\"lname\" name=\"lname\"><br><br>\n <input type=\"submit\" value=\"Submit\">\n</form>\n\n"
] |
[
1,
1
] |
[] |
[] |
[
"django",
"django_models",
"django_templates",
"django_views",
"python"
] |
stackoverflow_0074595637_django_django_models_django_templates_django_views_python.txt
|
Q:
how to insert into the list at the next odd index position
I have written the python code below to create a database:
list = ["a","b","c","d","e"]
while(True):
print("1/insert")
print("2/delete")
print("3/quit")
print("4/display")
choice=int(input("enter your choice: "))
if choice==1:
name=input("enter a name:")
list.insert(1,name)
if choice==2:
dltname=input("enter the name you want to delete")
list.remove(dltname)
if choice == 3:
exit()
if choice==4:
print(list)
When I first insert an item, it's inserted in the 1st index position, my goal is when I insert the next item, I want to insert it in the 3rd index positon, the next one in the 5th index positon, means I want to insert each item in the next available odd index position. Can I get help about how to do this? Thank you!
I tried to use n=1 whose value increases by 2 every time 1 is chosen as option, but couldn't do it.
|
how to insert into the list at the next odd index position
|
I have written the python code below to create a database:
list = ["a","b","c","d","e"]
while(True):
print("1/insert")
print("2/delete")
print("3/quit")
print("4/display")
choice=int(input("enter your choice: "))
if choice==1:
name=input("enter a name:")
list.insert(1,name)
if choice==2:
dltname=input("enter the name you want to delete")
list.remove(dltname)
if choice == 3:
exit()
if choice==4:
print(list)
When I first insert an item, it's inserted in the 1st index position, my goal is when I insert the next item, I want to insert it in the 3rd index positon, the next one in the 5th index positon, means I want to insert each item in the next available odd index position. Can I get help about how to do this? Thank you!
I tried to use n=1 whose value increases by 2 every time 1 is chosen as option, but couldn't do it.
|
[] |
[] |
[
"Help on method_descriptor:\n\ninsert(self, index, object, /)\n Insert object before index.\n\ninsert(1, obj) will insert the object before index 1 which will turns to be the 2th position after insert opt.\n"
] |
[
-1
] |
[
"insertion",
"python",
"while_loop"
] |
stackoverflow_0074595663_insertion_python_while_loop.txt
|
Q:
How can I open a new browser tab with subprocess?
I'm opening a new IE window with this:
subprocess.Popen(r'"' + os.environ["PROGRAMFILES"] +
'\Internet Explorer\IEXPLORE.EXE" ' + Call_URL)
This is fine when IE is closed, but even when it's open this spawns a new window. How can I open just a new tab? If possible I'd like to use the standard browser - however I couldn't figure out how to do that either.
Note: I can't use webbrowser and os has no .startfile. I had no luck with os.popen either (using Jython 2.5.3b1).
A:
Since you also wanted a standard browser am giving an example to open a new tab with chrome. If chrome is not open already it will open and then navigate to the URL.
import subprocess
subprocess.Popen("start chrome /new-tab www.google.com",shell = True)
This works. Please try and let me know if this is what you wanted.
Another one without hardcoding the Call_URL
import subprocess
Call_URL = "www.google.com"
mycmd = r'start chrome /new-tab {}'.format(Call_URL)
subprocess.Popen(mycmd,shell = True)
Are you expecting something like this?
A:
Keep It Simple & Smart (updated)... programmatic and use a terminator()!
Here is simple answer to launch, track, and terminate a new Chrome browser instance. It launches a new process for a Chrome instance, launches additional tabs into that new Chrome webbrowser instance, and finally using "terminate()" when finished to close the original browser launched by the subprocess() and its webbrowser child tabs. This works even when there is an existing Chrome browser process running.
The standard path (user below) for Chrome.exe on Windows 10 is (usually): "C:\Program Files\Google\Chrome\Application\chrome.exe"
The code should always open a new Chrome window, even if Chrome is already running. The package "subprocess" is mandatory instead of os.system, or else it will not launch a new chrome window.
Advantages of this programmatic approach:
(1) subprocess() has a process ID, useful to track and close the browser started in the subprocess.
(2) All child tabs started within the subprocess.Popen() will be closed when the parent subprocess is terminated.
N.B. If there is an pre-existing browser instance running, my_chrome_process.terminate() will NOT terminate it; it will terminate only the instance started by the subprocess.Popen() code below. This is the expected behavior.
import subprocess
url1 = r'https://www.python.org'
url2 = r'https://github.com/'
url3 = r'https://stackoverflow.com/questions/22445217/python-webbrowser-open-to-open-chrome-browser'
url4 = r'https://docs.python.org/3.3/library/webbrowser.html'
chrome_path = r'C:\Program Files\Google\Chrome\Application\chrome.exe'
my_chrome_process = subprocess.Popen(chrome_path, shell=False)
print(f'Process ID: {my_chrome_process.pid}') # Uncomment this line if you want to see PID in Console.
import webbrowser
webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(chrome_path))
webbrowser.get('chrome').open_new_tab(url1)
webbrowser.get('chrome').open_new_tab(url2)
webbrowser.get('chrome').open_new_tab(url3)
webbrowser.get('chrome').open_new_tab(url4)
my_chrome_process.terminate()
# If for any reason, my_chrome_process.terminate() does not work, then use the following os.system() code to kill the browser started using subprocess().
# See https://stackoverflow.com/questions/68540790/popen-kill-not-closing-browser-window for more information.
import os
os.system("Taskkill /PID %d /F" % my_chrome_process.pid)
|
How can I open a new browser tab with subprocess?
|
I'm opening a new IE window with this:
subprocess.Popen(r'"' + os.environ["PROGRAMFILES"] +
'\Internet Explorer\IEXPLORE.EXE" ' + Call_URL)
This is fine when IE is closed, but even when it's open this spawns a new window. How can I open just a new tab? If possible I'd like to use the standard browser - however I couldn't figure out how to do that either.
Note: I can't use webbrowser and os has no .startfile. I had no luck with os.popen either (using Jython 2.5.3b1).
|
[
"Since you also wanted a standard browser am giving an example to open a new tab with chrome. If chrome is not open already it will open and then navigate to the URL.\nimport subprocess\nsubprocess.Popen(\"start chrome /new-tab www.google.com\",shell = True)\n\nThis works. Please try and let me know if this is what you wanted.\nAnother one without hardcoding the Call_URL\nimport subprocess\nCall_URL = \"www.google.com\"\nmycmd = r'start chrome /new-tab {}'.format(Call_URL)\nsubprocess.Popen(mycmd,shell = True) \n\nAre you expecting something like this?\n",
"Keep It Simple & Smart (updated)... programmatic and use a terminator()!\nHere is simple answer to launch, track, and terminate a new Chrome browser instance. It launches a new process for a Chrome instance, launches additional tabs into that new Chrome webbrowser instance, and finally using \"terminate()\" when finished to close the original browser launched by the subprocess() and its webbrowser child tabs. This works even when there is an existing Chrome browser process running.\nThe standard path (user below) for Chrome.exe on Windows 10 is (usually): \"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"\nThe code should always open a new Chrome window, even if Chrome is already running. The package \"subprocess\" is mandatory instead of os.system, or else it will not launch a new chrome window.\nAdvantages of this programmatic approach:\n(1) subprocess() has a process ID, useful to track and close the browser started in the subprocess.\n(2) All child tabs started within the subprocess.Popen() will be closed when the parent subprocess is terminated.\nN.B. If there is an pre-existing browser instance running, my_chrome_process.terminate() will NOT terminate it; it will terminate only the instance started by the subprocess.Popen() code below. 
This is the expected behavior.\nimport subprocess\nurl1 = r'https://www.python.org'\nurl2 = r'https://github.com/'\nurl3 = r'https://stackoverflow.com/questions/22445217/python-webbrowser-open-to-open-chrome-browser'\nurl4 = r'https://docs.python.org/3.3/library/webbrowser.html'\n\nchrome_path = r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'\n\nmy_chrome_process = subprocess.Popen(chrome_path, shell=False)\nprint(f'Process ID: {my_chrome_process.pid}') # Uncomment this line if you want to see PID in Console.\n\nimport webbrowser\nwebbrowser.register('chrome', None, webbrowser.BackgroundBrowser(chrome_path))\nwebbrowser.get('chrome').open_new_tab(url1)\nwebbrowser.get('chrome').open_new_tab(url2)\nwebbrowser.get('chrome').open_new_tab(url3)\nwebbrowser.get('chrome').open_new_tab(url4)\n\nmy_chrome_process.terminate()\n\n# If for any reason, my_chrome_process.terminate() does not work, then use the following os.system() code to kill the browser started using subprocess(). \n# See https://stackoverflow.com/questions/68540790/popen-kill-not-closing-browser-window for more information. \n\nimport os\nos.system(\"Taskkill /PID %d /F\" % my_chrome_process.pid)\n\n"
] |
[
4,
0
] |
[] |
[] |
[
"browser",
"python",
"subprocess"
] |
stackoverflow_0035987882_browser_python_subprocess.txt
|
Q:
How to verify installed spaCy version?
I have installed spaCy with python for my NLP project.
I have installed that using pip. How can I verify installed spaCy version?
using
pip install -U spacy
What is command to verify installed spaCy version?
A:
You can also do python -m spacy info. If you're updating an existing installation, you might want to run python -m spacy validate, to check that the models you already have are compatible with the version you just installed.
A:
Use command - python -m spacy info to check spacy version
A:
If you ask yourself: How to find any Python pkg version?
This one should be used/ as well, not only for Spacy ofc:
The easiest (if you installed it using pip):
pip show spacy #pip3 if you installed it using pip3
Or:
python -m spacy --version
Or... just run python (with the version that you installed Spacy on) and use the version method
If you want to know the version of any Python pkg (package) you are working with this would work for you every time!
run:
python
>> import spacy
>> print(spacy.__version__)
Or, Either:
python -m spacy --version
or
python3 -m spacy --version #depends where it is install (python or python3)
A:
If you installed with pip you can try to find it with pip list and get version info with pip show <name>
A:
If you are using python3, you can use your package manager (pip) pip3 list and find spacy's version.
For Python 2.7+ pip list does the job
A:
Simply use !python -m spacy info to get details on Jupyter notebook, remove ! for normal python command check.
Check the above screenshot to see the result details.
Thanks
A:
Another way to get versions of Spacy and the dependencies is to use: pip freeze requirements.txt. See this link for the official documentation for both Mac and Windows OSs.
The main benefit I find with this approach is that you get a list of all dependencies plus the versions. Libraries are often times very picky about the versions. Using this method you can just share the requirements.txt with your collaborators and then they are good to go too :)
Edit:
Thanks to hc_dev for the valuable comment.
A:
Ways to find the spacy version installed:
pip show spacy
python -m spacy info
python -m spacy validate
Below find how the output will be:
pip show spacy
Name: spacy
Version: 3.4.2
Summary: Industrial-strength Natural Language Processing (NLP) in Python
Home-page: https://spacy.io
Author: Explosion
Author-email: contact@explosion.ai
License: MIT
Location: c:\users\shraddha.shetty\appdata\local\programs\python\python310\lib\site-packages
Requires: catalogue, cymem, jinja2, langcodes, murmurhash, numpy, packaging, pathy, preshed, pydantic, requests, setuptools, spacy-legacy, spacy-loggers, srsly, thinc, tqdm, typer, wasabi
Required-by: en-core-web-sm, pyresparser
python -m spacy info
============================== Info about spaCy ==============================
spaCy version 3.4.2
Location C:\Users\shraddha.shetty\AppData\Local\Programs\Python\Python310\lib\site-packages\spacy
Platform Windows-10-10.0.19044-SP0
Python version 3.10.5
Pipelines en_core_web_sm (3.4.1)
C:\Users\shraddha.shetty>python -m spacy validate
✔ Loaded compatibility table
================= Installed pipeline packages (spaCy v3.4.3) =================
ℹ spaCy installation:
C:\Users\shraddha.shetty\AppData\Local\Programs\Python\Python310\lib\site-packages\spacy
NAME SPACY VERSION
en_core_web_sm >=3.4.0,<3.5.0 3.4.1 ✔
|
How to verify installed spaCy version?
|
I have installed spaCy with python for my NLP project.
I have installed that using pip. How can I verify installed spaCy version?
using
pip install -U spacy
What is command to verify installed spaCy version?
|
[
"You can also do python -m spacy info. If you're updating an existing installation, you might want to run python -m spacy validate, to check that the models you already have are compatible with the version you just installed.\n",
"Use command - python -m spacy info to check spacy version \n",
"If you ask yourself: How to find any Python pkg version?\nThis one should be used/ as well, not only for Spacy ofc:\nThe easiest (if you installed it using pip):\npip show spacy #pip3 if you installed it using pip3\n\nOr:\npython -m spacy --version\n\nOr... just run python (with the version that you installed Spacy on) and use the version method\nIf you want to know the version of any Python pkg (package) you are working with this would work for you every time!\nrun:\npython\n>> import spacy\n>> print(spacy.__version__)\n\n\nOr, Either:\npython -m spacy --version\n\nor\npython3 -m spacy --version #depends where it is install (python or python3)\n\n",
"If you installed with pip you can try to find it with pip list and get version info with pip show <name>\n",
"If you are using python3, you can use your package manager (pip) pip3 list and find spacy's version. \nFor Python 2.7+ pip list does the job\n",
"\nSimply use !python -m spacy info to get details on Jupyter notebook, remove ! for normal python command check.\nCheck the above screenshot to see the result details.\nThanks\n",
"Another way to get versions of Spacy and the dependencies is to use: pip freeze requirements.txt. See this link for the official documentation for both Mac and Windows OSs.\nThe main benefit I find with this approach is that you get a list of all dependencies plus the versions. Libraries are often times very picky about the versions. Using this method you can just share the requirements.txt with your collaborators and then they are good to go too :)\nEdit:\nThanks to hc_dev for the valuable comment.\n",
"Ways to find the spacy version installed:\n\npip show spacy\npython -m spacy info\npython -m spacy validate\n\nBelow find how the output will be:\npip show spacy\n\nName: spacy\nVersion: 3.4.2\nSummary: Industrial-strength Natural Language Processing (NLP) in Python\nHome-page: https://spacy.io\nAuthor: Explosion\nAuthor-email: contact@explosion.ai\nLicense: MIT\nLocation: c:\\users\\shraddha.shetty\\appdata\\local\\programs\\python\\python310\\lib\\site-packages\nRequires: catalogue, cymem, jinja2, langcodes, murmurhash, numpy, packaging, pathy, preshed, pydantic, requests, setuptools, spacy-legacy, spacy-loggers, srsly, thinc, tqdm, typer, wasabi\nRequired-by: en-core-web-sm, pyresparser\n\npython -m spacy info\n\n============================== Info about spaCy ==============================\n\nspaCy version 3.4.2\nLocation C:\\Users\\shraddha.shetty\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\spacy\nPlatform Windows-10-10.0.19044-SP0\nPython version 3.10.5\nPipelines en_core_web_sm (3.4.1)\n\nC:\\Users\\shraddha.shetty>python -m spacy validate\n ✔ Loaded compatibility table\n \n ================= Installed pipeline packages (spaCy v3.4.3) =================\n ℹ spaCy installation:\n C:\\Users\\shraddha.shetty\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\spacy\n \n NAME SPACY VERSION\n en_core_web_sm >=3.4.0,<3.5.0 3.4.1 ✔\n\n"
] |
[
45,
9,
8,
3,
2,
1,
0,
0
] |
[] |
[] |
[
"nlp",
"pip",
"python",
"spacy",
"version"
] |
stackoverflow_0047350942_nlp_pip_python_spacy_version.txt
|
Q:
Find an element's text not on screen during execution
I'm learning web scraping with Selenium and to practice I'm trying to get some promotions from this site:
Here is my code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
def get_promotion():
'''
Web scraping process to get Smiles promotion
'''
promotions = []
chrome_options = Options()
chrome_options.add_argument("--disable-notifications")
driver = webdriver.Chrome(options=chrome_options)
driver.implicitly_wait(10)
driver.get('https://www.smiles.com.br/home')
site_promotion = driver.find_elements(By.CLASS_NAME, 'swiper-slide')
for promotion in site_promotion:
promotions.append(
{
'destination': promotion.find_element(By.XPATH, f'./a/div/div/h3').text,
'origin': promotion.find_element(By.XPATH, f'./a/div/div/h4/span[2]').text,
'diamont_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[3]').text,
'normal_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[2]').text,
}
)
return promotions
The problem is that the result didn't bring the information of the last 3 cards, as shown below:
[
{'destination': 'São Paulo (GRU)', 'origin': 'Maceió (MCZ)', 'diamont_value': '17.700', 'normal_value': '19.000'},
{'destination': 'Rio de Janeiro (GIG)', 'origin': 'Recife (REC)', 'diamont_value': '19.500', 'normal_value': '21.000'},
{'destination': 'Brasília (BSB)', 'origin': 'Recife (REC)', 'diamont_value': '17.700', 'normal_value': '19.000'},
{'destination': 'Porto Seguro (BPS)', 'origin': 'Belo Horizonte (CNF)', 'diamont_value': '13.300', 'normal_value': '14.500'},
{'destination': 'Goiânia (GYN)', 'origin': 'Palmas (PMW)', 'diamont_value': '11.500', 'normal_value': '12.500'},
{'destination': '', 'origin': '', 'diamont_value': '', 'normal_value': ''},
{'destination': '', 'origin': '', 'diamont_value': '', 'normal_value': ''},
{'destination': '', 'origin': '', 'diamont_value': '', 'normal_value': ''}
]
What caught my attention was the fact that when the browser opened with selenium it didn't show the last 3 cards:
The problem isn't the presence of the elements. Debugging I could see that the last 3 elements of site_promotion are there.
Is Selenium confused by the fact that the last 3 cards didn't appear on screen? If yes, how could I fix this?
Is there a way to grab this element's text even if they aren't appearing on the screen?
I tried to add options.add_argument("--start-maximized") but it just returned an empty promotions list.
A:
Instead of using .text you should use .get_attribute('textContent')
which would make
promotions.append(
{
'destination': promotion.find_element(By.XPATH, f'./a/div/div/h3').text,
'origin': promotion.find_element(By.XPATH, f'./a/div/div/h4/span[2]').text,
'diamont_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[3]').text,
'normal_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[2]').text,
}
)
into
promotions.append(
{
'destination': promotion.find_element(By.XPATH, f'./a/div/div/h3').get_attribute('textContent'),
'origin': promotion.find_element(By.XPATH, f'./a/div/div/h4/span[2]').get_attribute('textContent'),
'diamont_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[3]').get_attribute('textContent'),
'normal_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[2]').get_attribute('textContent'),
}
)
This Source states that :
Both the methods, getText() and getAttribute(), are used to retrieve data from an HTML element. The getText() method simply returns the visible text present between the start and end tags (which is not hidden by CSS). The getAttribute() method on the other hand identifies and fetches the key-value pairs of attributes within the HTML tags.
It seems that the values are not present as they are hidden off screen, the content may still be in the HTML but the .text may not correctly read it.
I had a similar issue with a site with visually hidden data even though the content was still in the HTML source.
|
Find an element's text not on screen during execution
|
I'm learning web scraping with Selenium and to practice I'm trying to get some promotions from this site:
Here is my code:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
def get_promotion():
'''
Web scraping process to get Smiles promotion
'''
promotions = []
chrome_options = Options()
chrome_options.add_argument("--disable-notifications")
driver = webdriver.Chrome(options=chrome_options)
driver.implicitly_wait(10)
driver.get('https://www.smiles.com.br/home')
site_promotion = driver.find_elements(By.CLASS_NAME, 'swiper-slide')
for promotion in site_promotion:
promotions.append(
{
'destination': promotion.find_element(By.XPATH, f'./a/div/div/h3').text,
'origin': promotion.find_element(By.XPATH, f'./a/div/div/h4/span[2]').text,
'diamont_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[3]').text,
'normal_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[2]').text,
}
)
return promotions
The problem is that the result didn't bring the information of the last 3 cards, as shown below:
[
{'destination': 'São Paulo (GRU)', 'origin': 'Maceió (MCZ)', 'diamont_value': '17.700', 'normal_value': '19.000'},
{'destination': 'Rio de Janeiro (GIG)', 'origin': 'Recife (REC)', 'diamont_value': '19.500', 'normal_value': '21.000'},
{'destination': 'Brasília (BSB)', 'origin': 'Recife (REC)', 'diamont_value': '17.700', 'normal_value': '19.000'},
{'destination': 'Porto Seguro (BPS)', 'origin': 'Belo Horizonte (CNF)', 'diamont_value': '13.300', 'normal_value': '14.500'},
{'destination': 'Goiânia (GYN)', 'origin': 'Palmas (PMW)', 'diamont_value': '11.500', 'normal_value': '12.500'},
{'destination': '', 'origin': '', 'diamont_value': '', 'normal_value': ''},
{'destination': '', 'origin': '', 'diamont_value': '', 'normal_value': ''},
{'destination': '', 'origin': '', 'diamont_value': '', 'normal_value': ''}
]
What caught my attention was the fact that when the browser opened with selenium it didn't show the last 3 cards:
The problem isn't the presence of the elements. Debugging I could see that the last 3 elements of site_promotion are there.
Is Selenium confused by the fact that the last 3 cards didn't appear on screen? If yes, how could I fix this?
Is there a way to grab this element's text even if they aren't appearing on the screen?
I tried to add options.add_argument("--start-maximized") but it just returned an empty promotions list.
|
[
"Instead of using .text you should use .get_attribute('textContent')\nwhich would make\npromotions.append(\n { \n 'destination': promotion.find_element(By.XPATH, f'./a/div/div/h3').text,\n 'origin': promotion.find_element(By.XPATH, f'./a/div/div/h4/span[2]').text,\n 'diamont_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[3]').text,\n 'normal_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[2]').text,\n }\n)\n\ninto\npromotions.append(\n { \n 'destination': promotion.find_element(By.XPATH, f'./a/div/div/h3').get_attribute('textContent'),\n 'origin': promotion.find_element(By.XPATH, f'./a/div/div/h4/span[2]').get_attribute('textContent'),\n 'diamont_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[3]').get_attribute('textContent'),\n 'normal_value': promotion.find_element(By.XPATH, f'./a/div/div/div/span[2]/p[2]').get_attribute('textContent'),\n }\n)\n\nThis Source states that :\n\nBoth the methods, getText() and getAttribute(), are used to retrieve data from an HTML element. The getText() method simply returns the visible text present between the start and end tags (which is not hidden by CSS). The getAttribute() method on the other hand identifies and fetches the key-value pairs of attributes within the HTML tags.\n\nIt seems that the values are not present as they are hidden off screen, the content may still be in the HTML but the .text may not correctly read it.\nI had a similar issue with a site with visually hidden data even though the content was still in the HTML source.\n"
] |
[
0
] |
[] |
[] |
[
"python",
"selenium",
"selenium_chromedriver",
"selenium_webdriver",
"web_scraping"
] |
stackoverflow_0072842706_python_selenium_selenium_chromedriver_selenium_webdriver_web_scraping.txt
|
Q:
Nested for loop with 2 variables. Ouput to be appended in dataframe column
check_df has two column one with code and other is blank
in_df has 2 column one is merged column and other is V_ORG_UNIT_NAME_LEVEL14.
I want to check each code of "V_ORG_UNIT_CODE" from check_df inside "merged column from in_df.
If it matches(it may contain that value may not be exact match) i want corresponding "OutputDisplay" in check_df empty column "V_ORG_UNIT_CODE"
check_df
V_ORG_UNIT_CODE
V_ORG_UNIT_NAME_LEVEL14
abc
def
gth
in_df
OutputDisplay
MergedColumn
123
dasabcraf
456
asfgfdg
567
as0def!gfhg
Expected Output
check_df
V_ORG_UNIT_CODE
V_ORG_UNIT_NAME_LEVEL14
abc
123
def
567
gth
NA
for x in check_df["V_ORG_UNIT_CODE"]:
for y,z in zip(in_df["MergedColumn"],in_df["OutputDisplay"]):
if (y.__contains__(x)):
print(z)
check_df['V_ORG_UNIT_NAME_LEVEL14']=check_df['V_ORG_UNIT_NAME_LEVEL14'].append(z)
My print(z) is correct output but I am getting error when i am appending it in a dataframe column
TypeError Traceback (most recent call last)
<ipython-input-6-e4f45d7306ae> in <module>
3 for x in check_df["V_ORG_UNIT_CODE"]:
4 for y,z in zip(in_df["MergedColumn"],in_df["OutputDisplay"]):
----> 5 if (y.__contains__(x)):
6 # print(z)
7 # check_df['V_ORG_UNIT_NAME_LEVEL14']=check_df['V_ORG_UNIT_NAME_LEVEL14'].append(z)
TypeError: 'in <string>' requires string as left operand, not int
A:
check ,5 line "y value" type, that must be string type
A:
try the DataFrame class built-in function .insert
https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.insert.html
A:
If I've instantiated your dataframe correctly (check below), the following seems to deliver the outcome you're after:
import pandas as pd
check_df = pd.DataFrame()
in_df = pd.DataFrame()
First we create check_df:
check_df['V_ORG_UNIT_CODE'] = ['abc', 'def', 'gth']
check_df['V_ORG_UNIT_NAME_LEVEL14'] = [None, None, None]
check_df looks like this:
V_ORG_UNIT_CODE V_ORG_UNIT_NAME_LEVEL14
0 abc None
1 def None
2 gth None
Then we create in_df:
in_df['OutputDisplay'] = [123, 456, 567]
in_df['MergedColumn'] = ['dasabcraf', 'asfgfdg', 'as0def!gfhg']
in_df looks like this:
OutputDisplay MergedColumn
0 123 dasabcraf
1 456 asfgfdg
2 567 as0def!gfhg
I've then kept your code essentially unchanged, except I use enumerate to get both every item in the first column of check_df and also its index as i:
for i, x in enumerate(check_df["V_ORG_UNIT_CODE"]):
for y, z in zip(in_df["MergedColumn"], in_df["OutputDisplay"]):
if x in y:
check_df['V_ORG_UNIT_NAME_LEVEL14'][i]=z
print (check_df)
Which produces this result:
V_ORG_UNIT_CODE V_ORG_UNIT_NAME_LEVEL14
0 abc 123
1 def 567
2 gth None
Is that what you were after?
|
Nested for loop with 2 variables. Ouput to be appended in dataframe column
|
check_df has two column one with code and other is blank
in_df has 2 column one is merged column and other is V_ORG_UNIT_NAME_LEVEL14.
I want to check each code of "V_ORG_UNIT_CODE" from check_df inside "merged column from in_df.
If it matches(it may contain that value may not be exact match) i want corresponding "OutputDisplay" in check_df empty column "V_ORG_UNIT_CODE"
check_df
V_ORG_UNIT_CODE
V_ORG_UNIT_NAME_LEVEL14
abc
def
gth
in_df
OutputDisplay
MergedColumn
123
dasabcraf
456
asfgfdg
567
as0def!gfhg
Expected Output
check_df
V_ORG_UNIT_CODE
V_ORG_UNIT_NAME_LEVEL14
abc
123
def
567
gth
NA
for x in check_df["V_ORG_UNIT_CODE"]:
for y,z in zip(in_df["MergedColumn"],in_df["OutputDisplay"]):
if (y.__contains__(x)):
print(z)
check_df['V_ORG_UNIT_NAME_LEVEL14']=check_df['V_ORG_UNIT_NAME_LEVEL14'].append(z)
My print(z) is correct output but I am getting error when i am appending it in a dataframe column
TypeError Traceback (most recent call last)
<ipython-input-6-e4f45d7306ae> in <module>
3 for x in check_df["V_ORG_UNIT_CODE"]:
4 for y,z in zip(in_df["MergedColumn"],in_df["OutputDisplay"]):
----> 5 if (y.__contains__(x)):
6 # print(z)
7 # check_df['V_ORG_UNIT_NAME_LEVEL14']=check_df['V_ORG_UNIT_NAME_LEVEL14'].append(z)
TypeError: 'in <string>' requires string as left operand, not int
|
[
"check ,5 line \"y value\" type, that must be string type\n",
"try the DataFrame class built-in function .insert\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.insert.html\n",
"If I've instantiated your dataframe correctly (check below), the following seems to deliver the outcome you're after:\nimport pandas as pd\n\ncheck_df = pd.DataFrame()\nin_df = pd.DataFrame()\n\nFirst we create check_df:\ncheck_df['V_ORG_UNIT_CODE'] = ['abc', 'def', 'gth']\ncheck_df['V_ORG_UNIT_NAME_LEVEL14'] = [None, None, None]\n\ncheck_df looks like this:\n V_ORG_UNIT_CODE V_ORG_UNIT_NAME_LEVEL14\n0 abc None\n1 def None\n2 gth None\n\nThen we create in_df:\nin_df['OutputDisplay'] = [123, 456, 567]\nin_df['MergedColumn'] = ['dasabcraf', 'asfgfdg', 'as0def!gfhg']\n\nin_df looks like this:\n OutputDisplay MergedColumn\n0 123 dasabcraf\n1 456 asfgfdg\n2 567 as0def!gfhg\n\nI've then kept your code essentially unchanged, except I use enumerate to get both every item in the first column of check_df and also its index as i:\nfor i, x in enumerate(check_df[\"V_ORG_UNIT_CODE\"]): \n for y, z in zip(in_df[\"MergedColumn\"], in_df[\"OutputDisplay\"]):\n if x in y:\n check_df['V_ORG_UNIT_NAME_LEVEL14'][i]=z\n \nprint (check_df)\n\nWhich produces this result:\n V_ORG_UNIT_CODE V_ORG_UNIT_NAME_LEVEL14\n0 abc 123\n1 def 567\n2 gth None\n\nIs that what you were after?\n"
] |
[
0,
0,
0
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074595796_pandas_python.txt
|
Q:
Display option value and text to select
I have a select field populated from the database table 'Grade'. It displays Grade objects instead of 'Grade 1', 'Grade 2', 'Grade 3' etc. How can I populate the select to display the texts.
My codes:
models.py
class Grade(models.Model):
grade_id = models.AutoField(primary_key=True)
grade_name = models.CharField(max_length=10, default="")
class Meta:
db_table = 'grade'
class Student(models.Model):
student_id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=50, default="")
last_name = models.CharField(max_length=50, default="")
grade = models.ForeignKey(Grade, on_delete=models.CASCADE)
class Meta:
db_table = 'Student'
forms.py
class CreateStudentForm(forms.ModelForm):
class Meta:
model = Student
fields = ['grade', 'first_name', 'last_name' ]
widgets = {
'grade': forms.Select(choices=Grade.objects.all(), attrs={'id':'selectGrade', 'class': 'form-control'}),
'first_name': forms.TextInput(attrs={'id':'txtFirstName', 'class': 'form-control', 'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'id':'txtLastName', 'class': 'form-control', 'placeholder': 'Last Name'}),
}
views.py
def student_action(request):
form = CreateStudentForm()
return render(request, 'student.html', {'form': form})
A:
You should define the __str__() method in the model so:
class Grade(models.Model):
grade_id = models.AutoField(primary_key=True)
grade_name = models.CharField(max_length=10, default="")
def __str__(self):
return f"{self.grade_name}"
class Meta:
db_table = 'grade'
How to display Select Grade instead of - - - - - -?
You can override the __init__() method of ModelForm so:
class CreateStudentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['grade'].empty_label = 'Select Grade'
class Meta:
model = Student
fields = ['grade', 'first_name', 'last_name' ]
widgets = {
'grade': forms.Select(choices=Grade.objects.all(), attrs={'id':'selectGrade', 'class': 'form-control'}),
'first_name': forms.TextInput(attrs={'id':'txtFirstName', 'class': 'form-control', 'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'id':'txtLastName', 'class': 'form-control', 'placeholder': 'Last Name'}),
}
A:
You need to write str method for represent grade_name instead of Object name like this ...
models.py
class Grade(models.Model):
grade_id = models.AutoField(primary_key=True)
grade_name = models.CharField(max_length=10, default="")
class Meta:
db_table = 'grade'
def __str__(self):
return self.grade_name
|
Display option value and text to select
|
I have a select field populated from the database table 'Grade'. It displays Grade objects instead of 'Grade 1', 'Grade 2', 'Grade 3' etc. How can I populate the select to display the texts.
My codes:
models.py
class Grade(models.Model):
grade_id = models.AutoField(primary_key=True)
grade_name = models.CharField(max_length=10, default="")
class Meta:
db_table = 'grade'
class Student(models.Model):
student_id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=50, default="")
last_name = models.CharField(max_length=50, default="")
grade = models.ForeignKey(Grade, on_delete=models.CASCADE)
class Meta:
db_table = 'Student'
forms.py
class CreateStudentForm(forms.ModelForm):
class Meta:
model = Student
fields = ['grade', 'first_name', 'last_name' ]
widgets = {
'grade': forms.Select(choices=Grade.objects.all(), attrs={'id':'selectGrade', 'class': 'form-control'}),
'first_name': forms.TextInput(attrs={'id':'txtFirstName', 'class': 'form-control', 'placeholder': 'First Name'}),
'last_name': forms.TextInput(attrs={'id':'txtLastName', 'class': 'form-control', 'placeholder': 'Last Name'}),
}
views.py
def student_action(request):
form = CreateStudentForm()
return render(request, 'student.html', {'form': form})
|
[
"You should define the __str__() method in the model so:\nclass Grade(models.Model):\n grade_id = models.AutoField(primary_key=True)\n grade_name = models.CharField(max_length=10, default=\"\")\n\n def __str__(self):\n return f\"{self.grade_name}\"\n\n class Meta:\n db_table = 'grade'\n\n\nHow to display Select Grade instead of - - - - - -?\n\nYou can override the __init__() method of ModelForm so:\nclass CreateStudentForm(forms.ModelForm):\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['grade'].empty_label = 'Select Grade'\n \n class Meta:\n model = Student\n fields = ['grade', 'first_name', 'last_name' ]\n widgets = {\n 'grade': forms.Select(choices=Grade.objects.all(), attrs={'id':'selectGrade', 'class': 'form-control'}),\n 'first_name': forms.TextInput(attrs={'id':'txtFirstName', 'class': 'form-control', 'placeholder': 'First Name'}),\n 'last_name': forms.TextInput(attrs={'id':'txtLastName', 'class': 'form-control', 'placeholder': 'Last Name'}),\n } \n\n",
"You need to write str method for represent grade_name instead of Object name like this ...\nmodels.py\nclass Grade(models.Model):\n grade_id = models.AutoField(primary_key=True)\n grade_name = models.CharField(max_length=10, default=\"\")\n class Meta:\n db_table = 'grade'\n \n def __str__(self):\n return self.grade_name\n\n"
] |
[
3,
2
] |
[] |
[] |
[
"django",
"django_forms",
"django_queryset",
"django_views",
"python"
] |
stackoverflow_0074595889_django_django_forms_django_queryset_django_views_python.txt
|
Q:
spaCy 3.4 Sentence segmenter permutations performing poorly on phrases without punctuation
I am attempting to use two of the four alternatives from spaCy for sentence segmentation, and all of them seem to perform equally bad on phrases without punctuation. I am trying to utilize a solution such as these on spans of text that are blended and not diarized (speaker diarization). My goal is to identify sentence boundaries and I thought the linguistic parsing functionality might work well to split the phrases into individual sentence elements.
python version and spacy version with language models:
============================== Info about spaCy ==============================
spaCy version 3.4.3
Location /opt/homebrew/lib/python3.10/site-packages/spacy
Platform macOS-12.6-arm64-arm-64bit
Python version 3.10.8
Pipelines en_core_web_sm (3.4.1), en_core_web_trf (3.4.1)
I tried the following methods (after uninstalling and reinstalling both the spaCy updates and also the appropriate language model updates:
Dependency parser - According to the docs (https://spacy.io/usage/linguistic-features#sbd), this should work well if texts are closer to general purpose news or web text. Here is my example:
nlp = spacy.load("en_core_web_sm")
doc = nlp("perfect how are you doing i'm ok good to hear that can you explain me a little bit more about the situation that you send me by email")
for sent in doc.sents:
print(sent.text)
print(token.text for token in doc)
returns:
perfect how are you doing i'm ok good to hear that can you explain me a little bit more about the situation that you send me by email
In using the spaCy statistical segmenter, I have the same results. According to the docs, this statistical model ONLY provides the sentence boundaries (which is fine with me). See below:
nlp = spacy.load("en_core_web_sm", exclude=["parser"])
nlp.enable_pipe("senter")
doc = nlp("perfect how are you doing i'm ok good to hear that can you explain me a little bit more about the situation that you send me by email")
for sent in doc.sents:
print(sent.text)
returns the same result, essentially no sentence boundaries
The documentation states that these models require a trained pipeline to provide accurate predictions. I am using the https://spacy.io/models/en (https://spacy.io/models/en). Is there something that I might be missing or not using correctly? I would have expected that the syntax parsing (NP, VP, etc, etc) would assist in defining the boundaries) would have been able to at least identify one sentence boundary. But without punctuation, I am getting the same span of text that I use as input.
Also, I tried using a different language model (en_core_web_trf (3.4.1)) but I am having issues with the environment not recognizing the installation is correct). Separate issue.
A:
I am trying to utilize a solution such as these on spans of text that are blended and not diarized (speaker diarization).
The issue is simply that the spaCy models are not trained for that task and won't do well. They're trained mostly on text from books or articles that reliably has punctuation.
What you can do is train the SentenceRecognizer with your own data. It might be that the task is just hard and performance still won't be great, but it should do better than the default models.
A:
You could semiautomatically infer the sentence boundaries, then capitalize them. That wouldn't be too hard.
Almost all your sentences start with personal pronouns (I/You), or else the auxiliary verb Can preceding the personal pronoun.
Perfect How are you doing I'm ok good to hear that Can you explain me a little bit more about the situation that you send me by email
|
spaCy 3.4 Sentence segmenter permutations performing poorly on phrases without punctuation
|
I am attempting to use two of the four alternatives from spaCy for sentence segmentation, and all of them seem to perform equally bad on phrases without punctuation. I am trying to utilize a solution such as these on spans of text that are blended and not diarized (speaker diarization). My goal is to identify sentence boundaries and I thought the linguistic parsing functionality might work well to split the phrases into individual sentence elements.
python version and spacy version with language models:
============================== Info about spaCy ==============================
spaCy version 3.4.3
Location /opt/homebrew/lib/python3.10/site-packages/spacy
Platform macOS-12.6-arm64-arm-64bit
Python version 3.10.8
Pipelines en_core_web_sm (3.4.1), en_core_web_trf (3.4.1)
I tried the following methods (after uninstalling and reinstalling both the spaCy updates and also the appropriate language model updates:
Dependency parser - According to the docs (https://spacy.io/usage/linguistic-features#sbd), this should work well if texts are closer to general purpose news or web text. Here is my example:
nlp = spacy.load("en_core_web_sm")
doc = nlp("perfect how are you doing i'm ok good to hear that can you explain me a little bit more about the situation that you send me by email")
for sent in doc.sents:
print(sent.text)
print(token.text for token in doc)
returns:
perfect how are you doing i'm ok good to hear that can you explain me a little bit more about the situation that you send me by email
In using the spaCy statistical segmenter, I have the same results. According to the docs, this statistical model ONLY provides the sentence boundaries (which is fine with me). See below:
nlp = spacy.load("en_core_web_sm", exclude=["parser"])
nlp.enable_pipe("senter")
doc = nlp("perfect how are you doing i'm ok good to hear that can you explain me a little bit more about the situation that you send me by email")
for sent in doc.sents:
print(sent.text)
returns the same result, essentially no sentence boundaries
The documentation states that these models require a trained pipeline to provide accurate predictions. I am using the https://spacy.io/models/en (https://spacy.io/models/en). Is there something that I might be missing or not using correctly? I would have expected that the syntax parsing (NP, VP, etc, etc) would assist in defining the boundaries) would have been able to at least identify one sentence boundary. But without punctuation, I am getting the same span of text that I use as input.
Also, I tried using a different language model (en_core_web_trf (3.4.1)) but I am having issues with the environment not recognizing the installation is correct). Separate issue.
|
[
"\nI am trying to utilize a solution such as these on spans of text that are blended and not diarized (speaker diarization).\n\nThe issue is simply that the spaCy models are not trained for that task and won't do well. They're trained mostly on text from books or articles that reliably has punctuation.\nWhat you can do is train the SentenceRecognizer with your own data. It might be that the task is just hard and performance still won't be great, but it should do better than the default models.\n",
"You could semiautomatically infer the sentence boundaries, then capitalize them. That wouldn't be too hard.\nAlmost all your sentences start with personal pronouns (I/You), or else the auxiliary verb Can preceding the personal pronoun.\n\nPerfect How are you doing I'm ok good to hear that Can you explain me a little bit more about the situation that you send me by email\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"nlp",
"python",
"spacy"
] |
stackoverflow_0074591575_nlp_python_spacy.txt
|
Q:
How to make a request to get a picture from an ipcam?
I have some troubles getting the picture on my ip camera on python. I have an axis camera, I almost do the work on the rtsp link and cv2 video capture but when the hours go by I got an h264 error (here I asked for that problem).
So I decided to use a get request to get the picture, but now I got 401, error. Here is my code:
import requests
from requests.auth import HTTPBasicAuth
r = requests.get("http://xxx.xxx.xxx.xxx/jpg/image.jpg", auth=HTTPBasicAuth('xxx', 'xxx'))
print(r.status_code)
I also tried with out the HTTPBasicAuth but the same, I don't know how to get a good auth here.
Any help?
A:
There is nothing wrong with your code. I have done the same code and works fine on my side. I would suggest you to verify the credentials that you have provided as a 401 response code is received when you provide wrong password or username.
Additionally, don't forget to pass the stream=True parameter inside the requests.get parameter otherwise the process will never successfully return anything even if the credentials actually work.
import requests
from requests.auth import HTTPBasicAuth
r = requests.get("http://xxx.xxx.xxx.xxx/jpg/image.jpg", auth=HTTPBasicAuth('xxx', 'xxx'), stream=True)
for streamDataChunks in r:
process_raw_image_data(streamDataChunks)
|
How to make a request to get a picture from an ipcam?
|
I have some troubles getting the picture on my ip camera on python. I have an axis camera, I almost do the work on the rtsp link and cv2 video capture but when the hours go by I got an h264 error (here I asked for that problem).
So I decided to use a get request to get the picture, but now I got 401, error. Here is my code:
import requests
from requests.auth import HTTPBasicAuth
r = requests.get("http://xxx.xxx.xxx.xxx/jpg/image.jpg", auth=HTTPBasicAuth('xxx', 'xxx'))
print(r.status_code)
I also tried with out the HTTPBasicAuth but the same, I don't know how to get a good auth here.
Any help?
|
[
"There is nothing wrong with your code. I have done the same code and works fine on my side. I would suggest you to verify the credentials that you have provided as a 401 response code is received when you provide wrong password or username.\nAdditionally, don't forget to pass the stream=True parameter inside the requests.get parameter otherwise the process will never successfully return anything even if the credentials actually work.\n import requests\n from requests.auth import HTTPBasicAuth\n r = requests.get(\"http://xxx.xxx.xxx.xxx/jpg/image.jpg\", auth=HTTPBasicAuth('xxx', 'xxx'), stream=True)\n \n for streamDataChunks in r:\n process_raw_image_data(streamDataChunks)\n\n"
] |
[
0
] |
[] |
[] |
[
"python",
"request"
] |
stackoverflow_0068714335_python_request.txt
|
Q:
Use Python to launch and track Chrome browser (on Windows), open new tabs, then close everything when done
I needed to launch Chrome programmatically, then open some more tabs, then close them all when I was done, even if an existing Chrome browser was already open. I could find partial answers, but nothing simple that worked with already running browsers.
I needed something following the KISS principle (Keep It Simple & Smart), simple code with a terminator!
A:
Here is a simple answer that will launch, track, and terminate a new Chrome browser instance, but with child tabs too.
It launches a new process for a Chrome instance, launches additional tabs into that new Chrome webbrowser instance, and finally using "terminate()" when finished to close the original browser launched by the subprocess() and its webbrowser child tabs. This works even when there is an existing Chrome browser process running.
The standard path (user below) for Chrome.exe on Windows 10 is (usually): "C:\Program Files\Google\Chrome\Application\chrome.exe"
The code should always open a new Chrome window, even if Chrome is already running. The package "subprocess" is mandatory instead of os.system, or else it will not launch a new chrome window.
Advantages of this programmatic approach:
(1) subprocess() has a process ID, useful to track and close the browser started in the subprocess.
(2) All child tabs started within the subprocess.Popen() will be closed when the parent subprocess is terminated.
N.B. If there is an pre-existing browser instance running, my_chrome_process.terminate() will NOT terminate it; it will terminate only the instance started by the subprocess.Popen() code below. This is the expected behavior.
import subprocess
url1 = r'https://www.python.org'
url2 = r'https://github.com/'
url3 = r'https://stackoverflow.com/questions/22445217/python-webbrowser-open-to-open-chrome-browser'
url4 = r'https://docs.python.org/3.3/library/webbrowser.html'
chrome_path = r'C:\Program Files\Google\Chrome\Application\chrome.exe'
my_chrome_process = subprocess.Popen(chrome_path, shell=False)
print(f'Process ID: {my_chrome_process.pid}') # Uncomment this line if you want to see PID in Console.
import webbrowser
webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(chrome_path))
webbrowser.get('chrome').open_new_tab(url1)
webbrowser.get('chrome').open_new_tab(url2)
webbrowser.get('chrome').open_new_tab(url3)
webbrowser.get('chrome').open_new_tab(url4)
my_chrome_process.terminate()
If for any reason, my_chrome_process.terminate() does not work, then use the following os.system() code to kill the browser started using subprocess().
See popen.kill not closing browser window for more information.
import os
os.system("Taskkill /PID %d /F" % my_chrome_process.pid)
|
Use Python to launch and track Chrome browser (on Windows), open new tabs, then close everything when done
|
I needed to launch Chrome programmatically, then open some more tabs, then close them all when I was done, even if an existing Chrome browser was already open. I could find partial answers, but nothing simple that worked with already running browsers.
I needed something following the KISS principle (Keep It Simple & Smart), simple code with a terminator!
|
[
"Here is a simple answer that will launch, track, and terminate a new Chrome browser instance, but with child tabs too.\nIt launches a new process for a Chrome instance, launches additional tabs into that new Chrome webbrowser instance, and finally using \"terminate()\" when finished to close the original browser launched by the subprocess() and its webbrowser child tabs. This works even when there is an existing Chrome browser process running.\nThe standard path (user below) for Chrome.exe on Windows 10 is (usually): \"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"\nThe code should always open a new Chrome window, even if Chrome is already running. The package \"subprocess\" is mandatory instead of os.system, or else it will not launch a new chrome window.\nAdvantages of this programmatic approach:\n(1) subprocess() has a process ID, useful to track and close the browser started in the subprocess.\n(2) All child tabs started within the subprocess.Popen() will be closed when the parent subprocess is terminated.\nN.B. If there is an pre-existing browser instance running, my_chrome_process.terminate() will NOT terminate it; it will terminate only the instance started by the subprocess.Popen() code below. 
This is the expected behavior.\nimport subprocess\nurl1 = r'https://www.python.org'\nurl2 = r'https://github.com/'\nurl3 = r'https://stackoverflow.com/questions/22445217/python-webbrowser-open-to-open-chrome-browser'\nurl4 = r'https://docs.python.org/3.3/library/webbrowser.html'\n\nchrome_path = r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'\n\nmy_chrome_process = subprocess.Popen(chrome_path, shell=False)\nprint(f'Process ID: {my_chrome_process.pid}') # Uncomment this line if you want to see PID in Console.\n\nimport webbrowser\nwebbrowser.register('chrome', None, webbrowser.BackgroundBrowser(chrome_path))\nwebbrowser.get('chrome').open_new_tab(url1)\nwebbrowser.get('chrome').open_new_tab(url2)\nwebbrowser.get('chrome').open_new_tab(url3)\nwebbrowser.get('chrome').open_new_tab(url4)\n\nmy_chrome_process.terminate()\n\nIf for any reason, my_chrome_process.terminate() does not work, then use the following os.system() code to kill the browser started using subprocess().\nSee popen.kill not closing browser window for more information.\nimport os\nos.system(\"Taskkill /PID %d /F\" % my_chrome_process.pid)\n\n"
] |
[
0
] |
[] |
[] |
[
"google_chrome",
"python",
"python_webbrowser",
"subprocess"
] |
stackoverflow_0074596006_google_chrome_python_python_webbrowser_subprocess.txt
|
Q:
Sorting pandas groupby output
I have a dataframe that looks like
name performance year
bob 50 2002
bob 90 2005
bob 82 2010
joey 50 2015
joey 85 2013
joey 37 1990
sarah 90 1994
sarah 95 2020
sarah 35 2013
I would like groupby name and compute average performance while only displaying the top two results in descending order by performance.
I am currently doing df.groupby(['name']).mean() but this computes the averages of both performance as well as year while displaying all 3 names in alphabetical order (I would only like to display the top 2 in descending order by performance avg).
A:
here is my solution, basically was missing one field in the group by method.
Code:
import pandas as pd
# defining columns
cols = ['name', 'performance', 'year']
# defining data
data = [
['bob', 50, 2002]
, ['bob', 90, 2005]
, ['bob', 82, 2010]
, ['joey', 50, 2015]
, ['joey', 85, 2013]
, ['joey', 37, 1990]
, ['sarah', 90, 1994]
, ['sarah', 95, 2020]
, ['sarah', 35, 2013]
]
# create dataframe
df = pd.DataFrame(data, columns=cols)
# dataframe, grouped by name and year, aggregated by mean() of performance, first 2 values in descending order
df = df.groupby(['name', 'year'])['performance'].mean().sort_values(ascending=False).head(2)
# resetting index to display performance column name
df = df.reset_index()
# print dataframe
print(df)
Output:
name year performance
0 sarah 2020 95.0
1 bob 2005 90.0
|
Sorting pandas groupby output
|
I have a dataframe that looks like
name performance year
bob 50 2002
bob 90 2005
bob 82 2010
joey 50 2015
joey 85 2013
joey 37 1990
sarah 90 1994
sarah 95 2020
sarah 35 2013
I would like groupby name and compute average performance while only displaying the top two results in descending order by performance.
I am currently doing df.groupby(['name']).mean() but this computes the averages of both performance as well as year while displaying all 3 names in alphabetical order (I would only like to display the top 2 in descending order by performance avg).
|
[
"here is my solution, basically was missing one field in the group by method.\nCode:\nimport pandas as pd\n\n# defining columns\ncols = ['name', 'performance', 'year']\n\n# defining data\ndata = [\n ['bob', 50, 2002]\n, ['bob', 90, 2005]\n, ['bob', 82, 2010]\n, ['joey', 50, 2015]\n, ['joey', 85, 2013]\n, ['joey', 37, 1990]\n, ['sarah', 90, 1994]\n, ['sarah', 95, 2020]\n, ['sarah', 35, 2013]\n]\n\n# create dataframe\ndf = pd.DataFrame(data, columns=cols)\n\n# dataframe, grouped by name and year, aggregated by mean() of performance, first 2 values in descending order\ndf = df.groupby(['name', 'year'])['performance'].mean().sort_values(ascending=False).head(2)\n\n# resetting index to display performance column name\ndf = df.reset_index()\n\n# print dataframe\nprint(df)\n\nOutput:\n name year performance\n0 sarah 2020 95.0\n1 bob 2005 90.0\n\n"
] |
[
2
] |
[] |
[] |
[
"group_by",
"pandas",
"python",
"sorting"
] |
stackoverflow_0074595611_group_by_pandas_python_sorting.txt
|
Q:
Python: "zsh: segmentation fault" error causes
I was just playing around with a python library called 'Pyautogui.' Everything was going fun and cool until upon one run, I hit a zsh: segmentation fault. Pyautogui has stopped working on my local machine since. Any code using the Pyautogui library crashes with the same error.
Not a big practical issue as I was just experimenting personally, but I am curious as to the cause of the issue as to why code that ran fine a few times, all of a sudden pops what seems to be a memory related error and then crashes an entire library!
Code I ran about 5 times before it crashed is beyond simple, and no issues with Pip or anything. If any experts on Python compilation and/or C have any ideas, would greatly appreciate your input!
import pyautogui as auto
from time import sleep
def move_da_mouse():
while True:
# this just moves the mouse 100px, 200px on the x and y axis of the screen respectively
auto.moveTo(100, 200)
sleep(30)
if __name__ == "__main__":
move_da_mouse()
A:
This is happening because the python library you are using, 'Pyautogui', is attempting to access memory beyond its reach.
|
Python: "zsh: segmentation fault" error causes
|
I was just playing around with a python library called 'Pyautogui.' Everything was going fun and cool until upon one run, I hit a zsh: segmentation fault. Pyautogui has stopped working on my local machine since. Any code using the Pyautogui library crashes with the same error.
Not a big practical issue as I was just experimenting personally, but I am curious as to the cause of the issue as to why code that ran fine a few times, all of a sudden pops what seems to be a memory related error and then crashes an entire library!
Code I ran about 5 times before it crashed is beyond simple, and no issues with Pip or anything. If any experts on Python compilation and/or C have any ideas, would greatly appreciate your input!
import pyautogui as auto
from time import sleep
def move_da_mouse():
while True:
# this just moves the mouse 100px, 200px on the x and y axis of the screen respectively
auto.moveTo(100, 200)
sleep(30)
if __name__ == "__main__":
move_da_mouse()
|
[
"This is happening because the python library you are using, 'Pyautogui', is attempting to access a memory beyond its reach.\n"
] |
[
0
] |
[] |
[] |
[
"python",
"python_3.x"
] |
stackoverflow_0074595898_python_python_3.x.txt
|
Q:
PyLTSpice "LTSpice_Batch" function not working
I'm using VSCode with Python 3.11.0 in a virtual environment. I have installed the aforementioned toolchain to configure and launch LTSpice models.
However, it seems not to work and I cannot find why (I'm quite new to python programming). I attach here the actual code and the traceback. Notice that the main code is taken from the example given in its webpage.
The code:
import os
from PyLTSpice.LTSpiceBatch import SimCommander
# get script absolute path
meAbsPath = os.path.dirname(os.path.realpath("Draft1.asc"))
meAbsPath = meAbsPath + '/Draft1.asc'
print(str(meAbsPath))
# select spice model
LTC = SimCommander(meAbsPath)
# set default arguments
# LTC.set_parameters(res=0, cap=100e-6)
LTC.set_component_value('R1', '2k')
LTC.set_component_value('L1', '1u')
# LTC.set_element_model('V3', "SINE(0 1 3k 0 0 0)")
# define simulation
LTC.add_instructions(
"; Simulation settings",
".param run = 0"
)
LTC.reset_netlist()
LTC.add_instructions(
"; Simulation settings",
".tran 0 10u 0 1n",
)
LTC.run()
LTC.wait_completion()
# Sim Statistics
print('Successful/Total Simulations: ' + str(LTC.okSim) + '/' + str(LTC.runno))
The traceback log:
(.venv) PS C:\Users\ciko9\Documents\VSCode_Projects\.venv> c:; cd 'c:\Users\ciko9\Documents\VSCode_Projects\.venv'; & 'c:\Users\ciko9\Documents\VSCode_Projects\.venv\Scripts\python.exe' 'c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher' '51647' '--' 'c:\Users\ciko9\Documents\VSCode_Projects\.venv\lts_run.py'
C:\Users\ciko9\Documents\VSCode_Projects\.venv/Draft1.asc
Creating Netlist
Traceback (most recent call last):
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\runpy.py", line 198, in _run_module_as_main
return _run_code(code, main_globals, None,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\runpy.py", line 88, in _run_code
exec(code, run_globals)
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy\__main__.py", line 39, in <module>
cli.main()
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy/..\debugpy\server\cli.py", line 430, in main
run()
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy/..\debugpy\server\cli.py", line 284, in run_file
runpy.run_path(target, run_name="__main__")
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 321, in run_path
return _run_module_code(code, init_globals, run_name,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 135, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 124, in _run_code
exec(code, run_globals)
File "c:\Users\ciko9\Documents\VSCode_Projects\.venv\lts_run.py", line 11, in <module>
LTC = SimCommander(meAbsPath)
^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\Documents\VSCode_Projects\.venv\Lib\site-packages\PyLTSpice\LTSpiceBatch.py", line 288, in __init__
retcode = run_function(cmd_netlist)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\Documents\VSCode_Projects\.venv\Lib\site-packages\PyLTSpice\LTSpiceBatch.py", line 136, in run_function
result = subprocess.run(command, timeout=timeout)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 546, in run
with Popen(*popenargs, **kwargs) as process:
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1022, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1491, in _execute_child
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydev_bundle\pydev_monkey.py", line 853, in new_CreateProcess
return getattr(_subprocess, original_name)(app_name, cmd_line, *args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
FileNotFoundError: [WinError 2] El sistema no puede encontrar el archivo especificado
I guess that it actually finds the ".asc" LTSpice model file, but at some point it misses something but I could not figure out what is it.
Thanks in advance!
A:
The cause is now understood.
pyLTSpiceBatch.py is referring the XVIIx64.exe path directly.
else: # Windows
LTspice_exe = [r"C:\Program Files\LTC\LTspiceXVII\XVIIx64.exe"]
LTspice_arg = {'netlist': ['-netlist'], 'run': ['-b', '-Run']}
PROCNAME = "XVIIx64.exe"
If you installed LTSpice to a path other than the one above, SimCommander will not work.
Then you need to do one of the following:
Move the installed LTSpice to the default path C:\Program Files\LTC\LTspiceXVII.
Rewrite the LTSpice path in pyLTSpiceBatch.py
|
PyLTSpice "LTSpice_Batch" function not working
|
I'm using VSCode with Python 3.11.0 in a virtual environment. I have installed the aforementioned toolchain to configure and launch LTSpice models.
However, it seems not to work and I cannot find why (I'm quite new to python programming). I attach here the actual code and the traceback. Notice that the main code is taken from the example given in its webpage.
The code:
import os
from PyLTSpice.LTSpiceBatch import SimCommander
# get script absolute path
meAbsPath = os.path.dirname(os.path.realpath("Draft1.asc"))
meAbsPath = meAbsPath + '/Draft1.asc'
print(str(meAbsPath))
# select spice model
LTC = SimCommander(meAbsPath)
# set default arguments
# LTC.set_parameters(res=0, cap=100e-6)
LTC.set_component_value('R1', '2k')
LTC.set_component_value('L1', '1u')
# LTC.set_element_model('V3', "SINE(0 1 3k 0 0 0)")
# define simulation
LTC.add_instructions(
"; Simulation settings",
".param run = 0"
)
LTC.reset_netlist()
LTC.add_instructions(
"; Simulation settings",
".tran 0 10u 0 1n",
)
LTC.run()
LTC.wait_completion()
# Sim Statistics
print('Successful/Total Simulations: ' + str(LTC.okSim) + '/' + str(LTC.runno))
The traceback log:
(.venv) PS C:\Users\ciko9\Documents\VSCode_Projects\.venv> c:; cd 'c:\Users\ciko9\Documents\VSCode_Projects\.venv'; & 'c:\Users\ciko9\Documents\VSCode_Projects\.venv\Scripts\python.exe' 'c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher' '51647' '--' 'c:\Users\ciko9\Documents\VSCode_Projects\.venv\lts_run.py'
C:\Users\ciko9\Documents\VSCode_Projects\.venv/Draft1.asc
Creating Netlist
Traceback (most recent call last):
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\runpy.py", line 198, in _run_module_as_main
return _run_code(code, main_globals, None,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\runpy.py", line 88, in _run_code
exec(code, run_globals)
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy\__main__.py", line 39, in <module>
cli.main()
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy/..\debugpy\server\cli.py", line 430, in main
run()
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\adapter/../..\debugpy\launcher/../..\debugpy/..\debugpy\server\cli.py", line 284, in run_file
runpy.run_path(target, run_name="__main__")
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 321, in run_path
return _run_module_code(code, init_globals, run_name,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 135, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydevd_bundle\pydevd_runpy.py", line 124, in _run_code
exec(code, run_globals)
File "c:\Users\ciko9\Documents\VSCode_Projects\.venv\lts_run.py", line 11, in <module>
LTC = SimCommander(meAbsPath)
^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\Documents\VSCode_Projects\.venv\Lib\site-packages\PyLTSpice\LTSpiceBatch.py", line 288, in __init__
retcode = run_function(cmd_netlist)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\Documents\VSCode_Projects\.venv\Lib\site-packages\PyLTSpice\LTSpiceBatch.py", line 136, in run_function
result = subprocess.run(command, timeout=timeout)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 546, in run
with Popen(*popenargs, **kwargs) as process:
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1022, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "C:\Users\ciko9\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1491, in _execute_child
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\ciko9\.vscode\extensions\ms-python.python-2022.18.2\pythonFiles\lib\python\debugpy\_vendored\pydevd\_pydev_bundle\pydev_monkey.py", line 853, in new_CreateProcess
return getattr(_subprocess, original_name)(app_name, cmd_line, *args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
FileNotFoundError: [WinError 2] El sistema no puede encontrar el archivo especificado
I guess that it actually finds the ".asc" LTSpice model file, but at some point it misses something but I could not figure out what is it.
Thanks in advance!
|
[
"The cause is now understood.\npyLTSpiceBatch.py is referring the XVIIx64.exe path directly.\nelse: # Windows\n LTspice_exe = [r\"C:\\Program Files\\LTC\\LTspiceXVII\\XVIIx64.exe\"]\n LTspice_arg = {'netlist': ['-netlist'], 'run': ['-b', '-Run']}\n PROCNAME = \"XVIIx64.exe\"\n\nIf you installed LTSpice to except above path, SimCommander is not work.\nThen you need action either one,\n\nReplace the installed LTSpice to default path C:\\Program Files\\LTC\\LTspiceXVII.\n\nRewrite LTSpice path in pyLTSpiceBatch.py\n\n\n"
] |
[
0
] |
[
"I faced this problem too yesterday.\nBy any chance, were you trying it on windows 11?\nIf that, it is same as my situation. Probably it will work on windows 10.\nI do not know why though, it seems like that subprocess.py or command prompt is not work normally on windows 11.\nHoping that the information will be of some help to you.\n"
] |
[
-1
] |
[
"python"
] |
stackoverflow_0074425104_python.txt
|
Q:
ValueError: [E1041] Expected a string, Doc, or bytes as input, but got:
import pandas
df['findings'] = df['findings'].astype(str)
#df['findings'] = df['findings'].astype('string')
df["new_column"] = GPT2_model(df['findings'], min_length=60)
After running this I get the following error, even after converting my dataframe to string.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-1225bf7a7a14> in <module>
----> 1 df["new_column"] = GPT2_model(df['findings'], min_length=60)
5 frames
/usr/local/lib/python3.7/dist-packages/spacy/language.py in _ensure_doc(self, doc_like)
1106 if isinstance(doc_like, bytes):
1107 return Doc(self.vocab).from_bytes(doc_like)
-> 1108 raise ValueError(Errors.E1041.format(type=type(doc_like)))
1109
1110 def _ensure_doc_with_context(
ValueError: [E1041] Expected a string, Doc, or bytes as input, but got: <class 'pandas.core.series.Series'>
A:
Your method/model GPT2_model doesn't take a Pandas Series object. That's what the error is complaining about. You can instead apply the method to your findings column.
df['new_column'] = df['findings'].apply(GPT2_model, min_length=60)
|
ValueError: [E1041] Expected a string, Doc, or bytes as input, but got:
|
import pandas
df['findings'] = df['findings'].astype(str)
#df['findings'] = df['findings'].astype('string')
df["new_column"] = GPT2_model(df['findings'], min_length=60)
After running this I get the following error, even after converting my dataframe to string.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-1225bf7a7a14> in <module>
----> 1 df["new_column"] = GPT2_model(df['findings'], min_length=60)
5 frames
/usr/local/lib/python3.7/dist-packages/spacy/language.py in _ensure_doc(self, doc_like)
1106 if isinstance(doc_like, bytes):
1107 return Doc(self.vocab).from_bytes(doc_like)
-> 1108 raise ValueError(Errors.E1041.format(type=type(doc_like)))
1109
1110 def _ensure_doc_with_context(
ValueError: [E1041] Expected a string, Doc, or bytes as input, but got: <class 'pandas.core.series.Series'>
|
[
"Your method/model GPT2_model doesn't take a Pandas Series object. That's what the error is complaining about. You can instead apply the method to your findings column.\ndf['new_column'] = df['findings'].apply(GPT2_model, min_length=60)\n\n"
] |
[
1
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074596073_pandas_python.txt
|
Q:
pyspark: OverflowError: mktime argument out of range
I am processing parquet files in pyspark. My version info is:
My data contains date and timestamp fields with values less than '1970-01-01'. I am getting the following error running locally on Mac OS Monterey v12.6.1.
22/11/27 20:22:46 ERROR Utils: Aborting task
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/worker.py", line 686, in main
process()
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/worker.py", line 678, in process
serializer.dump_stream(out_iter, outfile)
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/serializers.py", line 273, in dump_stream
vs = list(itertools.islice(iterator, batch))
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/util.py", line 81, in wrapper
return f(*args, **kwargs)
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 788, in toInternal
return tuple(
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 789, in <genexpr>
f.toInternal(v) if c else v
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 591, in toInternal
return self.dataType.toInternal(obj)
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 216, in toInternal
calendar.timegm(dt.utctimetuple()) if dt.tzinfo else time.mktime(dt.timetuple())
OverflowError: mktime argument out of range
How can I resolve this error?
Adding more context:
I am reading data from a file, processing it and writing it to Postgres table. Reading the parquet file, writing it to the table works fine. I also tried writing the dataframe as a parquet file as it. That also worked.
I was able to isolate the issue to the part where I capture records that failed table insert and write them to a reject file. This is done by calling a mapPartition on the dataframe from the parquet file.
The core logic of the map partitions function is:
for row in rows:
try:
processed_row_count.add(1) # accumulator
cur.execute(insert_statement, row)
accepted_row_count.add(1) # accumulator
except Exception as e:
# Collect the records with error message here and write to reject file
rejected_row_count.add(1) # accumulator
row = row.asDict()
row["ErrorMessage"] = f"Error received from psycopg2 module is: {str(e)}"
yield Row(**row)
conn.close()
The mapPartitions is called as:
rejected_on_load_df = enriched_df.rdd.mapPartitions(process_rows).toDF(
enriched_df.schema.add("ErrorMessage", StringType(), nullable=False)
)
I get the error on writing the rejected_on_load_df
A:
I was curious and I did a little research.
The method mktime(), according to python's documentation, is platform dependent: more info here https://docs.python.org/3.8/library/time.html#time.mktime.
Then I had a look at Mac OS and, as it is based on an UNIX system, it runs with epoch Unix time.
From wiki: https://en.wikipedia.org/wiki/Unix_time
Unix time[a] is a date and time representation widely used in computing. It measures time by the number of seconds that have elapsed since 00:00:00 UTC on 1 January 1970, the beginning of the Unix epoch.[3]
Unix time originated as the system time of Unix operating systems. It
has come to be widely used in other computer operating systems, file
systems, programming languages, and databases.[5][6][7]
That's my best guess on why this is happening.
So my advice would be to find a way to represent dates in another format, which is not epoch.
|
pyspark: OverflowError: mktime argument out of range
|
I am processing parquet files in pyspark. My version info is:
My data contains date and timestamp fields with values less than '1970-01-01'. I am getting the following error running locally on Mac OS Monterey v12.6.1.
22/11/27 20:22:46 ERROR Utils: Aborting task
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/worker.py", line 686, in main
process()
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/worker.py", line 678, in process
serializer.dump_stream(out_iter, outfile)
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/serializers.py", line 273, in dump_stream
vs = list(itertools.islice(iterator, batch))
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/util.py", line 81, in wrapper
return f(*args, **kwargs)
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 788, in toInternal
return tuple(
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 789, in <genexpr>
f.toInternal(v) if c else v
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 591, in toInternal
return self.dataType.toInternal(obj)
File "/Users/pm/opt/spark-3.3.0-bin-hadoop3/python/lib/pyspark.zip/pyspark/sql/types.py", line 216, in toInternal
calendar.timegm(dt.utctimetuple()) if dt.tzinfo else time.mktime(dt.timetuple())
OverflowError: mktime argument out of range
How can I resolve this error?
Adding more context:
I am reading data from a file, processing it and writing it to Postgres table. Reading the parquet file, writing it to the table works fine. I also tried writing the dataframe as a parquet file as it. That also worked.
I was able to isolate the issue to the part where I capture records that failed table insert and write them to a reject file. This is done by calling a mapPartition on the dataframe from the parquet file.
The core logic of the map partitions function is:
for row in rows:
try:
processed_row_count.add(1) # accumulator
cur.execute(insert_statement, row)
accepted_row_count.add(1) # accumulator
except Exception as e:
# Collect the records with error message here and write to reject file
rejected_row_count.add(1) # accumulator
row = row.asDict()
row["ErrorMessage"] = f"Error received from psycopg2 module is: {str(e)}"
yield Row(**row)
conn.close()
The mapPartitions is called as:
rejected_on_load_df = enriched_df.rdd.mapPartitions(process_rows).toDF(
enriched_df.schema.add("ErrorMessage", StringType(), nullable=False)
)
I get the error on writing the rejected_on_load_df
|
[
"I was curious and I did a little research.\nThe method mktime(), according to python's documentation, is platform dependent: more info here https://docs.python.org/3.8/library/time.html#time.mktime.\nThen I had a look at Mac OS and, as it is based on an UNIX system, it runs with epoch Unix time.\nFrom wiki: https://en.wikipedia.org/wiki/Unix_time\n\nUnix time[a] is a date and time representation widely used in computing. It measures time by the number of seconds that have elapsed since 00:00:00 UTC on 1 January 1970, the beginning of the Unix epoch.[3]\nUnix time originated as the system time of Unix operating systems. It\nhas come to be widely used in other computer operating systems, file\nsystems, programming languages, and databases.[5][6][7]\n\nThat's my best guess on why this is happening.\nSo my advice would be to find a way to represent dates in another format, which is not epoch.\n"
] |
[
0
] |
[] |
[] |
[
"datetime",
"pyspark",
"python"
] |
stackoverflow_0074595439_datetime_pyspark_python.txt
|
Q:
Entry field doesn't exist, it is made inside of a function
so im trying to make a user login system, when a user clicks sign up and enters their password and username, it appends both of those to a file, however I made the entry field in the function that is called when you click sign in, Now i have an error when trying to get the data from that entry
usr = (Usrfield).get()
NameError: name 'Usrfield' is not defined
this is my code:
from tkinter import *
from tkinter import messagebox
Homescreen = Tk()
def addattrs():
Signup = Tk()
Usrfield = Entry(Signup, width=50)
#Usrfield = signupinput.get(1.0, "end-1c") c
Usrfield.insert(0, "What is ur usr")
Usrfield.pack()
Pswrdfield = Entry(Signup, width=50)
Pswrdfield.insert(0, "Password?")
Pswrdfield.pack()
Homescreen.withdraw()
loginbtn = Button(Signup, text="sign up", command=createusr)
loginbtn.pack()
def login():
Login= Toplevel(Homescreen)
Login.geometry("750x250")
Login.title("New Window")
Label(Login, text="Youve logged in?", font=('Helvetica 17 bold')).pack(pady=30)
Homescreen.withdraw()
def signup():
#create window
Homescreen.withdraw()
addattrs()
def createusr():
usr = (Usrfield).get()
password = Pswrdfield.get()
with open("Usernames.txt", "a+") as usrs:
usrs.write(usr)
usrs.write("\n")
with open("Passwords.txt", "a+") as pswrds:
pswrds.write(password)
pswrds.write("\n")
#Create a label
Label(Homescreen, text= "Click the below button to Open a New Window", font= ('Helvetica 17 bold')).pack(pady=30)
#Create a button to open a New Window
to_login = Button(Homescreen, text="Login", command=login).pack()
to_signup = Button(Homescreen, text="Sign up", command=signup).pack()
Homescreen.mainloop()
I tried defining the new screen after the original one closed but the program stopped running before the code could execute
A:
just make your widgets global in addattr() function. use the code below:
from tkinter import *
from tkinter import messagebox
Homescreen = Tk()
def addattrs():
global Usrfield,Pswrdfield
Signup = Tk()
Usrfield = Entry(Signup, width=50)
#Usrfield = signupinput.get(1.0, "end-1c") c
Usrfield.insert(0, "What is ur usr")
Usrfield.pack()
Pswrdfield = Entry(Signup, width=50)
Pswrdfield.insert(0, "Password?")
Pswrdfield.pack()
Homescreen.withdraw()
loginbtn = Button(Signup, text="sign up", command=createusr)
loginbtn.pack()
def login():
Login= Toplevel(Homescreen)
Login.geometry("750x250")
Login.title("New Window")
Label(Login, text="Youve logged in?", font=('Helvetica 17 bold')).pack(pady=30)
Homescreen.withdraw()
def signup():
#create window
Homescreen.withdraw()
addattrs()
def createusr():
usr = (Usrfield).get()
password = Pswrdfield.get()
with open("Usernames.txt", "a+") as usrs:
usrs.write(usr)
usrs.write("\n")
with open("Passwords.txt", "a+") as pswrds:
pswrds.write(password)
pswrds.write("\n")
#Create a label
Label(Homescreen, text= "Click the below button to Open a New Window", font= ('Helvetica 17 bold')).pack(pady=30)
#Create a button to open a New Window
to_login = Button(Homescreen, text="Login", command=login).pack()
to_signup = Button(Homescreen, text="Sign up", command=signup).pack()
Homescreen.mainloop()
have fun :)
|
Entry field doesn't exist, it is made inside of a function
|
So I'm trying to make a user login system: when a user clicks sign up and enters their password and username, it appends both of those to a file. However, I made the entry field in the function that is called when you click sign in. Now I have an error when trying to get the data from that entry
usr = (Usrfield).get()
NameError: name 'Usrfield' is not defined
this is my code:
from tkinter import *
from tkinter import messagebox
Homescreen = Tk()
def addattrs():
Signup = Tk()
Usrfield = Entry(Signup, width=50)
#Usrfield = signupinput.get(1.0, "end-1c") c
Usrfield.insert(0, "What is ur usr")
Usrfield.pack()
Pswrdfield = Entry(Signup, width=50)
Pswrdfield.insert(0, "Password?")
Pswrdfield.pack()
Homescreen.withdraw()
loginbtn = Button(Signup, text="sign up", command=createusr)
loginbtn.pack()
def login():
Login= Toplevel(Homescreen)
Login.geometry("750x250")
Login.title("New Window")
Label(Login, text="Youve logged in?", font=('Helvetica 17 bold')).pack(pady=30)
Homescreen.withdraw()
def signup():
#create window
Homescreen.withdraw()
addattrs()
def createusr():
usr = (Usrfield).get()
password = Pswrdfield.get()
with open("Usernames.txt", "a+") as usrs:
usrs.write(usr)
usrs.write("\n")
with open("Passwords.txt", "a+") as pswrds:
pswrds.write(password)
pswrds.write("\n")
#Create a label
Label(Homescreen, text= "Click the below button to Open a New Window", font= ('Helvetica 17 bold')).pack(pady=30)
#Create a button to open a New Window
to_login = Button(Homescreen, text="Login", command=login).pack()
to_signup = Button(Homescreen, text="Sign up", command=signup).pack()
Homescreen.mainloop()
I tried defining the new screen after the original one closed but the program stopped running before the code could execute
|
[
"just make your widgets global in addattr() function. use the code below:\nfrom tkinter import *\nfrom tkinter import messagebox\nHomescreen = Tk()\n\ndef addattrs():\n global Usrfield,Pswrdfield\n Signup = Tk()\n Usrfield = Entry(Signup, width=50)\n #Usrfield = signupinput.get(1.0, \"end-1c\") c\n Usrfield.insert(0, \"What is ur usr\")\n Usrfield.pack()\n Pswrdfield = Entry(Signup, width=50)\n Pswrdfield.insert(0, \"Password?\")\n Pswrdfield.pack()\n Homescreen.withdraw()\n loginbtn = Button(Signup, text=\"sign up\", command=createusr)\n loginbtn.pack()\n\ndef login():\n Login= Toplevel(Homescreen)\n Login.geometry(\"750x250\")\n Login.title(\"New Window\")\n Label(Login, text=\"Youve logged in?\", font=('Helvetica 17 bold')).pack(pady=30)\n Homescreen.withdraw()\n\ndef signup():\n #create window\n\n Homescreen.withdraw()\n addattrs()\ndef createusr():\n usr = (Usrfield).get()\n password = Pswrdfield.get()\n\n with open(\"Usernames.txt\", \"a+\") as usrs:\n usrs.write(usr)\n usrs.write(\"\\n\")\n with open(\"Passwords.txt\", \"a+\") as pswrds:\n pswrds.write(password)\n pswrds.write(\"\\n\") \n\n\n#Create a label\nLabel(Homescreen, text= \"Click the below button to Open a New Window\", font= ('Helvetica 17 bold')).pack(pady=30)\n#Create a button to open a New Window\nto_login = Button(Homescreen, text=\"Login\", command=login).pack()\nto_signup = Button(Homescreen, text=\"Sign up\", command=signup).pack()\n\n\nHomescreen.mainloop()\n\nhave fun :)\n"
] |
[
0
] |
[] |
[] |
[
"python",
"tkinter"
] |
stackoverflow_0074595794_python_tkinter.txt
|
Q:
ModuleNotFoundError: No module named ‘webdriver_manager.Edge’
I’m getting the error:
ModuleNotFoundError: No module named ‘webdriver_manager.Edge’. My Code is:
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.service import Service
from webdriver_manager.Edge import ChromeDriverManager
def Mok():
chrome_options = Options()
chrome_options.add_argument("--headless")
driver=webdriver.Edge(options=chrome_options, service=Service(EdgeDriverManager().install()))
start_url='netlify.com'
driver.get(start_url)
print(driver.page_source.encode("utf-8"))
driver.get_screenshot_as_png('reddit.png')
print(driver.title)
driver.close()
Mok()
A:
import
from webdriver_manager.microsoft import EdgeChromiumDriverManager
[...]
driver = webdriver.Edge(EdgeChromiumDriverManager().install())
A:
You basically have a typo in your code:
See https://github.com/SergeyPirogov/webdriver_manager#use-with-edge
If you are using webdriver_manager as your package.
Use with Edge:
# selenium 3
from selenium import webdriver
from webdriver_manager.microsoft import EdgeChromiumDriverManager
driver = webdriver.Edge(EdgeChromiumDriverManager().install())
# selenium 4
from selenium import webdriver
from selenium.webdriver.edge.service import Service as EdgeService
from webdriver_manager.microsoft import EdgeChromiumDriverManager
driver = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()))
A:
In Python, we need to install the module or have access to it before importing it if the module is user-defined.
The error "ModuleNotFoundError: No module named 'webdriver_manager'" also implies that we didn't install the webdriver-manager module or we are importing it or installing it in an incorrect environment. To fix this error, install the module in the appropriate working directory or in a virtual environment by running the command.
pip install webdriver-manager
|
ModuleNotFoundError: No module named ‘webdriver_manager.Edge’
|
I’m getting the error:
ModuleNotFoundError: No module named ‘webdriver_manager.Edge’. My Code is:
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.service import Service
from webdriver_manager.Edge import ChromeDriverManager
def Mok():
chrome_options = Options()
chrome_options.add_argument("--headless")
driver=webdriver.Edge(options=chrome_options, service=Service(EdgeDriverManager().install()))
start_url='netlify.com'
driver.get(start_url)
print(driver.page_source.encode("utf-8"))
driver.get_screenshot_as_png('reddit.png')
print(driver.title)
driver.close()
Mok()
|
[
"import \nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\n\n[...]\n\ndriver = webdriver.Edge(EdgeChromiumDriverManager().install())\n\n\n",
"You basically have a typo in your code:\nSee https://github.com/SergeyPirogov/webdriver_manager#use-with-edge\nIf you are using webdriver_manager as your package.\nUse with Edge:\n# selenium 3\nfrom selenium import webdriver\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\n\ndriver = webdriver.Edge(EdgeChromiumDriverManager().install())\n\n# selenium 4\nfrom selenium import webdriver\nfrom selenium.webdriver.edge.service import Service as EdgeService\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\n\ndriver = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()))\n\n",
"In python, we need to install or we need to have access the module before importing it if the module is user-defined.\nThe error \"ModuleNotFoundError: No module named 'webdriver_manager'\" also implies that we didn't install the webdriver-manager module or we are importing it or installing it in an incorrect environment. To fix this error, install the module in the appropriate working directory or in a virtual environment by running the command.\npip install webdriver-manager\n"
] |
[
0,
0,
0
] |
[] |
[] |
[
"python",
"selenium"
] |
stackoverflow_0074596027_python_selenium.txt
|
Q:
Python Web browser click listener
Is there any packages or ways to detect what is being clicking in a web browser? I mean get tag/xpath from the web browser (of what is being clicked)? To afterwards find it via selenium or similar?
Or even with to determine what it is with the coordinates of the mouse click.
Like the codegen in Playwright or similar, like a form of listening.
Hope this makes sense.
A:
We could add window listener with JavaScript using driver.execute_script to listen to any clicks, and then call function xpath as provided in SO answer to generate Xpath of an element. As a gist, below is the window.addEventListener script which handles any click event by displaying an alert with the clicked element text (if present) and its Xpath:
window.addEventListener('click', function(event) {alert(event.target.text+'=>'+xpath(event.target));})
And here is the relevant code to launch the browser, execute the script and sleep for 20 seconds to allow interaction on the browser:
def launch_url(url):
driver = webdriver.Chrome('./chromedriver')
driver.get(url)
driver.execute_script('''
function xpath(el) {
if (typeof el == 'string') return document.evaluate(el, document, null, 0, null);
if (!el || el.nodeType != 1) return '';
if (el.id) return '//*[@id="' + el.id + '"';
var sames = [].filter.call(el.parentNode.children, function (x) { return x.tagName == el.tagName });
return xpath(el.parentNode) + '/' + el.tagName.toLowerCase() + (sames.length > 1 ? '['+([].indexOf.call(sames, el)+1)+']' : '')
}
window.addEventListener('click', function(event) {alert(event.target.id+'=>'+xpath(event.target));})
''')
time.sleep(20)
As a test, launched the SO main questions page with launch_url("https://stackoverflow.com/questions") and clicked on the "Ask Question" button:
|
Python Web browser click listener
|
Is there any packages or ways to detect what is being clicking in a web browser? I mean get tag/xpath from the web browser (of what is being clicked)? To afterwards find it via selenium or similar?
Or even with to determine what it is with the coordinates of the mouse click.
Like the codegen in Playwright or similar, like a form of listening.
Hope this makes sense.
|
[
"We could add window listener with JavaScript using driver.execute_script to listen to any clicks, and then call function xpath as provided in SO answer to generate Xpath of an element. As a gist, below is the window.addEventListener script which handles any click event by displaying an alert with the clicked element text (if present) and its Xpath:\nwindow.addEventListener('click', function(event) {alert(event.target.text+'=>'+xpath(event.target));})\n\nAnd here is the relevant code to launch the browser, execute the script and sleep for 20 seconds to allow interaction on the browser:\ndef launch_url(url):\n driver = webdriver.Chrome('./chromedriver')\n driver.get(url)\n driver.execute_script('''\n function xpath(el) { \n if (typeof el == 'string') return document.evaluate(el, document, null, 0, null); \n if (!el || el.nodeType != 1) return ''; \n if (el.id) return '//*[@id="' + el.id + '"'; \n var sames = [].filter.call(el.parentNode.children, function (x) { return x.tagName == el.tagName }); \n return xpath(el.parentNode) + '/' + el.tagName.toLowerCase() + (sames.length > 1 ? '['+([].indexOf.call(sames, el)+1)+']' : '')\n } \n window.addEventListener('click', function(event) {alert(event.target.id+'=>'+xpath(event.target));})\n ''')\n time.sleep(20)\n\nAs a test, launched the SO main questions page with launch_url(\"https://stackoverflow.com/questions\") and clicked on the \"Ask Question\" button:\n\n"
] |
[
1
] |
[] |
[] |
[
"python",
"python_webbrowser",
"selenium"
] |
stackoverflow_0074592808_python_python_webbrowser_selenium.txt
|
Q:
VSCode unable to install Jupyter / Python Extension
I had to set up my Computer again and tried to install the Python Extension in VSCode.
When I try to install this extension I get the following error:
Unable to install extension 'ms-toolsai.jupyter' as it is not compatible with VS Code '1.54.1'.
To install the Python Extension, Jupyter is needed but this Extension seems to be broken in the newest VSCode version.
Anyone run into the same error and knows a fix for this without downgrading VSCode?
A:
OS: Manjaro KDE
I tried installing code and upgrading to insiders (AUR) using pacman and did not get Python to install in vscode because of the Jupyter error. I tried installing Jupyter vsix manually and this also failed.
I ended up getting Python in to vscode by installing code using snap. https://snapcraft.io/code
A:
If python/other extension on VS Code is unable to installed then,
You Should uninstall VS code first then clear the temp and prefetch file.
After that go to C:\Users<username>\AppData\Roaming and delete Code folder there.
3.Now Restart your PC and install VS Code Again.....
Thats It......Done.
|
VSCode unable to install Jupyter / Python Extension
|
I had to set up my Computer again and tried to install the Python Extension in VSCode.
When I try to install this extension I get the following error:
Unable to install extension 'ms-toolsai.jupyter' as it is not compatible with VS Code '1.54.1'.
To install the Python Extension, Jupyter is needed but this Extension seems to be broken in the newest VSCode version.
Anyone run into the same error and knows a fix for this without downgrading VSCode?
|
[
"OS: Manjaro KDE\nI tried installing code and upgrading to insiders (AUR) using pacman and did not get Python to install in vscode because of the Jupyter error. I tried installing Jupyter vsix manually and this also failed.\nI ended up getting Python in to vscode by installing code using snap. https://snapcraft.io/code\n",
"If python/other extension on VS Code is unable to installed then,\n\nYou Should uninstall VS code first then clear the temp and prefetch file.\nAfter that go to C:\\Users<username>\\AppData\\Roaming and delete Code folder there.\n3.Now Restart your PC and install VS Code Again.....\nThats It......Done.\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"jupyter",
"python",
"visual_studio_code"
] |
stackoverflow_0066544398_jupyter_python_visual_studio_code.txt
|
Q:
Sorting of list values in dict not working - python
I am sorting a dictionary; it is sorting based on keys but not the values. If I try sorting with values I am getting the error "'<' not supported between instances of 'list' and 'int'"
Below is code i used.
cars = "ABC/{'Place': 'UK', 'Fruit': 'Apple', 'Vit': ['C','A'], 'Check': ['B', 'C', 'X', 'D','A']}/Place"
import re
import ast
y = ast.literal_eval(re.search('({.+})', cars).group(0))
from collections import OrderedDict
new_dict = dict(OrderedDict(sorted(y.items())))
print(new_dict)
and this is output
{
'Check': ['B', 'C', 'X', 'D', 'A'],
'Fruit': 'Apple',
'Place': 'UK',
'Vit': ['C', 'A']
}
but the problem here is , it is not sorting the list values present .
the expected output is
{
'Check': ['A','B','C','D','X'],
'Fruit': 'Apple',
'Place': 'UK',
'Vit': ['A', 'C']
}
So wherever there is a list value, it should sort that list.
Can anyone help me with this .
A:
You can use isinstance to check whether a value is list, and then apply sorted accordingly:
dct = {
'Check': ['B', 'C', 'X', 'D', 'A'],
'Fruit': 'Apple',
'Place': 'UK',
'Vit': ['C', 'A']
}
output = {k: sorted(v) if isinstance(v, list) else v for k, v in sorted(dct.items())}
print(output) # {'Check': ['A', 'B', 'C', 'D', 'X'], 'Fruit': 'Apple', 'Place': 'UK', 'Vit': ['A', 'C']}
|
Sorting of list values in dict not working - python
|
I am sorting a dictionary; it is sorting based on keys but not the values. If I try sorting with values I am getting the error "'<' not supported between instances of 'list' and 'int'"
Below is code i used.
cars = "ABC/{'Place': 'UK', 'Fruit': 'Apple', 'Vit': ['C','A'], 'Check': ['B', 'C', 'X', 'D','A']}/Place"
import re
import ast
y = ast.literal_eval(re.search('({.+})', cars).group(0))
from collections import OrderedDict
new_dict = dict(OrderedDict(sorted(y.items())))
print(new_dict)
and this is output
{
'Check': ['B', 'C', 'X', 'D', 'A'],
'Fruit': 'Apple',
'Place': 'UK',
'Vit': ['C', 'A']
}
but the problem here is , it is not sorting the list values present .
the expected output is
{
'Check': ['A','B','C','D','X'],
'Fruit': 'Apple',
'Place': 'UK',
'Vit': ['A', 'C']
}
So wherever there is a list value, it should sort that list.
Can anyone help me with this .
|
[
"You can use isinstance to check whether a value is list, and then apply sorted accordingly:\ndct = {\n 'Check': ['B', 'C', 'X', 'D', 'A'],\n 'Fruit': 'Apple',\n 'Place': 'UK',\n 'Vit': ['C', 'A']\n}\n\noutput = {k: sorted(v) if isinstance(v, list) else v for k, v in sorted(dct.items())}\n\nprint(output) # {'Check': ['A', 'B', 'C', 'D', 'X'], 'Fruit': 'Apple', 'Place': 'UK', 'Vit': ['A', 'C']}\n\n"
] |
[
2
] |
[] |
[] |
[
"dictionary",
"python"
] |
stackoverflow_0074596289_dictionary_python.txt
|
Q:
Attaching parameters to geojson object becomes non-existent when creating a geopandas dataframe
I have this dataframe
d = {
'geoid': ['13085970205'],
'FIPS': ['13085'],
'Year': [2024],
'parameters': [{"Year": 2024, "hpi_prediction": 304.32205}],
'geometry':[
{
"coordinates": [[[[-84.126456, 34.389734], [-84.12641, 34.39026], [-84.126323, 34.39068]]]],
"parameters": {"Year": 2024, "hpi_prediction": 304.32205},
"type": "MultiPolygon"
}
]
}
dd = pd.DataFrame(data=d)
When I want to write this out I use import geopandas as gpd to convert the data into a dataframe like this
df_geopandas_hpi = gpd.GeoDataFrame(dd[['geoid', 'geometry']])
Once this happens the parameters key in the original dataframe gets erased. Why? Note that the type of geometry in example dataframe is geojson.geometry.MultiPolygon. How can I avoid this from happening?
What I essentially need to do is the following
if ~os.path.exists('../verus_data'):
os.mkdir('../verus_data')
for county, df_county in dd.groupby('FIPS'):
if ~os.path.exists('../verus_data/'+str(county)):
os.mkdir('../verus_data/'+str(county))
if ~os.path.exists('../verus_data/'+str(county)+'/'+'predicted'):
os.mkdir('../verus_data/'+str(county)+'/'+'predicted')
if ~os.path.exists('../verus_data/'+str(county)+'/'+'analyzed'):
os.mkdir('../verus_data/'+str(county)+'/'+'analyzed')
df_hpi = df_county[df_county['key'] == 'hpi']
df_analyzed = df_county[df_county['key'] == 'analyzed']
for year, df_year in df_hpi.groupby('Year'):
if ~os.path.exists('../verus_data/'+str(county)+'/'+'predicted'+'/'+str(year)):
os.mkdir('../verus_data/'+str(county)+'/'+'predicted'+'/'+str(year))
df_geopandas_hpi = gpd.GeoDataFrame(df_year[['geoid', 'geometry', 'parameters']])
df_geopandas_hpi.to_file('../verus_data/'+str(county)+'/'+'predicted'+'/'+str(year)+'/'+'hpi_predictions.geojson', driver="GeoJSON")
for year, df_year in df_analyzed.groupby('Year'):
if ~os.path.exists('../verus_data/'+str(county)+'/'+'analyzed'+'/'+str(year)):
os.mkdir('../verus_data/'+str(county)+'/'+'analyzed'+'/'+str(year))
df_geopandas_analyzed = gpd.GeoDataFrame(df_year[['geoid', 'geometry', 'parameters']])
df_geopandas_analyzed.to_file('../verus_data/'+str(county)+'/'+'analyzed'+'/'+str(year)+'/'+'analyzed_values.geojson', driver="GeoJSON")
I need to somehow write out these geojson files while keeping parameters key intact.
A:
Geopandas relies on the shapely library to handle geometry objects. Shapely does not have a concept of parameters or additional metadata which can be included at arbitrary levels in GeoJSON but don't fit the shapely or geopandas data models.
For example, when parsing with shapely.geometry.shape:
In [10]: shape = shapely.geometry.shape(
...: {
...: "coordinates": [[[[-84.126456, 34.389734], [-84.12641, 34.39026], [-84.126323, 34.39068]]]],
...: "parameters": {"Year": 2024, "hpi_prediction": 304.32205},
...: "type": "MultiPolygon"
...: }
...: )
In [11]: shape
Out[11]: <shapely.geometry.multipolygon.MultiPolygon at 0x11040eb60>
In [12]: shape.parameters
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Input In [12], in <cell line: 1>()
----> 1 shape.parameters
AttributeError: 'MultiPolygon' object has no attribute 'parameters'
If you'd like to retain these, you'll need to parse the json separately from converting to geopandas. For example, if "parameters" is present in every element, you could simply assign it as a new column:
In [21]: gdf = gpd.GeoDataFrame(dd[["geoid", "geometry"]])
...: gdf["parameters"] = dd.geometry.str["parameters"]
In [22]: gdf
Out[22]:
geoid geometry parameters
0 13085970205 {'coordinates': [[[[-84.126456, 34.389734], [-... {'Year': 2024, 'hpi_prediction': 304.32205}
However, if the parameters field is not always present, you may need to do some extra cleaning. You can always access the elements of the geometry column within the pandas dataframe dd directly, e.g.
In [27]: dd.loc[0, "geometry"]["parameters"]["hpi_prediction"]
Out[27]: 304.32205
A:
All you have to do is add the parameters column in the
df_geopandas_hpi = gpd.GeoDataFrame(df_year[['geoid', 'geometry', 'parameters']])
|
Attaching parameters to geojson object becomes non-existent when creating a geopandas dataframe
|
I have this dataframe
d = {
'geoid': ['13085970205'],
'FIPS': ['13085'],
'Year': [2024],
'parameters': [{"Year": 2024, "hpi_prediction": 304.32205}],
'geometry':[
{
"coordinates": [[[[-84.126456, 34.389734], [-84.12641, 34.39026], [-84.126323, 34.39068]]]],
"parameters": {"Year": 2024, "hpi_prediction": 304.32205},
"type": "MultiPolygon"
}
]
}
dd = pd.DataFrame(data=d)
When I want to write this out I use import geopandas as gpd to convert the data into a dataframe like this
df_geopandas_hpi = gpd.GeoDataFrame(dd[['geoid', 'geometry']])
Once this happens the parameters key in the original dataframe gets erased. Why? Note that the type of geometry in example dataframe is geojson.geometry.MultiPolygon. How can I avoid this from happening?
What I essentially need to do is the following
if ~os.path.exists('../verus_data'):
os.mkdir('../verus_data')
for county, df_county in dd.groupby('FIPS'):
if ~os.path.exists('../verus_data/'+str(county)):
os.mkdir('../verus_data/'+str(county))
if ~os.path.exists('../verus_data/'+str(county)+'/'+'predicted'):
os.mkdir('../verus_data/'+str(county)+'/'+'predicted')
if ~os.path.exists('../verus_data/'+str(county)+'/'+'analyzed'):
os.mkdir('../verus_data/'+str(county)+'/'+'analyzed')
df_hpi = df_county[df_county['key'] == 'hpi']
df_analyzed = df_county[df_county['key'] == 'analyzed']
for year, df_year in df_hpi.groupby('Year'):
if ~os.path.exists('../verus_data/'+str(county)+'/'+'predicted'+'/'+str(year)):
os.mkdir('../verus_data/'+str(county)+'/'+'predicted'+'/'+str(year))
df_geopandas_hpi = gpd.GeoDataFrame(df_year[['geoid', 'geometry', 'parameters']])
df_geopandas_hpi.to_file('../verus_data/'+str(county)+'/'+'predicted'+'/'+str(year)+'/'+'hpi_predictions.geojson', driver="GeoJSON")
for year, df_year in df_analyzed.groupby('Year'):
if ~os.path.exists('../verus_data/'+str(county)+'/'+'analyzed'+'/'+str(year)):
os.mkdir('../verus_data/'+str(county)+'/'+'analyzed'+'/'+str(year))
df_geopandas_analyzed = gpd.GeoDataFrame(df_year[['geoid', 'geometry', 'parameters']])
df_geopandas_analyzed.to_file('../verus_data/'+str(county)+'/'+'analyzed'+'/'+str(year)+'/'+'analyzed_values.geojson', driver="GeoJSON")
I need to somehow write out these geojson files while keeping parameters key intact.
|
[
"Geopandas relies on the shapely library to handle geometry objects. Shapely does not have a concept of parameters or additional metadata which can be included at arbitrary levels in GeoJSON but don't fit the shapely or geopandas data models.\nFor example, when parsing with shapely.geometry.shape:\nIn [10]: shape = shapely.geometry.shape(\n ...: {\n ...: \"coordinates\": [[[[-84.126456, 34.389734], [-84.12641, 34.39026], [-84.126323, 34.39068]]]],\n ...: \"parameters\": {\"Year\": 2024, \"hpi_prediction\": 304.32205},\n ...: \"type\": \"MultiPolygon\"\n ...: }\n ...: )\n\nIn [11]: shape\nOut[11]: <shapely.geometry.multipolygon.MultiPolygon at 0x11040eb60>\n\nIn [12]: shape.parameters\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\nInput In [12], in <cell line: 1>()\n----> 1 shape.parameters\n\nAttributeError: 'MultiPolygon' object has no attribute 'parameters'\n\nIf you'd like to retain these, you'll need to parse the json separately from converting to geopandas. For example, if \"parameters\" is present in every element, you could simply assign it as a new column:\n\nIn [21]: gdf = gpd.GeoDataFrame(dd[[\"geoid\", \"geometry\"]])\n ...: gdf[\"parameters\"] = dd.geometry.str[\"parameters\"]\n\nIn [22]: gdf\nOut[22]:\n geoid geometry parameters\n0 13085970205 {'coordinates': [[[[-84.126456, 34.389734], [-... {'Year': 2024, 'hpi_prediction': 304.32205}\n\nHowever, if the parameters field is not always present, you may need to do some extra cleaning. You can always access the elements of the geometry column within the pandas dataframe dd directly, e.g.\nIn [27]: dd.loc[0, \"geometry\"][\"parameters\"][\"hpi_prediction\"]\nOut[27]: 304.32205\n\n",
"All you have to do is add the parameters column in the\ndf_geopandas_hpi = gpd.GeoDataFrame(df_year[['geoid', 'geometry', 'parameters']])\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"geopandas",
"python"
] |
stackoverflow_0074596125_geopandas_python.txt
|
Q:
Dividing the data timeline into subset of date ranges of dictionaries in a list in python
I am converting date timeline into smaller date time segments based on the frequency in minutes from start time to end time.
Input:
start_time = '2022-11-20-09:48:00'
last_time = '2022-11-20-08:48:00'
frequency = 300 # seconds so it is 5 minutes
what I tried so far
from datetime import datetime
def time_divider(s_time, e_time, frequency):
st_obj = datetime.strptime(s_time, '%Y-%m-%d-%H:%M:%S')
start_range = st_obj.timestamp()
end_range = datetime.strptime(e_time, '%Y-%m-%d-%H:%M:%S').timestamp()
date_segments = []
a = int(start_range)
b = int(end_range+1)
for i in range(a, b+1):
date_segments.append({datetime.fromtimestamp(i): datetime.fromtimestamp(i + frequency)})
i = i + frequency
return date_segments
date_s = time_divider(start_time, last_time, 300)
print(date_s)
current output:
[]
expected output and the output should come in this sequence
{datetime.datetime(2022, 11, 20, 9, 48): datetime.datetime(2022, 11, 20, 9, 52),datetime.datetime(2022, 11, 20, 9, 53): datetime.datetime(2022, 11, 20, 9, 57), datetime.datetime(2022, 11, 20, 9, 57): datetime.datetime(2022, 11, 20, 10, 02 ), ...}
A:
start_time should be less than end_time
like:
start_time = '2022-11-20-08:48:00'
last_time = '2022-11-20-09:48:00'
and if we add 5 minutes to 48 it would become 53, not 52 as you have mentioned in the expected output.
Here is the solution which might solve the problem.
from datetime import datetime, timedelta
start_time = '2022-11-20-08:48:00'
last_time = '2022-11-20-09:48:00'
frequency = 300 # seconds so it is 5 minutes
def time_divider(s_time, e_time, frequency):
st_obj = datetime.strptime(s_time, '%Y-%m-%d-%H:%M:%S')
end_range = datetime.strptime(e_time, '%Y-%m-%d-%H:%M:%S')
date_segments = []
while st_obj <= end_range:
old_st_obj = st_obj
st_obj = st_obj + timedelta(seconds=frequency)
date_segments.append({old_st_obj: st_obj})
return date_segments
date_s = time_divider(start_time, last_time, 300)
print(date_s)
|
Dividing the data timeline into subset of date ranges of dictionaries in a list in python
|
I am converting date timeline into smaller date time segments based on the frequency in minutes from start time to end time.
Input:
start_time = '2022-11-20-09:48:00'
last_time = '2022-11-20-08:48:00'
frequency = 300 # seconds so it is 5 minutes
what I tried so far
from datetime import datetime
def time_divider(s_time, e_time, frequency):
st_obj = datetime.strptime(s_time, '%Y-%m-%d-%H:%M:%S')
start_range = st_obj.timestamp()
end_range = datetime.strptime(e_time, '%Y-%m-%d-%H:%M:%S').timestamp()
date_segments = []
a = int(start_range)
b = int(end_range+1)
for i in range(a, b+1):
date_segments.append({datetime.fromtimestamp(i): datetime.fromtimestamp(i + frequency)})
i = i + frequency
return date_segments
date_s = time_divider(start_time, last_time, 300)
print(date_s)
current output:
[]
expected output and the output should come in this sequence
{datetime.datetime(2022, 11, 20, 9, 48): datetime.datetime(2022, 11, 20, 9, 52),datetime.datetime(2022, 11, 20, 9, 53): datetime.datetime(2022, 11, 20, 9, 57), datetime.datetime(2022, 11, 20, 9, 57): datetime.datetime(2022, 11, 20, 10, 02 ), ...}
|
[
"start_time should be less than end_time\nlike:\nstart_time = '2022-11-20-08:48:00'\nlast_time = '2022-11-20-09:48:00'\nand if we hadd 5 minutes in 48 it would become 53 not 52 as you have mentioned in expected output.\nHere is the solution which might solve the problem.\nfrom datetime import datetime, timedelta\n\n\nstart_time = '2022-11-20-08:48:00'\nlast_time = '2022-11-20-09:48:00'\nfrequency = 300 # seconds so it is 5 minutes\n\n\ndef time_divider(s_time, e_time, frequency):\n st_obj = datetime.strptime(s_time, '%Y-%m-%d-%H:%M:%S')\n end_range = datetime.strptime(e_time, '%Y-%m-%d-%H:%M:%S')\n date_segments = []\n while st_obj <= end_range:\n old_st_obj = st_obj\n st_obj = st_obj + timedelta(seconds=frequency)\n date_segments.append({old_st_obj: st_obj})\n return date_segments\n\n\ndate_s = time_divider(start_time, last_time, 300)\nprint(date_s)\n\n"
] |
[
2
] |
[] |
[] |
[
"python",
"python_3.x"
] |
stackoverflow_0074596235_python_python_3.x.txt
|
Q:
How can I call multiple views in one url address in Django?
I'm trying to show forms defined by new_measurement on index.html, but I only manage to get IndexView() to work. I tried various combinations between IndexView() and new_measurement(), but those didn't work out at all. I know that IndexView() doesn't pass anything related to new_measurement(), and new_measurement() isn't called, which is the core of my problem. I'd really appreciate if someone more experienced with Django could tell me what I could, or should do. Thank you.
Here's my views.py:
from django.shortcuts import render
from django.utils import timezone
from .models import Measurement
from .forms import MeasurementForm
from django.views import generic
class IndexView(generic.ListView):
model = Measurement
context_object_name = 'measurement_list'
template_name = 'index.html'
queryset = Measurement.objects.all()
def new_measurement(request):
if request.method == "POST":
form = MeasurementForm(request.POST)
if form.is_valid():
measurement = form.save(commit=False)
measurement.measurement_date = timezone.now()
measurement.save()
else:
form = MeasurementForm()
return render(request, 'index.html', {'form': form})
urls.py:
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
]
forms.py:
class MeasurementForm(forms.ModelForm):
class Meta:
model = Measurement
fields = ('measurement_value', 'measurement_unit')
index.html:
{% extends "base.html" %}
{% block content %}
<h1>Climate Measurement Tool</h1>
<h2>Add a new measurement</h2>
<form method="POST" class="post-form">
{% csrf_token %}
{{ form.as_p }}
<button type="submit" class="save">Add</button>
</form>
<h2>Measurements</h2>
{% if measurement_list %}
<ul>
{% for measurement in measurement_list %}
<li>
<p>{{ measurement }}</p>
</li>
{% endfor %}
</ul>
{% else %}
<p>No measurements yet</p>
{% endif %}
{% endblock %}
A:
You can't map multiple views in one url but you can do mutiple works in one view.
update your views.py as you can see that I am sending (querylist and form) both in that view
views.py
def new_measurement(request):
if request.method == "POST":
form = MeasurementForm(request.POST)
if form.is_valid():
measurement = form.save(commit=False)
measurement.measurement_date = timezone.now()
measurement.save()
else:
form = MeasurementForm()
qs = Measurement.objects.all()
context = {'form': form, 'measurement_list': qs}
return render(request, 'index.html', context)
update urls.py
from django.urls import path
from . import views
urlpatterns = [
path('', views.new_measurement, name='index'),
]
A:
You can't call 2 views for one url. basically each url has to be linked to one view and that's something you can't really change.
But if you want your code to be cleaner and have multiple functions, you can call them in your view, basically what you can do is to make a view and call it when a url or even more than one url has been used and in that view decide which function to use
Example:
def god_view(request):
if request.method == "POST"
return post_func(request)
return get_func(request)
This is a very simple example but you can do so many other things.
A:
It is not possible to have more views in one url, but you can simulate it. I did it like a view and in the template of this view was javascript which loaded the second view with the response of AJAX and filled the belonging element with the second view's content. The second view was not whole template but it started with some div tags which were placed into the first template. I'll try to give you an example
views
def first_view(request):
return render(
request,
'first_template.html',
{
'first_content': 'Some heavy content'
})
def second_view(request):
return render(
request,
'second_template.html',
{
'second_content': 'Some heavier content!'
})
first_template.html
...
<body>
<div id="1">
{{ first_content }}
</div>
<div>
... loading ...
</div>
<script>
window.onload = function() {
$.ajax({
url: {% url 'to_second_view' %},
method: 'GET',
success: function(response) {
$('#2').html(response);
}
})
}
</script>
</body>
...
second_template.html
<div>
{{ second_content }}
</div>
A:
If you're using cbv you can override the get_template_names method for any view that inherits TemplateResponseMixin and return a list of string which are searched in order until one matches or ImporperlyConfigured is raised. For example:
class SomeView(TemplateResponseMixin):
...
def get_template_names(self):
if self.request.method == "POST":
return ['post_template.html']
else:
return ['template.html']
A:
Instead of generic.ListView you can try with rest_framework.views.APIView
from rest_framework.views import APIView
class IndexView(APIView):
def post(self, request: Request):
form = MeasurementForm(request.POST)
if form.is_valid():
measurement = form.save(commit=False)
measurement.measurement_date = timezone.now()
measurement.save()
return render(request, 'index.html', {'form': form})
def get(self, request: Request):
form = MeasurementForm()
return render(request, 'index.html', {'form': form})
This gives you more control on the APIs you call. Also you can raise/return error when you call your API using incorrect methods (PUT, PATCH)
|
How can I call multiple views in one url address in Django?
|
I'm trying to show forms defined by new_measurement on index.html, but I only manage to get IndexView() to work. I tried various combinations between IndexView() and new_measurement(), but those didn't work out at all. I know that IndexView() doesn't pass anything related to new_measurement(), and new_measurement() isn't called, which is the core of my problem. I'd really appreciate if someone more experienced with Django could tell me what I could, or should do. Thank you.
Here's my views.py:
from django.shortcuts import render
from django.utils import timezone
from .models import Measurement
from .forms import MeasurementForm
from django.views import generic
class IndexView(generic.ListView):
model = Measurement
context_object_name = 'measurement_list'
template_name = 'index.html'
queryset = Measurement.objects.all()
def new_measurement(request):
if request.method == "POST":
form = MeasurementForm(request.POST)
if form.is_valid():
measurement = form.save(commit=False)
measurement.measurement_date = timezone.now()
measurement.save()
else:
form = MeasurementForm()
return render(request, 'index.html', {'form': form})
urls.py:
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
]
forms.py:
class MeasurementForm(forms.ModelForm):
class Meta:
model = Measurement
fields = ('measurement_value', 'measurement_unit')
index.html:
{% extends "base.html" %}
{% block content %}
<h1>Climate Measurement Tool</h1>
<h2>Add a new measurement</h2>
<form method="POST" class="post-form">
{% csrf_token %}
{{ form.as_p }}
<button type="submit" class="save">Add</button>
</form>
<h2>Measurements</h2>
{% if measurement_list %}
<ul>
{% for measurement in measurement_list %}
<li>
<p>{{ measurement }}</p>
</li>
{% endfor %}
</ul>
{% else %}
<p>No measurements yet</p>
{% endif %}
{% endblock %}
|
[
"You can't map multiple views in one url but you can do mutiple works in one view.\nupdate your views.py as you can see that I am sending (querylist and form) both in that view\nviews.py\ndef new_measurement(request):\n if request.method == \"POST\":\n form = MeasurementForm(request.POST)\n if form.is_valid():\n measurement = form.save(commit=False)\n measurement.measurement_date = timezone.now()\n measurement.save()\n else:\n form = MeasurementForm()\n\n qs = Measurement.objects.all()\n context = {'form': form, 'measurement_list': qs}\n return render(request, 'index.html', context)\n\nupdate urls.py\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.new_measurement, name='index'),\n]\n\n",
"You can't call 2 views for one url. basically each url has to be linked to one view and that's something you can't really change.\nBut if you want your code to be cleaner and have multiple functions, you can call them in your view, basically what you can do is to make a view and call it when a url or even more than one url has been used and in that view decide which function to use\nExample:\ndef god_view(request):\n if request.method == \"POST\"\n return post_func(request)\n return get_func(request)\n\nThis is a very simple example but you can do so many other things.\n",
"It is not possible to have more views in one url, but you can simulate it. I did it like a view and in the template of this view was javascript which loaded the second view with the response of AJAX and filled the belonging element with the second view's content. The second view was not whole template but it started with some div tags which were placed into the first template. I'll try to give you an example\nviews\ndef first_view(request):\n return render(\n request,\n 'first_template.html',\n {\n 'first_content': 'Some heavy content'\n })\n\n\ndef second_view(request):\n return render(\n request, \n 'second_template.html', \n {\n 'second_content': 'Some heavier content!'\n })\n\nfirst_template.html\n...\n<body>\n\n <div id=\"1\">\n {{ first_content }}\n </div>\n <div>\n ... loading ...\n </div>\n\n <script>\n\n window.onload = function() {\n $.ajax({\n url: {% url 'to_second_view' %},\n method: 'GET',\n success: function(response) {\n $('#2').html(response);\n }\n })\n }\n\n </script>\n\n</body>\n...\n\nsecond_template.html\n<div>\n {{ second_content }}\n</div>\n\n",
"If you're using cbv you can override the get_template_names method for any view that inherits TemplateResponseMixin and return a list of string which are searched in order until one matches or ImporperlyConfigured is raised. For example:\nclass SomeView(TemplateResponseMixin):\n ...\n def get_template_names(self):\n if self.request.method == \"POST\":\n return ['post_template.html']\n else:\n return ['template.html']\n\n",
"Instead of generic.ListView you can try with rest_framework.views.APIView\nfrom rest_framework.views import APIView\n\nclass IndexView(APIView):\n \n def post(self, request: Request):\n form = MeasurementForm(request.POST)\n if form.is_valid():\n measurement = form.save(commit=False)\n measurement.measurement_date = timezone.now()\n measurement.save()\n return render(request, 'index.html', {'form': form})\n\n def get(self, request: Request):\n form = MeasurementForm()\n return render(request, 'index.html', {'form': form})\n\n\nThis gives you more control on the APIs you call. Also you can raise/return error when you call your API using incorrect methods (PUT, PATCH)\n"
] |
[
5,
2,
0,
0,
0
] |
[] |
[] |
[
"django",
"django_urls",
"django_views",
"python"
] |
stackoverflow_0048729966_django_django_urls_django_views_python.txt
|
Q:
How to convert a 5-level dictionary into a DataFrame?
I have a dictionary with structure:
Level 1:
id (int)
username (str)
meta (contain a string of Kpi_info)
This is a dictionary:
dict = {'id': 206, 'username': 'hantran','meta': '{"kpi_info":\
{"2021" :{"1":{"revenue":"2000", "kpi":"2100","result":"0"}, "2":{"revenue":"2500", "kpi":"2000", "result":"1"}},\
"2022": {"1":{"revenue":"3000", "kpi":"2500","result":"1"}, "2":{"revenue":"2500", "kpi":"3000", "result":"0"}}}'
}
My desire result is a DataFame like this:
id
username
Year
Month
revenue
kpi
result
206
hantran
2021
1
2000
2100
0
206
hantran
2021
2
2500
2000
1
206
hantran
2022
1
3000
2500
1
206
hantran
2022
2
2500
3000
0
Apparently, similar question has been discussed here. However, the solution only work for 3-level dictionary. I don't know how to make it work for my 1-level dictionary with most of the needed information is in a string.
A:
If the string in your dictionary is valid json, it can easily be converted into a dictionary:
from json import loads
d = {'id': 206, 'username': 'hantran', 'meta': '{"kpi_info": {"2021" :{"1":{"revenue":"2000", "kpi":"2100","result":"0"}, "2":{"revenue":"2500", "kpi":"2000", "result":"1"}}, "2022": {"1":{"revenue":"3000", "kpi":"2500","result":"1"}, "2":{"revenue":"2500", "kpi":"3000", "result":"0"}}}}'}
d['meta'] = loads(d['meta'])
However, the representation of the dictionary in your code is not a valid dictionary, as it is missing a closing }. There's no easy way to deal with errors like these, so you should check if your actual data has this problem, or whether you should check the code you share more carefully.
Note that you shouldn't call a dictionary dict, since doing so will shadow the actual dict type and you won't be able to access that normally after doing so.
With the dictionary d it's now fairly easy to construct a DataFrame as needed:
from pandas import DataFrame
df = DataFrame([
{
'id': d['id'], 'username': d['username'],
'year': int(k1), 'month': int(k2),
'revenue': d2['revenue'], 'kpi': d2['kpi'], 'result': d2['result']
}
for k1, d1 in d['meta']['kpi_info'].items()
for k2, d2 in d1.items()
])
print(df)
This makes use of pandas ability to turn a list of dictionaries into a dataframe, using the keys of the dictionaries as column references.
Result:
id username year month revenue kpi result
0 206 hantran 2021 1 2000 2100 0
1 206 hantran 2021 2 2500 2000 1
2 206 hantran 2022 1 3000 2500 1
3 206 hantran 2022 2 2500 3000 0
|
How to convert a 5-level dictionary into a DataFrame?
|
I have a dictionary with structure:
Level 1:
id (int)
username (str)
meta (contain a string of Kpi_info)
This is a dictionary:
dict = {'id': 206, 'username': 'hantran','meta': '{"kpi_info":\
{"2021" :{"1":{"revenue":"2000", "kpi":"2100","result":"0"}, "2":{"revenue":"2500", "kpi":"2000", "result":"1"}},\
"2022": {"1":{"revenue":"3000", "kpi":"2500","result":"1"}, "2":{"revenue":"2500", "kpi":"3000", "result":"0"}}}'
}
My desire result is a DataFame like this:
id
username
Year
Month
revenue
kpi
result
206
hantran
2021
1
2000
2100
0
206
hantran
2021
2
2500
2000
1
206
hantran
2022
1
3000
2500
1
206
hantran
2022
2
2500
3000
0
Apparently, similar question has been discussed here. However, the solution only work for 3-level dictionary. I don't know how to make it work for my 1-level dictionary with most of the needed information is in a string.
|
[
"If the string in your dictionary is valid json, it can easily be converted into a dictionary:\nfrom json import loads\n\nd = {'id': 206, 'username': 'hantran', 'meta': '{\"kpi_info\": {\"2021\" :{\"1\":{\"revenue\":\"2000\", \"kpi\":\"2100\",\"result\":\"0\"}, \"2\":{\"revenue\":\"2500\", \"kpi\":\"2000\", \"result\":\"1\"}}, \"2022\": {\"1\":{\"revenue\":\"3000\", \"kpi\":\"2500\",\"result\":\"1\"}, \"2\":{\"revenue\":\"2500\", \"kpi\":\"3000\", \"result\":\"0\"}}}}'}\n\nd['meta'] = loads(d['meta'])\n\nHowever, the representation of the dictionary in your code is not a valid dictionary, as it is missing a closing }. There's no easy way to deal with errors like these, so you should check if your actual data has this problem, or whether you should check the code you share more carefully.\nNote that you shouldn't call a dictionary dict, since doing so will shadow the actual dict type and you won't be able to access that normally after doing so.\nWith the dictionary d it's now fairly easy to construct a DataFrame as needed:\nfrom pandas import DataFrame\n\ndf = DataFrame([\n {\n 'id': d['id'], 'username': d['username'],\n 'year': int(k1), 'month': int(k2),\n 'revenue': d2['revenue'], 'kpi': d2['kpi'], 'result': d2['result']\n }\n for k1, d1 in d['meta']['kpi_info'].items()\n for k2, d2 in d1.items()\n])\n\nprint(df)\n\nThis makes use of pandas ability to turn a list of dictionaries into a dataframe, using the keys of the dictionaries as column references.\nResult:\n id username year month revenue kpi result\n0 206 hantran 2021 1 2000 2100 0\n1 206 hantran 2021 2 2500 2000 1\n2 206 hantran 2022 1 3000 2500 1\n3 206 hantran 2022 2 2500 3000 0\n\n"
] |
[
1
] |
[] |
[] |
[
"dataframe",
"dictionary",
"python"
] |
stackoverflow_0074596127_dataframe_dictionary_python.txt
|
Q:
how to create unique ID for pairs
I have pandas dataframe that store the relationship of two customers like below. How do I create a unique ID for associated customers?
Assuming there are tons of thousands of customers. The customer ID are completed random numbers which are not classified as prefix 'A' and 'B' in the example presented. The prefix is just for explaining the problem.
import pandas as pd
# initialize list of lists
data = [['A1', 'A1'],
['A1', 'A2'],
['A2', 'A1'],
['A2', 'A3'],
['A3', 'A2'],
['A3', 'A4'],
['A4', 'A3'],
['A4', 'A5'],
['B1', 'B1'],
['B1', 'B2'],
['B2', 'B1'],
['B2', 'B3'],
['B3', 'B2'],
['B3', 'B4'],
['B4', 'B3'],
['B4', 'B5']
]
# Create the pandas DataFrame
df = pd.DataFrame(data, columns=['cust_1', 'cust_2'])
# print dataframe.
df
The end results should like below:
Thanks a lot.
A:
If you have only A and B, you can simply see the starting point to calculate the ID.
In [12]: df['Unique_ID'] = df['cust_1'].map(lambda x: 'ID1' if x.startswith('A')
...: else 'ID2')
In [13]: df
Out[13]:
cust_1 cust_2 Unique_ID
0 A1 A1 ID1
1 A1 A2 ID1
2 A2 A1 ID1
3 A2 A3 ID1
4 A3 A2 ID1
5 A3 A4 ID1
6 A4 A3 ID1
7 A4 A5 ID1
8 B1 B1 ID2
9 B1 B2 ID2
10 B2 B1 ID2
11 B2 B3 ID2
12 B3 B2 ID2
13 B3 B4 ID2
14 B4 B3 ID2
15 B4 B5 ID2
Here if first element in group is A, id is ID1 as shown in fig, if it is B, id is ID2 as shown.
|
how to create unique ID for pairs
|
I have pandas dataframe that store the relationship of two customers like below. How do I create a unique ID for associated customers?
Assuming there are tons of thousands of customers. The customer ID are completed random numbers which are not classified as prefix 'A' and 'B' in the example presented. The prefix is just for explaining the problem.
import pandas as pd
# initialize list of lists
data = [['A1', 'A1'],
['A1', 'A2'],
['A2', 'A1'],
['A2', 'A3'],
['A3', 'A2'],
['A3', 'A4'],
['A4', 'A3'],
['A4', 'A5'],
['B1', 'B1'],
['B1', 'B2'],
['B2', 'B1'],
['B2', 'B3'],
['B3', 'B2'],
['B3', 'B4'],
['B4', 'B3'],
['B4', 'B5']
]
# Create the pandas DataFrame
df = pd.DataFrame(data, columns=['cust_1', 'cust_2'])
# print dataframe.
df
The end results should like below:
Thanks a lot.
|
[
"If you have only A and B, you can simply see the starting point to calculate the ID.\nIn [12]: df['Unique_ID'] = df['cust_1'].map(lambda x: 'ID1' if x.startswith('A')\n ...: else 'ID2') \n\nIn [13]: df \nOut[13]: \n cust_1 cust_2 Unique_ID\n0 A1 A1 ID1\n1 A1 A2 ID1\n2 A2 A1 ID1\n3 A2 A3 ID1\n4 A3 A2 ID1\n5 A3 A4 ID1\n6 A4 A3 ID1\n7 A4 A5 ID1\n8 B1 B1 ID2\n9 B1 B2 ID2\n10 B2 B1 ID2\n11 B2 B3 ID2\n12 B3 B2 ID2\n13 B3 B4 ID2\n14 B4 B3 ID2\n15 B4 B5 ID2\n\nHere if first element in group is A, id is ID1 as shown in fig, if it is B, id is ID2 as shown.\n"
] |
[
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074596426_python.txt
|
Q:
Getting the latest python 3 version programmatically
I want to get the latest python source from https://www.python.org/ftp/python/.
While posting this, the latest version is 3.9.1. I do not want to hardcode 3.9.1 in my code to get the latest version and keep on updating the version when a new version comes out. I am using OS ubuntu 16.04
Is there a programmatic way to get the latest python version (using curl) and use that version to get the latest source?
A:
I had a similar problem and couldn't find anything better than scraping the downloads page. You mentioned curl, so I'm assuming you want a shell script. I ended up with this:
url='https://www.python.org/ftp/python/'
curl --silent "$url" |
sed -n 's!.*href="\([0-9]\+\.[0-9]\+\.[0-9]\+\)/".*!\1!p' |
sort -rV |
while read -r version; do
filename="Python-$version.tar.xz"
# Versions which only have alpha, beta, or rc releases will fail here.
# Stop when we find one with a final release.
if curl --fail --silent -O "$url/$version/$filename"; then
echo "$filename"
break
fi
done
This relies on sort -V, which I believe is specific to GNU coreutils. That shouldn't be a problem on Ubuntu.
If you're using this in a larger script and want to use the version or filename variables after the loop, see How to pipe input to a Bash while loop and preserve variables after loop ends.
A:
One possible approach:
$ cat latest_python_version.py
import requests
from bs4 import BeautifulSoup
import re
url = "https://www.python.org/doc/versions/"
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
# Search for
# <div class="section" id="python-documentation-by-version">
# ...
# </div>
div = soup.find("div", attrs={"id": "python-documentation-by-version"})
# Get the first link. It will be something like
# <li><a class="reference external" href="https://docs.python.org/release/3.11.0/">Python 3.11.0</a>,
# documentation released on 24 October 2022.</li>
link = div.find("li")
# x.contents will be something like ['Python 3.11.0']
x = link.find("a", attrs={"href": re.compile("^https*://")})
# extract the latest version which will be something like '3.11.0'
matches = re.search("Python (.*)$", x.contents[0])
version = matches.group(1)
print(version)
Sample run:
$ python latest_python_version.py
3.11.0
|
Getting the latest python 3 version programmatically
|
I want to get the latest python source from https://www.python.org/ftp/python/.
While posting this, the latest version is 3.9.1. I do not want to hardcode 3.9.1 in my code to get the latest version and keep on updating the version when a new version comes out. I am using OS ubuntu 16.04
Is there a programmatic way to get the latest python version (using curl) and use that version to get the latest source?
|
[
"I had a similar problem and couldn't find anything better than scraping the downloads page. You mentioned curl, so I'm assuming you want a shell script. I ended up with this:\nurl='https://www.python.org/ftp/python/'\n\ncurl --silent \"$url\" |\n sed -n 's!.*href=\"\\([0-9]\\+\\.[0-9]\\+\\.[0-9]\\+\\)/\".*!\\1!p' |\n sort -rV |\nwhile read -r version; do\n filename=\"Python-$version.tar.xz\"\n # Versions which only have alpha, beta, or rc releases will fail here.\n # Stop when we find one with a final release.\n if curl --fail --silent -O \"$url/$version/$filename\"; then\n echo \"$filename\"\n break\n fi\ndone\n\nThis relies on sort -V, which I believe is specific to GNU coreutils. That shouldn't be a problem on Ubuntu.\nIf you're using this in a larger script and want to use the version or filename variables after the loop, see How to pipe input to a Bash while loop and preserve variables after loop ends.\n",
"One possible approach:\n$ cat latest_python_version.py\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nurl = \"https://www.python.org/doc/versions/\"\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\n# Search for\n# <div class=\"section\" id=\"python-documentation-by-version\">\n# ...\n# </div>\ndiv = soup.find(\"div\", attrs={\"id\": \"python-documentation-by-version\"})\n\n# Get the first link. It will be something like\n# <li><a class=\"reference external\" href=\"https://docs.python.org/release/3.11.0/\">Python 3.11.0</a>,\n# documentation released on 24 October 2022.</li>\nlink = div.find(\"li\")\n\n# x.contents will be something like ['Python 3.11.0']\nx = link.find(\"a\", attrs={\"href\": re.compile(\"^https*://\")})\n\n# extract the latest version which will be something like '3.11.0'\nmatches = re.search(\"Python (.*)$\", x.contents[0])\nversion = matches.group(1)\nprint(version)\n\nSample run:\n$ python latest_python_version.py\n3.11.0\n\n"
] |
[
4,
0
] |
[] |
[] |
[
"pip",
"python",
"python_3.x"
] |
stackoverflow_0065311659_pip_python_python_3.x.txt
|
Q:
How to get mxnet function decorators in python
I want to get mxnet function decorators in python. I can get the decorators for Tensorflow as follows:
Given that we have the following tensorflow API:
tf.math.floor(2.5)
When I run the code, the function arguments are set inside tensorflow object.
APIname = "tf.math.floor"
apit_split = APIname.split('.')
func_name = apit_split[-1]
module_obj = tf
if len(func_name_list) > 1:
for module_name in apit_split[:-1]:
module_obj = getattr(module_obj, module_name)
myfunction = getattr(module_obj, func_name)
And the output is:
As you can see, I have the decorators for the function.
Now for mxnet, I have the following code snippet:
Given that we have the following mxnet API:
from mxnet import ndarray
x = ndarray.ones((2,3))
When I run the code, the function arguments are set inside ndarray object.
APIname = "ndarray.ones"
apit_split = APIname.split('.')
func_name = apit_split[-1]
module_obj = ndarray
if len(func_name_list) > 1:
for module_name in apit_split[:-1]:
module_obj = getattr(module_obj, module_name)
myfunction = getattr(module_obj, func_name)
The output is:
As you can see, there is no decorator for the function. Any idea? thanks.
A:
Just found the answer.
The problem is that I should do the above stuff inside __init__.py located in the root folder of mxnet library (installed via pip).
|
How to get mxnet function decorators in python
|
I want to get mxnet function decorators in python. I can get the decorators for Tensorflow as follows:
Given that we have the following tensorflow API:
tf.math.floor(2.5)
When I run the code, the function arguments are set inside tensorflow object.
APIname = "tf.math.floor"
apit_split = APIname.split('.')
func_name = apit_split[-1]
module_obj = tf
if len(func_name_list) > 1:
for module_name in apit_split[:-1]:
module_obj = getattr(module_obj, module_name)
myfunction = getattr(module_obj, func_name)
And the output is:
As you can see, I have the decorators for the function.
Now for mxnet, I have the following code snippet:
Given that we have the following mxnet API:
from mxnet import ndarray
x = ndarray.ones((2,3))
When I run the code, the function arguments are set inside ndarray object.
APIname = "ndarray.ones"
apit_split = APIname.split('.')
func_name = apit_split[-1]
module_obj = ndarray
if len(func_name_list) > 1:
for module_name in apit_split[:-1]:
module_obj = getattr(module_obj, module_name)
myfunction = getattr(module_obj, func_name)
The output is:
As you can see, there is no decorator for the function. Any idea? thanks.
|
[
"Just found the answer.\nThe problem is that I should do the above stuff inside __init__.py located in the root folder of mxnet library (installed via pip).\n"
] |
[
0
] |
[] |
[] |
[
"attributes",
"mxnet",
"python",
"wrapper"
] |
stackoverflow_0074595937_attributes_mxnet_python_wrapper.txt
|
Q:
NameError: name 'allTodos' is not defined | UnboundLocalError: local variable 'allTodos' referenced before assignment
Hi I am doing practice on flask, creating Todo App.
If I use global after
db.session.add(todo)
then I get an NameError
If I dont use global keyword then I get the UnboundLocalError:
Here is my code:
from flask import Flask, render_template ,request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
db = SQLAlchemy(app)
class Todo(db.Model):
SNumber = db.Column(db.Integer,primary_key=True)
title = db.Column(db.String(200),nullable=False)
desc = db.Column(db.String(500),nullable=False)
data_created = db.Column(db.DateTime, default = datetime.utcnow)
def __repr__(self) -> str:
return f"{self.SNumber} - {self.title}"
@app.route('/' , methods=['GET','POST'])
def hello_world():
if request.method == 'POST':
title = request.form['title']
desc = request.form['desc']
todo = Todo(title = title, desc = desc)
db.session.add(todo)
db.session.commit()
allTodos = Todo.query.all()
return render_template('index.html',allTodo=allTodos)
@app.route('/show')
def products():
#allTodo = Todo.query.all()
#print(allTodo)
return "this is a product page"
if __name__ == "__main__":
app.run(debug=True)
A:
Your variable is defined and assigned in the if statement. There is a chance that code block will not be executed, resulting in the interpreter not knowing what to do when it reaches the return statement without having "all_todos" defined. You can always define it and assign it a default value before the if statement.
|
NameError: name 'allTodos' is not defined | UnboundLocalError: local variable 'allTodos' referenced before assignment
|
Hi I am doing practice on flask, creating Todo App.
If I use global after
db.session.add(todo)
then I get an NameError
If I dont use global keyword then I get the UnboundLocalError:
Here is my code:
from flask import Flask, render_template ,request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
db = SQLAlchemy(app)
class Todo(db.Model):
SNumber = db.Column(db.Integer,primary_key=True)
title = db.Column(db.String(200),nullable=False)
desc = db.Column(db.String(500),nullable=False)
data_created = db.Column(db.DateTime, default = datetime.utcnow)
def __repr__(self) -> str:
return f"{self.SNumber} - {self.title}"
@app.route('/' , methods=['GET','POST'])
def hello_world():
if request.method == 'POST':
title = request.form['title']
desc = request.form['desc']
todo = Todo(title = title, desc = desc)
db.session.add(todo)
db.session.commit()
allTodos = Todo.query.all()
return render_template('index.html',allTodo=allTodos)
@app.route('/show')
def products():
#allTodo = Todo.query.all()
#print(allTodo)
return "this is a product page"
if __name__ == "__main__":
app.run(debug=True)
|
[
"Your variable is defined and assigned in the if statement. There is a chance that code block will not be executed, resulting in the interpreter not knowing what to do when it reaches the return statement without having \"all_todos\" defined. You can always define it and assign it a default value before the if statement.\n"
] |
[
0
] |
[] |
[] |
[
"backend",
"flask",
"flask_sqlalchemy",
"python",
"python_3.x"
] |
stackoverflow_0074596527_backend_flask_flask_sqlalchemy_python_python_3.x.txt
|
Q:
lxml xpath exponential performance behavior
I'm trying to use xpath to query a large html with multiple tables and only extract a few tables that contain a specific pattern in one of the cells. I'm running into time related challenges.
I've tried to minimize by issue as much as possible.
code setup: - creates 10 (300x15) tables with random values between 0-100
import pandas as pd
import numpy as np
dataframes = [pd.DataFrame(np.random.randint(0,100, (300, 15)), columns=[f"Col-{i}" for i in range(15)]) for k in range(10)]
html_strings = [df.to_html() for df in dataframes]
combined_html = '\n'.join(html_strings)
source_html = f'<html><body>{combined_html}</body></html>'
code execution: I want to extract all tables that have the value "80" in them (in this case it will be all 10 tables)
from lxml import etree
root = etree.fromstring(source_html.encode())
PAT = '80' # this should result in returning all 10 tables as 80 will definitely be there in all of them (pandas index)
# method 1: query directly using xpath - this takes a long time to run - and this seems to exhibit exponential time behavior
xpath_query = "//table//*[text() = '{PAT}']/ancestor::table"
tables = root.xpath(xpath_query)
# method 2: this runs in under a second. first get all the tables and then run the same xpath expression within the table context
all_tables = root.xpath('//table')
table_xpath_individual = ".//*[text() = '{PAT}']/ancestor::table"
selected_tables = [table for table in all_tables if table.xpath(table_xpath_individual)]
method 1 takes 40-50s to finish
method 2 takes <1s
I'm not sure whether it's the xpath expression in method 1 that's problematic or it's an lxml issue here. I've switched to using method 2 for now - but unsure whether it's a 100% equivalent behavior
A:
I don't know if this is relevant (but I suspect so). You can simplify these XPaths by eliminating the //* step and the trailing /ancestor::table step.
//table[descendant::text() = '{PAT}']
Note that in your problematic XPath, for each table you will find every descendant element whose text is 80 (there might be many within a table) and for each one, return all of that element's table ancestors (again, because in theory there might be more than one if you had a table containing a table, the XPath processor is going to have to laboriously traverse all those ancestor paths). That will return a potentially large number of results which the XPath processor will then have to deduplicate, so that it doesn't return multiple instances of any given table (an XPath 1.0 nodeset is guaranteed not to contain duplicates).
|
lxml xpath exponential performance behavior
|
I'm trying to use xpath to query a large html with multiple tables and only extract a few tables that contain a specific pattern in one of the cells. I'm running into time related challenges.
I've tried to minimize by issue as much as possible.
code setup: - creates 10 (300x15) tables with random values between 0-100
import pandas as pd
import numpy as np
dataframes = [pd.DataFrame(np.random.randint(0,100, (300, 15)), columns=[f"Col-{i}" for i in range(15)]) for k in range(10)]
html_strings = [df.to_html() for df in dataframes]
combined_html = '\n'.join(html_strings)
source_html = f'<html><body>{combined_html}</body></html>'
code execution: I want to extract all tables that have the value "80" in them (in this case it will be all 10 tables)
from lxml import etree
root = etree.fromstring(source_html.encode())
PAT = '80' # this should result in returning all 10 tables as 80 will definitely be there in all of them (pandas index)
# method 1: query directly using xpath - this takes a long time to run - and this seems to exhibit exponential time behavior
xpath_query = "//table//*[text() = '{PAT}']/ancestor::table"
tables = root.xpath(xpath_query)
# method 2: this runs in under a second. first get all the tables and then run the same xpath expression within the table context
all_tables = root.xpath('//table')
table_xpath_individual = ".//*[text() = '{PAT}']/ancestor::table"
selected_tables = [table for table in all_tables if table.xpath(table_xpath_individual)]
method 1 takes 40-50s to finish
method 2 takes <1s
I'm not sure whether it's the xpath expression in method 1 that's problematic or it's an lxml issue here. I've switched to using method 2 for now - but unsure whether it's a 100% equivalent behavior
|
[
"I don't know if this is relevant (but I suspect so). You can simplify these XPaths by eliminating the //* step and the trailing /ancestor::table step.\n//table[descendant::text() = '{PAT}']\n\nNote that in your problematic XPath, for each table you will find every descendant element whose text is 80 (there might be many within a table) and for each one, return all of that element's table ancestors (again, because in theory there might be more than one if you had a table containing a table, the XPath processor is going to have to laboriously traverse all those ancestor paths). That will return a potentially large number of results which the XPath processor will then have to deduplicate, so that it doesn't return multiple instances of any given table (an XPath 1.0 nodeset is guaranteed not to contain duplicates).\n"
] |
[
2
] |
[] |
[] |
[
"lxml",
"pandas",
"parsing",
"python",
"xpath"
] |
stackoverflow_0074595362_lxml_pandas_parsing_python_xpath.txt
|
Q:
How to plot 4 figures per page with pdfpages in matplotlib?
I have the code below which produces the output I want.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.style.use('ggplot')
%matplotlib inline
data = dict({'Variable_Grouping':['Type_A', 'Type_A', 'Type_A', 'Type_C', 'Type_C', 'Type_C', 'Type_C', 'Type_D', 'Type_D', 'Type_E', 'Type_E', 'Type_E', 'Type_H', 'Type_H'], 'Variable':['a1', 'a2', 'a3', 'c1', 'c2', 'c3', 'c4', 'd1', 'd2', 'e1', 'e2', 'e3', 'h1', 'h2'], 'Count':[5, 3, 8, 4, 3, 9, 5, 3, 8, 5, 3, 8, 5, 3],'Percent':[0.0625, 0.125, 0.4375, 0.0, 0.125, 0.5, 0.02, 0.125, 0.03, 0.0625, 0.05, 0.44, 0.07, 0.023]})
to_plot = pd.DataFrame(data)
g = sns.FacetGrid(to_plot, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_, subdata) in zip(g.axes, to_plot.groupby('Variable_Grouping')):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
plt.show();
Now I am using matplotlib.backends.backend_pdf to plot the figures in pdf. I want 4 figures per page.
with PdfPages('Analysis.pdf') as pdf:
g = sns.FacetGrid(to_plot, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_, subdata) in zip(g.axes, to_plot.groupby('Variable_Grouping')):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
pdf.savefig(bbox_inches = 'tight')
plt.close();
The code above gives me all the plots in a single page as expected.
def grouper(iterable, n, fillvalue=None):
from itertools import zip_longest
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
if len(to_plot['Variable_Grouping'].unique()) < 4:
N_plots_per_page =len(to_plot['Variable_Grouping'].unique())
elif len(to_plot['Variable_Grouping'].unique()) >= 4:
N_plots_per_page = 4
with PdfPages('Analysis.pdf') as pdf:
for cols in grouper(to_plot['Variable_Grouping'].unique(), N_plots_per_page):
g = sns.FacetGrid(to_plot, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_, subdata) in zip(g.axes, to_plot.groupby('Variable_Grouping')):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
pdf.savefig(bbox_inches = 'tight')
plt.show()
plt.close();
In the code above I have tried using the grouper function (https://docs.python.org/3/library/itertools.html#itertools-recipes). This was also mentioned in Export huge seaborn chart into pdf with multiple pages and this repeats all the graphs in all the pages.
I wanted to enquire if there is an easy way to get 4 graphs per page or what's wrong with the above code I used using the grouper function which is repeating the graphs. Any help will be appreciated. Thanks.
A:
The problem is, even you try to get the number of plots per page, you take the whole data inside the loop to plot with to_plot. You need to filter your to_plot with the cols you get by your grouper and your code will work.
The only changes I made is create the variable data_per_page and replace that with to_plot inside of sns.FaceGrid and in for ax, (_,subdata) in zip(...).
with PdfPages('Analysis.pdf') as pdf:
for cols in grouper(to_plot['Variable_Grouping'].unique(), N_plots_per_page):
data_per_page = to_plot.loc[to_plot['Variable_Grouping'].isin(cols)]
g = sns.FacetGrid(data_per_page, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_,subdata) in zip(g.axes, data_per_page.groupby(['Variable_Grouping'])):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
pdf.savefig(bbox_inches='tight')
plt.show()
plt.close()
As a result I get a pdf with 2 pages, on the first there are 4 plots, and on the second only 1.
|
How to plot 4 figures per page with pdfpages in matplotlib?
|
I have the code below which produces the output I want.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.style.use('ggplot')
%matplotlib inline
data = dict({'Variable_Grouping':['Type_A', 'Type_A', 'Type_A', 'Type_C', 'Type_C', 'Type_C', 'Type_C', 'Type_D', 'Type_D', 'Type_E', 'Type_E', 'Type_E', 'Type_H', 'Type_H'], 'Variable':['a1', 'a2', 'a3', 'c1', 'c2', 'c3', 'c4', 'd1', 'd2', 'e1', 'e2', 'e3', 'h1', 'h2'], 'Count':[5, 3, 8, 4, 3, 9, 5, 3, 8, 5, 3, 8, 5, 3],'Percent':[0.0625, 0.125, 0.4375, 0.0, 0.125, 0.5, 0.02, 0.125, 0.03, 0.0625, 0.05, 0.44, 0.07, 0.023]})
to_plot = pd.DataFrame(data)
g = sns.FacetGrid(to_plot, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_, subdata) in zip(g.axes, to_plot.groupby('Variable_Grouping')):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
plt.show();
Now I am using matplotlib.backends.backend_pdf to plot the figures in pdf. I want 4 figures per page.
with PdfPages('Analysis.pdf') as pdf:
g = sns.FacetGrid(to_plot, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_, subdata) in zip(g.axes, to_plot.groupby('Variable_Grouping')):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
pdf.savefig(bbox_inches = 'tight')
plt.close();
The code above gives me all the plots in a single page as expected.
def grouper(iterable, n, fillvalue=None):
from itertools import zip_longest
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
if len(to_plot['Variable_Grouping'].unique()) < 4:
N_plots_per_page =len(to_plot['Variable_Grouping'].unique())
elif len(to_plot['Variable_Grouping'].unique()) >= 4:
N_plots_per_page = 4
with PdfPages('Analysis.pdf') as pdf:
for cols in grouper(to_plot['Variable_Grouping'].unique(), N_plots_per_page):
g = sns.FacetGrid(to_plot, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)
g=g.map(plt.bar, "Variable","Count").add_legend()
for ax, (_, subdata) in zip(g.axes, to_plot.groupby('Variable_Grouping')):
ax2=ax.twinx()
subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')
ax2.set_ylabel('Percent')
ax2.grid(False)
for ax in g.axes.flatten():
ax.tick_params(labelbottom=True, labelrotation = 90)
g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)
g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)
pdf.savefig(bbox_inches = 'tight')
plt.show()
plt.close();
In the code above I have tried using the grouper function (https://docs.python.org/3/library/itertools.html#itertools-recipes). This was also mentioned in Export huge seaborn chart into pdf with multiple pages and this repeats all the graphs in all the pages.
I wanted to enquire if there is an easy way to get 4 graphs per page or what's wrong with the above code I used using the grouper function which is repeating the graphs. Any help will be appreciated. Thanks.
|
[
"The problem is, even you try to get the number of plots per page, you take the whole data inside the loop to plot with to_plot. You need to filter your to_plot with the cols you get by your grouper and your code will work.\nThe only changes I made is create the variable data_per_page and replace that with to_plot inside of sns.FaceGrid and in for ax, (_,subdata) in zip(...).\nwith PdfPages('Analysis.pdf') as pdf:\n for cols in grouper(to_plot['Variable_Grouping'].unique(), N_plots_per_page):\n \n data_per_page = to_plot.loc[to_plot['Variable_Grouping'].isin(cols)] \n \n g = sns.FacetGrid(data_per_page, col='Variable_Grouping', col_wrap = 2, sharex=False, sharey = False, height = 5, aspect = 1, margin_titles=True)\n g=g.map(plt.bar, \"Variable\",\"Count\").add_legend()\n for ax, (_,subdata) in zip(g.axes, data_per_page.groupby(['Variable_Grouping'])):\n\n ax2=ax.twinx()\n subdata.plot(x='Variable',y='Percent', ax = ax2, legend=True, color='g', label = 'Percent')\n ax2.set_ylabel('Percent')\n ax2.grid(False)\n for ax in g.axes.flatten():\n ax.tick_params(labelbottom=True, labelrotation = 90)\n g.fig.suptitle('Analysis', fontsize=16, fontweight = 'demibold', y = 1.02)\n g.fig.subplots_adjust(hspace=0.3, wspace=0.7, right = 0.9)\n pdf.savefig(bbox_inches='tight')\n plt.show()\n plt.close()\n\nAs a result I get a pdf with 2 pages, on the first there are 4 plots, and on the second only 1.\n"
] |
[
1
] |
[] |
[] |
[
"matplotlib",
"pdfpages",
"python"
] |
stackoverflow_0074593895_matplotlib_pdfpages_python.txt
|
Q:
opening a .txt file
first, i'd like to mention that im using python via visual studio. not sure if this information will
be relevant but this is my first time using file input so i'm not sure
basically, i have a .txt file located in the same location as my .py file. however, when i go to access it, i get an error 'FileNotFoundError: [Errno 2] No such file or directory'
is there a way to make it work or a different IDE i should use?
I tried reading a .txt file from the same location as my .py file. however, i keep getting the error 'FileNotFoundError: [Errno 2] No such file or directory'
A:
Take into account that the script might not be running from the same path where your python script is and most probably your are not specifying the exact path of the file.
If your file is located in the same directory where your python sript is you can use the pathlib library this way to get your script to work:
import pathlib
# Some custom code
# find your current script path
current_script_path = pathlib.Path(__file__).parent
my_file_name = "my_file_name.txt"
# Specify the file path relative to your script's path
my_file_path = current_script_path / my_file_name
with open(my_file_path, "r+") as f:
print(f.read())
|
opening a .txt file
|
first, i'd like to mention that im using python via visual studio. not sure if this information will
be relevant but this is my first time using file input so i'm not sure
basically, i have a .txt file located in the same location as my .py file. however, when i go to access it, i get an error 'FileNotFoundError: [Errno 2] No such file or directory'
is there a way to make it work or a different IDE i should use?
I tried reading a .txt file from the same location as my .py file. however, i keep getting the error 'FileNotFoundError: [Errno 2] No such file or directory'
|
[
"Take into account that the script might not be running from the same path where your python script is and most probably your are not specifying the exact path of the file.\nIf your file is located in the same directory where your python sript is you can use the pathlib library this way to get your script to work:\nimport pathlib\n\n# Some custom code\n\n# find your current script path\ncurrent_script_path = pathlib.Path(__file__).parent\n\nmy_file_name = \"my_file_name.txt\"\n\n# Specify the file path relative to your script's path\nmy_file_path = current_script_path / my_file_name\n\nwith open(my_file_path, \"r+\") as f:\n print(f.read())\n\n\n"
] |
[
0
] |
[] |
[] |
[
"file",
"input",
"python"
] |
stackoverflow_0074596414_file_input_python.txt
|
Q:
How can I give a user input into terminal after running a terminal command in python?
I have a python script that runs a command in terminal using the subprocess module and this command runs another script that asks for a user input. How can I give a user input to terminal from my original python script?
Example:
contents of introduction.sh below
#!/bin/bash
**# Ask the user for their name**
echo Hello, who am I talking to?
read varname
echo It\'s nice to meet you $varname
running introduction.sh would output the following:
user@bash ./introduction.sh
Hello, who am I talking to?
Ryan
It's nice to meet you Ryan
My python script runs introduction.sh in terminal perfectly fine. What I can't figure out how to do is to run introduction.sh with a name such as Ryan as a user input all from my python script.
I tried using the os module to call introduction.sh and then using os again to give the user input as two separate lines. This strategy runs introduction.sh perfectly fine but treats my second input as an undefined variable and does nothing.
My current script testing.py is below.
import subprocess
subprocess.run(["python3", "testing.py"], shell=True, capture_output=True)
subprocess.run(["Ryan"], shell=True, capture_output=True)
print('done')
A:
There are a number of ways of doing this with the subprocess package. Here's a simple way to do so:
import subprocess
process = subprocess.Popen('/tmp/introduction.sh', stdin=subprocess.PIPE)
process.communicate("George".encode())
Result:
Hello, who am I talking to?
It's nice to meet you George
|
How can I give a user input into terminal after running a terminal command in python?
|
I have a python script that runs a command in terminal using the subprocess module and this command runs another script that asks for a user input. How can I give a user input to terminal from my original python script?
Example:
contents of introduction.sh below
#!/bin/bash
**# Ask the user for their name**
echo Hello, who am I talking to?
read varname
echo It\'s nice to meet you $varname
running introduction.sh would output the following:
user@bash ./introduction.sh
Hello, who am I talking to?
Ryan
It's nice to meet you Ryan
My python script runs introduction.sh in terminal perfectly fine. What I can't figure out how to do is to run introduction.sh with a name such as Ryan as a user input all from my python script.
I tried using the os module to call introduction.sh and then using os again to give the user input as two separate lines. This strategy runs introduction.sh perfectly fine but treats my second input as an undefined variable and does nothing.
My current script testing.py is below.
import subprocess
subprocess.run(["python3", "testing.py"], shell=True, capture_output=True)
subprocess.run(["Ryan"], shell=True, capture_output=True)
print('done')
|
[
"There are a number of ways of doing this with the subprocess package. Here's a simple way to do so:\nimport subprocess\n\nprocess = subprocess.Popen('/tmp/introduction.sh', stdin=subprocess.PIPE)\nprocess.communicate(\"George\".encode())\n\nResult:\nHello, who am I talking to?\nIt's nice to meet you George\n\n"
] |
[
0
] |
[] |
[] |
[
"python",
"terminal",
"user_input"
] |
stackoverflow_0074595522_python_terminal_user_input.txt
|
Q:
How to find the closet value to input provided from list
I am stuck at this point. Need to find the closet value near to my input
mylist = [1,8,4,88,100]
inp=5
My output:
4
I now using for loop to but need some more efficient way to handle
As theinp = 5 ->The nearest value to my input is 4. So my output is 4
A:
Get the absolute diff, from there you will get the nearest value. Then get the element from the index. enumerate gives you the index.
mylist = [1,8,4,88,100]
inp=5
closest_val = mylist[min([abs(i-inp), index] for index, i in enumerate(mylist))[-1]] #4
A:
Here is a solution using a comprehension:
numbers = [1, 8, 4, 88, 100]
n = 5
distances = [abs(n - e) for e in numbers]
closest = numbers[distances.index(min(distances))]
Which leaves closest set with:
4
One thing to have in mind is that this method will give you the first closest number according to the order the numbers appear in the list. In other words, if you have two numbers with the same distance, the first one in the list is presented.
Something like this:
>>> numbers = [1, 8, 6, 4, 88, 100]
>>> n = 5
>>> distances = [abs(n - e) for e in numbers]
>>> closest = numbers[distances.index(min(distances))]
>>> closest
6
>>> numbers = [1, 8, 4, 6, 88, 100]
>>> n = 5
>>> distances = [abs(n - e) for e in numbers]
>>> closest = numbers[distances.index(min(distances))]
>>> closest
4
A:
Two possibilities arise.
If the list is sorted, then you can use binary search to reach to the closest values. You can then compare values to the left and right and the one with minimum difference is your desired result.
If the list is not sorted, then you'll have to scan the entire list and keep track of the one that gives you least result. Then the one that gives you minimum absolute difference is your answer.
|
How to find the closet value to input provided from list
|
I am stuck at this point. Need to find the closet value near to my input
mylist = [1,8,4,88,100]
inp=5
My output:
4
I now using for loop to but need some more efficient way to handle
As theinp = 5 ->The nearest value to my input is 4. So my output is 4
|
[
"Get the absolute diff, from there you will get the nearest value. Then get the element from the index. enumerate gives you the index.\nmylist = [1,8,4,88,100]\n\ninp=5\n\nclosest_val = mylist[min([abs(i-inp), index] for index, i in enumerate(mylist))[-1]] #4\n\n",
"Here is a solution using a comprehension:\nnumbers = [1, 8, 4, 88, 100]\nn = 5\ndistances = [abs(n - e) for e in numbers]\nclosest = numbers[distances.index(min(distances))]\n\nWhich leaves closest set with:\n4\n\nOne thing to have in mind is that this method will give you the first closest number according to the order the numbers appear in the list. In other words, if you have two numbers with the same distance, the first one in the list is presented.\nSomething like this:\n>>> numbers = [1, 8, 6, 4, 88, 100]\n>>> n = 5\n>>> distances = [abs(n - e) for e in numbers]\n>>> closest = numbers[distances.index(min(distances))]\n>>> closest\n6\n\n>>> numbers = [1, 8, 4, 6, 88, 100]\n>>> n = 5\n>>> distances = [abs(n - e) for e in numbers]\n>>> closest = numbers[distances.index(min(distances))]\n>>> closest\n4 \n\n",
"Two possibilities arise.\n\nIf the list is sorted, then you can use binary search to reach to the closest values. You can then compare values to the left and right and the one with minimum difference is your desired result.\nIf the list is not sorted, then you'll have to scan the entire list and keep track of the one that gives you least result. Then the one that gives you minimum absolute difference is your answer.\n\n"
] |
[
1,
1,
0
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074596525_python.txt
|
Q:
NotFittedError: All estimators failed to fit for RandomizedSearchCV
I am trying to use RandomizedSearchCV for a classification problem (2 classes). The dataset can be downloaded from this Kaggle site. Following is the code showing the error
# Load packages
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import make_scorer, accuracy_score
from scipy.stats import uniform
import pandas as pd
import numpy as np
import time
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_columns", None)
# Make scorer: accuracy
acc_score = make_scorer(accuracy_score)
# Load dataset
trainSet = pd.read_csv('../input/train.csv')
testSet = pd.read_csv('../input/test.csv')
submitSet = pd.read_csv('../input/sample_submission.csv')
trainSet.head()
# Remove not used variables
train = trainSet.drop(columns=['Name', 'Ticket'])
train['Cabin_letter'] = train['Cabin'].str[0:1]
train['Cabin_no'] = train['Cabin'].str[1:]
train.head()
# Feature generation: training data
train = trainSet.drop(columns=['Name', 'Ticket', 'Cabin'])
train = train.dropna(axis=0)
train = pd.get_dummies(train)
train.head()
# train validation split
X_train, X_val, y_train, y_val = train_test_split(train.drop(columns=['PassengerId','Survived'], axis=0),
train['Survived'],
test_size=0.2, random_state=111,
stratify=train['Survived'])
# RandomizedSearhCV
param_rand = {'max_depth':uniform(3,10),
'max_features':uniform(0.8,1),
'learning_rate':uniform(0.01,1),
'n_estimators':uniform(80,150),
'subsample':uniform(0.8,1)}
rand = RandomizedSearchCV(estimator=GradientBoostingClassifier(), param_distributions=param_rand, scoring=acc_score, cv=5)
rand.fit(X_train.iloc[1:100,], y_train.iloc[1:100,])
Error
---------------------------------------------------------------------------
NotFittedError Traceback (most recent call last)
Input In [15], in <cell line: 10>()
2 param_rand = {'max_depth':uniform(3,10),
3 'max_features':uniform(0.8,1),
4 'learning_rate':uniform(0.01,1),
5 'n_estimators':uniform(80,150),
6 'subsample':uniform(0.8,1)}
8 rand = RandomizedSearchCV(estimator=GradientBoostingClassifier(), param_distributions=param_rand, scoring=acc_score, cv=5)
---> 10 rand.fit(X_train.iloc[1:100,], y_train.iloc[1:100,])
File ~\anaconda3\lib\site-packages\sklearn\utils\validation.py:63, in _deprecate_positional_args.<locals>._inner_deprecate_positional_args.<locals>.inner_f(*args, **kwargs)
61 extra_args = len(args) - len(all_args)
62 if extra_args <= 0:
---> 63 return f(*args, **kwargs)
65 # extra_args > 0
66 args_msg = ['{}={}'.format(name, arg)
67 for name, arg in zip(kwonly_args[:extra_args],
68 args[-extra_args:])]
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:841, in BaseSearchCV.fit(self, X, y, groups, **fit_params)
835 results = self._format_results(
836 all_candidate_params, n_splits, all_out,
837 all_more_results)
839 return results
--> 841 self._run_search(evaluate_candidates)
843 # multimetric is determined here because in the case of a callable
844 # self.scoring the return type is only known after calling
845 first_test_score = all_out[0]['test_scores']
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:1633, in RandomizedSearchCV._run_search(self, evaluate_candidates)
1631 def _run_search(self, evaluate_candidates):
1632 """Search n_iter candidates from param_distributions"""
-> 1633 evaluate_candidates(ParameterSampler(
1634 self.param_distributions, self.n_iter,
1635 random_state=self.random_state))
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:827, in BaseSearchCV.fit.<locals>.evaluate_candidates(candidate_params, cv, more_results)
822 # For callable self.scoring, the return type is only know after
823 # calling. If the return type is a dictionary, the error scores
824 # can now be inserted with the correct key. The type checking
825 # of out will be done in `_insert_error_scores`.
826 if callable(self.scoring):
--> 827 _insert_error_scores(out, self.error_score)
828 all_candidate_params.extend(candidate_params)
829 all_out.extend(out)
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_validation.py:301, in _insert_error_scores(results, error_score)
298 successful_score = result["test_scores"]
300 if successful_score is None:
--> 301 raise NotFittedError("All estimators failed to fit")
303 if isinstance(successful_score, dict):
304 formatted_error = {name: error_score for name in successful_score}
NotFittedError: All estimators failed to fit
A:
It is due to the param distributions you set. uniform(x,y) will generate float values, whereas you are using it for some params that require ints.
param_rand = {'max_depth':(3,10),
'max_features':(2,4),
'learning_rate':uniform(0.01,1),
'n_estimators':(80,150),
'subsample':uniform(0.8,1)}
Changing to this should work.
|
NotFittedError: All estimators failed to fit for RandomizedSearchCV
|
I am trying to use RandomizedSearchCV for a classification problem (2 classes). The dataset can be downloaded from this Kaggle site. Following is the code showing the error
# Load packages
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import make_scorer, accuracy_score
from scipy.stats import uniform
import pandas as pd
import numpy as np
import time
import warnings
warnings.filterwarnings('ignore')
pd.set_option("display.max_columns", None)
# Make scorer: accuracy
acc_score = make_scorer(accuracy_score)
# Load dataset
trainSet = pd.read_csv('../input/train.csv')
testSet = pd.read_csv('../input/test.csv')
submitSet = pd.read_csv('../input/sample_submission.csv')
trainSet.head()
# Remove not used variables
train = trainSet.drop(columns=['Name', 'Ticket'])
train['Cabin_letter'] = train['Cabin'].str[0:1]
train['Cabin_no'] = train['Cabin'].str[1:]
train.head()
# Feature generation: training data
train = trainSet.drop(columns=['Name', 'Ticket', 'Cabin'])
train = train.dropna(axis=0)
train = pd.get_dummies(train)
train.head()
# train validation split
X_train, X_val, y_train, y_val = train_test_split(train.drop(columns=['PassengerId','Survived'], axis=0),
train['Survived'],
test_size=0.2, random_state=111,
stratify=train['Survived'])
# RandomizedSearhCV
param_rand = {'max_depth':uniform(3,10),
'max_features':uniform(0.8,1),
'learning_rate':uniform(0.01,1),
'n_estimators':uniform(80,150),
'subsample':uniform(0.8,1)}
rand = RandomizedSearchCV(estimator=GradientBoostingClassifier(), param_distributions=param_rand, scoring=acc_score, cv=5)
rand.fit(X_train.iloc[1:100,], y_train.iloc[1:100,])
Error
---------------------------------------------------------------------------
NotFittedError Traceback (most recent call last)
Input In [15], in <cell line: 10>()
2 param_rand = {'max_depth':uniform(3,10),
3 'max_features':uniform(0.8,1),
4 'learning_rate':uniform(0.01,1),
5 'n_estimators':uniform(80,150),
6 'subsample':uniform(0.8,1)}
8 rand = RandomizedSearchCV(estimator=GradientBoostingClassifier(), param_distributions=param_rand, scoring=acc_score, cv=5)
---> 10 rand.fit(X_train.iloc[1:100,], y_train.iloc[1:100,])
File ~\anaconda3\lib\site-packages\sklearn\utils\validation.py:63, in _deprecate_positional_args.<locals>._inner_deprecate_positional_args.<locals>.inner_f(*args, **kwargs)
61 extra_args = len(args) - len(all_args)
62 if extra_args <= 0:
---> 63 return f(*args, **kwargs)
65 # extra_args > 0
66 args_msg = ['{}={}'.format(name, arg)
67 for name, arg in zip(kwonly_args[:extra_args],
68 args[-extra_args:])]
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:841, in BaseSearchCV.fit(self, X, y, groups, **fit_params)
835 results = self._format_results(
836 all_candidate_params, n_splits, all_out,
837 all_more_results)
839 return results
--> 841 self._run_search(evaluate_candidates)
843 # multimetric is determined here because in the case of a callable
844 # self.scoring the return type is only known after calling
845 first_test_score = all_out[0]['test_scores']
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:1633, in RandomizedSearchCV._run_search(self, evaluate_candidates)
1631 def _run_search(self, evaluate_candidates):
1632 """Search n_iter candidates from param_distributions"""
-> 1633 evaluate_candidates(ParameterSampler(
1634 self.param_distributions, self.n_iter,
1635 random_state=self.random_state))
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:827, in BaseSearchCV.fit.<locals>.evaluate_candidates(candidate_params, cv, more_results)
822 # For callable self.scoring, the return type is only know after
823 # calling. If the return type is a dictionary, the error scores
824 # can now be inserted with the correct key. The type checking
825 # of out will be done in `_insert_error_scores`.
826 if callable(self.scoring):
--> 827 _insert_error_scores(out, self.error_score)
828 all_candidate_params.extend(candidate_params)
829 all_out.extend(out)
File ~\anaconda3\lib\site-packages\sklearn\model_selection\_validation.py:301, in _insert_error_scores(results, error_score)
298 successful_score = result["test_scores"]
300 if successful_score is None:
--> 301 raise NotFittedError("All estimators failed to fit")
303 if isinstance(successful_score, dict):
304 formatted_error = {name: error_score for name in successful_score}
NotFittedError: All estimators failed to fit
|
[
"It is due to the param distributions you set. uniform(x,y) will generate float values, whereas you are using it for some params that require ints.\nparam_rand = {'max_depth':(3,10),\n 'max_features':(2,4),\n 'learning_rate':uniform(0.01,1),\n 'n_estimators':(80,150),\n 'subsample':uniform(0.8,1)}\n\nChanging to this should work.\n"
] |
[
1
] |
[] |
[] |
[
"machine_learning",
"python",
"scikit_learn"
] |
stackoverflow_0074596455_machine_learning_python_scikit_learn.txt
|
Q:
Extracting multiple strings from a Pandas row (single cell) into columns, with specific starting and ending text
I have a dataframe df where one column 'Images' contains a bunch of HTML strings in each row from which I would like to extract URLs, that have a specific Start and End characters. Ideally they would then be turned into columns for each URL extracted.
df example:
df = pd.DataFrame({
'Description': ['USB Emergency Light Torch', 'USB RC LED DESKLAMP DL013', 'Green torch light with strap', 'Sensor Night Light W Switch A78'],
'SKU': ['9023578-001001', '9023464-001001', '9023463-001001', '9023290-001001'],
'Images': ['[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023578-temp.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023578-3.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023578.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-temp.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-3.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023578-temp.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]',
'[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023464-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023464-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023464.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023464-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023464.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023464-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023464-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]',
'[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023463-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023463-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023463.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023463-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023463.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023463-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023463-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]',
'[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023290-2_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023290.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023290-1_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023290-2_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023290-1_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023290.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023290-2_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]']
})
Which looks like this:
Start: https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d
End: .jpg
The other URLs and text can be ignored.
Desired Output (one URL per column, added to the end of the df; there might be more than 3 results, so the number of columns added is variable):
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-temp.jpg
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-3.jpg
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791.jpg
Here's an image example of the desired output in case:
So far I tried this code below (it only extracts to a single cell with "," as a joiner, haven't figured the column splitting yet) but the output is a seemingly empty cell with just '' contained within:
df['img_lines'] = df['Images'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\.jpg$').str.join(",")
A:
Example
s1 = pd.Series(['aaa123.jpg', 'bca234.jpg', 'aaa425.gif', 'aaa234.jpg'])
s1
0 aaa123.jpg
1 bca234.jpg
2 aaa425.gif
3 aaa234.jpg
dtype: object
Code
if you want extract from 'aa' to '.jpg'
s1.str.extract('(aa.+.jpg)').dropna()[0]
result:
0 aaa123.jpg
3 aaa234.jpg
Name: 0, dtype: object
in your example
start = 'https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d'
pat = '({}[^ ]+.jpg)'.format(start)
result = df['Images'].str.split('Images-src').explode().str.extract(pat).dropna()
result:
0
0 https://www.website.com.my/media/catalog/produ...
0 https://www.website.com.my/media/catalog/produ...
0 https://www.website.com.my/media/catalog/produ...
1 https://www.website.com.my/media/catalog/produ...
1 https://www.website.com.my/media/catalog/produ...
1 https://www.website.com.my/media/catalog/produ...
2 https://www.website.com.my/media/catalog/produ...
2 https://www.website.com.my/media/catalog/produ...
2 https://www.website.com.my/media/catalog/produ...
3 https://www.website.com.my/media/catalog/produ...
3 https://www.website.com.my/media/catalog/produ...
3 https://www.website.com.my/media/catalog/produ...
make result to dataframe
result.groupby(level=0)[0].agg(list).apply(lambda x: pd.Series(x))
output:
0 1 2
0 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...
1 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...
2 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...
3 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...
concat output to your df and rename
A:
It looks like you are indexing wrong column of the DataFrame. Instead of df['Images'], you should do - df['Images-src'], as the links are in the latter column.
df['Images-src'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\.jpg$').str.join(",")
EDIT:
Might not be the most efficient way. But, one thing you can do is convert the HTML string to python object using json.loads, and then create a new DataFrame and then filter the results.
import json #Importing json module
for i in df['Images']: #Loops through the string
df1 = pd.DataFrame(json.loads(i)) #Creates a new DataFrame with prev string as python object
print(df1['Images-src'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\.jpg$').str.join(","))
O/P:
0
1
2 https://www.website.com.my/media/catalog/produ...
3 https://www.website.com.my/media/catalog/produ...
4 https://www.website.com.my/media/catalog/produ...
5
6
7
8
9
10
11
12
Name: Images-src, dtype: object
If you want to print like your desired output:
a = df1['Images-src'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\.jpg$').str.join(",")
for i in a:
if i != '':
print(i)
O/P:
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-temp.jpg
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-3.jpg
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791.jpg
I'll leave the logical part to you for implementing it.
A:
This can be done without regex using native string methods along with other built-in python modules: json and itemgetter (this one's not necessary but useful for convenience). In short, Images column contains json objects, so convert each item to a python list and simply search for the relevant URLs in it.
json.loads call creates a list of dictionaries and since you only care about the URLs under the 'Images-src' key, get them using itemgetter() first. Then among these URLs, see filter the ones that starts and ends with the specific pattern you want.
import json
from operator import itemgetter
df[['URL1','URL2','URL3']] = [[url for url in map(itemgetter('Images-src'), lst) if url.startswith('https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d') and url.endswith('.jpg')] for lst in df['Images'].map(json.loads)]
If you insist on regex, re.match might be useful in the last step. The idea is to see if a string starts with the pattern you want; has whatever and ends with .jpg:
import re
df[['URL1','URL2','URL3']] = [[url for url in map(itemgetter('Images-src'), lst) if re.match(r'https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\.jpg$', url)] for lst in df['Images'].map(json.loads)]
If there are variable number of columns, it is best to cast to a dataframe first and join() to df later to avoid a ValueError (see this post):
df = df.join(pd.DataFrame([[url for url in map(itemgetter('Images-src'), lst) if url.startswith('https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d') and url.endswith('.jpg')] for lst in df['Images'].map(json.loads)]).add_prefix('URL'))
|
Extracting multiple strings from a Pandas row (single cell) into columns, with specific starting and ending text
|
I have a dataframe df where one column 'Images' contains a bunch of HTML strings in each row from which I would like to extract URLs, that have a specific Start and End characters. Ideally they would then be turned into columns for each URL extracted.
df example:
df = pd.DataFrame({
'Description': ['USB Emergency Light Torch', 'USB RC LED DESKLAMP DL013', 'Green torch light with strap', 'Sensor Night Light W Switch A78'],
'SKU': ['9023578-001001', '9023464-001001', '9023463-001001', '9023290-001001'],
'Images': ['[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023578-temp.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023578-3.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023578.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-temp.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023578-3.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023578-temp.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]',
'[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023464-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023464-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023464.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023464-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023464.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023464-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023464-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]',
'[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023463-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023463-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023463.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023463-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023463.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023463-1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023463-2.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]',
'[{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/logo/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023290-2_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023290.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/9/0/9023290-1_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023290-2_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023290-1_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/f0229e02574c793c147c08297c074a46/9/0/9023290.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/catalog/product/cache/4c186adb30ce12db4dc6d068ea20241d/9/0/9023290-2_1.jpg"},{"Images":"","Images-src":"https://www.website.com.my/media/wysiwyg/mageplus/_images/stores/3/logo-b2b_1_-min.png"},{"Images":"","Images-src":"https://image.useinsider.com/default/action-builder/instant-purchase.png"}]']
})
Which looks like this:
Start: https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d
End: .jpg
The other URLs and text can be ignored.
Desired Output (one URL per column, added to the end of the df; there might be more than 3 results, so the number of columns added is variable):
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-temp.jpg
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-3.jpg
https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791.jpg
Here's an image example of the desired output in case:
So far I tried this code below (it only extracts to a single cell with "," as a joiner, haven't figured the column splitting yet) but the output is a seemingly empty cell with just '' contained within:
df['img_lines'] = df['Images'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\.jpg$').str.join(",")
|
[
"Example\ns1 = pd.Series(['aaa123.jpg', 'bca234.jpg', 'aaa425.gif', 'aaa234.jpg'])\n\ns1\n0 aaa123.jpg\n1 bca234.jpg\n2 aaa425.gif\n3 aaa234.jpg\ndtype: object\n\nCode\nif you want extract from 'aa' to '.jpg'\ns1.str.extract('(aa.+.jpg)').dropna()[0]\n\nresult:\n0 aaa123.jpg\n3 aaa234.jpg\nName: 0, dtype: object\n\n\nin your example\nstart = 'https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d'\npat = '({}[^ ]+.jpg)'.format(start)\nresult = df['Images'].str.split('Images-src').explode().str.extract(pat).dropna()\n\nresult:\n 0\n0 https://www.website.com.my/media/catalog/produ...\n0 https://www.website.com.my/media/catalog/produ...\n0 https://www.website.com.my/media/catalog/produ...\n1 https://www.website.com.my/media/catalog/produ...\n1 https://www.website.com.my/media/catalog/produ...\n1 https://www.website.com.my/media/catalog/produ...\n2 https://www.website.com.my/media/catalog/produ...\n2 https://www.website.com.my/media/catalog/produ...\n2 https://www.website.com.my/media/catalog/produ...\n3 https://www.website.com.my/media/catalog/produ...\n3 https://www.website.com.my/media/catalog/produ...\n3 https://www.website.com.my/media/catalog/produ...\n\nmake result to dataframe\nresult.groupby(level=0)[0].agg(list).apply(lambda x: pd.Series(x))\n\noutput:\n 0 1 2\n0 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...\n1 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...\n2 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...\n3 https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ... https://www.website.com.my/media/catalog/produ...\n\nconcat output to your df and rename\n",
"It looks like you are indexing wrong column of the DataFrame. Instead of df['Images'], you should do - df['Images-src'], as the links are in the latter column.\ndf['Images-src'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\\.jpg$').str.join(\",\")\n\nEDIT:\nMight not be the most efficient way. But, one thing you can do is convert the HTML string to python object using json.loads, and then create a new DataFrame and then filter the results.\nimport json #Importing json module\n\nfor i in df['Images']: #Loops through the string\n df1 = pd.DataFrame(json.loads(i)) #Creates a new DataFrame with prev string as python object\n print(df1['Images-src'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\\.jpg$').str.join(\",\"))\n\nO/P:\n0 \n1 \n2 https://www.website.com.my/media/catalog/produ...\n3 https://www.website.com.my/media/catalog/produ...\n4 https://www.website.com.my/media/catalog/produ...\n5 \n6 \n7 \n8 \n9 \n10 \n11 \n12 \nName: Images-src, dtype: object\n\nIf you want to print like your desired output:\na = df1['Images-src'].str.findall('^https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\\.jpg$').str.join(\",\")\n\nfor i in a:\n if i != '':\n print(i)\n\nO/P:\nhttps://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-temp.jpg\nhttps://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791-3.jpg\nhttps://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d/8/9/8993791.jpg\n\nI'll leave the logical part to you for implementing it.\n",
"This can be done without regex using native string methods along with other built-in python modules: json and itemgetter (this one's not necessary but useful for convenience). In short, Images column contains json objects, so convert each item to a python list and simply search for the relevant URLs in it.\njson.loads call creates a list of dictionaries and since you only care about the URLs under the 'Images-src' key, get them using itemgetter() first. Then among these URLs, see filter the ones that starts and ends with the specific pattern you want.\nimport json\nfrom operator import itemgetter\ndf[['URL1','URL2','URL3']] = [[url for url in map(itemgetter('Images-src'), lst) if url.startswith('https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d') and url.endswith('.jpg')] for lst in df['Images'].map(json.loads)]\n\nIf you insist on regex, re.match might be useful in the last step. The idea is to see if a string starts with the pattern you want; has whatever and ends with .jpg:\nimport re\ndf[['URL1','URL2','URL3']] = [[url for url in map(itemgetter('Images-src'), lst) if re.match(r'https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d.*\\.jpg$', url)] for lst in df['Images'].map(json.loads)]\n\n\nIf there are variable number of columns, it is best to cast to a dataframe first and join() to df later to avoid a ValueError (see this post):\ndf = df.join(pd.DataFrame([[url for url in map(itemgetter('Images-src'), lst) if url.startswith('https://www.website.com.my/media/catalog/product/cache/3f354f4955006fba9bb013076742094d') and url.endswith('.jpg')] for lst in df['Images'].map(json.loads)]).add_prefix('URL'))\n\n"
] |
[
1,
1,
1
] |
[] |
[] |
[
"extract",
"pandas",
"python",
"regex"
] |
stackoverflow_0074595772_extract_pandas_python_regex.txt
|
Q:
Pytorch-Lightning ModelCheckpoint get paths of saved checkpoints
I am using PytorchLightning and beside others a ModelCheckpoint which saves models with a formated filename like `filename="model_{epoch}-{val_acc:.2f}"
In a process I want to load these checkpoints again, for simplicity lets say I want only the best via save_top_k=N.
As the filename is dynamic I wonder how can I retrieve the checkpoint easily is there a built in attribute or via the trainer that gives the saved checkpoints?
For example like
checkpoint_callback.get_top_k_paths()
I know I can do it with glob and model_dir but wondering if there is a one line solution built in somehwere.
A:
you can retrieve the best model path after training from the checkpoint
# retrieve the best checkpoint after training
checkpoint_callback = ModelCheckpoint(dirpath='my/path/')
trainer = Trainer(callbacks=[checkpoint_callback])
model = ...
trainer.fit(model)
checkpoint_callback.best_model_path
To find all the checkpoints you can get the list of files in the dirpath where the checkpoints are saved.
|
Pytorch-Lightning ModelCheckpoint get paths of saved checkpoints
|
I am using PytorchLightning and beside others a ModelCheckpoint which saves models with a formated filename like `filename="model_{epoch}-{val_acc:.2f}"
In a process I want to load these checkpoints again, for simplicity lets say I want only the best via save_top_k=N.
As the filename is dynamic I wonder how can I retrieve the checkpoint easily is there a built in attribute or via the trainer that gives the saved checkpoints?
For example like
checkpoint_callback.get_top_k_paths()
I know I can do it with glob and model_dir but wondering if there is a one line solution built in somehwere.
|
[
"you can retrieve the best model path after training from the checkpoint\n# retrieve the best checkpoint after training\ncheckpoint_callback = ModelCheckpoint(dirpath='my/path/')\ntrainer = Trainer(callbacks=[checkpoint_callback])\nmodel = ...\ntrainer.fit(model)\ncheckpoint_callback.best_model_path\n\nTo find all the checkpoints you can get the list of files in the dirpath where the checkpoints are saved.\n"
] |
[
1
] |
[] |
[] |
[
"python",
"pytorch",
"pytorch_lightning"
] |
stackoverflow_0074577562_python_pytorch_pytorch_lightning.txt
|
Q:
Working with values from 2 separate lists - Python
I have 2 lists which each have 10 value and i want to multiply the values.
import random
n1_r = random.sample(range(1, 100), 10)
n2_r = random.sample(range(1, 100), 10)
n1 = n1_r
n2 = n2_r
for example I want to multiply the first value from n1 with the first value in n2 and so on?
im expecting a new list of 10 values stored in n3
A:
n3 = [a * b for a, b in zip(n1, n2)]
A:
You can do this in numerous ways. I would go with a basic list comprehension considering the experience level you are.
For example:
array1 = [2, 2, 2, 2]
array2 = [3, 3, 3, 3]
array3 = [i * j for i,j in zip(array1, array2)]
>>> array3
[6, 6, 6, 6]
Then you can always do some more succinct one liners too.
For example:
array3 = list(map(lambda x: x[0]*x[1], zip(array1, array2)))
There are many tools, modules, and constructs in python to accomplish this. Take a look at Pandas and the module operator for a couple of handy ways to process and operate data.
A:
There are multiple ways to do this. Refer https://www.entechin.com/how-to-multiply-two-lists-in-python/
For such advanced numerical operations you can use numpy library
Eg:
import numpy as np
array1 = np.array(n1_r)
array2 = np.array(n2_r)
result = array1*array2
|
Working with values from 2 separate lists - Python
|
I have 2 lists which each have 10 value and i want to multiply the values.
import random
n1_r = random.sample(range(1, 100), 10)
n2_r = random.sample(range(1, 100), 10)
n1 = n1_r
n2 = n2_r
for example I want to multiply the first value from n1 with the first value in n2 and so on?
im expecting a new list of 10 values stored in n3
|
[
"n3 = [a * b for a, b in zip(n1, n2)]\n\n",
"You can do this in numerous ways. I would go with a basic list comprehension considering the experience level you are.\nFor example:\narray1 = [2, 2, 2, 2]\narray2 = [3, 3, 3, 3]\narray3 = [i * j for i,j in zip(array1, array2)]\n>>> array3\n[6, 6, 6, 6]\n\nThen you can always do some more succinct one liners too.\nFor example:\narray3 = list(map(lambda x: x[0]*x[1], zip(array1, array2)))\n\nThere are many tools, modules, and constructs in python to accomplish this. Take a look at Pandas and the module operator for a couple of handy ways to process and operate data.\n",
"There are multiple ways to do this. Refer https://www.entechin.com/how-to-multiply-two-lists-in-python/\nFor such advanced numerical operations you can use numpy library\nEg:\nimport numpy as np\n\narray1 = np.array(n1_r)\narray2 = np.array(n2_r)\n \nresult = array1*array2\n\n"
] |
[
2,
1,
0
] |
[] |
[] |
[
"list",
"python"
] |
stackoverflow_0074596393_list_python.txt
|
Q:
Set name to groupby size column in Pandas
I have a data frame that I need to count the unique items of a certain row. In the example below, I want to label the name for the below function as "NUM_CIK". What's the best way to assign a name to the groupby column?
Current code:
cik_groupby_cusip_occur = cik_groupby_cusip_occur.groupby(
['CUSIP'], sort=True)['CIK COMPANY'].size().sort_values(ascending=False)
Sample Output:
CUSIP
594918104 4560
037833100 4457
023135106 4053
02079K305 3545
478160104 3472
Wanted Output:
CUSIP NUM_CIK
594918104 4560
037833100 4457
023135106 4053
02079K305 3545
478160104 3472
A:
Use Series.reset_index with name parameter:
(cik_groupby_cusip_occur = cik_groupby_cusip_occur
.groupby('CUSIP')['CIK COMPANY']
.size()
.sort_values(ascending=False)
.reset_index(name='NUM_CIK'))
Or Series.value_counts:
cik_groupby_cusip_occur = (cik_groupby_cusip_occur['CUSIP']
.value_counts()
.rename_axis('CUSIP')
.reset_index(name='NUM_CIK'))
A:
Either use reset_index(name='NUM_CIK')
Or:
cik_groupby_cusip_occur = (cik_groupby_cusip_occur
.groupby(['CUSIP'], sort=True)['CIK COMPANY']
.agg(NUM_CIK='size')
.sort_values(by='NUM_CIK', ascending=False)
)
|
Set name to groupby size column in Pandas
|
I have a data frame that I need to count the unique items of a certain row. In the example below, I want to label the name for the below function as "NUM_CIK". What's the best way to assign a name to the groupby column?
Current code:
cik_groupby_cusip_occur = cik_groupby_cusip_occur.groupby(
['CUSIP'], sort=True)['CIK COMPANY'].size().sort_values(ascending=False)
Sample Output:
CUSIP
594918104 4560
037833100 4457
023135106 4053
02079K305 3545
478160104 3472
Wanted Output:
CUSIP NUM_CIK
594918104 4560
037833100 4457
023135106 4053
02079K305 3545
478160104 3472
|
[
"Use Series.reset_index with name parameter:\n(cik_groupby_cusip_occur = cik_groupby_cusip_occur\n .groupby('CUSIP')['CIK COMPANY']\n .size()\n .sort_values(ascending=False)\n .reset_index(name='NUM_CIK'))\n\nOr Series.value_counts:\ncik_groupby_cusip_occur = (cik_groupby_cusip_occur['CUSIP']\n .value_counts()\n .rename_axis('CUSIP')\n .reset_index(name='NUM_CIK'))\n\n",
"Either use reset_index(name='NUM_CIK')\nOr:\ncik_groupby_cusip_occur = (cik_groupby_cusip_occur\n .groupby(['CUSIP'], sort=True)['CIK COMPANY']\n .agg(NUM_CIK='size')\n .sort_values(by='NUM_CIK', ascending=False)\n)\n\n"
] |
[
1,
1
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074596543_pandas_python.txt
|
Q:
Run Python script without Windows console appearing
Is there any way to run a Python script in Windows XP without a command shell momentarily appearing? I often need to automate WordPerfect (for work) with Python, and even if my script has no output, if I execute it from without WP an empty shell still pops up for a second before disappearing. Is there any way to prevent this? Some kind of output redirection perhaps?
A:
pythonw.exe will run the script without a command prompt. The problem is that the Python interpreter, Python.exe, is linked against the console subsystem to produce console output (since that's 90% of cases) -- pythonw.exe is instead linked against the GUI subsystem, and Windows will not create a console output window for it unless it asks for one.
This article discusses GUI programming with Python, and also alludes to pythonw.exe. It also helpfully points out that if your Python files end with .pyw instead of .py, the standard Windows installer will set up associations correctly and run your Python in pythonw.exe.
In your case it doesn't sound like a problem, but reliance upon pythonw.exe makes your application Windows-specific -- other solutions exist to accomplish this on, say, Mac OS X.
A:
If you name your files with the ".pyw" extension, then windows will execute them with the pythonw.exe interpreter. This will not open the dos console for running your script.
A:
I tried methods above, however, a console stills appears and disappears quickly due to a Timer in my script. Finally, I found following code:
import ctypes
import os
import win32process
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
if hwnd != 0:
ctypes.windll.user32.ShowWindow(hwnd, 0)
ctypes.windll.kernel32.CloseHandle(hwnd)
_, pid = win32process.GetWindowThreadProcessId(hwnd)
os.system('taskkill /PID ' + str(pid) + ' /f')
A:
This will work on all Windows Versions:
1. Create "Runner.bat" file with Notepad (or any other text editor) and insert following content:
@echo off
python server.py
where server.py is the path of the Python script you want to run.
2. Create "RunScript.vbs" file with Notepad and insert following content:
CreateObject("Wscript.Shell").Run "runner.bat",0,True
3. Run the "RunScript.vbs" file with a double click and your Python script will be running without any visible console windows
p.s. I know that this was not part of your question but it is often the case, if you want to run the script on windows start (after user login) just paste the shortcut of "RunScript.vbs" file into your startup folder. (On Windows 10 mostly under: C:\Users[USERNAME]\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup )
Best regards
A:
Quite easy in Win10:
Open a PowerShell and type this commands:
type nul > openWindowed.vbs
type nul > main.py
notepad openWindowed.vbs
Paste the next into your openWindowed.vbs file:
Set WshShell = CreateObject("WScript.Shell")
WshShell.Run chr(34) & "C:\Users\path\to\main.py" & Chr(34), 0
Set WshShell = Nothing
The .vbs file will execute your main.py file without open a cmd. I use this a lot to the make.py files, because I don't know cmake scripting.
A:
Change the file extension to .pyw and add the following line (changed accordingly to your python version) to the beginning of your script:
#! c:\\Users\\sohra\\AppData\\Local\\Programs\\Python\\Python38\\pythonw.exe
Now, if you double click on the file, it will be executed by pythonw.exe without the console window.
A:
Turn off your Windows Defender. And install the pyinstaller package using pip install pyinstaller .
After installing open cmd and type pyinstaller --onefile --noconsole filename.py
A:
There are 2 options in Windows, I think:
Option 1
Change the .py file extension to .pyw and replace python.exe with pythonw.exe at its very first line as follows (take care to replace <your_username>\\<your_path_to>):
#! C:\\Users\\<your_username>\\<your_path_to>\\Scripts\\pythonw.exe
Double click on .pyw script file to run it.
Optionally, you can also create a shortcut to .pyw file, to customize its name, icon and keyboard shortcut as final launcher!
Option 2
Leave the .py file extension and replace pythonw.exe with python.exe at its very first line as follows (take care to replace <your_username>\\<your_path_to>):
#! C:\\Users\\<your_username>\\<your_path_to>\\Scripts\\python.exe
Use a .vbs (Visual Basic Script) file with the following content as launcher of the .py file (you can use a .vbs file to also launch a .bat file without showing the prompt):
Set WshShell = CreateObject("WScript.Shell")
WshShell.Run "C:\Users\<your_username>\<your_path_to>\Scripts\python.exe C:\Users\<your_username>\<your_path_to>\script.py", 0, True
Set WshShell = Nothing
Double click on .vbs file to run your .py script.
Optionally, you can also create a shortcut to .vbs file, to customize its name, icon and keyboard shortcut as final launcher!
A:
When you install Pyinstaller, you will be able to convert the .py into a .exe. In the settings you can change whether to show or not show the console window that python opened when the file is ran.
A:
Yeah, It's another issue of fuck python. The stanza works well via python.exe, whereas it is failed on pythonw.exe. Even more, no feasible mechanism to output sysout or syserr. Therefore, I write a C# program to start python without Command Window.
public class Program
{
private string python = @"D:\Libs\Python38\Python38_64\python.exe";
private string args = "mdx_server.py";
private string wkDir = "D:\\Program Files\\mdx-server";
public static void Main(string[] args) {
Program app = new Program();
Console.WriteLine(app.python + " " + app.args + " " + app.wkDir);
app.Exec();
Console.WriteLine("Start done!");
}
private void runThread() {
Process process = new Process();
try {
process.StartInfo.WindowStyle = ProcessWindowStyle.Hidden;
process.StartInfo.Arguments = args;
process.StartInfo.CreateNoWindow = true;
process.StartInfo.FileName = python;
process.StartInfo.WorkingDirectory = wkDir;
process.StartInfo.UseShellExecute = false;
process.Start();
} catch (Exception ex) {
Console.WriteLine(ex.Message);
}
}
public void Exec() {
Thread _worker = new Thread(runThread);
_worker.Start();
}
}
|
Run Python script without Windows console appearing
|
Is there any way to run a Python script in Windows XP without a command shell momentarily appearing? I often need to automate WordPerfect (for work) with Python, and even if my script has no output, if I execute it from without WP an empty shell still pops up for a second before disappearing. Is there any way to prevent this? Some kind of output redirection perhaps?
|
[
"pythonw.exe will run the script without a command prompt. The problem is that the Python interpreter, Python.exe, is linked against the console subsystem to produce console output (since that's 90% of cases) -- pythonw.exe is instead linked against the GUI subsystem, and Windows will not create a console output window for it unless it asks for one.\nThis article discusses GUI programming with Python, and also alludes to pythonw.exe. It also helpfully points out that if your Python files end with .pyw instead of .py, the standard Windows installer will set up associations correctly and run your Python in pythonw.exe.\nIn your case it doesn't sound like a problem, but reliance upon pythonw.exe makes your application Windows-specific -- other solutions exist to accomplish this on, say, Mac OS X.\n",
"If you name your files with the \".pyw\" extension, then windows will execute them with the pythonw.exe interpreter. This will not open the dos console for running your script.\n",
"I tried methods above, however, a console stills appears and disappears quickly due to a Timer in my script. Finally, I found following code:\nimport ctypes\nimport os\nimport win32process\n\nhwnd = ctypes.windll.kernel32.GetConsoleWindow() \nif hwnd != 0: \n ctypes.windll.user32.ShowWindow(hwnd, 0) \n ctypes.windll.kernel32.CloseHandle(hwnd)\n _, pid = win32process.GetWindowThreadProcessId(hwnd)\n os.system('taskkill /PID ' + str(pid) + ' /f')\n\n",
"This will work on all Windows Versions:\n1. Create \"Runner.bat\" file with Notepad (or any other text editor) and insert following content:\n@echo off\npython server.py\n\nwhere server.py is the path of the Python script you want to run.\n2. Create \"RunScript.vbs\" file with Notepad and insert following content:\nCreateObject(\"Wscript.Shell\").Run \"runner.bat\",0,True\n\n3. Run the \"RunScript.vbs\" file with double click and your Python script will be runnig without any visible console windows\np.s. I know that this was not part of your question but it is often the case, if you want to run the script on windows start (after user login) just paste the shortcut of \"RunScript.vbs\" file into your startup folder. (On Windows 10 mostly under: C:\\Users[USERNAME]\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup )\nBest regards\n",
"Quite easy in Win10:\nOpen a PowerShell and type this commands:\ntype nul > openWindowed.vbs\ntype nul > main.py\nnotepad openWindowed.vbs\n\nPaste the next into your openWindowed.vbs file:\nSet WshShell = CreateObject(\"WScript.Shell\") \nWshShell.Run chr(34) & \"C:\\Users\\path\\to\\main.py\" & Chr(34), 0\nSet WshShell = Nothing\n\nThe .vbs file will execute your main.py file without open a cmd. I use this a lot to the make.py files, because I don't know cmake scripting.\n",
"Change the file extension to .pyw and add the following line (changed accordingly to your python version) to the beginning of your script:\n#! c:\\\\Users\\\\sohra\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python38\\\\pythonw.exe\n\nNow, if you double click on the file, it will be executed by pythonw.exe without the console window.\n",
"Turn of your window defender. And install pyinstaller package using pip install pyinstaller .\nAfter installing open cmd and type pyinstaller --onefile --noconsole filename.py\n",
"There are 2 options in Windows, I think:\n\nOption 1\n\nChange the .py file extension to .pyw and replace python.exe with pythonw.exe at its very first line as follows (take care to replace <your_username>\\\\<your_path_to>):\n#! C:\\\\Users\\\\<your_username>\\\\<your_path_to>\\\\Scripts\\\\pythonw.exe\n\n\nDouble click on .pyw script file to run it.\n\nOptionally, you can also create a shortcut to .pyw file, to customize its name, icon and keyboard shortcut as final launcher!\n\n\n\nOption 2\n\nLeave the .py file extension and replace pythonw.exe with python.exe at its very first line as follows (take care to replace <your_username>\\\\<your_path_to>):\n#! C:\\\\Users\\\\<your_username>\\\\<your_path_to>\\\\Scripts\\\\python.exe\n\n\nUse a .vbs (Visual Basic Script) file with the following content as launcher of the .py file (you can use a .vbs file to also launch a .bat file without showing the prompt):\nSet WshShell = CreateObject(\"WScript.Shell\") \nWshShell.Run \"C:\\Users\\<your_username>\\<your_path_to>\\Scripts\\python.exe C:\\Users\\<your_username>\\<your_path_to>\\script.py\", 0, True\nSet WshShell = Nothing\n\n\nDouble click on .vbs file to run your .py script.\n\nOptionally, you can also create a shortcut to .vbs file, to customize its name, icon and keyboard shortcut as final launcher!\n\n\n\n\n",
"When you install Pyinstaller, you will be able to convert the .py into a .exe. In the settings you can change whether to show or not show the console window that python opened when the file is ran.\n",
"Yeah, It's another issue of fuck python. The stanza works well via python.exe, whereas it is failed on pythonw.exe. Even more, no feasible mechanism to output sysout or syserr. Therefore, I write a C# program to start python without Command Window.\npublic class Program\n{\n private string python = @\"D:\\Libs\\Python38\\Python38_64\\python.exe\";\n private string args = \"mdx_server.py\";\n private string wkDir = \"D:\\\\Program Files\\\\mdx-server\";\n \n public static void Main(string[] args) {\n Program app = new Program();\n Console.WriteLine(app.python + \" \" + app.args + \" \" + app.wkDir);\n app.Exec();\n Console.WriteLine(\"Start done!\");\n }\n\n private void runThread() {\n Process process = new Process();\n try {\n process.StartInfo.WindowStyle = ProcessWindowStyle.Hidden;\n process.StartInfo.Arguments = args;\n\n process.StartInfo.CreateNoWindow = true;\n process.StartInfo.FileName = python;\n process.StartInfo.WorkingDirectory = wkDir;\n\n process.StartInfo.UseShellExecute = false;\n process.Start();\n } catch (Exception ex) {\n Console.WriteLine(ex.Message);\n }\n }\n\n public void Exec() {\n Thread _worker = new Thread(runThread);\n _worker.Start();\n }\n}\n\n"
] |
[
133,
37,
8,
4,
2,
1,
0,
0,
0,
0
] |
[
"I had the same problem. I tried many options, and all of them failed\nBut I tried this method, and it magically worked!!!!!\nSo, I had this python file (mod.py) in a folder, I used to run using command prompt\nWhen I used to close the cmd the gui is automatically closed.....(SAD),\nSo I run it as follows \nC:\\....>pythonw mod.py\nDon't forget pythonw \"w\" is IMP \n"
] |
[
-2
] |
[
"python",
"shell",
"windows"
] |
stackoverflow_0001689015_python_shell_windows.txt
|
Q:
Group-by some value with time condition in pandas
Suppose I have a DataFrame like this -
ID-A ID-B ID-C Time
1 A X 2022/01/01 09:00:00
1 A X 2022/01/01 09:10:00
1 A Y 2022/01/02 10:15:00
2 B Y 2022/01/01 11:45:00
2 C Y 2022/01/01 01:00:00
2 C Y 2022/01/01 12:00:00
I want to group by columns ID-A and ID-B, and find the count of ID-C per each group. The new time field should be start time(min value per group) and end time(max value of time per group).
Desired Dataframe -
ID-A ID-B Value start_time end_time
1 A 3 2022/01/01 09:00:00 2022/01/02 10:15:00
2 B 1 2022/01/01 11:45:00 2022/01/01 11:45:00
2 C 2 2022/01/01 01:00:00 2022/01/01 12:00:00
A:
Use:
(df.groupby(['ID-A', 'ID-B'], as_index=False)
.agg(Value=('ID-C', 'size'),
start_time=('Time', 'min'),
end_time=('Time', 'max'),
)
)
|
Group-by some value with time condition in pandas
|
Suppose I have a DataFrame like this -
ID-A ID-B ID-C Time
1 A X 2022/01/01 09:00:00
1 A X 2022/01/01 09:10:00
1 A Y 2022/01/02 10:15:00
2 B Y 2022/01/01 11:45:00
2 C Y 2022/01/01 01:00:00
2 C Y 2022/01/01 12:00:00
I want to group by columns ID-A and ID-B, and find the count of ID-C per each group. The new time field should be start time(min value per group) and end time(max value of time per group).
Desired Dataframe -
ID-A ID-B Value start_time end_time
1 A 3 2022/01/01 09:00:00 2022/01/02 10:15:00
2 B 1 2022/01/01 11:45:00 2022/01/01 11:45:00
2 C 2 2022/01/01 01:00:00 2022/01/01 12:00:00
|
[
"Use:\n(df.groupby(['ID-A', 'ID-B'], as_index=False)\n .agg(Value=('ID-C', 'size'),\n start_time=('Time', 'min'),\n end_time=('Time', 'max'),\n )\n)\n\n"
] |
[
2
] |
[] |
[] |
[
"dataframe",
"group_by",
"numpy",
"pandas",
"python"
] |
stackoverflow_0074596630_dataframe_group_by_numpy_pandas_python.txt
|
Q:
Pandas - Applying formula on all column based on a value on the row
lets say I have a dataframe like below
+------+------+------+-------------+
| A | B | C | devisor_col |
+------+------+------+-------------+
| 2 | 4 | 10 | 2 |
| 3 | 3 | 9 | 3 |
| 10 | 25 | 40 | 10 |
+------+------+------+-------------+
what would be the best command to apply a formula using values from the devisor_col. Do note that I have thousand of column and rows.
the result should be like this:
+------+------+------+-------------+
| A | B | V | devisor_col |
+------+------+------+-------------+
| 1 | 2 | 5 | 2 |
| 1 | 1 | 3 | 3 |
| 1 | 1.5 | 4 | 10 |
+------+------+------+-------------+
I tried using apply map but I dont know why I cant apply it to all columns.
modResult = my_df.applymap(lambda x: x/x["devisor_col"]))
A:
IIUC, use pandas.DataFrame.divide on axis=0 :
modResult= (
pd.concat(
[my_df, my_df.filter(like="Col") # selecting columns
.divide(my_df["devisor_col"], axis=0).add_suffix("_div")], axis=1)
)
# Output :
print(modResult)
Col1 Col2 Col3 devisor_col Col1_div Col2_div Col3_div
0 2 4 10 2 1.0 2.0 5.0
1 3 3 9 3 1.0 1.0 3.0
2 10 25 40 10 1.0 2.5 4.0
If you need only the result of the divide, use this :
modResult= my_df.filter(like="Col").divide(my_df["devisor_col"], axis=0)
print(modResult)
Col1 Col2 Col3
0 1.0 2.0 5.0
1 1.0 1.0 3.0
2 1.0 2.5 4.0
Or if you want to overwrite the old columns, use pandas.DataFrame.join:
modResult= (
my_df.filter(like="Col")
.divide(my_df["devisor_col"], axis=0)
.join(my_df["devisor_col"])
)
Col1 Col2 Col3 devisor_col
0 1.0 2.0 5.0 2
1 1.0 1.0 3.0 3
2 1.0 2.5 4.0 10
You can replace my_df.filter(like="Col") with my_df.loc[:, my_df.columns!="devisor_col"].
A:
You can try using .loc
df = pd.DataFrame([[1,2,3,1],[2,3,4,5],[4,5,6,7]], columns=['col1', 'col2', 'col3', 'divisor'])
df.loc[:, df.columns != 'divisor'] = df.loc[:, df.columns != 'divisor'].divide(df['divisor'], axis=0)
|
Pandas - Applying formula on all column based on a value on the row
|
lets say I have a dataframe like below
+------+------+------+-------------+
| A | B | C | devisor_col |
+------+------+------+-------------+
| 2 | 4 | 10 | 2 |
| 3 | 3 | 9 | 3 |
| 10 | 25 | 40 | 10 |
+------+------+------+-------------+
what would be the best command to apply a formula using values from the devisor_col. Do note that I have thousand of column and rows.
the result should be like this:
+------+------+------+-------------+
| A | B | V | devisor_col |
+------+------+------+-------------+
| 1 | 2 | 5 | 2 |
| 1 | 1 | 3 | 3 |
| 1 | 1.5 | 4 | 10 |
+------+------+------+-------------+
I tried using apply map but I dont know why I cant apply it to all columns.
modResult = my_df.applymap(lambda x: x/x["devisor_col"]))
|
[
"IIUC, use pandas.DataFrame.divide on axis=0 :\nmodResult= (\n pd.concat(\n [my_df, my_df.filter(like=\"Col\") # selecting columns\n .divide(my_df[\"devisor_col\"], axis=0).add_suffix(\"_div\")], axis=1)\n )\n\n# Output :\nprint(modResult)\n\n Col1 Col2 Col3 devisor_col Col1_div Col2_div Col3_div\n0 2 4 10 2 1.0 2.0 5.0\n1 3 3 9 3 1.0 1.0 3.0\n2 10 25 40 10 1.0 2.5 4.0\n\nIf you need only the result of the divide, use this :\nmodResult= my_df.filter(like=\"Col\").divide(my_df[\"devisor_col\"], axis=0)\n\nprint(modResult)\n\n Col1 Col2 Col3\n0 1.0 2.0 5.0\n1 1.0 1.0 3.0\n2 1.0 2.5 4.0\n\nOr if you want to overwrite the old columns, use pandas.DataFrame.join:\nmodResult= (\n my_df.filter(like=\"Col\")\n .divide(my_df[\"devisor_col\"], axis=0)\n .join(my_df[\"devisor_col\"])\n )\n\n Col1 Col2 Col3 devisor_col\n0 1.0 2.0 5.0 2\n1 1.0 1.0 3.0 3\n2 1.0 2.5 4.0 10\n\nYou can replace my_df.filter(like=\"Col\") with my_df.loc[:, my_df.columns!=\"devisor_col\"].\n",
"You can try using .loc\ndf = pd.DataFrame([[1,2,3,1],[2,3,4,5],[4,5,6,7]], columns=['col1', 'col2', 'col3', 'divisor'])\n\ndf.loc[:, df.columns != 'divisor'] = df.loc[:, df.columns != 'divisor'].divide(df['divisor'], axis=0)\n\n"
] |
[
2,
2
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074596399_pandas_python.txt
|
Q:
Why `vectorize` is outperformed by `frompyfunc`?
Numpy offers vectorize and frompyfunc with similar functionalities.
As pointed out in this SO-post, vectorize wraps frompyfunc and handles the type of the returned array correctly, while frompyfunc returns an array of np.object.
However, frompyfunc outperforms vectorize consistently by 10-20% for all sizes, which can also not be explained with different return types.
Consider the following variants:
import numpy as np
def do_double(x):
return 2.0*x
vectorize = np.vectorize(do_double)
frompyfunc = np.frompyfunc(do_double, 1, 1)
def wrapped_frompyfunc(arr):
return frompyfunc(arr).astype(np.float64)
wrapped_frompyfunc just converts the result of frompyfunc to the right type - as we can see, the costs of this operation are almost negligible.
It results in the following timings (blue line is frompyfunc):
I would expect vectorize to have more overhead - but this should be seen only for small sizes. On the other hand, converting np.object to np.float64 is also done in wrapped_frompyfunc - which is still much faster.
How this performance difference can be explained?
Code to produce timing-comparison using perfplot-package (given the functions above):
import numpy as np
import perfplot
perfplot.show(
setup=lambda n: np.linspace(0, 1, n),
n_range=[2**k for k in range(20,27)],
kernels=[
frompyfunc,
vectorize,
wrapped_frompyfunc,
],
labels=["frompyfunc", "vectorize", "wrapped_frompyfunc"],
logx=True,
logy=False,
xlabel='len(x)',
equality_check = None,
)
NB: For smaller sizes, the overhead of vectorize is much higher, but that is to be expected (it wraps frompyfunc after all):
A:
Following the hints of @hpaulj we can profile the vectorize-function:
arr=np.linspace(0,1,10**7)
%load_ext line_profiler
%lprun -f np.vectorize._vectorize_call \
-f np.vectorize._get_ufunc_and_otypes \
-f np.vectorize.__call__ \
vectorize(arr)
which shows that 100% of time is spent in _vectorize_call:
Timer unit: 1e-06 s
Total time: 3.53012 s
File: python3.7/site-packages/numpy/lib/function_base.py
Function: __call__ at line 2063
Line # Hits Time Per Hit % Time Line Contents
==============================================================
2063 def __call__(self, *args, **kwargs):
...
2091 1 3530112.0 3530112.0 100.0 return self._vectorize_call(func=func, args=vargs)
...
Total time: 3.38001 s
File: python3.7/site-packages/numpy/lib/function_base.py
Function: _vectorize_call at line 2154
Line # Hits Time Per Hit % Time Line Contents
==============================================================
2154 def _vectorize_call(self, func, args):
...
2161 1 85.0 85.0 0.0 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
2162
2163 # Convert args to object arrays first
2164 1 1.0 1.0 0.0 inputs = [array(a, copy=False, subok=True, dtype=object)
2165 1 117686.0 117686.0 3.5 for a in args]
2166
2167 1 3089595.0 3089595.0 91.4 outputs = ufunc(*inputs)
2168
2169 1 4.0 4.0 0.0 if ufunc.nout == 1:
2170 1 172631.0 172631.0 5.1 res = array(outputs, copy=False, subok=True, dtype=otypes[0])
2171 else:
2172 res = tuple([array(x, copy=False, subok=True, dtype=t)
2173 for x, t in zip(outputs, otypes)])
2174 1 1.0 1.0 0.0 return res
It shows the part I have missed in my assumptions: the double-array is converted to object-array entirely in a preprocessing step (which is not a very wise thing to do memory-wise). Other parts are similar for wrapped_frompyfunc:
Timer unit: 1e-06 s
Total time: 3.20055 s
File: <ipython-input-113-66680dac59af>
Function: wrapped_frompyfunc at line 16
Line # Hits Time Per Hit % Time Line Contents
==============================================================
16 def wrapped_frompyfunc(arr):
17 1 3014961.0 3014961.0 94.2 a = frompyfunc(arr)
18 1 185587.0 185587.0 5.8 b = a.astype(np.float64)
19 1 1.0 1.0 0.0 return b
When we take a look at peak memory consumption (e.g. via /usr/bin/time python script.py), we will see, that the vectorized version has twice the memory consumption of frompyfunc, which uses a more sophisticated strategy: The double-array is handled in blocks of size NPY_BUFSIZE (which is 8192) and thus only 8192 python-floats (24bytes+8byte pointer) are present in memory at the same time (and not the number of elements in array, which might be much higher). The costs of reserving the memory from the OS + more cache misses is probably what leads to higher running times.
My take-aways from it:
the preprocessing step, which converts all inputs into object-arrays, might be not needed at all, because frompyfunc has an even more sophisticated way of handling those conversions.
neither vectorize nor frompyfunc should be used, when the resulting ufunc should be used in "real code". Instead one should either write it in C or use numba/similar.
Calling frompyfunc on the object-array needs less time than on the double-array:
arr=np.linspace(0,1,10**7)
a = arr.astype(np.object)
%timeit frompyfunc(arr) # 1.08 s ± 65.8 ms
%timeit frompyfunc(a) # 876 ms ± 5.58 ms
However, the line-profiler-timings above have not shown any advantage for using ufunc on objects rather than doubles: 3.089595s vs 3014961.0s. My suspicion is that it is due to more cache misses in the case when all objects are created vs. only 8192 created objects (256Kb) are hot in L2 cache.
A:
The question is entirely moot. If speed is the question, then neither vectorize, nor frompyfunc, is the answer. Any speed difference between them pales into insignificance compared to faster ways of doing it.
I found this question wondering why frompyfunc broke my code (it returns objects), whereas vectorize worked (it returned what I told it to do), and found people talking about speed.
Now, in the 2020s, numba/jit is available, which blows any speed advantage of frompyfunc clean out of the water.
I coded a toy application, returning a large array of np.uint8 from another one, and got the following results.
pure python 200 ms
vectorize 58 ms
frompyfunc + cast back to uint8 53 ms
np.empty + numba/njit 55 us (4 cores, 100 us single core)
So 1000x speedup over numpy, and 4000x over pure python
I can post the code if anyone is bothered. Coding the njit version involved little more than adding the line @njit before the pure python function, so you don't need to be hardcore to do it.
It is less convenient than wrapping your function in vectorize, as you have to write the looping over the numpy array stuff manually, but it does avoid writing an external C function. You do need to write in a numpy/C-like subset of python, and avoid python objects.
Perhaps I'm being hard on numpy here, asking it to vectorise a pure python function. So, what if I benchmark a numpy native array function like min against numba?
Staggeringly, I got a 10x speedup using numba/jit over np.min on a 385x360 array of np.uint8. 230 us for np.min(array) was the baseline. Numba achieved 60 us using a single core, and 22 us with all four cores.
|
Why `vectorize` is outperformed by `frompyfunc`?
|
Numpy offers vectorize and frompyfunc with similar functionalities.
As pointed out in this SO-post, vectorize wraps frompyfunc and handles the type of the returned array correctly, while frompyfunc returns an array of np.object.
However, frompyfunc outperforms vectorize consistently by 10-20% for all sizes, which can also not be explained with different return types.
Consider the following variants:
import numpy as np
def do_double(x):
return 2.0*x
vectorize = np.vectorize(do_double)
frompyfunc = np.frompyfunc(do_double, 1, 1)
def wrapped_frompyfunc(arr):
return frompyfunc(arr).astype(np.float64)
wrapped_frompyfunc just converts the result of frompyfunc to the right type - as we can see, the costs of this operation are almost negligible.
It results in the following timings (blue line is frompyfunc):
I would expect vectorize to have more overhead - but this should be seen only for small sizes. On the other hand, converting np.object to np.float64 is also done in wrapped_frompyfunc - which is still much faster.
How this performance difference can be explained?
Code to produce timing-comparison using perfplot-package (given the functions above):
import numpy as np
import perfplot
perfplot.show(
setup=lambda n: np.linspace(0, 1, n),
n_range=[2**k for k in range(20,27)],
kernels=[
frompyfunc,
vectorize,
wrapped_frompyfunc,
],
labels=["frompyfunc", "vectorize", "wrapped_frompyfunc"],
logx=True,
logy=False,
xlabel='len(x)',
equality_check = None,
)
NB: For smaller sizes, the overhead of vectorize is much higher, but that is to be expected (it wraps frompyfunc after all):
|
[
"Following the hints of @hpaulj we can profile the vectorize-function:\narr=np.linspace(0,1,10**7)\n%load_ext line_profiler\n\n%lprun -f np.vectorize._vectorize_call \\\n -f np.vectorize._get_ufunc_and_otypes \\\n -f np.vectorize.__call__ \\\n vectorize(arr)\n\nwhich shows that 100% of time is spent in _vectorize_call:\nTimer unit: 1e-06 s\n\nTotal time: 3.53012 s\nFile: python3.7/site-packages/numpy/lib/function_base.py\nFunction: __call__ at line 2063\n\nLine # Hits Time Per Hit % Time Line Contents\n==============================================================\n 2063 def __call__(self, *args, **kwargs):\n ... \n 2091 1 3530112.0 3530112.0 100.0 return self._vectorize_call(func=func, args=vargs)\n\n...\n\nTotal time: 3.38001 s\nFile: python3.7/site-packages/numpy/lib/function_base.py\nFunction: _vectorize_call at line 2154\n\nLine # Hits Time Per Hit % Time Line Contents\n==============================================================\n 2154 def _vectorize_call(self, func, args):\n ...\n 2161 1 85.0 85.0 0.0 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)\n 2162 \n 2163 # Convert args to object arrays first\n 2164 1 1.0 1.0 0.0 inputs = [array(a, copy=False, subok=True, dtype=object)\n 2165 1 117686.0 117686.0 3.5 for a in args]\n 2166 \n 2167 1 3089595.0 3089595.0 91.4 outputs = ufunc(*inputs)\n 2168 \n 2169 1 4.0 4.0 0.0 if ufunc.nout == 1:\n 2170 1 172631.0 172631.0 5.1 res = array(outputs, copy=False, subok=True, dtype=otypes[0])\n 2171 else:\n 2172 res = tuple([array(x, copy=False, subok=True, dtype=t)\n 2173 for x, t in zip(outputs, otypes)])\n 2174 1 1.0 1.0 0.0 return res\n\nIt shows the part I have missed in my assumptions: the double-array is converted to object-array entirely in a preprocessing step (which is not a very wise thing to do memory-wise). 
Other parts are similar for wrapped_frompyfunc:\nTimer unit: 1e-06 s\n\nTotal time: 3.20055 s\nFile: <ipython-input-113-66680dac59af>\nFunction: wrapped_frompyfunc at line 16\n\nLine # Hits Time Per Hit % Time Line Contents\n==============================================================\n 16 def wrapped_frompyfunc(arr):\n 17 1 3014961.0 3014961.0 94.2 a = frompyfunc(arr)\n 18 1 185587.0 185587.0 5.8 b = a.astype(np.float64)\n 19 1 1.0 1.0 0.0 return b\n\nWhen we take a look at peak memory consumption (e.g. via /usr/bin/time python script.py), we will see, that the vectorized version has twice the memory consumption of frompyfunc, which uses a more sophisticated strategy: The double-array is handled in blocks of size NPY_BUFSIZE (which is 8192) and thus only 8192 python-floats (24bytes+8byte pointer) are present in memory at the same time (and not the number of elements in array, which might be much higher). The costs of reserving the memory from the OS + more cache misses is probably what leads to higher running times.\nMy take-aways from it:\n\nthe preprocessing step, which converts all inputs into object-arrays, might be not needed at all, because frompyfunc has an even more sophisticated way of handling those conversions.\nneither vectorize no frompyfunc should be used, when the resulting ufunc should be used in \"real code\". Instead one should either write it in C or use numba/similar.\n\n\nCalling frompyfunc on the object-array needs less time than on the double-array:\narr=np.linspace(0,1,10**7)\na = arr.astype(np.object)\n%timeit frompyfunc(arr) # 1.08 s ± 65.8 ms\n%timeit frompyfunc(a) # 876 ms ± 5.58 ms\n\nHowever, the line-profiler-timings above have not shown any advantage for using ufunc on objects rather than doubles: 3.089595s vs 3014961.0s. My suspision is that it is due to more cache misses in the case when all objects are created vs. only 8192 created objects (256Kb) are hot in L2 cache.\n",
"The question is entirely moot. If speed is the question, then neither vectorize, nor frompyfunc, is the answer. Any speed difference between them pales into insignificance compared to faster ways of doing it.\nI found this question wondering why frompyfunc broke my code (it returns objects), whereas vectorize worked (it returned what I told it to do), and found people talking about speed.\nNow, in the 2020s, numba/jit is available, which blows any speed advantage of frompyfunc clean out of the water.\nI coded a toy application, returning a large array of np.uint8 from another one, and got the following results.\npure python 200 ms\nvectorize 58 ms\nfrompyfunc + cast back to uint8 53 ms \nnp.empty + numba/njit 55 us (4 cores, 100 us single core)\n\nSo 1000x speedup over numpy, and 4000x over pure python\nI can post the code if anyone is bothered. Coding the njit version involved little more than adding the line @njit before the pure python function, so you don't need to be hardcore to do it.\nIt is less convenient than wrapping your function in vectorize, as you have to write the looping over the numpy array stuff manually, but it does avoid writing an external C function. You do need to write in a numpy/C-like subset of python, and avoid python objects.\nPerhaps I'm being hard on numpy here, asking it to vectorise a pure python function. So, what if I benchmark a numpy native array function like min against numba?\nStaggeringly, I got a 10x speedup using numba/jit over np.min on a 385x360 array of np.uint8. 230 us for np.min(array) was the baseline. Numba achieved 60 us using a single core, and 22 us with all four cores.\n"
] |
[
3,
0
] |
[] |
[] |
[
"arrays",
"numpy",
"performance",
"perfplot",
"python"
] |
stackoverflow_0057253839_arrays_numpy_performance_perfplot_python.txt
|
Q:
Getting this error called on Kaggle as ""ImportError: cannot import name 'DecisionBoundaryDisplay' from 'sklearn.inspection'""
I have searched for this error on stackoverflow, people have asked about it but I'm using and working in Kaggle which doesn't need any environment and library to install and set up. Help me out with this.
import warnings
warnings.filterwarnings('ignore')
from sklearn.datasets import load_iris
from sklearn.cluster import KMeans
from sklearn.inspection import DecisionBoundaryDisplay
# Fix the random seed for reproducibility
# !! Important !! : do not change this
seed = 1234
np.random.seed(seed)
A:
DecisionBoundaryDisplay requires nightly build version of sklearn as it's a new feature. (https://scikit-learn.org/dev/modules/generated/sklearn.inspection.DecisionBoundaryDisplay.html)
If you run this in your Kaggle notebook:
import sklearn; sklearn.show_versions()
you should see that the version is insufficient. (I got sklearn 1.0.2)
Unfortunately, I don't think you can upgrade the sci-kit learn package in Kaggle further as the latest build requires a newer version of Python (3.8).
Below I used this code to get an error on purpose to see what are the available versions of sklearn, and you can see that all the later versions require Python 3.8.
!pip install scikit-learn==
ERROR: Ignored the following versions that require a different python version: 1.1.0 Requires-Python >=3.8; 1.1.0rc1 Requires-Python >=3.8; 1.1.1 Requires-Python >=3.8; 1.1.2 Requires-Python >=3.8; 1.1.3 Requires-Python >=3.8
ERROR: Could not find a version that satisfies the requirement scikit-learn== (from versions: 0.9, 0.10, 0.11, 0.12, 0.12.1, 0.13, 0.13.1, 0.14, 0.14.1, 0.15.0b1, 0.15.0b2, 0.15.0, 0.15.1, 0.15.2, 0.16b1, 0.16.0, 0.16.1, 0.17b1, 0.17, 0.17.1, 0.18, 0.18.1, 0.18.2, 0.19b2, 0.19.0, 0.19.1, 0.19.2, 0.20rc1, 0.20.0, 0.20.1, 0.20.2, 0.20.3, 0.20.4, 0.21rc2, 0.21.0, 0.21.1, 0.21.2, 0.21.3, 0.22rc2.post1, 0.22rc3, 0.22, 0.22.1, 0.22.2, 0.22.2.post1, 0.23.0rc1, 0.23.0, 0.23.1, 0.23.2, 0.24.dev0, 0.24.0rc1, 0.24.0, 0.24.1, 0.24.2, 1.0rc1, 1.0rc2, 1.0, 1.0.1, 1.0.2)
ERROR: No matching distribution found for scikit-learn==
Based on this, it seems we are unable to change Python version in Kaggle environment: https://www.kaggle.com/questions-and-answers/210493
They also mention workarounds, how to use Kaggle APIs to access the dataset from your local environment, where you would be able to install the required versions for python/sklearn for your needs.
|
Getting this error called on Kaggle as ""ImportError: cannot import name 'DecisionBoundaryDisplay' from 'sklearn.inspection'""
|
I have searched for this error on stackoverflow, people have asked about it but I'm using and working in Kaggle which doesn't need any environment and library to install and set up. Help me out with this.
import warnings
warnings.filterwarnings('ignore')
from sklearn.datasets import load_iris
from sklearn.cluster import KMeans
from sklearn.inspection import DecisionBoundaryDisplay
# Fix the random seed for reproducibility
# !! Important !! : do not change this
seed = 1234
np.random.seed(seed)
|
[
"DecisionBoundaryDisplay requires nightly build version of sklearn as it's a new feature. (https://scikit-learn.org/dev/modules/generated/sklearn.inspection.DecisionBoundaryDisplay.html)\nIf you run this in your Kaggle notebook:\nimport sklearn; sklearn.show_versions()\n\nyou should see that the version is insufficient. (I got sklearn 1.0.2)\nUnfortunately, I don't think you can upgrade the sci-kit learn package in Kaggle further as the latest build requires a newer version of Python (3.8).\nBelow I used this code to get an error on purpose to see what are the available versions of sklearn, and you can see that all the later versions require Python 3.8.\n!pip install scikit-learn==\n\nERROR: Ignored the following versions that require a different python version: 1.1.0 Requires-Python >=3.8; 1.1.0rc1 Requires-Python >=3.8; 1.1.1 Requires-Python >=3.8; 1.1.2 Requires-Python >=3.8; 1.1.3 Requires-Python >=3.8\nERROR: Could not find a version that satisfies the requirement scikit-learn== (from versions: 0.9, 0.10, 0.11, 0.12, 0.12.1, 0.13, 0.13.1, 0.14, 0.14.1, 0.15.0b1, 0.15.0b2, 0.15.0, 0.15.1, 0.15.2, 0.16b1, 0.16.0, 0.16.1, 0.17b1, 0.17, 0.17.1, 0.18, 0.18.1, 0.18.2, 0.19b2, 0.19.0, 0.19.1, 0.19.2, 0.20rc1, 0.20.0, 0.20.1, 0.20.2, 0.20.3, 0.20.4, 0.21rc2, 0.21.0, 0.21.1, 0.21.2, 0.21.3, 0.22rc2.post1, 0.22rc3, 0.22, 0.22.1, 0.22.2, 0.22.2.post1, 0.23.0rc1, 0.23.0, 0.23.1, 0.23.2, 0.24.dev0, 0.24.0rc1, 0.24.0, 0.24.1, 0.24.2, 1.0rc1, 1.0rc2, 1.0, 1.0.1, 1.0.2)\nERROR: No matching distribution found for scikit-learn==\n\nBased on this, it seems we are unable to change Python version in Kaggle environment: https://www.kaggle.com/questions-and-answers/210493\nThey also mention workarounds, how to use Kaggle APIs to access the dataset from your local environment, where you would be able to install the required versions for python/sklearn for your needs.\n"
] |
[
0
] |
[] |
[] |
[
"iris_dataset",
"kaggle",
"numpy",
"python",
"scikit_learn"
] |
stackoverflow_0074588825_iris_dataset_kaggle_numpy_python_scikit_learn.txt
|
Q:
Scrapy startproject command failed on import etree, Mac M1. The error message is: "symbol not found in flat namespace"
I am trying to start a project using the Scrapy library, for a small webscraping project, but it fails on the import etree module. The exact error on the traceback is:
from .. import etree
ImportError: dlopen(/Users/myname/Desktop/scrapy_project/venv/lib/python3.10/site-packages/lxml/etree.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace '_exsltDateXpathCtxtRegister'
I have tried uninstalling reinstalling Scrapy and lxml libraries using pip, pip3, conda, and brew. Each time I am faced with the same error when I try the scrapy shell command or scrapy startproject command on the terminal. I have even downloaded xcode using the
xcode-select --install
command, which did not seem to help either.
What exactly does the "symbol not found in flat namespace '_exsltDateXpathCtxtRegister'" error mean? Any idea how to work around the etree library or get scrapy to work?
for reference I am working on a macbook pro m1 computer and running the project on Pycharm, using python 3.10.
Thanks, any help would be greatly appreciated
A:
had the same issue, uninstall lxml:
pip3 uninstall lxml
then
pip3 install lxml --no-cache-dir it will force redownload and build wheel
A:
I fixed this issue with uninstalling lxml and reinstalling it with conda:
pip uninstall lxml
conda -c install lxml
|
Scrapy startproject command failed on import etree, Mac M1. The error message is: "symbol not found in flat namespace"
|
I am trying to start a project using the Scrapy library, for a small webscraping project, but it fails on the import etree module. The exact error on the traceback is:
from .. import etree
ImportError: dlopen(/Users/myname/Desktop/scrapy_project/venv/lib/python3.10/site-packages/lxml/etree.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace '_exsltDateXpathCtxtRegister'
I have tried uninstalling reinstalling Scrapy and lxml libraries using pip, pip3, conda, and brew. Each time I am faced with the same error when I try the scrapy shell command or scrapy startproject command on the terminal. I have even downloaded xcode using the
xcode-select --install
command, which did not seem to help either.
What exactly does the "symbol not found in flat namespace '_exsltDateXpathCtxtRegister'" error mean? Any idea how to work around the etree library or get scrapy to work?
for reference I am working on a macbook pro m1 computer and running the project on Pycharm, using python 3.10.
Thanks, any help would be greatly appreciated
|
[
"had the same issue, uninstall lxml:\npip3 uninstall lxml\nthen\npip3 install lxml --no-cache-dir it will force redownload and build wheel\n",
"I fixed this issue with uninstalling lxml and reinstalling it with conda:\npip uninstall lxml\nconda -c install lxml\n\n"
] |
[
0,
0
] |
[] |
[] |
[
"apple_m1",
"lxml",
"python",
"scrapy",
"xml.etree"
] |
stackoverflow_0070862598_apple_m1_lxml_python_scrapy_xml.etree.txt
|
Q:
I have written a django query but need specific user information of particular date
Models:
class Employee(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, default=1,related_name='Employee')
eid = models.IntegerField(primary_key=True)
salary = models.IntegerField(null=True, blank=True)
gender = models.CharField(max_length=6, choices=GENDER_CHOICES, default=1)
contactno = models.CharField(max_length=10, blank=False)
email = models.CharField(max_length=50 ,null=True, blank=True)
country = models.CharField(max_length=30)
address = models.CharField(max_length=60)
def __str__(self):
return self.user.first_name + '_' + self.user.last_name
class Attendance(models.Model):
employee = models.ForeignKey(Employee, on_delete=models.CASCADE, default=1,related_name='Attendance')
attendance_date = models.DateField(null=True)
in_time = models.TimeField(null=True)
out_time = models.TimeField(null=True ,blank=True)
description = models.TextField(null=True, blank=True)
def __str__(self):
return str(self.employee) + '-' + str(self.attendance_date)
class Breaks(models.Model):
employee = models.ForeignKey(Employee, on_delete=models.CASCADE, default=1)
break_in = models.TimeField(null=True, blank=True)
break_out = models.TimeField(null=True, blank=True)
attendance =models.ForeignKey(Attendance, on_delete=models.CASCADE, default=1,related_name='Breaks')
def __str__(self):
return str(self.employee) + '-' + str(self.break_in) + '-' + str(self.break_out)
def detail_attendance(request):
attendance_list = Attendance.objects.filter(employee__user_id=request.user.id)
counter = Counter()
return render(request, 'employee/detail_attendance.html', {'attendance_list': attendance_list, 'counter': counter})
def detail_break(request):
break_list=Breaks.objects.filter(employee__user_id=request.user.id )
return render(request, 'employee/detail_break.html', {'break_list': break_list})
I have created a function above for detail breaks. I am getting specific user data, but it is giving me the previous data as well. So I need the data for specific date for example in my attendance models I adding attendance of each user.
Please let me know what should I change in detail break.
A:
Use this Queryset:
from django.db.models import Q
from datetime import date
Breaks.objects.filter(
Q(employee__user=request.user) &
Q(attendance__attendance_date=date.today())
)
Or:
Breaks.objects.filter(
Q(employee__user=request.user) &
Q(attendance__attendance_date="2022-11-28")
)
A:
Breaks.objects.filter(date__range=["2011-01-01", "2011-01-31"])
Or if you are just trying to filter month wise:
Breaks.objects.filter(date__year='2011',
date__month='01')
Please reply to this message ,If it doesn't work.
|
I have written a django query but need specific user information of particular date
|
Models:
class Employee(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, default=1,related_name='Employee')
eid = models.IntegerField(primary_key=True)
salary = models.IntegerField(null=True, blank=True)
gender = models.CharField(max_length=6, choices=GENDER_CHOICES, default=1)
contactno = models.CharField(max_length=10, blank=False)
email = models.CharField(max_length=50 ,null=True, blank=True)
country = models.CharField(max_length=30)
address = models.CharField(max_length=60)
def __str__(self):
return self.user.first_name + '_' + self.user.last_name
class Attendance(models.Model):
employee = models.ForeignKey(Employee, on_delete=models.CASCADE, default=1,related_name='Attendance')
attendance_date = models.DateField(null=True)
in_time = models.TimeField(null=True)
out_time = models.TimeField(null=True ,blank=True)
description = models.TextField(null=True, blank=True)
def __str__(self):
return str(self.employee) + '-' + str(self.attendance_date)
class Breaks(models.Model):
employee = models.ForeignKey(Employee, on_delete=models.CASCADE, default=1)
break_in = models.TimeField(null=True, blank=True)
break_out = models.TimeField(null=True, blank=True)
attendance =models.ForeignKey(Attendance, on_delete=models.CASCADE, default=1,related_name='Breaks')
def __str__(self):
return str(self.employee) + '-' + str(self.break_in) + '-' + str(self.break_out)
def detail_attendance(request):
attendance_list = Attendance.objects.filter(employee__user_id=request.user.id)
counter = Counter()
return render(request, 'employee/detail_attendance.html', {'attendance_list': attendance_list, 'counter': counter})
def detail_break(request):
break_list=Breaks.objects.filter(employee__user_id=request.user.id )
return render(request, 'employee/detail_break.html', {'break_list': break_list})
I have created a function above for detail breaks. I am getting specific user data, but it is giving me the previous data as well. So I need the data for specific date for example in my attendance models I adding attendance of each user.
Please let me know what should I change in detail break.
|
[
"Use this Queryset:\nfrom django.db.models import Q\nfrom datetime import date\n\n\nBreaks.objects.filter(\n Q(employee__user=request.user) & \n Q(attendance__attendance_date=date.today())\n)\n\nOr:\n\nBreaks.objects.filter(\n Q(employee__user=request.user) & \n Q(attendance__attendance_date=\"2022-11-28\")\n)\n\n",
"Breaks.objects.filter(date__range=[\"2011-01-01\", \"2011-01-31\"])\n\nOr if you are just trying to filter month wise:\nBreaks.objects.filter(date__year='2011', \n date__month='01')\n\nPlease reply to this message ,If it doesn't work.\n"
] |
[
2,
0
] |
[] |
[] |
[
"django",
"django_models",
"django_queryset",
"django_views",
"python"
] |
stackoverflow_0074596023_django_django_models_django_queryset_django_views_python.txt
|
Q:
Django Not Reflecting Updates to Javascript Files?
I have javascript files in my static folder. Django finds and loads them perfectly fine, so I don't think there is anything wrong with my configuration of the static options. However, sometimes when I make a change to a .js file and save it, the Django template that uses it does NOT reflect those changes -- inspecting the javascript with the browser reveals the javascript BEFORE the last save. Restarting the server does nothing, though restarting my computer has sometimes solved the issue. I do not have any code that explicitly deals with caching. Has anyone ever experienced anything like this?
A:
I believe your browser is caching your js
you could power refresh your browser, or clear browser cache?
on chrome control+f5 or shift + f5
i believe on firefox it is control + shift + r
A:
Since you are editing JavaScript files and watching for the changes in the browser I assume you are actively developing your Django app and probably using Django's development runserver. There is a better solution than clearing the browser cache and refreshing. If you run a watcher utility that supports the livereload protocol then your browser will automatically refresh whenever you change any static file.
The django-livereload-server python package provides a spiffy solution. Install it:
$ pip install django-livereload-server
Add 'livereload.middleware.LiveReloadScript' to MIDDLEWARE_CLASSES in settings.py.
Then run
$ ./manage.py livereload
before starting the runserver.
More documentation can be found at the django-livereload-server github site
A:
For me, opening Incognito Mode in Chrome let the browser show the recent changes in my .js static files.
A:
To anyone who is using Firefox:
If you don't want to clean your browser cache or it doesn't solve your issue, you can try doing a hard refresh (hold Shift and press the refresh button).
|
Django Not Reflecting Updates to Javascript Files?
|
I have javascript files in my static folder. Django finds and loads them perfectly fine, so I don't think there is anything wrong with my configuration of the static options. However, sometimes when I make a change to a .js file and save it, the Django template that uses it does NOT reflect those changes -- inspecting the javascript with the browser reveals the javascript BEFORE the last save. Restarting the server does nothing, though restarting my computer has sometimes solved the issue. I do not have any code that explicitly deals with caching. Has anyone ever experienced anything like this?
|
[
"I believe your browser is caching your js\nyou could power refresh your browser, or clear browser cache? \non chrome control+f5 or shift + f5\ni believe on firefox it is control + shift + r\n",
"Since you are editing JavaScript files and watching for the changes in the browser I assume you are actively developing your Django app and probably using Django's development runserver. There is a better solution than clearing the browser cache and refreshing. If you run a watcher utility that supports the livereload protocol then your browser will automatically refresh whenever you change any static file.\nThe django-livereload-server python package provides a spiffy solution. Install it:\n$ pip install django-livereload-server\nAdd 'livereload.middleware.LiveReloadScript' to MIDDLEWARE_CLASSES in settings.py.\nThen run\n$ ./manage.py livereload\nbefore starting the runserver.\nMore documentation can be found at the django-livereload-server github site\n",
"For me, opening Incognito Mode in Chrome let the browser show the recent changes in my .js static files.\n",
"To anyone who is using Firefox:\nIf you don't want to clean your browser cache or it doesn't solve your issue, you can try doing a hard refresh (hold Shift and press the refresh button).\n"
] |
[
34,
15,
1,
0
] |
[] |
[] |
[
"django",
"python"
] |
stackoverflow_0015641474_django_python.txt
|
Q:
Why does this code goes into infinite loop? - python
i tried to when year and month match loop end but it not works, how i get rid of infinite loop
driver.switch_to.frame(0)
month = "March"
year = 2023
driver.find_element(By.XPATH, "//input[@id='datepicker']").click()
while True:
mon = driver.find_element(By.XPATH, "//span[@class='ui-datepicker-month']").text
yr = driver.find_element(By.XPATH, "//span[@class='ui-datepicker-year']").text
print(mon, yr)
if mon == month and yr == year:
break
else:
driver.find_element(By.XPATH, "//*[@id='ui-datepicker-div']/div/a[2]/span").click()
A:
Try using equals or is:
mon.__eq__(month) and yr.__eq__(year)
Instead of while true you can also negate the part that you use for breaking so it will be like while year and month not equal to something do this.
|
Why does this code goes into infinite loop? - python
|
i tried to when year and month match loop end but it not works, how i get rid of infinite loop
driver.switch_to.frame(0)
month = "March"
year = 2023
driver.find_element(By.XPATH, "//input[@id='datepicker']").click()
while True:
mon = driver.find_element(By.XPATH, "//span[@class='ui-datepicker-month']").text
yr = driver.find_element(By.XPATH, "//span[@class='ui-datepicker-year']").text
print(mon, yr)
if mon == month and yr == year:
break
else:
driver.find_element(By.XPATH, "//*[@id='ui-datepicker-div']/div/a[2]/span").click()
|
[
"Try using equals or is:\nmon.__eq__(month) and yr.__eq__(year)\n\nInstead of while true you can also negate the part that you use for breaking so it will be like while year and month not equal to something do this.\n"
] |
[
0
] |
[] |
[] |
[
"python",
"selenium",
"while_loop"
] |
stackoverflow_0074596708_python_selenium_while_loop.txt
|
Q:
Loop keeps returning to wrong part
I'm taking a fundamentals of programming class and we're supposed to be building a menu that calculates BMI and also shows different gym membership options, what I can't figure out is why my menu keeps looping back to the BMI calculator after viewing the membership rates.
this is some of my code:
def mainmenu():
option = int(input("Enter your option: "))
while option != 0:
if option == 1:
try:
print("Calculate BMI")
the_height = float(input("Enter the height in cm: "))
assert the_height > 0
the_weight = float(input("Enter the weight in kg: "))
assert the_weight > 0
the_BMI = the_weight / (the_height/100)**2
except ValueError:
print("Enter height and weight in whole numbers")
print("Your BMI is", the_BMI)
if the_BMI <= 18.5:
print("You are underweight.")
elif the_BMI <= 24.9:
print("You are Normal.")
elif the_BMI <= 29.9:
print("You are overweight.")
else:
print("You are obese.")
check = input("Do you want to quit or start again, enter Y to restart or another to end ?: ")
if check.upper() == "Y":
print("Bye...")
mainmenu()
elif option == 2:
def submenu():
print("Choose your membership type")
print("[1] Bassic")
print("[2] Regular")
print("[3] Premium")
print("[0] Exit to main menu")
loop = True
while loop:
submenu()
option = int(input("Enter your option: "))
if option == 1:
print("Basic Membership")
print("$10 per week, $40 per month")
break
elif option == 2:
print("Regular Membership")
print("$15 per week, $60 per month")
check = input("Do you want to quit or start again, enter Y to restart or another to end ?: ")
if check.upper() == "Y":
submenu()
elif option == 3:
print("Premium Membership")
print("$20 per week, $80 per month")
check = input("Do you want to quit or start again, enter Y to restart or another to end ?: ")
if check.upper() == "Y":
submenu()
elif option == 0:
loop = False
else:
break
else:
print("Invalid option....")
break
mainmenu()
option = int(input("Enter your option: "))
Any suggestions would be helpful, I've been playing around for a while and can't find the solution.
A:
It looks like the reason for this is that you're using option variable to store the value that user provide for both main menu and sub menu.
Instead of this
submenu()
option = int(input("Enter your option: "))
Use
submenu()
submenu_option = int(input("Enter your option: "))
Also replace the option with submenu_option only where you wants to refer to the user selection for the sub menu
|
Loop keeps returning to wrong part
|
I'm taking a fundamentals of programming class and we're supposed to be building a menu that calculates BMI and also shows different gym membership options, what I can't figure out is why my menu keeps looping back to the BMI calculator after viewing the membership rates.
this is some of my code:
def mainmenu():
option = int(input("Enter your option: "))
while option != 0:
if option == 1:
try:
print("Calculate BMI")
the_height = float(input("Enter the height in cm: "))
assert the_height > 0
the_weight = float(input("Enter the weight in kg: "))
assert the_weight > 0
the_BMI = the_weight / (the_height/100)**2
except ValueError:
print("Enter height and weight in whole numbers")
print("Your BMI is", the_BMI)
if the_BMI <= 18.5:
print("You are underweight.")
elif the_BMI <= 24.9:
print("You are Normal.")
elif the_BMI <= 29.9:
print("You are overweight.")
else:
print("You are obese.")
check = input("Do you want to quit or start again, enter Y to restart or another to end ?: ")
if check.upper() == "Y":
print("Bye...")
mainmenu()
elif option == 2:
def submenu():
print("Choose your membership type")
print("[1] Bassic")
print("[2] Regular")
print("[3] Premium")
print("[0] Exit to main menu")
loop = True
while loop:
submenu()
option = int(input("Enter your option: "))
if option == 1:
print("Basic Membership")
print("$10 per week, $40 per month")
break
elif option == 2:
print("Regular Membership")
print("$15 per week, $60 per month")
check = input("Do you want to quit or start again, enter Y to restart or another to end ?: ")
if check.upper() == "Y":
submenu()
elif option == 3:
print("Premium Membership")
print("$20 per week, $80 per month")
check = input("Do you want to quit or start again, enter Y to restart or another to end ?: ")
if check.upper() == "Y":
submenu()
elif option == 0:
loop = False
else:
break
else:
print("Invalid option....")
break
mainmenu()
option = int(input("Enter your option: "))
Any suggestions would be helpful, I've been playing around for a while and can't find the solution.
|
[
"It looks like the reason for this is that you're using option variable to store the value that user provide for both main menu and sub menu.\nInstead of this\nsubmenu()\noption = int(input(\"Enter your option: \"))\n\nUse\nsubmenu()\nsubmenu_option = int(input(\"Enter your option: \"))\n\nAlso replace the option with submenu_option only where you wants to refer to the user selection for the sub menu\n"
] |
[
1
] |
[] |
[] |
[
"bmi",
"menu",
"python"
] |
stackoverflow_0074596825_bmi_menu_python.txt
|
Q:
Building a Python dictionary from a list (no redundant entries) while keeping a count
I'm new to Python coming from a JavaScript background. I'm trying to find a solution for the following. I want to build a dictionary from list data on the fly. I only want to add the list entries that are unique, with a count of 1. Any repeats thereafter I want to keep a count of. Hence from a list containing ["one", "two", "three", "one"] I want to build a dictionary containing {'one': 2, 'two': 1, 'three': 1} I mean to use the list entries as keys and use the dict values for the respective counts. I can't seem to get Python to do it. My code follows. It's currently adding unpredictably to the dictionary totals. I only seem to be able to add the unique entries in the list this way. No luck with any totals. I wanted to ask if I'm on the wrong track or if I'm missing something with this approach. Can someone please help?
import copy
data = ["one", "two", "three", "one"]
new_dict = {}
# build dictionary from list data and only count (not add) any redundant entries
for x in data:
dict_copy = copy.deepcopy(new_dict) # loop through a copy (safety)
for y in dict_copy:
if x in new_dict: # check if an entry exists?
new_dict[y] += 1 # this count gives unpredictable results !!
else:
new_dict[x] = 1 # new entry
else:
new_dict[x] = 1 # first entry
print(new_dict)
A:
Use collections.Counter.
In [1]: from collections import Counter
In [2]: items = ["one", "two", "three", "one"]
In [3]: Counter(items)
Out[3]: Counter({'one': 2, 'two': 1, 'three': 1})
In [4]: dict(Counter(items))
Out[4]: {'one': 2, 'two': 1, 'three': 1}
|
Building a Python dictionary from a list (no redundant entries) while keeping a count
|
I'm new to Python coming from a JavaScript background. I'm trying to find a solution for the following. I want to build a dictionary from list data on the fly. I only want to add the list entries that are unique, with a count of 1. Any repeats thereafter I want to keep a count of. Hence from a list containing ["one", "two", "three", "one"] I want to build a dictionary containing {'one': 2, 'two': 1, 'three': 1} I mean to use the list entries as keys and use the dict values for the respective counts. I can't seem to get Python to do it. My code follows. It's currently adding unpredictably to the dictionary totals. I only seem to be able to add the unique entries in the list this way. No luck with any totals. I wanted to ask if I'm on the wrong track or if I'm missing something with this approach. Can someone please help?
import copy
data = ["one", "two", "three", "one"]
new_dict = {}
# build dictionary from list data and only count (not add) any redundant entries
for x in data:
dict_copy = copy.deepcopy(new_dict) # loop through a copy (safety)
for y in dict_copy:
if x in new_dict: # check if an entry exists?
new_dict[y] += 1 # this count gives unpredictable results !!
else:
new_dict[x] = 1 # new entry
else:
new_dict[x] = 1 # first entry
print(new_dict)
|
[
"Use collections.Counter.\nIn [1]: from collections import Counter\n\nIn [2]: items = [\"one\", \"two\", \"three\", \"one\"]\n\nIn [3]: Counter(items)\nOut[3]: Counter({'one': 2, 'two': 1, 'three': 1})\n\nIn [4]: dict(Counter(items))\nOut[4]: {'one': 2, 'two': 1, 'three': 1}\n\n\n"
] |
[
0
] |
[
"#1 This might just be the answer I was looking for.\ndata = [\"one\", \"two\", \"three\", \"one\"]\nnew_dict = {}\n\n\nfor x in data:\n if x in new_dict:\n new_dict[x] = new_dict[x] + 1\n else:\n new_dict[x] = 1\n\nprint(new_dict)\n\n#2 Using list comprehension.\nnew_dict = [[x, data.count(x)] for x in set(data)]\n\n"
] |
[
-1
] |
[
"dictionary",
"python",
"python_3.x"
] |
stackoverflow_0074596396_dictionary_python_python_3.x.txt
|
Q:
How To Scroll Inside An Element On A Webpage (Selenium Python)
How can I scroll down in a certain element of a webpage in Selenium?
Basically my goal is to scroll down in this element until new profile results stop loading.
Let's say that there should be 100 profile results that I'm trying to gather.
By default, the webpage will load 30 results.
I need to scroll down IN THIS SECTION, wait a few seconds for 30 more results to load, repeat (until all results have loaded).
I am able to count the number of results with:
len(driver.find_elements(By.XPATH, "//div[@class='virtual-box']"))
I already have all the other code written, I just need to figure out the line of code to get Selenium to scroll down like 2 inches.
I've looked around a bunch and can't seem to find a good answer (that or I suck at googling).
This is a section of my code:
(getting the total number of profiles currently on the page = max_prof)
while new_max_prof > max_prof:
scroll_and_wait(profile_number)
if max_prof != new_max_prof: # to make sure that they are the same
max_prof = new_max_prof
...and here is the function that it is calling (which currently doesn't work because I can't get it to scroll)
def scroll_and_wait(profile_number=profile_number): # This doesn't work yet
global profile_xpath
global new_max_prof
global max_prof
print('scrolling!')
#driver.execute_script("window.scrollTo(0,1080);") # does not work
temp_xpath = profile_xpath + str(max_prof) + ']'
element = driver.find_element(By.XPATH, temp_xpath)
ActionChains(driver).scroll_to_element(element).perform() # scrolls to the last profile
element.click() # selects the last profile
# Tested and this does not seem to load the new profiles unless you scroll down.
print('did the scroll!!!')
time.sleep(5)
new_max_prof = int(len(driver.find_elements(By.XPATH, "//div[@class='virtual-box']")))
print('new max prof is: ' + str(new_max_prof))
time.sleep(4)
I tried:
#1. driver.execute_script("window.scrollTo(0,1080);") and driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")``` but neither seemed to do anything.
#2. ActionChains(driver).scroll_to_element(element).perform() hoping that if I scrolled to the last profile on the page, it would load the next one (it doesn't)
#3. Using pywin32 win32api.mouse_event(MOUSEEVENTF_WHEEL, -300, 0) to simulate the mouse scrolling. Didn't seem to work, but even if it did, I'm not sure this would solve it because it would really need to be in the element of the webpage. Not just going to the bottom of the webpage.
A:
OKAY! I found something that works. (If anyone knows a better solution please let me know)
You can use this code to scroll to the bottom of the page:
driver.find_element(By.TAG_NAME, 'html').send_keys(Keys.END) # works, but not inside element.
What I had to do was more complicated though (since I am trying to scroll down IN AN ELEMENT on the page, and not just to the bottom of the page).
IF YOUR SCROLL BAR HAS ARROW BUTTONS at the top/buttons, try just clicking them with .click() or .click_and_hold() that's a much easier solution that trying to scroll and does the same thing.
IF, LIKE ME, YOUR SCROLL BAR HAS NO ARROW BUTTONS, you can still click on the scroll bar path at the bottom/top and it will move. If you find the XPATH to your scrollbar, then click it, it will click in the middle (not helpful), but you can offset this on the x/y axis with ".move_by_offset(0, 0)" so for example:
# import ActionChains
from selenium.webdriver.common.action_chains import ActionChains
scroll_bar_xpath = "//div[@ng-if='::vm.isVirtual']/div[@class='ps-scrollbar-y-rail']"
element = driver.find_element(By.XPATH, scroll_bar_xpath)
# Do stuff
ActionChains(driver).move_to_element(element).move_by_offset(0,50).click().perform()
Now normally, you wouldn't want to use a fixed pixel amount (50 on the y axis) because if you change the browser size, or run the program on a different monitor, it could mess up.
To solve this, you just need to figure out the size of the scroll bar, so that you know where the bottom of it is. All you have to do is:
element = driver.find_element(By.XPATH, scroll_bar_xpath)
size = element.size
w = size['width']
h = size['height']\
print('size is: ' + size)
print(h)
print(w)
This will give you the size of the element. You want to click at the bottom of it, so you'd thing that you can just take the height, and pass that into move_by_offset like this: ".move_by_offset(0,h)". You can't do that, because when you select an element, it starts from the middle, so you want to cut that number in half (and round it down so that you don't have a decimal.) This is what I ended up doing that worked:
# import ActionChains
from selenium.webdriver.common.action_chains import ActionChains
import math
scroll_bar_xpath = "//div[@ng-if='::vm.isVirtual']/div[@class='ps-scrollbar-y-rail']"
element = driver.find_element(By.XPATH, scroll_bar_xpath)
size = element.size
w = size['width']
h = size['height']
#Calculate where to click
click_place = math.floor(h / 2)
# Do Stuff
ActionChains(driver).move_to_element(element).move_by_offset(0, click_place).click().perform() #50 worked
Hope it helps!
|
How To Scroll Inside An Element On A Webpage (Selenium Python)
|
How can I scroll down in a certain element of a webpage in Selenium?
Basically my goal is to scroll down in this element until new profile results stop loading.
Let's say that there should be 100 profile results that I'm trying to gather.
By default, the webpage will load 30 results.
I need to scroll down IN THIS SECTION, wait a few seconds for 30 more results to load, repeat (until all results have loaded).
I am able to count the number of results with:
len(driver.find_elements(By.XPATH, "//div[@class='virtual-box']"))
I already have all the other code written, I just need to figure out the line of code to get Selenium to scroll down like 2 inches.
I've looked around a bunch and can't seem to find a good answer (that or I suck at googling).
This is a section of my code:
(getting the total number of profiles currently on the page = max_prof)
while new_max_prof > max_prof:
scroll_and_wait(profile_number)
if max_prof != new_max_prof: # to make sure that they are the same
max_prof = new_max_prof
...and here is the function that it is calling (which currently doesn't work because I can't get it to scroll)
def scroll_and_wait(profile_number=profile_number): # This doesn't work yet
global profile_xpath
global new_max_prof
global max_prof
print('scrolling!')
#driver.execute_script("window.scrollTo(0,1080);") # does not work
temp_xpath = profile_xpath + str(max_prof) + ']'
element = driver.find_element(By.XPATH, temp_xpath)
ActionChains(driver).scroll_to_element(element).perform() # scrolls to the last profile
element.click() # selects the last profile
# Tested and this does not seem to load the new profiles unless you scroll down.
print('did the scroll!!!')
time.sleep(5)
new_max_prof = int(len(driver.find_elements(By.XPATH, "//div[@class='virtual-box']")))
print('new max prof is: ' + str(new_max_prof))
time.sleep(4)
I tried:
#1. driver.execute_script("window.scrollTo(0,1080);") and driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")``` but neither seemed to do anything.
#2. ActionChains(driver).scroll_to_element(element).perform() hoping that if I scrolled to the last profile on the page, it would load the next one (it doesn't)
#3. Using pywin32 win32api.mouse_event(MOUSEEVENTF_WHEEL, -300, 0) to simulate the mouse scrolling. Didn't seem to work, but even if it did, I'm not sure this would solve it because it would really need to be in the element of the webpage. Not just going to the bottom of the webpage.
|
[
"OKAY! I found something that works. (If anyone knows a better solution please let me know)\nYou can use this code to scroll to the bottom of the page:\ndriver.find_element(By.TAG_NAME, 'html').send_keys(Keys.END) # works, but not inside element.\n\nWhat I had to do was more complicated though (since I am trying to scroll down IN AN ELEMENT on the page, and not just to the bottom of the page).\nIF YOUR SCROLL BAR HAS ARROW BUTTONS at the top/buttons, try just clicking them with .click() or .click_and_hold() that's a much easier solution that trying to scroll and does the same thing.\nIF, LIKE ME, YOUR SCROLL BAR HAS NO ARROW BUTTONS, you can still click on the scroll bar path at the bottom/top and it will move. If you find the XPATH to your scrollbar, then click it, it will click in the middle (not helpful), but you can offset this on the x/y axis with \".move_by_offset(0, 0)\" so for example:\n# import ActionChains\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nscroll_bar_xpath = \"//div[@ng-if='::vm.isVirtual']/div[@class='ps-scrollbar-y-rail']\"\nelement = driver.find_element(By.XPATH, scroll_bar_xpath)\n\n# Do stuff\nActionChains(driver).move_to_element(element).move_by_offset(0,50).click().perform()\n\nNow normally, you wouldn't want to use a fixed pixel amount (50 on the y axis) because if you change the browser size, or run the program on a different monitor, it could mess up.\nTo solve this, you just need to figure out the size of the scroll bar, so that you know where the bottom of it is. All you have to do is:\nelement = driver.find_element(By.XPATH, scroll_bar_xpath)\nsize = element.size\nw = size['width']\nh = size['height']\\\nprint('size is: ' + size)\nprint(h)\nprint(w)\n\nThis will give you the size of the element. You want to click at the bottom of it, so you'd thing that you can just take the height, and pass that into move_by_offset like this: \".move_by_offset(0,h)\". 
You can't do that, because when you select an element, it starts from the middle, so you want to cut that number in half (and round it down so that you don't have a decimal.) This is what I ended up doing that worked:\n# import ActionChains\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport math\n\nscroll_bar_xpath = \"//div[@ng-if='::vm.isVirtual']/div[@class='ps-scrollbar-y-rail']\"\nelement = driver.find_element(By.XPATH, scroll_bar_xpath)\nsize = element.size\nw = size['width'] \nh = size['height']\n\n#Calculate where to click\nclick_place = math.floor(h / 2)\n\n# Do Stuff\nActionChains(driver).move_to_element(element).move_by_offset(0, click_place).click().perform() #50 worked\n\nHope it helps!\n"
] |
[
1
] |
[] |
[] |
[
"python",
"python_3.x",
"selenium",
"selenium_chromedriver"
] |
stackoverflow_0074596105_python_python_3.x_selenium_selenium_chromedriver.txt
|
Q:
discord py command not found
I'm making a discord bot, and trying to have / menu context commands. this is my code:
import discord
from discord.ext import commands
from dotenv import load_dotenv
import requests
load_dotenv()
url = "https://discord.com/api/v10/applications/ID/commands"
TOKEN = "TOKEN"
# This is an example CHAT_INPUT or Slash Command, with a type of 1
json = {
"name": "ping",
"type": 1,
"description": "test lmao",
"options": [
{
"name": "test",
"description": "test",
"type": 3,
"required": True,
}
]
}
# For authorization, you can use either your bot token
headers = {
"Authorization": "Bot TOKEN"
}
r = requests.post(url, headers=headers, json=json)
bot = commands.Bot(command_prefix='/',intents=discord.Intents.all())
@bot.event
async def on_read():
print("Bot is ready!")
@bot.command(pass_context = True)
async def ping(ctx, arg):
await ctx.send("Pong! "+ str(arg))
bot.run(TOKEN)
The application command shows up, but the bot doesn't respond and I get this error:
discord.app_commands.errors.CommandNotFound: Application command 'ping' not found
A:
Seems like you are trying to use slash commands, but you aren't defining any app commands (slash commands) — you're defining a text command.
To use slash commands, you should use the app_commands package of discord.py. It also handles registering the app commands with Discord.
https://discordpy.readthedocs.io/en/stable/interactions/api.html#application-commands
|
discord py command not found
|
I'm making a discord bot, and trying to have / menu context commands. this is my code:
import discord
from discord.ext import commands
from dotenv import load_dotenv
import requests
load_dotenv()
url = "https://discord.com/api/v10/applications/ID/commands"
TOKEN = "TOKEN"
# This is an example CHAT_INPUT or Slash Command, with a type of 1
json = {
"name": "ping",
"type": 1,
"description": "test lmao",
"options": [
{
"name": "test",
"description": "test",
"type": 3,
"required": True,
}
]
}
# For authorization, you can use either your bot token
headers = {
"Authorization": "Bot TOKEN"
}
r = requests.post(url, headers=headers, json=json)
bot = commands.Bot(command_prefix='/',intents=discord.Intents.all())
@bot.event
async def on_read():
print("Bot is ready!")
@bot.command(pass_context = True)
async def ping(ctx, arg):
await ctx.send("Pong! "+ str(arg))
bot.run(TOKEN)
The application command shows up, but the bot doesn't respond and I get this error:
discord.app_commands.errors.CommandNotFound: Application command 'ping' not found
|
[
"Seems like you try to use slash commands, but you aren’t defining any app commands (slash commands) you‘re defining a text command.\nto you slash commands, you should use the app_commands package of discord.py. it also handles registering the app commands at discord\nhttps://discordpy.readthedocs.io/en/stable/interactions/api.html#application-commands\n"
] |
[
0
] |
[] |
[] |
[
"bots",
"discord",
"discord.py",
"python"
] |
stackoverflow_0074596095_bots_discord_discord.py_python.txt
|
Q:
How to remove a string part of a column value?
I'm working with a dataset in Python.
I've loaded it into a dataframe so that I can perform a linear regression on it.
But first I need to clean the dataframe so that it only has number values.
One of the columns has movies' runtime in it, phrased like this:
**Runtime**
142 min
175 min
152 min
202 min
96 min
...
And so on.
How do I remove the 'min' part of the column so that the column only shows the number part?
i.e.,
**Runtime**
142
175
152
202
96
...
A:
If need numeric before min use Series.str.extract:
df['Runtime'] = df['Runtime'].str.extract('(\d+)\s*min', expand=False).astype(int)
Or convert values to timedeltas by to_timedelta and convert to minutes from seconds by Series.dt.total_seconds and divide 60:
df['Runtime'] = pd.to_timedelta(df['Runtime']).dt.total_seconds().div(60).astype(int)
print (df)
Runtime
0 142
1 175
2 152
3 202
4 96
|
How to remove a string part of a column value?
|
I'm working with a dataset in Python.
I've loaded it into a dataframe so that I can perform a linear regression on it.
But first I need to clean the dataframe so that it only has number values.
One of the columns has movies' runtime in it, phrased like this:
**Runtime**
142 min
175 min
152 min
202 min
96 min
...
And so on.
How do I remove the 'min' part of the column so that the column only shows the number part?
i.e.,
**Runtime**
142
175
152
202
96
...
|
[
"If need numeric before min use Series.str.extract:\ndf['Runtime'] = df['Runtime'].str.extract('(\\d+)\\s*min', expand=False).astype(int)\n\nOr convert values to timedeltas by to_timedelta and convert to minutes from seconds by Series.dt.total_seconds and divide 60:\ndf['Runtime'] = pd.to_timedelta(df['Runtime']).dt.total_seconds().div(60).astype(int)\nprint (df)\n Runtime\n0 142\n1 175\n2 152\n3 202\n4 96\n\n"
] |
[
1
] |
[] |
[] |
[
"dataframe",
"pandas",
"python"
] |
stackoverflow_0074597020_dataframe_pandas_python.txt
|
Q:
i want to calculate the distance using geodesic from geopy library
I have four lists of longitude and latitude
'''
shop_long = [-123.223, -127.223, -123.223, -123.048]
shop_lat = [49.1534, 55.1303, 49.1534, 53.2563]
cus_long = [-126.07325247944962, -126.07255765553835, -126.07485428820583,
-126.0733578858899, -126.07270416708549]
cus_lat = [51.29548801984406, 51.29486187466757, 51.29566033167437,
51.295612714656855]
distance = []
shop = (shop_long, shop_lat)
customer = (cus_long, cus_lat)
print(geodesic(shop, customer).miles)'''
I want to calculate the distance between customer and shop using their latitude and longitude and append it into distance list. Please help me.
A:
Consider utilizing zip:
from geopy.distance import geodesic
def main() -> None:
shop_long = [-123.223, -127.223, -123.223, -123.048]
shop_lat = [49.1534, 55.1303, 49.1534, 53.2563]
shop_cords = [(lat, long) for lat, long in zip(shop_lat, shop_long)]
cus_long = [-126.07325247944962, -126.07255765553835, -126.07485428820583,
-126.0733578858899, -126.07270416708549]
cus_lat = [51.29548801984406, 51.29486187466757, 51.29566033167437,
51.295612714656855]
cus_cords = [(lat, long) for lat, long in zip(cus_lat, cus_long)]
cus_dists_to_shops = {
f'cus_{cus_index}_dists': [f'{geodesic(cus, shop).km:.2f}km' for shop in shop_cords]
for cus_index, cus in enumerate(cus_cords, start=1)
}
print(cus_dists_to_shops)
if __name__ == '__main__':
main()
Output:
{
'cus_1_dists': ['313.23km', '433.62km', '313.23km', '300.35km'],
'cus_2_dists': ['313.15km', '433.69km', '313.15km', '300.37km'],
'cus_3_dists': ['313.32km', '433.58km', '313.32km', '300.41km'],
'cus_4_dists': ['313.25km', '433.60km', '313.25km', '300.35km']
}
|
i want to calculate the distance using geodesic from geopy library
|
I have four lists of longitude and latitude
'''
shop_long = [-123.223, -127.223, -123.223, -123.048]
shop_lat = [49.1534, 55.1303, 49.1534, 53.2563]
cus_long = [-126.07325247944962, -126.07255765553835, -126.07485428820583,
-126.0733578858899, -126.07270416708549]
cus_lat = [51.29548801984406, 51.29486187466757, 51.29566033167437,
51.295612714656855]
distance = []
shop = (shop_long, shop_lat)
customer = (cus_long, cus_lat)
print(geodesic(shop, customer).miles)'''
I want to calculate the distance between customer and shop using their latitude and longitude and append it into distance list. Please help me.
|
[
"Consider utilizing zip:\nfrom geopy.distance import geodesic\n\n\ndef main() -> None:\n shop_long = [-123.223, -127.223, -123.223, -123.048]\n shop_lat = [49.1534, 55.1303, 49.1534, 53.2563]\n shop_cords = [(lat, long) for lat, long in zip(shop_lat, shop_long)]\n cus_long = [-126.07325247944962, -126.07255765553835, -126.07485428820583,\n -126.0733578858899, -126.07270416708549]\n cus_lat = [51.29548801984406, 51.29486187466757, 51.29566033167437,\n 51.295612714656855]\n cus_cords = [(lat, long) for lat, long in zip(cus_lat, cus_long)]\n cus_dists_to_shops = {\n f'cus_{cus_index}_dists': [f'{geodesic(cus, shop).km:.2f}km' for shop in shop_cords]\n for cus_index, cus in enumerate(cus_cords, start=1)\n }\n print(cus_dists_to_shops)\n\n\nif __name__ == '__main__':\n main()\n\nOutput:\n{\n 'cus_1_dists': ['313.23km', '433.62km', '313.23km', '300.35km'],\n 'cus_2_dists': ['313.15km', '433.69km', '313.15km', '300.37km'],\n 'cus_3_dists': ['313.32km', '433.58km', '313.32km', '300.41km'],\n 'cus_4_dists': ['313.25km', '433.60km', '313.25km', '300.35km']\n}\n\n"
] |
[
0
] |
[] |
[] |
[
"data_science",
"geopy",
"machine_learning",
"python",
"r"
] |
stackoverflow_0074596952_data_science_geopy_machine_learning_python_r.txt
|
Q:
Python - AttributeError: 'NoneType' object has no attribute 'cursor' python flask
I'm trying to populate the courses selectfield in my webapp using data from the database.
this is my attempt.
this the form
`
class StudentForm(FlaskForm):
idnumber = StringField('ID Number', [validators.DataRequired(), validators.Length(min=9, max=9)])
fname = StringField('First Name', [validators.DataRequired(), validators.Length(max=50)])
mname = StringField('Middle Name', [validators.Length(max=50)])
lname = StringField('Last Name', [validators.DataRequired(), validators.Length(max=50)])
gender = SelectField('Gender', choices=gengen)
yearlvl = SelectField('Year Level', choices= year_level)
course = SelectField('Course', choices= models.Courses.populate())
submit = SubmitField("Save")
`
@classmethod
def populate(cls):
curs = mysql.connection.cursor()
sql = curs.execute("SELECT COURSEID from courses")
if sql > 0:
result = curs.fetchall()
return result
'
when I run the program i get this error
`
File "C:\laragon\SISwebapp\webapp\students\forms.py", line 15, in StudentForm
course = SelectField('Course', choices= models.Courses.populate())
File "C:\laragon\SISwebapp\webapp\models.py", line 87, in populate
curs = mysql.connection.cursor()
AttributeError: 'NoneType' object has no attribute 'cursor'
`
I can't seem to figure out whats wrong..
edit:
This part works fine:
def all(cls):
cursor = mysql.connection.cursor()
sql = "SELECT * from courses"
cursor.execute(sql)
result = cursor.fetchall()
return result
It fetches all the data from the database table. However, it doesn't work when selecting only one column.
Please bear with me. I'm new to this kind of stuff.
A:
To create a cursor, use the cursor() method of a connection object:
cnx = mysql.connector.connect(database='Hello_World')
cursor = cnx.cursor()
A:
It seems like your mysql.connection object is None in that particular case. That's why it doesn't have a cursor attribute.
Generally, I recommend you to check how the connection has been established.
Here is a basic example for the connection and query objects.
|
Python - AttributeError: 'NoneType' object has no attribute 'cursor' python flask
|
I'm trying to populate the courses selectfield in my webapp using data from the database.
this is my attempt.
this the form
`
class StudentForm(FlaskForm):
idnumber = StringField('ID Number', [validators.DataRequired(), validators.Length(min=9, max=9)])
fname = StringField('First Name', [validators.DataRequired(), validators.Length(max=50)])
mname = StringField('Middle Name', [validators.Length(max=50)])
lname = StringField('Last Name', [validators.DataRequired(), validators.Length(max=50)])
gender = SelectField('Gender', choices=gengen)
yearlvl = SelectField('Year Level', choices= year_level)
course = SelectField('Course', choices= models.Courses.populate())
submit = SubmitField("Save")
`
@classmethod
def populate(cls):
curs = mysql.connection.cursor()
sql = curs.execute("SELECT COURSEID from courses")
if sql > 0:
result = curs.fetchall()
return result
'
when I run the program i get this error
`
File "C:\laragon\SISwebapp\webapp\students\forms.py", line 15, in StudentForm
course = SelectField('Course', choices= models.Courses.populate())
File "C:\laragon\SISwebapp\webapp\models.py", line 87, in populate
curs = mysql.connection.cursor()
AttributeError: 'NoneType' object has no attribute 'cursor'
`
I can't seem to figure out whats wrong..
edit:
This part works fine:
def all(cls):
cursor = mysql.connection.cursor()
sql = "SELECT * from courses"
cursor.execute(sql)
result = cursor.fetchall()
return result
It fetches all the data from the database table. However, it doesn't work when selecting only one column.
Please bear with me. I'm new to this kind of stuff.
|
[
"To create a cursor, use the cursor() method of a connection object:\ncnx = mysql.connector.connect(database='Hello_World')\ncursor = cnx.cursor()\n\n",
"It seems like your mysql.connection object is None in that particular case. That's why it doesn't have a cursor attribute.\nGenerally, I recommend you to check how the connection has been established.\nHere is a basic example for the connection and query objects.\n"
] |
[
0,
0
] |
[] |
[] |
[
"database",
"flask",
"laragon",
"python",
"web_applications"
] |
stackoverflow_0074596876_database_flask_laragon_python_web_applications.txt
|
Q:
Python mock() not mocking the return value
I'm having some trouble with Python mock() and I'm not familiar enough to figure out what's going on with it.
I have an abstract async task class that looks something like:
class AsyncTask(object):
@classmethod
def enqueue(cls):
....
task_ent = cls.createAsyncTask(body, delayed=will_delay)
....
I'd like to patch the createAsyncTask method for a specific instance of this class.
The code I wrote looks like:
@patch.object(CustomAsyncTaskClass, "createAsyncTask")
def test_my_test(self, mock_create_task):
....
mock_create_task.return_value = "12"
fn() # calls CustomAsyncTaskClass.enqueue(...)
....
When I print out task_ent in enqueue, I get <MagicMock name='createAsyncTask()' id='140578431952144'>
When I print out cls.createAsyncTask in enqueue, I get <MagicMock name='createAsyncTask' id='140578609336400'>
What am I doing wrong? Why won't createAsyncTask return 12?
A:
Try the following:
@patch("package_name.module_name.createAsyncTask")
def test_my_test(self, mock_create_task):
....
mock_create_task.return_value = "12"
fn() # calls CustomAsyncTaskClass.enqueue(...)
....
where module_name is the name of the module which contains the class AsyncTask.
In general, this is the guideline https://docs.python.org/3/library/unittest.mock.html#where-to-patch
A:
I know that this question is old but I just had the same problem and fixed it now.
If you patch multiple functions, it is very important to keep the parameter order in mind: it has to be the reverse of the order of the patch decorators.
@patch("package_name.function1")
@patch("package_name.function2")
def test_method(
mocked_function2: MagicMock,
mocked_function1: MagicMock
)
|
Python mock() not mocking the return value
|
I'm having some trouble with Python mock() and I'm not familiar enough to figure out what's going on with it.
I have an abstract async task class that looks something like:
class AsyncTask(object):
@classmethod
def enqueue(cls):
....
task_ent = cls.createAsyncTask(body, delayed=will_delay)
....
I'd like to patch the createAsyncTask method for a specific instance of this class.
The code I wrote looks like:
@patch.object(CustomAsyncTaskClass, "createAsyncTask")
def test_my_test(self, mock_create_task):
....
mock_create_task.return_value = "12"
fn() # calls CustomAsyncTaskClass.enqueue(...)
....
When I print out task_ent in enqueue, I get <MagicMock name='createAsyncTask()' id='140578431952144'>
When I print out cls.createAsyncTask in enqueue, I get <MagicMock name='createAsyncTask' id='140578609336400'>
What am I doing wrong? Why won't createAsyncTask return 12?
|
[
"Try the following:\n@patch(\"package_name.module_name.createAsyncTask\")\ndef test_my_test(self, mock_create_task):\n ....\n mock_create_task.return_value = \"12\"\n fn() # calls CustomAsyncTaskClass.enqueue(...)\n ....\n\nwhere module_name is the name of the module which contains the class AsyncTask.\nIn general, this is the guideline https://docs.python.org/3/library/unittest.mock.html#where-to-patch\n",
"I know that this question is old but I just had the same problem and fixed it now.\nIf you patch multiple functions it is very important to keep the order in mind. It has to be reversed from the patches.\n@patch(\"package_name.function1\")\n@patch(\"package_name.function2\")\ndef test_method(\nmocked_function2: MagicMock,\nmocked_function1: MagicMock\n)\n\n"
] |
[
2,
0
] |
[] |
[] |
[
"magicmock",
"python",
"python_mock",
"python_unittest",
"unit_testing"
] |
stackoverflow_0031014939_magicmock_python_python_mock_python_unittest_unit_testing.txt
|
Q:
SBERT gives same result no matter what
I have a test script for SBERT:
import torch
from transformers import BertTokenizer, BertModel
from sklearn.cluster import KMeans
# 1. Use SBERT to compare two sentences for semantic similarity.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids_1 = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
input_ids_2 = torch.tensor(tokenizer.encode("Hello, my cat is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs_1 = model(input_ids_1)
outputs_2 = model(input_ids_2)
last_hidden_states_1 = outputs_1[0] # The last hidden-state is the first element of the output tuple
last_hidden_states_2 = outputs_2[0] # The last hidden-state is the first element of the output tuple
# 2. Take SBERT embeddings for both sentences and cluster them.
kmeans = KMeans(n_clusters=2, random_state=0).fit(last_hidden_states_1.detach().numpy()[0], last_hidden_states_2.detach().numpy()[0])
# 3. Print the clusters.
print(kmeans.labels_)
print(kmeans.cluster_centers_)
The output is:
[0 0 0 0 0 0 0 1]
[[-0.2281394 0.29968688 0.3390873 ... -0.40648264 0.2930719
0.41721284]
[ 0.6079925 0.26097086 -0.3130729 ... 0.03109726 -0.6282735
-0.19942412]]
This happens no matter what the second sentence is. I changed it to "The capital of France is Paris" and it still gave me the same output, so clearly I am not passing/transforming the data correctly.
What am I doing wrong?
A:
There were a couple of tiny modifications needed to sort things out. Please bear in mind that in order to cluster sentences you need to take only the first/last embedding for the sentence. In addition, KMeans expects to receive a 2D array for clustering.
import torch
from transformers import BertTokenizer, BertModel
from sklearn.cluster import KMeans
import numpy as np
# 1. Use SBERT to compare two sentences for semantic similarity.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids_1 = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
input_ids_2 = torch.tensor(tokenizer.encode("The capital of France is Paris", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs_1 = model(input_ids_1)
outputs_2 = model(input_ids_2)
last_hidden_states_1 = outputs_1[0][0, 0, :] # The last hidden-state is the first element of the output tuple
last_hidden_states_2 = outputs_2[0][0, 0, :] # The last hidden-state is the first element of the output tuple
# 2. Take SBERT embeddings for both sentences and cluster them.
kmeans = KMeans(n_clusters=2, random_state=0).fit([last_hidden_states_1.detach().numpy(), last_hidden_states_2.detach().numpy()])
# 3. Print the clusters.
print(kmeans.labels_)
print(kmeans.cluster_centers_)
output:
[0 1]
[[-0.11437159 0.19371444 0.1249602 ... -0.38269117 0.21065859
0.54070717]
[-0.06510071 0.06050608 -0.10048206 ... -0.27256876 0.36847278
0.57706201]]
|
SBERT gives same result no matter what
|
I have a test script for SBERT:
import torch
from transformers import BertTokenizer, BertModel
from sklearn.cluster import KMeans
# 1. Use SBERT to compare two sentences for semantic similarity.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids_1 = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
input_ids_2 = torch.tensor(tokenizer.encode("Hello, my cat is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs_1 = model(input_ids_1)
outputs_2 = model(input_ids_2)
last_hidden_states_1 = outputs_1[0] # The last hidden-state is the first element of the output tuple
last_hidden_states_2 = outputs_2[0] # The last hidden-state is the first element of the output tuple
# 2. Take SBERT embeddings for both sentences and cluster them.
kmeans = KMeans(n_clusters=2, random_state=0).fit(last_hidden_states_1.detach().numpy()[0], last_hidden_states_2.detach().numpy()[0])
# 3. Print the clusters.
print(kmeans.labels_)
print(kmeans.cluster_centers_)
The output is:
[0 0 0 0 0 0 0 1]
[[-0.2281394 0.29968688 0.3390873 ... -0.40648264 0.2930719
0.41721284]
[ 0.6079925 0.26097086 -0.3130729 ... 0.03109726 -0.6282735
-0.19942412]]
This happens no matter what the second sentence is. I changed it to "The capital of France is Paris" and it still gave me the same output, so clearly I am not passing/transforming the data correctly.
What am I doing wrong?
|
[
"There was a couple of tiny modification to sort things out. Please bear in mind in order to cluster sentences you need to catch only the first/last embedding for the sentence. In addition KMeans expects to receive a 2D array for clustering.\nimport torch\nfrom transformers import BertTokenizer, BertModel\nfrom sklearn.cluster import KMeans\nimport numpy as np \n\n# 1. Use SBERT to compare two sentences for semantic similarity.\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertModel.from_pretrained('bert-base-uncased')\n\ninput_ids_1 = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\ninput_ids_2 = torch.tensor(tokenizer.encode(\"The capital of France is Paris\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\noutputs_1 = model(input_ids_1)\noutputs_2 = model(input_ids_2)\nlast_hidden_states_1 = outputs_1[0][0, 0, :] # The last hidden-state is the first element of the output tuple\nlast_hidden_states_2 = outputs_2[0][0, 0, :] # The last hidden-state is the first element of the output tuple\n\n# 2. Take SBERT embeddings for both sentences and cluster them.\nkmeans = KMeans(n_clusters=2, random_state=0).fit([last_hidden_states_1.detach().numpy(), last_hidden_states_2.detach().numpy()])\n\n# 3. Print the clusters.\nprint(kmeans.labels_)\nprint(kmeans.cluster_centers_)\n\noutput:\n[0 1]\n[[-0.11437159 0.19371444 0.1249602 ... -0.38269117 0.21065859\n 0.54070717]\n [-0.06510071 0.06050608 -0.10048206 ... -0.27256876 0.36847278\n 0.57706201]]\n\n"
] |
[
1
] |
[] |
[] |
[
"bert_language_model",
"nlp",
"python"
] |
stackoverflow_0074591917_bert_language_model_nlp_python.txt
|
Q:
Count number of substrings in String by multiple delimiters
Imagine the following example Strings
‘John @ Mary John v Mary John vs Mary’
‘John v Mary Ben v Paul John v Mary’
‘Hello World / John v Mary John @ Mary John vs Mary’
‘John v Mary John vs Mary John @ Mary John v Mary’
There are 3 identified delimiters
' @ '
' v '
' vs '
For every field row in my file, I would like to iterate through each delimiter, look left and right by 4 characters, concatenate left and right together, and return the count should all concatenated substrings match.
we would end up finding 'JohnMary' 3 times. Return = 3
we would end up finding 'JohnMary','BenPaul' and 'JohnMary'. Return = 0
we would end up finding 'JohnMary' 3 times. note the Hello World is irrelevant as we only look 4 characters left and right. Return = 3
we would end up finding 'JohnMary' 4 times. Return = 4
For this I'll need some sort recursive/loop query to iterate through each delimiter in each row, and count the number of matched substrings.
note, if the first 2 substrings encountered aren't a match, we don't need to continue checking any further and can return 0 (like in example 2)
A:
Try this code, which assumes that a space always exists before and after the delimiter.
!/usr/bin/python3
import re
from copy import deepcopy
from typing import List, Tuple, Union
def count_match(s: str, d: List[str]) -> Tuple[Union[None, str], int, int]:
if len(s) == 0:
return None, 0, 0
counter = dict()
offset = 0
for each in d:
match = re.search(each, s)
if match is None:
break
idx = match.start()
sub_string1 = s[idx-4: idx]
sub_string2 = s[idx+len(each): idx+len(each)+4]
sub_string = ''.join((sub_string1, sub_string2))
offset = max(offset, idx+len(each)+4)
try:
counter[sub_string] += 1
except KeyError:
counter[sub_string] = 1
if not len(counter):
return None, 0, 0
if len(counter.keys()) > 1:
return None, -1, 0
return sub_string, list(counter.values())[0], offset
if __name__ == '__main__':
text = 'John @ Mary John v Mary John vs Mary John @ Mary'
delimiter = [' @ ', ' v ', ' vs ']
count = 0
ref_string = ""
while text:
string, partial, start = count_match(text, delimiter)
if string != ref_string and ref_string != "":
count = 0
break
if partial == -1:
count = 0
break
if partial == 0:
break
ref_string = string
count += partial
text = text[start:]
print(count)
A:
Got this answer from a Matthew Barnett on a Python help forum. It also works great :)
text = '''\
John @ Mary John v Mary John vs Mary
John v Mary Ben v Paul John v Mary
Hello World / John v Mary John @ Mary John vs Mary
John v Mary John vs Mary John @ Mary John v Mary
'''
from collections import defaultdict
import re
pattern = re.compile('(.{4})( @ | v | vs )(.{4})')
for line in text.splitlines():
found = defaultdict(lambda: 0)
for before, sep, after in pattern.findall(line):
key = before, sep, after
found[before + after] += 1
if len(found) == 1 and sum(found.values()) > 1:
print(list(found.values())[0])
else:
print(0)
|
Count number of substrings in String by multiple delimiters
|
Imagine the following example Strings
‘John @ Mary John v Mary John vs Mary’
‘John v Mary Ben v Paul John v Mary’
‘Hello World / John v Mary John @ Mary John vs Mary’
‘John v Mary John vs Mary John @ Mary John v Mary’
There are 3 identified delimiters
' @ '
' v '
' vs '
For every field row in my file, I would like to iterate through each delimiter, look left and right by 4 characters, concatenate left and right together, and return the count should all concatenated substrings match.
we would end up finding 'JohnMary' 3 times. Return = 3
we would end up finding 'JohnMary','BenPaul' and 'JohnMary'. Return = 0
we would end up finding 'JohnMary' 3 times. note the Hello World is irrelevant as we only look 4 characters left and right. Return = 3
we would end up finding 'JohnMary' 4 times. Return = 4
For this I'll need some sort recursive/loop query to iterate through each delimiter in each row, and count the number of matched substrings.
note, if the first 2 substrings encountered aren't a match, we don't need to continue checking any further and can return 0 (like in example 2)
|
[
"Try with this code that assumes always exists a space before and after the delimiter\n!/usr/bin/python3\n\nimport re\nfrom copy import deepcopy\nfrom typing import List, Tuple, Union\n\ndef count_match(s: str, d: List[str]) -> Tuple[Union[None, str], int, int]:\n\n if len(s) == 0:\n return None, 0, 0\n\n counter = dict()\n offset = 0\n for each in d:\n match = re.search(each, s)\n if match is None:\n break\n idx = match.start()\n sub_string1 = s[idx-4: idx]\n sub_string2 = s[idx+len(each): idx+len(each)+4]\n sub_string = ''.join((sub_string1, sub_string2))\n offset = max(offset, idx+len(each)+4)\n try:\n counter[sub_string] += 1\n except KeyError:\n counter[sub_string] = 1\n if not len(counter):\n return None, 0, 0\n if len(counter.keys()) > 1:\n return None, -1, 0\n return sub_string, list(counter.values())[0], offset\n\n\nif __name__ == '__main__':\n text = 'John @ Mary John v Mary John vs Mary John @ Mary'\n delimiter = [' @ ', ' v ', ' vs ']\n count = 0\n ref_string = \"\"\n while text:\n string, partial, start = count_match(text, delimiter)\n if string != ref_string and ref_string != \"\":\n count = 0\n break\n if partial == -1:\n count = 0\n break\n if partial == 0:\n break\n ref_string = string\n count += partial\n text = text[start:]\n\n print(count)\n\n",
"Got this answer from a Matthew Barnett on a Python help forum. It also works great :)\ntext = '''\\\nJohn @ Mary John v Mary John vs Mary\nJohn v Mary Ben v Paul John v Mary\nHello World / John v Mary John @ Mary John vs Mary\nJohn v Mary John vs Mary John @ Mary John v Mary\n'''\n\nfrom collections import defaultdict\n\nimport re\npattern = re.compile('(.{4})( @ | v | vs )(.{4})')\n\nfor line in text.splitlines():\n found = defaultdict(lambda: 0)\n\n for before, sep, after in pattern.findall(line):\n key = before, sep, after\n found[before + after] += 1\n\n if len(found) == 1 and sum(found.values()) > 1:\n print(list(found.values())[0])\n else:\n print(0)\n\n"
] |
[
1,
0
] |
[] |
[] |
[
"delimiter",
"python",
"string",
"substring"
] |
stackoverflow_0074588275_delimiter_python_string_substring.txt
|
Q:
pd.read_html(url) - awkward table design
Table headings through the table are being converted into single column headings.
url = "https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree."
dfs = pd.read_html(url)
df = dfs[0]
df.head()
Be great if I could have the High preferred use as a column that assigns to the correct species.
Tried reset_index() this did not work.
I'm lost for searching can't find anything similar.
Response to @Master Oogway and thanks @DYZ for the edits.
There are multiple "table-striped"
The amendment suggested removes the error, but does not interact with the second table.
Take White Box, Eucalyptus albens. Occurs in second table and not first.
If I export dftable and filter - no White Box:
If I write htmltable to .txt when using find_all and search, it's there:
I have never done this before and appreciate that this is annoying.
Thanks for the help so far.
It appears that find_all is gathering all the table data.
But the creating of dftable is limiting to the first "table-striped".
A:
The table cannot be easily parsed with read_html because of its unorthodox use of <thead> attribute. You can try luck with BeautifulSoup:
import bs4
import urllib.request
soup = bs4.BeautifulSoup(urllib.request.urlopen(url))
data = [["".join(cell.strings).strip()
for cell in row.find_all(['td', 'th'])]
for row in soup.find_all('table')[0].find_all('tr')]
table = pd.DataFrame(data[1:])\
.rename(columns=dict(enumerate(data[0])))\
.dropna(how='all')
A:
So I took a look at the link and the table you're trying to get.
The problem with the table in the link is that it contains multiple headers so the .read_html(URL) function, gets all of them and sets those as your
header:
so instead of using pandas to read the HTML I used
beautiful soup for what you're trying to accomplish.
With beautiful and urllib.requests I got the HTML from the URL and extracted the HTML with the table class name
url = "https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree."
#load html with urllib
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html.read(), 'lxml')
#get the table you're trying to get based
#on html elements
htmltable = soup.find('table', { 'class' : 'table-striped' })
Then using a function I found to make a list from tables extract from beautiful soup, I modified the function to get your values in a shape that would be easy to load into a dataframe and would also be easy to call depending on what you want:
[{"common name" : value, "Species name": value, "type": value}...{}]
def tableDataText(table):
"""Parses a html segment started with tag <table> followed
by multiple <tr> (table rows) and inner <td> (table data) tags.
It returns a list of rows with inner columns.
Accepts only one <th> (table header/data) in the first row.
"""
def rowgetDataText(tr, coltag='td'): # td (data) or th (header)
return [td.get_text(strip=True) for td in tr.find_all(coltag)]
rows = []
trs = table.find_all('tr')
headerow = rowgetDataText(trs[0], 'th')
if headerow: # if there is a header row include first
trs = trs[1:]
for tr in trs: # for every table row
#this part is modified
#basically we'll get the type of
#used based of the second table header
#in your url table html
if(rowgetDataText(tr, 'th')):
last_head = rowgetDataText(tr, 'th')
#we'll add to the list a dict
#that contains "common name", "species name", "type" (use type)
if(rowgetDataText(tr, 'td')):
row = rowgetDataText(tr, 'td')
rows.append({headerow[0]: row[0], headerow[1]: row[1], 'type': last_head[0]})
return rows
then when we convert the results of that function using
the table content we extracted with beautiful soup we get this:
Then you can easily reference the type of use and each value common/species name.
Here is the full code:
import pandas as pd
from bs4 import BeautifulSoup
import urllib.request
url = "https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree."
#load html with urllib
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html.read(), 'lxml')
#get the table you're trying to get based
#on html elements
htmltable = soup.find('table', { 'class' : 'table-striped' })
#modified function taken from: https://stackoverflow.com/a/58274853/6297478
#to fit your data shape in a way that
#you can use.
def tableDataText(table):
"""Parses a html segment started with tag <table> followed
by multiple <tr> (table rows) and inner <td> (table data) tags.
It returns a list of rows with inner columns.
Accepts only one <th> (table header/data) in the first row.
"""
def rowgetDataText(tr, coltag='td'): # td (data) or th (header)
return [td.get_text(strip=True) for td in tr.find_all(coltag)]
rows = []
trs = table.find_all('tr')
headerow = rowgetDataText(trs[0], 'th')
if headerow: # if there is a header row include first
trs = trs[1:]
for tr in trs: # for every table row
#this part is modified
#basically we'll get the type of
#used based of the second table header
#in your url table html
if(rowgetDataText(tr, 'th')):
last_head = rowgetDataText(tr, 'th')
#we'll add to the list a dict
#that contains "common name", "species name", "type" (use type)
if(rowgetDataText(tr, 'td')):
row = rowgetDataText(tr, 'td')
rows.append({headerow[0]: row[0], headerow[1]: row[1], 'type': last_head[0]})
return rows
#we store our results from the function in list_table
list_table = tableDataText(htmltable)
#turn our table into a DataFrame
dftable = pd.DataFrame(list_table)
dftable
I left some comments for you in the code to help you out.
I hope this helps!
A:
Just in addition to @DYZ approach, using css selectors, stripped_strings and find_previous(). This will create a list of dicts that will be transformed into a dataframe:
from bs4 import BeautifulSoup
import requests
import pandas as pd
url = "https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree."
data = []
soup = BeautifulSoup(requests.get(url).text)
for e in soup.select('table tbody tr'):
data.append(
dict(
zip(
soup.table.thead.stripped_strings,
[e.find_previous('th').get_text(strip=True)]+list(e.stripped_strings)
)
)
)
pd.DataFrame(data)
Common name
Species name
High preferred use
0
High preferred use
Grey gum
Eucalyptus biturbinata
1
High preferred use
Large-fruited grey gum
Eucalyptus canaliculata
...
...
...
...
107
Occasional use
Broad-leaved paperbark
Melaleuca quinquenervia
108
Occasional use
nan
nan
|
pd.read_html(url) - awkward table design
|
Table headings through the table are being converted into single column headings.
url = "https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree."
dfs = pd.read_html(url)
df = dfs[0]
df.head()
Be great if I could have the High preferred use as a column that assigns to the correct species.
Tried reset_index() this did not work.
I'm lost for searching can't find anything similar.
Response to @Master Oogway and thanks @DYZ for the edits.
There are multiple "table-striped"
The amendment suggested removes the error, but does not interact with the second table.
Take White Box, Eucalyptus albens. Occurs in second table and not first.
If I export dftable and filter - no White Box:
If I write htmltable to .txt when using find_all and search, it's there:
I have never done this before and appreciate that this is annoying.
Thanks for the help so far.
It appears that find_all is gathering all the table data.
But the creating of dftable is limiting to the first "table-striped".
|
[
"The table cannot be easily parsed with read_html because of its unorthodox use of <thead> attribute. You can try luck with BeautifulSoup:\nimport bs4\nimport urllib.request\n\nsoup = bs4.BeautifulSoup(urllib.request.urlopen(url))\ndata = [[\"\".join(cell.strings).strip() \n for cell in row.find_all(['td', 'th'])] \n for row in soup.find_all('table')[0].find_all('tr')] \ntable = pd.DataFrame(data[1:])\\\n .rename(columns=dict(enumerate(data[0])))\\\n .dropna(how='all')\n\n",
"So I took a look at the link and the table you're trying to get.\nThe problem with the table in the link is that it contains multiple headers so the .read_html(URL) function, gets all of them and sets those as your\nheader:\n\nso instead of using pandas to read the HTML I used\nbeautiful soup for what you're trying to accomplish.\nWith beautiful and urllib.requests I got the HTML from the URL and extracted the HTML with the table class name\nurl = \"https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree.\"\n\n#load html with urllib\nhtml = urllib.request.urlopen(url)\nsoup = BeautifulSoup(html.read(), 'lxml')\n\n\n#get the table you're trying to get based\n#on html elements\nhtmltable = soup.find('table', { 'class' : 'table-striped' })\n\nThen using a function I found to make a list from tables extract from beautiful soup, I modified the function to get your values in a shape that would be easy to load into a dataframe and would also be easy to call depending on what you want:\n[{\"common name\" : value, \"Species name\": value, \"type\": value}...{}]\ndef tableDataText(table): \n \"\"\"Parses a html segment started with tag <table> followed \n by multiple <tr> (table rows) and inner <td> (table data) tags. \n It returns a list of rows with inner columns. 
\n Accepts only one <th> (table header/data) in the first row.\n \"\"\"\n def rowgetDataText(tr, coltag='td'): # td (data) or th (header) \n return [td.get_text(strip=True) for td in tr.find_all(coltag)] \n rows = []\n trs = table.find_all('tr')\n headerow = rowgetDataText(trs[0], 'th')\n \n\n if headerow: # if there is a header row include first\n trs = trs[1:]\n for tr in trs: # for every table row\n\n #this part is modified\n #basically we'll get the type of \n #used based of the second table header\n #in your url table html\n if(rowgetDataText(tr, 'th')):\n last_head = rowgetDataText(tr, 'th')\n\n #we'll add to the list a dict\n #that contains \"common name\", \"species name\", \"type\" (use type)\n if(rowgetDataText(tr, 'td')):\n row = rowgetDataText(tr, 'td')\n rows.append({headerow[0]: row[0], headerow[1]: row[1], 'type': last_head[0]})\n \n return rows\n\nthen when we convert the results of that function using\nthe table content we extracted with beautiful soup we get this:\n\nThen you can easily reference the type of use and each value common/species name.\nHere is the full code:\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\nurl = \"https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree.\"\n\n#load html with urllib\nhtml = urllib.request.urlopen(url)\nsoup = BeautifulSoup(html.read(), 'lxml')\n\n\n#get the table you're trying to get based\n#on html elements\nhtmltable = soup.find('table', { 'class' : 'table-striped' })\n\n\n#modified function taken from: https://stackoverflow.com/a/58274853/6297478\n#to fit your data shape in a way that \n#you can use. 
\ndef tableDataText(table): \n \"\"\"Parses a html segment started with tag <table> followed \n by multiple <tr> (table rows) and inner <td> (table data) tags. \n It returns a list of rows with inner columns. \n Accepts only one <th> (table header/data) in the first row.\n \"\"\"\n def rowgetDataText(tr, coltag='td'): # td (data) or th (header) \n return [td.get_text(strip=True) for td in tr.find_all(coltag)] \n rows = []\n trs = table.find_all('tr')\n headerow = rowgetDataText(trs[0], 'th')\n \n\n if headerow: # if there is a header row include first\n trs = trs[1:]\n for tr in trs: # for every table row\n\n #this part is modified\n #basically we'll get the type of \n #used based of the second table header\n #in your url table html\n if(rowgetDataText(tr, 'th')):\n last_head = rowgetDataText(tr, 'th')\n\n #we'll add to the list a dict\n #that contains \"common name\", \"species name\", \"type\" (use type)\n if(rowgetDataText(tr, 'td')):\n row = rowgetDataText(tr, 'td')\n rows.append({headerow[0]: row[0], headerow[1]: row[1], 'type': last_head[0]})\n \n return rows\n\n#we store our results from the function in list_table\nlist_table = tableDataText(htmltable)\n\n#turn our table into a DataFrame\ndftable = pd.DataFrame(list_table)\ndftable\n\nI left some comments for you in the code to help you out.\nI hope this helps!\n",
"Just in addition to @DYZ approach, using css selectors, stripped_strings and find_previous(). This will create a list of dicts that will be transformed into a dataframe:\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nurl = \"https://www.environment.nsw.gov.au/topics/animals-and-plants/threatened-species/programs-legislation-and-framework/nsw-koala-strategy/local-government-resources-for-koala-conservation/north-coast-koala-management-area#:~:text=The%20North%20Coast%20Koala%20Management,Valley%2C%20Clarence%20Valley%20and%20Taree.\"\n\ndata = []\nsoup = BeautifulSoup(requests.get(url).text)\nfor e in soup.select('table tbody tr'):\n data.append(\n dict(\n zip(\n soup.table.thead.stripped_strings,\n [e.find_previous('th').get_text(strip=True)]+list(e.stripped_strings)\n )\n )\n )\n\npd.DataFrame(data)\n\n\n\n\n\n\nCommon name\nSpecies name\nHigh preferred use\n\n\n\n\n0\nHigh preferred use\nGrey gum\nEucalyptus biturbinata\n\n\n1\nHigh preferred use\nLarge-fruited grey gum\nEucalyptus canaliculata\n\n\n...\n...\n...\n...\n\n\n107\nOccasional use\nBroad-leaved paperbark\nMelaleuca quinquenervia\n\n\n108\nOccasional use\nnan\nnan\n\n\n\n"
] |
[
3,
1,
0
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074596303_pandas_python.txt
|
Q:
Why won't PySimpleGui.Input adjust the height correctly?
I am trying to create a simple input form for data entry, but I can't get the text input box to change the height, only the width changes when setting the size parameter.
Here is my code:
import PySimpleGUI as sg
def main():
# # create small form for Data Analysis entry
last_printer = sg.Input(key = 'last_printer', size = (10,1))
rejected_carts = sg.Input(key = 'rejected_carts', size = (20, 1))
notes_for_self = sg.Input(key = 'notes_for_self', size = (50,4))
notes_for_ops = sg.Input(key = 'notes_for_ops', size = (50, 4))
notes_for_escalation = sg.Input(key = 'notes_for_escalation', size =(50,4))
layout = [
[sg.Text("Last Printer:"), last_printer],
[sg.Text("Rejected Cartridges:"), rejected_carts],
[sg.Text("Notes for Self:"), notes_for_self],
[sg.Text("Notes for Operators:"), notes_for_ops],
[sg.Text("Notes for Escalation:"), notes_for_escalation],
[sg.Submit(), sg.Cancel()]
]
window = sg.Window("Data Analysis Entry Form", layout) #, finalize=True, keep_on_top=True, grab_anywhere=False, size = (1220, 600))
event, values = window.read()
window.close()
print("event is: " + event)
for each in values.items():
print(each)
if __name__ == '__main__':
main()
A:
last_printer = sg.Multiline(key = 'last_printer', size = (10,1))
rejected_carts = sg.Multiline(key = 'rejected_carts', size = (20, 1))
notes_for_self = sg.Multiline(key = 'notes_for_self', size = (50,4))
notes_for_ops = sg.Multiline(key = 'notes_for_ops', size = (50, 4))
|
Why won't PySimpleGui.Input adjust the height correctly?
|
I am trying to create a simple input form for data entry, but I can't get the text input box to change the height, only the width changes when setting the size parameter.
Here is my code:
import PySimpleGUI as sg
def main():
# # create small form for Data Analysis entry
last_printer = sg.Input(key = 'last_printer', size = (10,1))
rejected_carts = sg.Input(key = 'rejected_carts', size = (20, 1))
notes_for_self = sg.Input(key = 'notes_for_self', size = (50,4))
notes_for_ops = sg.Input(key = 'notes_for_ops', size = (50, 4))
notes_for_escalation = sg.Input(key = 'notes_for_escalation', size =(50,4))
layout = [
[sg.Text("Last Printer:"), last_printer],
[sg.Text("Rejected Cartridges:"), rejected_carts],
[sg.Text("Notes for Self:"), notes_for_self],
[sg.Text("Notes for Operators:"), notes_for_ops],
[sg.Text("Notes for Escalation:"), notes_for_escalation],
[sg.Submit(), sg.Cancel()]
]
window = sg.Window("Data Analysis Entry Form", layout) #, finalize=True, keep_on_top=True, grab_anywhere=False, size = (1220, 600))
event, values = window.read()
window.close()
print("event is: " + event)
for each in values.items():
print(each)
if __name__ == '__main__':
main()
|
[
"last_printer = sg.Multiline(key = 'last_printer', size = (10,1))\nrejected_carts = sg.Multiline(key = 'rejected_carts', size = (20, 1))\nnotes_for_self = sg.Multiline(key = 'notes_for_self', size = (50,4))\nnotes_for_ops = sg.Multiline(key = 'notes_for_ops', size = (50, 4))\n\n"
] |
[
0
] |
[] |
[] |
[
"pysimplegui",
"python"
] |
stackoverflow_0068942822_pysimplegui_python.txt
|
Q:
How to run a Python file in Visual Studio code from the terminal?
I have tried to run a pretty simple code
x = input("What's x? ")
y = input("What's y? ")
z= int(x) + int(y)
print (z)
But, when I try to run that code from the terminal writing "name_of_the_file.py", I find this error:
"The term "name_of_the_file.py" is not recognized as the name of a cmdlet, function, script file, or operable program. Check the spelling of the name, or if a path was included, verify that the path is correct and try again."
If a right click on where you write the code, and then click on "run python file in terminal", it runs!
I am taking the CS50P, and I see that this should be possible because the teacher is able to do that. What am I doing wrong guys?
A:
Click the play button to run the code, watch the terminal and you can see that it is using the command & c:/WorkSpace/pytest11/.venv/Scripts/python.exe c:/WorkSpace/pytest11/main.py to run the code.
So if you need to manually type commands in the terminal to run the code. You can directly copy the above command.
If you use a virtual environment or have system environment variables configured, you can simplify the python path with the following command
python main.py
PS: main.py is my script file name, you need to modify it to your own file name.
A:
try to follow these steps:
Create a folder where you want your script to run
Open the folder using VS Code: File -> Open Folder
Create your script and save it in the folder
Open a new terminal: Terminal -> New Terminal
Type the command: python name_of_the_file.py
If it doesn't work, try: py name_of_the_file.py
If you are using a python 3 version, try: python3 name_of_the_file.py
My output:
A:
Do follow below steps
Open terminal in VS code
go to directory where you have .py file located using cd command
run command in terminal (eg. python3 file_name.py
Me@MyMacBook-Air string % python3 str_to_int.py
Hope this helps
A:
try this one:
open terminal in vscode.
check the directory in terminal, it must be same path to where you file is saved.
run command in terminal :
python3 name_of_the_file.py
|
How to run a Python file in Visual Studio code from the terminal?
|
I have tried to run a pretty simple code
x = input("What's x? ")
y = input("What's y? ")
z= int(x) + int(y)
print (z)
But, when I try to run that code from the terminal writing "name_of_the_file.py", I find this error:
"The term "name_of_the_file.py" is not recognized as the name of a cmdlet, function, script file, or operable program. Check the spelling of the name, or if a path was included, verify that the path is correct and try again."
If a right click on where you write the code, and then click on "run python file in terminal", it runs!
I am taking the CS50P, and I see that this should be possible because the teacher is able to do that. What am I doing wrong guys?
|
[
"Click the play button to run the code, watch the terminal and you can see that it is using the command & c:/WorkSpace/pytest11/.venv/Scripts/python.exe c:/WorkSpace/pytest11/main.py to run the code.\n\nSo if you need to manually type commands in the terminal to run the code. You can directly copy the above command.\nIf you use a virtual environment or have system environment variables configured, you can simplify the python path with the following command\npython main.py\n\nPS: main.py is my script file name, you need to modify it to your own file name.\n\n",
"try to follow these steps:\n\nCreate a folder where you want your script to run\nOpen the folder using VS Code: File -> Open Folder\nCreate your script and save it in the folder\nOpen a new terminal: Terminal -> New Terminal\nType the command: python name_of_the_file.py\nIf it doesn't work, try: py name_of_the_file.py\nIf you are using a python 3 version, try: python3 name_of_the_file.py\n\nMy output:\n\n",
"Do follow below steps\n\nOpen terminal in VS code\n\ngo to directory where you have .py file located using cd command\n\nrun command in terminal (eg. python3 file_name.py\nMe@MyMacBook-Air string % python3 str_to_int.py\n\n\nHope this helps\n",
"try this one:\n\nopen terminal in vscode.\n\ncheck the directory in terminal, it must be same path to where you file is saved.\n\nrun command in terminal :\npython3 name_of_the_file.py\n\n\n"
] |
[
1,
0,
0,
0
] |
[] |
[] |
[
"python",
"terminal",
"visual_studio",
"visual_studio_code"
] |
stackoverflow_0074595712_python_terminal_visual_studio_visual_studio_code.txt
|
Q:
I tried to install emcee and corner for python (linux). I got missing 'python.h' during installation. How to fix the corner installation?
I tried to install 'emcee' for python. It seems to work. To start I tried the example here
http://dfm.io/emcee/current/user/line/
I want to get such corner plots as in the example so I've to install 'corner' too.
This fails.
Uninstallation and reinstallation of wheel with pip get some small progress, but now I stuck.
I'm using the life Linux Knoppix8.1 and python2.7.
I tried
$ pip install corner
It seems to try to recompile matplotlib (and other) with gcc and the main error seems to be:
fatal error: Python.h: Datei oder Verzeichnis nicht gefunden
That means the Python.h file is missing.
I tried then the often suggested
$ sudo apt-get install python-dev
But this fails too with:
Paketlisten werden gelesen... Fertig Abhängigkeitsbaum wird aufgebaut.
Statusinformationen werden eingelesen.... Fertig Paket python-dev ist
nicht verfügbar, wird aber von einem anderen Paket referenziert. Das
kann heißen, dass das Paket fehlt, dass es abgelöst wurde oder nur aus
einer anderen Quelle verfügbar ist. Doch die folgenden Pakete ersetzen
es: python
E: Für Paket »python-dev« existiert kein Installationskandidat.
Thats German for python-dev is not available.
I tried also that suggested here, does not work too.
Python: Python.h file missing
I seem to have generally problems with python modules with (graphical) output or the need of compilation with gcc. Pure calculation python modules seems to work mostly (at least for Python2.7).
How can I install corner to get the plots?
What should I try now?
Thank you for help.
UPDATE1:I made some progress.
My main focus is now to install 'python-dev'.
After updating
$ sudo apt-get update
it knows now python-dev (great!) but there are dependencies.
After
$ sudo apt-get install python-dev
It demands to install first: libpython-dev, python2.7-dev
After
$ sudo apt-get install libpython-dev
It demands to install first: libpython2.7-dev
After
$ sudo apt-get install libpython2.7-dev
It demands a different version of: libpython2.7-stdlib, libpython2.7
Hängt ab von: libpython2.7-stdlib (= 2.7.13-2+deb9u3) aber 2.7.14~rc1-3 soll installiert werden
Hängt ab von: libpython2.7 (= 2.7.13-2+deb9u3) aber 2.7.14~rc1-3 soll installiert werden
After
$ sudo apt-get install libpython2.7-stdlib
It demands a different version of: libssl1.1
Hängt ab von: libssl1.1 (>= 1.1.1) aber 1.1.0j-1~deb9u1 soll installiert werden
After
$ sudo apt-get install libssl1.1
Installation works fine for the first time.
The problem I suck now is:
The version of 'libssl1.1' is now 1.1.0j-1~deb9u1 and it claims that this is the newest version.
But 'libpython2.7-stdlib' demands the version >= 1.1.1
How can I solve this now?
A:
This is the duplicate question.
Please refer to this Answer for installation of libssl
|
I tried to install emcee and corner for python (linux). I got missing 'python.h' during installation. How to fix the corner installation?
|
I tried to install 'emcee' for python. It seems to work. To start I tried the example here
http://dfm.io/emcee/current/user/line/
I want to get such corner plots as in the example so I've to install 'corner' too.
This fails.
Uninstallation and reinstallation of wheel with pip get some small progress, but now I stuck.
I'm using the life Linux Knoppix8.1 and python2.7.
I tried
$ pip install corner
It seems to try to recompile matplotlib (and other) with gcc and the main error seems to be:
fatal error: Python.h: Datei oder Verzeichnis nicht gefunden
That means the Python.h file is missing.
I tried then the often suggested
$ sudo apt-get install python-dev
But this fails too with:
Paketlisten werden gelesen... Fertig Abhängigkeitsbaum wird aufgebaut.
Statusinformationen werden eingelesen.... Fertig Paket python-dev ist
nicht verfügbar, wird aber von einem anderen Paket referenziert. Das
kann heißen, dass das Paket fehlt, dass es abgelöst wurde oder nur aus
einer anderen Quelle verfügbar ist. Doch die folgenden Pakete ersetzen
es: python
E: Für Paket »python-dev« existiert kein Installationskandidat.
Thats German for python-dev is not available.
I tried also that suggested here, does not work too.
Python: Python.h file missing
I seem to have generally problems with python modules with (graphical) output or the need of compilation with gcc. Pure calculation python modules seems to work mostly (at least for Python2.7).
How can I install corner to get the plots?
What should I try now?
Thank you for help.
UPDATE1:I made some progress.
My main focus is now to install 'python-dev'.
After updating
$ sudo apt-get update
it knows now python-dev (great!) but there are dependencies.
After
$ sudo apt-get install python-dev
It demands to install first: libpython-dev, python2.7-dev
After
$ sudo apt-get install libpython-dev
It demands to install first: libpython2.7-dev
After
$ sudo apt-get install libpython2.7-dev
It demands a different version of: libpython2.7-stdlib, libpython2.7
Hängt ab von: libpython2.7-stdlib (= 2.7.13-2+deb9u3) aber 2.7.14~rc1-3 soll installiert werden
Hängt ab von: libpython2.7 (= 2.7.13-2+deb9u3) aber 2.7.14~rc1-3 soll installiert werden
After
$ sudo apt-get install libpython2.7-stdlib
It demands a different version of: libssl1.1
Hängt ab von: libssl1.1 (>= 1.1.1) aber 1.1.0j-1~deb9u1 soll installiert werden
After
$ sudo apt-get install libssl1.1
Installation works fine for the first time.
The problem I suck now is:
The version of 'libssl1.1' is now 1.1.0j-1~deb9u1 and it claims that this is the newest version.
But 'libpython2.7-stdlib' demands the version >= 1.1.1
How can I solve this now?
|
[
"This is the duplicate question.\nPlease refer to this Answer for installation of libssl\n"
] |
[
0
] |
[] |
[] |
[
"apt_get",
"dependencies",
"installation",
"module",
"python"
] |
stackoverflow_0056241573_apt_get_dependencies_installation_module_python.txt
|
Q:
python subprocess does not write the output
I have the following code snippet as a Python script. I get the proper job output files with no errors. However the stdout subprocess log files (i.e. COSMOSIS_output_XXX.txt etc) don't store the expected runtime logs. Instead, these files have <_io.TextIOWrapper name=9 encoding='UTF-8'> as the only output written in the files. What is it I am doing wrong?
import subprocess
from subprocess import Popen
jobname = "cosmosis"
arg2 = "output.filename="
Vector = (
" " + ini + " " + arg2 + COSMOSIS_PATH + os.path.splitext(ini)[0] + ".txt"
)
job3 = subprocess.Popen(
["cosmosis" + Vector],
shell=True,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
file = open("%sCOSMOSIS_output_%s.txt" % (ERROR_PATH, ini), "a")
sys.stdout = file
print(job3.stdout)
file_e = open("%sCOSMOSIS_output_ERROR_%s.txt" % (ERROR_PATH, ini), "a")
sys.stdout = file_e
print(job3.stderr)
try:
outs, errs = job3.communicate(timeout=2000)
except TimeoutExpired:
outs, errs = job3.communicate()
job3.kill()
file.close()
file_e.close()
A:
First of all you are just opening the file, you are not reading anything from it, you are not storing the information of the file anywhere, so it will just create that <_io.TextIOWrapper name=9 encoding='UTF-8'> which is very easy reproducible:
file = open("testtextf.txt","a")
print(file)
You have to read it somehow for example with .read():
data = file.read()
Also i do not recommend using "open" unless you want to deal with files being left open if anything goes wrong in between the lines before you close it again.
I highly recommend to use "with open" instead.
A:
Passing a list as the first argument with shell=True is system-dependent. I'm guessing you really mean
with open("%sCOSMOSIS_output_%s.txt" % (ERROR_PATH, ini), "ab") as file,
open("%sCOSMOSIS_output_ERROR_%s.txt" % (ERROR_PATH, ini), "a") as file_e:
try:
job3 = subprocess.run(
["cosmosis", ini, arg2, COSMOSIS_PATH, os.path.splitext(ini)[0] + ".txt"],
stdout=file, stderr=file_e, check=True,
timeout=2000)
except TimeoutExpired:
pass
There is no way for job3.stdout or job3.stderr to contain anything because we redirected them to files. (We open the file handles with binary mode so we don't need to specify a text encoding.) There is also no way for the process to return a useful result if it is killed by a timeout, and obviously no need to kill it when it was already killed.
As the subprocess documentation already tells you, you should prefer subprocess.run or one of the legacy high-level wrappers instead of Popen when you can. Perhaps see also Actual meaning of shell=True in subprocess which also explains why you want to avoid using a shell when you can.
On the other hand, if (as indicated in comments) you want the process to run in the background until completion while your Python script proceeds to perform other tasks and take out the timeout, you do need Popen.
with open("%sCOSMOSIS_output_%s.txt" % (ERROR_PATH, ini), "ab") as file,
open("%sCOSMOSIS_output_ERROR_%s.txt" % (ERROR_PATH, ini), "a") as file_e:
job3 = subprocess.Popen(
["cosmosis", ini, arg2, COSMOSIS_PATH, os.path.splitext(ini)[0] + ".txt"],
stdout=file, stderr=file_e)
...
# time passes and your script does other things ...
job3.wait()
You might want to periodically poll the job to see if it has finished.
|
python subprocess does not write the output
|
I have the following code snippet as a Python script. I get the proper job output files with no errors. However the stdout subprocess log files (i.e. COSMOSIS_output_XXX.txt etc) don't store the expected runtime logs. Instead, these files have <_io.TextIOWrapper name=9 encoding='UTF-8'> as the only output written in the files. What is it I am doing wrong?
import subprocess
from subprocess import Popen
jobname = "cosmosis"
arg2 = "output.filename="
Vector = (
" " + ini + " " + arg2 + COSMOSIS_PATH + os.path.splitext(ini)[0] + ".txt"
)
job3 = subprocess.Popen(
["cosmosis" + Vector],
shell=True,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
file = open("%sCOSMOSIS_output_%s.txt" % (ERROR_PATH, ini), "a")
sys.stdout = file
print(job3.stdout)
file_e = open("%sCOSMOSIS_output_ERROR_%s.txt" % (ERROR_PATH, ini), "a")
sys.stdout = file_e
print(job3.stderr)
try:
outs, errs = job3.communicate(timeout=2000)
except TimeoutExpired:
outs, errs = job3.communicate()
job3.kill()
file.close()
file_e.close()
|
[
"First of all you are just opening the file, you are not reading anything from it, you are not storing the information of the file anywhere, so it will just create that <_io.TextIOWrapper name=9 encoding='UTF-8'> which is very easy reproducible:\nfile = open(\"testtextf.txt\",\"a\")\nprint(file)\n\nYou have to read it somehow for example with .read():\ndata = file.read()\n\nAlso i do not recommend using \"open\" unless you want to deal with files being left open if anything goes wrong in between the lines before you close it again.\nI highly recommend to use \"with open\" instead.\n",
"Passing a list as the first argument with shell=True is system-dependent. I'm guessing you really mean\nwith open(\"%sCOSMOSIS_output_%s.txt\" % (ERROR_PATH, ini), \"ab\") as file,\n open(\"%sCOSMOSIS_output_ERROR_%s.txt\" % (ERROR_PATH, ini), \"a\") as file_e:\n try:\n job3 = subprocess.run(\n [\"cosmosis\", ini, arg2, COSMOSIS_PATH, os.path.splitext(ini)[0] + \".txt\"],\n stdout=file, stderr=file_e, check=True,\n timeout=2000)\n except TimeoutExpired:\n pass\n\nThere is no way for job3.stdout or job3.stderr to contain anything because we redirected them to files. (We open the file handles with binary mode so we don't need to specify a text encoding.) There is also no way for the process to return a useful result if it is killed by a timeout, and obviously no need to kill it when it was already killed.\nAs the subprocess documentation already tells you, you should prefer subprocess.run or one of the legacy high-level wrappers instead of Popen when you can. Perhaps see also Actual meaning of shell=True in subprocess which also explains why you want to avoid using a shell when you can.\nOn the other hand, if (as indicated in comments) you want the process to run in the background until completion while your Python script proceeds to perform other tasks and take out the timeout, you do need Popen.\nwith open(\"%sCOSMOSIS_output_%s.txt\" % (ERROR_PATH, ini), \"ab\") as file,\n open(\"%sCOSMOSIS_output_ERROR_%s.txt\" % (ERROR_PATH, ini), \"a\") as file_e:\n job3 = subprocess.Popen(\n [\"cosmosis\", ini, arg2, COSMOSIS_PATH, os.path.splitext(ini)[0] + \".txt\"],\n stdout=file, stderr=file_e)\n...\n# time passes and your script does other things ...\njob3.wait()\n\nYou might want to periodically poll the job to see if it has finished.\n"
] |
[
1,
1
] |
[
"Ok, the following snippet is sufficient solution to the question posted above.\nwith open(\"%sCOSMOSIS_output_%s.txt\" % (ERROR_PATH, ini), \"wb\") as file, open(\"%sCOSMOSIS_output_ERROR_%s.txt\" % (ERROR_PATH, ini), \"wb\") as file_e:\n job3 = subprocess.Popen(\n [\"cosmosis\" + Vector],\n shell=True,\n text=True,\n stdout=file,\n stderr=file_e,\n )\n\n"
] |
[
-1
] |
[
"python",
"python_3.x",
"stdout",
"subprocess"
] |
stackoverflow_0074591331_python_python_3.x_stdout_subprocess.txt
|
Q:
how to show both key and value in dictionary in the format they are in
I have a dictionary look like this
mydict = { 'type' : 'fruit', 'quantity': 20 }
i wan to print only the 'type' field in the way it is ,like this {'type': 'fruit'}
i found this on other website
class fruits(dict):
def __str__(self):
return json.dumps(self)
collect = [['apple','grapes']]
result = fruits(collect)
print(result)
is there a simpler way without jsonify it?
i also tried .items() method but it print out as (key, value) which i dont wan it to be
A:
If you're trying to define a class that behaves exactly like dict with the only exception being that it always prints in a particular way, this might be the way to go:
class subclass_of_dict(dict):
def __str__(self):
return "{'type' : " + f"'{self.get('type')}'" + '}'
With your class defined like this, you can now create a couple of instances of this new class:
f1 = subclass_of_dict({'type' : 'fruit', 'quantity': 20})
f2 = subclass_of_dict({'type' : 'bowler hats', 'quantity': 13})
Then calling print on these instances does this:
print (f1)
print (f2)
# result:
# {'type' : 'fruit'}
# {'type' : 'bowler hats'}
Is this what you're after?
A:
This one is very easy. You have to write only few lines of code.
class fruits(dict):
def __str__(self):
return "{'type' : " + f"'{self.get('type')}'" + '}'
Then you have to just make your dictionary and print.
mydict = fruits{ 'type' : 'fruit', 'quantity': 20 }
print(mydict)
I guess you've found your answer. To learn more about a dictionary in an easy way, follow this tutorial.
Hope this comment is helpful for you.
|
how to show both key and value in dictionary in the format they are in
|
I have a dictionary look like this
mydict = { 'type' : 'fruit', 'quantity': 20 }
i wan to print only the 'type' field in the way it is ,like this {'type': 'fruit'}
i found this on other website
class fruits(dict):
def __str__(self):
return json.dumps(self)
collect = [['apple','grapes']]
result = fruits(collect)
print(result)
is there a simpler way without jsonify it?
i also tried .items() method but it print out as (key, value) which i dont wan it to be
|
[
"If you're trying to define a class that behaves exactly like dict with the only exception being that it always prints in a particular way, this might be the way to go:\nclass subclass_of_dict(dict):\n def __str__(self):\n return \"{'type' : \" + f\"'{self.get('type')}'\" + '}'\n\nWith your class defined like this, you can now create a couple of instances of this new class:\nf1 = subclass_of_dict({'type' : 'fruit', 'quantity': 20})\nf2 = subclass_of_dict({'type' : 'bowler hats', 'quantity': 13})\n\nThen calling print on these instances does this:\nprint (f1)\nprint (f2)\n\n# result: \n # {'type' : 'fruit'}\n # {'type' : 'bowler hats'}\n\nIs this what you're after?\n",
"This one is very easy. You have to write only few lines of code.\nclass fruits(dict):\ndef __str__(self):\n return \"{'type' : \" + f\"'{self.get('type')}'\" + '}'\n\nThen you have to just make your dictionary and print.\nmydict = fruits{ 'type' : 'fruit', 'quantity': 20 }\nprint(mydict)\n\nI guess you've found your answer. To learn more about a dictionary in an easy way, follow this tutorial.\nHope this comment is helpful for you.\n"
] |
[
1,
1
] |
[] |
[] |
[
"python"
] |
stackoverflow_0074596593_python.txt
|
Q:
Dealing with outliers in Pandas - Substitution of values
I get a little confused dealing with outliers.
I have a DataFrame that I need to go through and in every column that has a numeric value I need to find the outliers.
If the value exceeds the outliers , I want to replace it with the np.nan value.
I think my problem is in replacing the outlier values with the np.nan value that for some reason I don't understand how to access them.
def outlier(df):
new_df = df.copy()
numeric_cols = new_df._get_numeric_data().columns
for col in numeric_cols:
q1 = np.percentile(new_df[col],25)
q3 = np.percentile(new_df[col],75)
IQR = q3 - q1
lower_limit = q1 - (1.5*IQR)
upper_limit = q3 + (1.5*IQR)
if (new_df[col][0] < lower_limit) | (new_df[col][0] > upper_limit):
new_df[col] = np.nan
return new_df
A:
Change:
if (new_df[col][0] < lower_limit) | (new_df[col][0] > upper_limit):
new_df[col] = np.nan
by DataFrame.loc:
new_df.loc[(new_df[col] < lower_limit) | (new_df[col] > upper_limit), col] = np.nan
Or:
new_df.loc[~new_df[col].between(lower_limit,upper_limit, inclusive="neither"), col] = np.nan
You can also avoid looping by numeric columns with processing all columns together and set NaNs by DataFrame.mask:
def outlier(df):
new_df = df.copy()
numeric_cols = new_df._get_numeric_data().columns
q1 = np.percentile(new_df[numeric_cols],25, axis=0)
q3 = np.percentile(new_df[numeric_cols],75, axis=0)
IQR = q3 - q1
lower_limit = q1 - (1.5*IQR)
upper_limit = q3 + (1.5*IQR)
mask = (new_df[numeric_cols] < lower_limit) | (new_df[numeric_cols] > upper_limit)
new_df[numeric_cols] = new_df[numeric_cols].mask(mask)
return new_df
|
Dealing with outliers in Pandas - Substitution of values
|
I get a little confused dealing with outliers.
I have a DataFrame that I need to go through and in every column that has a numeric value I need to find the outliers.
If the value exceeds the outliers , I want to replace it with the np.nan value.
I think my problem is in replacing the outlier values with the np.nan value that for some reason I don't understand how to access them.
def outlier(df):
new_df = df.copy()
numeric_cols = new_df._get_numeric_data().columns
for col in numeric_cols:
q1 = np.percentile(new_df[col],25)
q3 = np.percentile(new_df[col],75)
IQR = q3 - q1
lower_limit = q1 - (1.5*IQR)
upper_limit = q3 + (1.5*IQR)
if (new_df[col][0] < lower_limit) | (new_df[col][0] > upper_limit):
new_df[col] = np.nan
return new_df
|
[
"Change:\n if (new_df[col][0] < lower_limit) | (new_df[col][0] > upper_limit):\n new_df[col] = np.nan\n\nby DataFrame.loc:\nnew_df.loc[(new_df[col] < lower_limit) | (new_df[col] > upper_limit), col] = np.nan\n\nOr:\nnew_df.loc[~new_df[col].between(lower_limit,upper_limit, inclusive=\"neither\"), col] = np.nan\n\nYou can also avoid looping by numeric columns with processing all columns together and set NaNs by DataFrame.mask:\ndef outlier(df):\n new_df = df.copy()\n numeric_cols = new_df._get_numeric_data().columns\n \n q1 = np.percentile(new_df[numeric_cols],25, axis=0)\n q3 = np.percentile(new_df[numeric_cols],75, axis=0)\n IQR = q3 - q1\n lower_limit = q1 - (1.5*IQR)\n upper_limit = q3 + (1.5*IQR)\n mask = (new_df[numeric_cols] < lower_limit) | (new_df[numeric_cols] > upper_limit)\n new_df[numeric_cols] = new_df[numeric_cols].mask(mask)\n return new_df\n\n"
] |
[
1
] |
[] |
[] |
[
"pandas",
"python"
] |
stackoverflow_0074597261_pandas_python.txt
|
Q:
How to uniquely get the win32api/win32gui figure handle from the matplotlib figure object
I wonder what is the best way to uniquely obtain the win32gui's window handle given the matplotlib's figure object.
I know I can find it based on the window title with win32gui.FindWindow()
import matplotlib.pyplot as plt
import win32gui
fig = plt.figure()
my_title = 'My Figure'
fig.canvas.manager.window.title(my_title)
hdl = win32gui.FindWindow(None, my_title)
print(hdl)
>>> 7735822 (just an arbitrary number here)
but this method fails if I have more than one window with the same title (the reasons why to do so are out of the question), since FindWindow seems to return only the first result that matches the search:
fig1 = plt.figure()
fig2 = plt.figure()
my_title = 'My Figure'
fig1.canvas.manager.window.title(my_title)
fig2.canvas.manager.window.title(my_title) # Both figures have same window title
hdl = win32gui.FindWindow(None, my_title)
print(hdl)
>>> 6424880 (just another one arbitrary number here)
In the second example at least it would be nice if hdl were a list of handles with all matches found.
I seems logical to me that if I have the specific python object pointing to the figure, there must be a unique way to get the win32gui window handle, but I can't find the answer anywhere. Something like:
fig = plt.figure()
hdl = fig.get_w32_handle()
# or
hdl = win32gui.GetWindowFromID(id(fig))
A:
I found the answer here after searching again by using the 'Tkinter' keyword rather than 'matplotlib'
Basically I can get the Windows handle (in hexadecimal) with the frame() method and convert it do decimal to then use it with the win32 api:
import matplotlib.pyplot as plt
import win32gui
fig1 = plt.figure()
fig2 = plt.figure()
my_title = 'My Figure'
fig1.canvas.manager.window.title(my_title)
fig2.canvas.manager.window.title(my_title) # Both figures have same window title
hdl1 = int(fig1.canvas.manager.window.frame(), 16) # Hex -> Dec
hdl2 = int(fig2.canvas.manager.window.frame(), 16)
print(hdl1, ':', win32gui.GetWindowText(hdl1))
print(hdl2, ':', win32gui.GetWindowText(hdl2))
>>> 199924 : My Figure
>>> 396520 : My Figure
|
How to uniquely get the win32api/win32gui figure handle from the matplotlib figure object
|
I wonder what is the best way to uniquely obtain the win32gui's window handle given the matplotlib's figure object.
I know I can find it based on the window title with win32gui.FindWindow()
import matplotlib.pyplot as plt
import win32gui
fig = plt.figure()
my_title = 'My Figure'
fig.canvas.manager.window.title(my_title)
hdl = win32gui.FindWindow(None, my_title)
print(hdl)
>>> 7735822 (just an arbitrary number here)
but this method fails if I have more than one window with the same title (the reasons why to do so are out of the question), since FindWindow seems to return only the first result that matches the search:
fig1 = plt.figure()
fig2 = plt.figure()
my_title = 'My Figure'
fig1.canvas.manager.window.title(my_title)
fig2.canvas.manager.window.title(my_title) # Both figures have same window title
hdl = win32gui.FindWindow(None, my_title)
print(hdl)
>>> 6424880 (just another one arbitrary number here)
In the second example at least it would be nice if hdl were a list of handles with all matches found.
I seems logical to me that if I have the specific python object pointing to the figure, there must be a unique way to get the win32gui window handle, but I can't find the answer anywhere. Something like:
fig = plt.figure()
hdl = fig.get_w32_handle()
# or
hdl = win32gui.GetWindowFromID(id(fig))
|
[
"I found the answer here after searching again by using the 'Tkinter' keyword rather than 'matplotlib'\nBasically I can get the Windows handle (in hexadecimal) with the frame() method and convert it do decimal to then use it with the win32 api:\nimport matplotlib.pyplot as plt\nimport win32gui\n\nfig1 = plt.figure()\nfig2 = plt.figure()\n\nmy_title = 'My Figure'\nfig1.canvas.manager.window.title(my_title)\nfig2.canvas.manager.window.title(my_title) # Both figures have same window title\n\nhdl1 = int(fig1.canvas.manager.window.frame(), 16) # Hex -> Dec\nhdl2 = int(fig2.canvas.manager.window.frame(), 16) \n\nprint(hdl1, ':', win32gui.GetWindowText(hdl1))\nprint(hdl2, ':', win32gui.GetWindowText(hdl2))\n\n\n>>> 199924 : My Figure\n>>> 396520 : My Figure\n\n"
] |
[
0
] |
[] |
[] |
[
"matplotlib",
"python",
"pywin32"
] |
stackoverflow_0074588416_matplotlib_python_pywin32.txt
|
Q:
Making GET request to a tiktok url in order to get a canonical link
I want to make a GET request to a tiktok url via python but it does not work.
Let's say we have a tiktok link from a mobile app – https://vt.tiktok.com/ZS81uRSRR/ and I want to get its video_id which is available in a canonical link. This is the canonical link for the provided tiktok: https://www.tiktok.com/@notorious_foodie/video/7169643841316834566?_r=1&_t=8XdwIuoJjkX&is_from_webapp=v1&item_id=7169643841316834566
video_id comes after /video/, for example in the link above video_id would be 7169643841316834566
When I open a mobile link on my laptop in a browser it redirects me to the canonical link, I wanted to achieve the same behavior via code and managed to do it like so:
import requests
def get_canonical_url(url):
return requests.get(url, timeout=5).url
It was working for a while but then it started raising timeout errors every time, I managed to fix it by providing cookie. I made a request to Postman(it works when I make GET request through postman though), copied cookies, modified my function to accept cookies and it started working again. It had been working with cookies for ~6 months although last week it stopped working again, I thought that the reason might be in the expired cookies but when I updated them it didn't help.
This is the error I keep getting:
requests.exceptions.ReadTimeout: HTTPSConnectionPool(host='www.tiktok.com', port=443): Read timed out. (read timeout=5)
The weirdest thing is that I can make my desired request just fine via curl:
Or via Postman:
Recap
So the problem is that my python GET request never succeeded and I can't understand why. I tried using VPN in case tiktok has banned my ip, also I tried to run this request on some of my servers to try different server locations but none of my attempts worked.
Could you give me a piece of advice how to debug this issue further or maybe any other ideas how I can get video_id out of mobile tiktok link?
A:
Method 1 - Using subprocess
Execute curl command and catch the output and it will take ~0.5 seconds.
import subprocess
import re
process_detail = subprocess.Popen(["curl", "https://vt.tiktok.com/ZS81uRSRR/"], stdout=subprocess.PIPE)
output = process_detail.communicate()[0].decode()
process_detail.kill()
canonical_link = re.search("(?P<url>https?://[^\s]+)+\?", output).group("url")
print("Canonical link: ", canonical_link)
Method 2 - Using proxies
We need to use proxies. here is the solution for free proxies which we can scrap and apply dynamically using BeautifulSoup..
First install BeautifulSoup using pip install BeautifulSoup
Solution:
from bs4 import BeautifulSoup
import requests
def scrap_now(url):
print(f"<======================> Scrapping Started <======================>")
print(f"<======================> Getting proxy <======================>")
source = requests.get('https://free-proxy-list.net/').text
soup = BeautifulSoup(source, "html.parser")
ips_container = soup.findAll("table", {"class": "table table-striped table-bordered"})
ip_trs = ips_container[0].findAll('tr')
for i in ip_trs[1:]:
proxy_ip = i.findAll('td')[0].text + ":" + i.findAll('td')[1].text
try:
proxy = {"https": proxy_ip}
print(f"<======================> Trying with: {proxy_ip}<======================>")
headers = {'User-Agent': 'Mozilla/5.0'}
resp = requests.get(url, headers=headers, proxies=proxy, timeout=5)
if resp.status_code == requests.codes.ok:
print(f"<======================> Got Success with: {proxy_ip}<======================>")
return resp.url
except Exception as e:
print(e)
continue
return ""
canonical_link = scrap_now("https://vt.tiktok.com/ZS81uRSRR/")
print("Canonical link: ", canonical_link)
Output:
Method - 3: Using Selenium
We can do this with selenium as well. It will take almost 5 seconds
First, install selenium using pip install selenium==3.141.0
then execute below lines:
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
options = webdriver.ChromeOptions()
options.add_experimental_option("prefs", {
"profile.default_content_setting_values.media_stream_mic": 1,
"profile.default_content_setting_values.media_stream_camera": 1,
"profile.default_content_setting_values.geolocation": 1,
"profile.default_content_setting_values.notifications": 1,
"credentials_enable_service": False,
"profile.password_manager_enabled": False
})
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ['enable-automation'])
browser = webdriver.Chrome(ChromeDriverManager(cache_valid_range=365).install(), options=options)
browser.get("https://vt.tiktok.com/ZS81uRSRR/")
print("Canonical link: ", browser.current_url)
Note: On first run it will take a bit more time as it will download web drivers automatically, but after that it will use cache only.
|
Making GET request to a tiktok url in order to get a canonical link
|
I want to make a GET request to a tiktok url via python but it does not work.
Let's say we have a tiktok link from a mobile app – https://vt.tiktok.com/ZS81uRSRR/ and I want to get its video_id which is available in a canonical link. This is the canonical link for the provided tiktok: https://www.tiktok.com/@notorious_foodie/video/7169643841316834566?_r=1&_t=8XdwIuoJjkX&is_from_webapp=v1&item_id=7169643841316834566
video_id comes after /video/, for example in the link above video_id would be 7169643841316834566
When I open a mobile link on my laptop in a browser it redirects me to the canonical link, I wanted to achieve the same behavior via code and managed to do it like so:
import requests
def get_canonical_url(url):
return requests.get(url, timeout=5).url
It was working for a while but then it started raising timeout errors every time, I managed to fix it by providing cookie. I made a request to Postman(it works when I make GET request through postman though), copied cookies, modified my function to accept cookies and it started working again. It had been working with cookies for ~6 months although last week it stopped working again, I thought that the reason might be in the expired cookies but when I updated them it didn't help.
This is the error I keep getting:
requests.exceptions.ReadTimeout: HTTPSConnectionPool(host='www.tiktok.com', port=443): Read timed out. (read timeout=5)
The weirdest thing is that I can make my desired request just fine via curl:
Or via Postman:
Recap
So the problem is that my python GET request never succeeded and I can't understand why. I tried using VPN in case tiktok has banned my ip, also I tried to run this request on some of my servers to try different server locations but none of my attempts worked.
Could you give me a piece of advice how to debug this issue further or maybe any other ideas how I can get video_id out of mobile tiktok link?
|
[
"Method 1 - Using subprocess\nExecute curl command and catch the output and it will take ~0.5 seconds.\nimport subprocess\nimport re\nprocess_detail = subprocess.Popen([\"curl\", \"https://vt.tiktok.com/ZS81uRSRR/\"], stdout=subprocess.PIPE)\noutput = process_detail.communicate()[0].decode()\nprocess_detail.kill()\ncanonical_link = re.search(\"(?P<url>https?://[^\\s]+)+\\?\", output).group(\"url\")\nprint(\"Canonical link: \", canonical_link)\n\nMethod 2 - Using proxies\nWe need to use proxies. here is the solution for free proxies which we can scrap and apply dynamically using BeautifulSoup..\nFirst install BeautifulSoup using pip install BeautifulSoup\nSolution:\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef scrap_now(url):\n print(f\"<======================> Scrapping Started <======================>\")\n print(f\"<======================> Getting proxy <======================>\")\n source = requests.get('https://free-proxy-list.net/').text\n soup = BeautifulSoup(source, \"html.parser\")\n ips_container = soup.findAll(\"table\", {\"class\": \"table table-striped table-bordered\"})\n ip_trs = ips_container[0].findAll('tr')\n for i in ip_trs[1:]:\n proxy_ip = i.findAll('td')[0].text + \":\" + i.findAll('td')[1].text\n try:\n proxy = {\"https\": proxy_ip}\n print(f\"<======================> Trying with: {proxy_ip}<======================>\")\n headers = {'User-Agent': 'Mozilla/5.0'}\n resp = requests.get(url, headers=headers, proxies=proxy, timeout=5)\n if resp.status_code == requests.codes.ok:\n print(f\"<======================> Got Success with: {proxy_ip}<======================>\")\n return resp.url\n except Exception as e:\n print(e)\n continue\n return \"\"\n\n\ncanonical_link = scrap_now(\"https://vt.tiktok.com/ZS81uRSRR/\")\nprint(\"Canonical link: \", canonical_link)\n\nOutput:\n\nMethod - 3: Using Selenium\nWe can do this with selenium as well. 
It will take almost 5 seconds\nFirst, install selenium using pip install selenium==3.141.0\nthen execute below lines:\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option(\"prefs\", {\n \"profile.default_content_setting_values.media_stream_mic\": 1,\n \"profile.default_content_setting_values.media_stream_camera\": 1,\n \"profile.default_content_setting_values.geolocation\": 1,\n \"profile.default_content_setting_values.notifications\": 1,\n \"credentials_enable_service\": False,\n \"profile.password_manager_enabled\": False\n})\noptions.add_argument('--headless')\noptions.add_experimental_option(\"excludeSwitches\", ['enable-automation'])\nbrowser = webdriver.Chrome(ChromeDriverManager(cache_valid_range=365).install(), options=options)\nbrowser.get(\"https://vt.tiktok.com/ZS81uRSRR/\")\nprint(\"Canonical link: \", browser.current_url)\n\nNote: On first run it will take a bit more time as it will download web drivers automatically, but after that it will use cache only.\n"
] |
[
2
] |
[] |
[] |
[
"python",
"python_requests"
] |
stackoverflow_0074596976_python_python_requests.txt
|
Q:
What can I use instead of lambda in my Python code?
I was wondering if there was a simple alternative to lambda in my code.
def add_attack(self, attack_name):
if attack_name in self.known_attacks and attack_name not in self.attacks:
try:
assert(len(self.attacks) < 4)
self.attacks[attack_name] = self.known_attacks.get(attack_name)
return True
except:
#find the min value of self.attacks
minval = min(self.attacks.keys(), key=(lambda k: self.attacks[k]))
for keys, values in self.attacks.items():
if self.attacks[minval] == values and min(minval, keys) == keys:
minval = keys
del self.attacks[minval]
self.attacks[attack_name] = self.known_attacks.get(attack_name)
return True
else:
return False
I'm still learning python, and the lambda function is throwing me off since I haven't learned that much about it yet. Instead of using lambda, can someone help me out with another function to replace lambda? Thanks!
A:
You could define a function for it:
def return_attacks(self,k):
return self.attacks[k]
And use that function in the key:
minval = min(self.attacks.keys(), key=(self.return_attacks))
I would strongly recommend you get comfortable with lambda functions - and I think it is clear to you now that lambda x : expr(x) is equivalent to func when
def func(x):
return expr(x)
A:
A lambda should not scare you! It's just a small anonymous function.
It can take any number of arguments, but can only have one expression.
minval = min(self.attacks.keys(), key=(lambda k: self.attacks[k]))
Here you are getting the result of the expression min() as minval
The min function can take keys, here is more about that. I can see it can be confusing, but this key is not the same thing with a dictionary key. This key is just a way to tell the min function how it should behave.
If we go back to the code:
So the line basically finds the minimum value in self.attacks.keys(), with a lambda function that returns every element in self.attacks[]
If you do not want to use lambda, you can write a function in your class that does exactly the same thing.
def find_min_key(self, my_dict):
return min(my_dict, key= my_dict.get)
You can use this as:
min_val = self.find_min_key(self.attacks)
|
What can I use instead of lambda in my Python code?
|
I was wondering if there was a simple alternative to lambda in my code.
def add_attack(self, attack_name):
if attack_name in self.known_attacks and attack_name not in self.attacks:
try:
assert(len(self.attacks) < 4)
self.attacks[attack_name] = self.known_attacks.get(attack_name)
return True
except:
#find the min value of self.attacks
minval = min(self.attacks.keys(), key=(lambda k: self.attacks[k]))
for keys, values in self.attacks.items():
if self.attacks[minval] == values and min(minval, keys) == keys:
minval = keys
del self.attacks[minval]
self.attacks[attack_name] = self.known_attacks.get(attack_name)
return True
else:
return False
I'm still learning python, and the lambda function is throwing me off since I haven't learned that much about it yet. Instead of using lambda, can someone help me out with another function to replace lambda? Thanks!
|
[
"You could define a function for it:\ndef return_attacks(self,k):\n return self.attacks[k]\n\nAnd use that function in the key:\nminval = min(self.attacks.keys(), key=(self.return_attacks))\n\nI would strongly recommend you get comfortable with lambda functions - and I think it is clear to you now that lambda x : expr(x) is equivalent to func when\ndef func(x):\n return expr(x)\n\n",
"A lambda should not scare you! It's just a small anonymous function.\nIt can take any number of arguments, but can only have one expression.\nminval = min(self.attacks.keys(), key=(lambda k: self.attacks[k]))\n\nHere you are getting the result of the expression min() as minval\nThe min function can take keys, here is more about that. I can see it can be confusing, but this key is not the same thing with a dictionary key. This key is just a way to tell the min function how it should behave.\nIf we go back to the code:\nSo the line basically finds the minimum value in self.attacks.keys(), with a lambda function that returns every element in self.attacks[]\nIf you do not want to use lambda, you can write a function in your class that does exactly the same thing.\ndef find_min_key(self, my_dict):\n return min(my_dict, key= my_dict.get)\n\nYou can use this as:\nmin_val = self.find_min_key(self.attacks)\n\n"
] |
[
3,
1
] |
[] |
[] |
[
"lambda",
"python"
] |
stackoverflow_0074597006_lambda_python.txt
|
Q:
Is there a way to create and visualize a model for this data?
I am working on a small data base (~70 candidates), they are molecules. I want to find the molecule that fits the best with the actual drug. The molecules have different attribute like the type of amino acid, area, volume, affinity of binding and so on.
I want to systematically pick the one that is the best with respect to the actual drug. How can I do that?
Also I wanted to know which amino acid residue has the bigger impact on the drugs' affinities.
* Molecules aff aa1 aa2 SA V SA/V
- **V0L 10.4 non non 357.96 334.58 1.069878654**
- Trp-Trp-Glucose 9.9 Trp Trp 381.74 353.17 1.080895886
- Trp-Phe-Glucose 9.2 Trp Phe 431.57 411.31 1.049257251
- Phe-Trp-Glucose 9.1 Phe Trp 411.36 385.49 1.067109393
- Trp-Arg-Glucose 9.1 Trp Arg 440.12 430.72 1.021823923
- Gln-Trp-Glucose 8.9 Gln Trp 502.22 491.99 1.020793106
- Trp-Ala-Glucose 8.9 Trp Ala 494.11 467.79 1.056264563
- Tyr-Trp-Glucose 8.9 Tyr Trp 405.17 382.69 1.058742063
- Trp-Asn-Glucose 8.8 Trp Asn 464.75 440.79 1.05435695
- Tyr-Phe-Glucose 8.8 Tyr Phe 440.93 415 1.062481928
- Glu-Trp-Glucose 8.7 Glu Trp 395.82 377.62 1.0481966
- Ile-Trp-Glucose 8.6 Ile Trp 449.31 436 1.030527523
- Trp-Gly-Glucose 8.6 Trp Gly 427.09 403.61 1.058174971
- Asn-Trp-Glucose 8.5 Asn Trp 398.61 370.53 1.075783337
- Tyr-Val-Glucose 8.5 Tyr Val 444.07 427.72 1.038225942
- Phe-Asn-Glucose 8.4 Phe Asn 431.91 403.36 1.070780444
- Trp-Leu-Glucose 8.4 Trp Leu 429.28 400.87 1.070870856
- Tyr-Arg-Glucose 8.4 Tyr Arg 482.72 459 1.05167756
- Val-Trp-Glucose 8.4 Val Trp 443.64 431.18 1.028897444
- Asn-Phe-Glucose 8.3 Asn Phe 416.65 395.56 1.053316817
- Leu-Trp-Glucose 8.3 Leu Trp 471.93 454 1.039493392
- Phe-Ala-Glucose 8.3 Phe Ala 440.88 426.5 1.033716295
- Trp-Lys-Glucose 8.3 Trp Lys 363.36 334.96 1.084786243
- Gln-Phe-Glucose 8.2 Gln Phe 414.28 393.3 1.053343504
- His-Phe-Glucose 8.2 His Phe 391.99 367.35 1.067074997
- Lys-Trp-Glucose 8.2 Lys Trp 381.2 353.02 1.079825506
- Phe-Arg-Glucose 8.2 Phe Arg 445.42 431.92 1.031255788
- Phe-Val-Glucose 8.2 Phe Val 401.67 380.86 1.0546395
- Ser-Trp-Glucose 8.2 Ser Trp 391.51 376.59 1.039618683
- Tyr-Ala-Glucose 8.2 Tyr Ala 397.06 370 1.073135135
- Tyr-Asn-Glucose 8.2 Tyr Asn 425.75 400.87 1.062065009
- Arg-Phe-Glucose 8.1 Arg Phe 400.62 393.13 1.019052222
- Glu-Phe-Glucose 8.1 Glu Phe 361.66 334.59 1.080904988
- Gly-Trp-Glucose 8.1 Gly Trp 431.2 411.58 1.047669955
- Ile-Phe-Glucose 8.1 Ile Phe 420.93 405.13 1.038999827
- Met-Trp-Glucose 8.1 Met Trp 437.37 407.54 1.073195269
- Phe-Leu-Glucose 8.1 Phe Leu 431.02 416.09 1.03588166
- Thr-Trp-Glucose 8.1 Thr Trp 425.13 406.44 1.045984647
- Val-Phe-Glucose 8.1 Val Phe 461.46 437.11 1.055706801
- Ala-Phe-Glucose 8.0 Ala Phe 384.52 374.87 1.025742257
- Asp-Trp-Glucose 8.0 Asp Trp 387.03 360.2 1.074486396
- Phe-Lys-Glucose 8.0 Phe Lys 397.19 376.55 1.054813438
- Ser-Phe-Glucose 8.0 Ser Phe 486.33 469.53 1.035780461
- Tyr-Gly-Glucose 8.0 Tyr Gly 520.67 502.11 1.036964012
- Leu-Phe-Glucose 7.9 Leu Phe 405.28 388.97 1.041931254
- Tyr-Lys-Glucose 7.9 Tyr Lys 438.43 408.17 1.074135777
- Phe-Gly-Glucose 7.8 Phe Gly 491.96 466.26 1.055119461
- Phe-Ile-Glucose 7.8 Phe Ile 467.85 440.47 1.062160874
- Pro-Trp-Glucose 7.8 Pro Trp 438.87 408.04 1.075556318
- Cys-Phe-Glucose 7.7 Cys Phe 489.5 465.15 1.052348705
- Gly-Phe-Glucose 7.7 Gly Phe 465.85 440.22 1.05822089
- His-Asn-Glucose 7.7 His Asn 439.2 408.19 1.075969524
- His-Leu-Glucose 7.6 His Leu 487.44 474.28 1.027747322
- His-Ala-Glucose 7.5 His Ala 470.46 449.32 1.047048874
- His-Asp-Glucose 7.5 His Asp 437.14 409.32 1.067966383
- His-Ile-Glucose 7.3 His Ile 498.06 467.71 1.064890637
- Met-Phe-Glucose 7.1 Met Phe 471.35 441.58 1.067417003
- His-Gly-Glucose 7.0 His Gly 402.66 378.63 1.063465652
- His-Lys-Glucose 7.0 His Lys 484.1 461.01 1.050085681
- His-Val-Glucose 7.0 His Val 430.62 408.78 1.053427271
- His-Arg-Glucose 6.9 His Arg 423.45 402.47 1.052128109
- Pro-Phe-Glucose 6.9 Pro Phe 422.36 396.37 1.065570048
I have tried to plot them using excel.
|
Is there a way to create and visualize a model for this data?
|
I am working on a small data base (~70 candidates), they are molecules. I want to find the molecule that fits the best with the actual drug. The molecules have different attribute like the type of amino acid, area, volume, affinity of binding and so on.
I want to systematically pick the one that is the best with respect to the actual drug. How can I do that?
Also I wanted to know which amino acid residue has the bigger impact on the drugs' affinities.
* Molecules aff aa1 aa2 SA V SA/V
- **V0L 10.4 non non 357.96 334.58 1.069878654**
- Trp-Trp-Glucose 9.9 Trp Trp 381.74 353.17 1.080895886
- Trp-Phe-Glucose 9.2 Trp Phe 431.57 411.31 1.049257251
- Phe-Trp-Glucose 9.1 Phe Trp 411.36 385.49 1.067109393
- Trp-Arg-Glucose 9.1 Trp Arg 440.12 430.72 1.021823923
- Gln-Trp-Glucose 8.9 Gln Trp 502.22 491.99 1.020793106
- Trp-Ala-Glucose 8.9 Trp Ala 494.11 467.79 1.056264563
- Tyr-Trp-Glucose 8.9 Tyr Trp 405.17 382.69 1.058742063
- Trp-Asn-Glucose 8.8 Trp Asn 464.75 440.79 1.05435695
- Tyr-Phe-Glucose 8.8 Tyr Phe 440.93 415 1.062481928
- Glu-Trp-Glucose 8.7 Glu Trp 395.82 377.62 1.0481966
- Ile-Trp-Glucose 8.6 Ile Trp 449.31 436 1.030527523
- Trp-Gly-Glucose 8.6 Trp Gly 427.09 403.61 1.058174971
- Asn-Trp-Glucose 8.5 Asn Trp 398.61 370.53 1.075783337
- Tyr-Val-Glucose 8.5 Tyr Val 444.07 427.72 1.038225942
- Phe-Asn-Glucose 8.4 Phe Asn 431.91 403.36 1.070780444
- Trp-Leu-Glucose 8.4 Trp Leu 429.28 400.87 1.070870856
- Tyr-Arg-Glucose 8.4 Tyr Arg 482.72 459 1.05167756
- Val-Trp-Glucose 8.4 Val Trp 443.64 431.18 1.028897444
- Asn-Phe-Glucose 8.3 Asn Phe 416.65 395.56 1.053316817
- Leu-Trp-Glucose 8.3 Leu Trp 471.93 454 1.039493392
- Phe-Ala-Glucose 8.3 Phe Ala 440.88 426.5 1.033716295
- Trp-Lys-Glucose 8.3 Trp Lys 363.36 334.96 1.084786243
- Gln-Phe-Glucose 8.2 Gln Phe 414.28 393.3 1.053343504
- His-Phe-Glucose 8.2 His Phe 391.99 367.35 1.067074997
- Lys-Trp-Glucose 8.2 Lys Trp 381.2 353.02 1.079825506
- Phe-Arg-Glucose 8.2 Phe Arg 445.42 431.92 1.031255788
- Phe-Val-Glucose 8.2 Phe Val 401.67 380.86 1.0546395
- Ser-Trp-Glucose 8.2 Ser Trp 391.51 376.59 1.039618683
- Tyr-Ala-Glucose 8.2 Tyr Ala 397.06 370 1.073135135
- Tyr-Asn-Glucose 8.2 Tyr Asn 425.75 400.87 1.062065009
- Arg-Phe-Glucose 8.1 Arg Phe 400.62 393.13 1.019052222
- Glu-Phe-Glucose 8.1 Glu Phe 361.66 334.59 1.080904988
- Gly-Trp-Glucose 8.1 Gly Trp 431.2 411.58 1.047669955
- Ile-Phe-Glucose 8.1 Ile Phe 420.93 405.13 1.038999827
- Met-Trp-Glucose 8.1 Met Trp 437.37 407.54 1.073195269
- Phe-Leu-Glucose 8.1 Phe Leu 431.02 416.09 1.03588166
- Thr-Trp-Glucose 8.1 Thr Trp 425.13 406.44 1.045984647
- Val-Phe-Glucose 8.1 Val Phe 461.46 437.11 1.055706801
- Ala-Phe-Glucose 8.0 Ala Phe 384.52 374.87 1.025742257
- Asp-Trp-Glucose 8.0 Asp Trp 387.03 360.2 1.074486396
- Phe-Lys-Glucose 8.0 Phe Lys 397.19 376.55 1.054813438
- Ser-Phe-Glucose 8.0 Ser Phe 486.33 469.53 1.035780461
- Tyr-Gly-Glucose 8.0 Tyr Gly 520.67 502.11 1.036964012
- Leu-Phe-Glucose 7.9 Leu Phe 405.28 388.97 1.041931254
- Tyr-Lys-Glucose 7.9 Tyr Lys 438.43 408.17 1.074135777
- Phe-Gly-Glucose 7.8 Phe Gly 491.96 466.26 1.055119461
- Phe-Ile-Glucose 7.8 Phe Ile 467.85 440.47 1.062160874
- Pro-Trp-Glucose 7.8 Pro Trp 438.87 408.04 1.075556318
- Cys-Phe-Glucose 7.7 Cys Phe 489.5 465.15 1.052348705
- Gly-Phe-Glucose 7.7 Gly Phe 465.85 440.22 1.05822089
- His-Asn-Glucose 7.7 His Asn 439.2 408.19 1.075969524
- His-Leu-Glucose 7.6 His Leu 487.44 474.28 1.027747322
- His-Ala-Glucose 7.5 His Ala 470.46 449.32 1.047048874
- His-Asp-Glucose 7.5 His Asp 437.14 409.32 1.067966383
- His-Ile-Glucose 7.3 His Ile 498.06 467.71 1.064890637
- Met-Phe-Glucose 7.1 Met Phe 471.35 441.58 1.067417003
- His-Gly-Glucose 7.0 His Gly 402.66 378.63 1.063465652
- His-Lys-Glucose 7.0 His Lys 484.1 461.01 1.050085681
- His-Val-Glucose 7.0 His Val 430.62 408.78 1.053427271
- His-Arg-Glucose 6.9 His Arg 423.45 402.47 1.052128109
- Pro-Phe-Glucose 6.9 Pro Phe 422.36 396.37 1.065570048
I have tried to plot them using excel.
|
[] |
[] |
[
"Assuming you have the .xlsx file containing the raw data, you can import it in pandas, which is a good way of working with databases within python.\nYou can then use matplot to plot and visualize the data.\n(My examples are gonna seem weird because I do not know much about biochemistry, but I hope you understand how python works from my examples)\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_excel(\"data.xlsx\")\nplt.scatter(data['Surface Area'],data['Volume'])\nplt.show()\n\nThe above code would plot a scatter plot of surface area against volume.\nTo pick the best candidate, you would have to come up with an objective function that considers all parameters that you want to consider.\nFor example, if you wanted a molecule with the highest ratio of binding affinity to surface area, you would do:\ndef objective(row):\n return row['Binding Affinity']/row['Surface Area'])\n\ndata['Objective'] = data.apply(objective)\nprint(data[data['Objective'].idxmax()])\n\nThis would return the row containing the largest objective function.\nYou could attempt to estimate the impact of different aa residues on the binding affinity by calculating their means and standard deviations. There may be a better way to do this, but this would be my first attempt.\n# To get all unique residues present in the sample data\naa_residues = list(set(data['Amino Acid Residue 1']))\nfor r in aa_residues:\n mean_ba = data[data['Amino Acid Residue 1'] == r]['Binding Affinity'].mean()\n std_ba = data[data['Amino Acid Residue 1'] == r]['Binding Affinity'].std()\n print(r,mean_ba,std_ba)\n\n"
] |
[
-1
] |
[
"matplotlib",
"python",
"seaborn"
] |
stackoverflow_0074596981_matplotlib_python_seaborn.txt
|
Q:
Invalid salt error when comparing plain text and hash with bcrypt
I'm trying to compare a saved hash and a user input in python using bcrypt. My code:
while passnotcorrect == True:
password = input("Enter password: ")
password = password.encode('utf-8')
file = open('password.txt', 'r')
checkhash = file.read()
file.close()
checkhash = checkhash.encode('utf-8')
if bcrypt.checkpw(password, checkhash):
passnotcorrect = False
os.system('cls||clear')
else:
print("Password is incorrect \n")
The error:
ValueError: Invalid salt
I'd really like some help with this. I'm not sure why this function would require the salt in the first place. Thanks
A:
A little late but I think your issue is that you're trying to compare 'password' which is utf8 encoded string input with 'checkhash', another string read from a file.
Bcrypt.checkpw() takes in a UTF8 encoded string for the password to check as the first argument followed by the UTF8 encoded hash to compare the password being provided against to see if the provided pass matches the hash pass.
TLDR; you're passing two strings to the checkpw method, when the second argument needs to be the hash you're comparing against (gold standard).
db_pass = "pw-string123" # from database
password = "pw-string123" # from input
db_hashed_pass = bcrypt.hashpw(db_pass.encode('utf8'), bcrypt.gensalt())
print(f'Hashed pass to save in db: {db_hashed_pass}')
is_pass_matching = bcrypt.checkpw(password.encode('utf8'), db_hashed_pass)
print(f'Do passwords match? {is_pass_matching}')
Make sure your hashed password isn't being truncated (length matches) upon entry or retrieval.
|
Invalid salt error when comparing plain text and hash with bcrypt
|
I'm trying to compare a saved hash and a user input in python using bcrypt. My code:
while passnotcorrect == True:
password = input("Enter password: ")
password = password.encode('utf-8')
file = open('password.txt', 'r')
checkhash = file.read()
file.close()
checkhash = checkhash.encode('utf-8')
if bcrypt.checkpw(password, checkhash):
passnotcorrect = False
os.system('cls||clear')
else:
print("Password is incorrect \n")
The error:
ValueError: Invalid salt
I'd really like some help with this. I'm not sure why this function would require the salt in the first place. Thanks
|
[
"A little late but I think your issue is that you're trying to compare 'password' which is utf8 encoded string input with 'checkhash', another string read from a file.\nBcrypt.checkpw() takes in a UTF8 encoded string for the password to check as the first argument followed by the UTF8 encoded hash to compare the password being provided against to see if the provided pass matches the hash pass.\nTLDR; you're passing two strings to the checkpw method, when the second argument needs to be the hash you're comparing against (gold standard).\ndb_pass = \"pw-string123\" # from database\npassword = \"pw-string123\" # from input\ndb_hashed_pass = bcrypt.hashpw(db_pass.encode('utf8'), bcrypt.gensalt())\nprint(f'Hashed pass to save in db: {db_hashed_pass}')\nis_pass_matching = bcrypt.checkpw(password.encode('utf8'), db_hashed_pass)\n\nprint(f'Do passwords match? {is_pass_matching}')\n\nMake sure your hashed password isn't being truncated (length matches) upon entry or retrieval.\n"
] |
[
0
] |
[] |
[] |
[
"bcrypt",
"compare",
"hash",
"python",
"salt"
] |
stackoverflow_0071628244_bcrypt_compare_hash_python_salt.txt
|
Q:
Marshmallow Validation Error of Unknown field for Dict and List of Dict in POST payload
This is a POST payload body received in backend for store and order generated in a test frontend application that includes 2 keys with objects List and a Object
{
"orderedItems": [
{
"id": 1,
"name": "Asado",
"amount": 2,
"price": 15.99
},
{
"id": 3,
"name": "Sushi",
"amount": 1,
"price": 22.99
},
{
"id": 6,
"name": "Green Bowl",
"amount": 1,
"price": 18.99
}
],
"user": {
"city": "Guatemala",
"email": "dher@example.com",
"name": "danny"
}
}
Once is received I tried to validate in Marshmallow with OrderSchema nested Schema
from marshmallow import validate, validates, validates_schema, \
ValidationError, post_dump, validates
from api import ma
class MealsOrderSchema(ma.Schema):
id= ma.Integer(required=True)
amount= ma.Integer(required=True)
price= ma.Float()
name= ma.String()
class UserDataSchema(ma.Schema):
name= ma.String(required=True)
email= ma.String(required=True, validate=[validate.Email()])
city= ma.String()
class OrderSchema(ma.Schema):
orderedItems= ma.Nested(MealsOrderSchema(many=True))
userData= ma.Nested(UserDataSchema)
Validation is not pass for Object List or object neither, I checked in similar question or other blogs for workaround
{
"code": 400,
"description": "The server found one or more errors in the information that you sent.",
"errors": {
"json": {
"orderedItems": [
"Unknown field."
],
"user": [
"Unknown field."
]
}
},
"message": "Validation Error"
}
Appreciate your hints, thanks
A:
If you need to manage a list of objects you must use the List class.
While if the json field name is different from the property name in python, you have to specify which field to load via the attribute parameter
from marshmallow import Schema, fields
class OrderSchema(Schema):
orderedItems: fields.List(fields.Nested(MealsOrderSchema))
userData: fields.Nested(UserDataSchema, attribute='user')
|
Marshmallow Validation Error of Unknown field for Dict and List of Dict in POST payload
|
This is a POST payload body received in backend for store and order generated in a test frontend application that includes 2 keys with objects List and a Object
{
"orderedItems": [
{
"id": 1,
"name": "Asado",
"amount": 2,
"price": 15.99
},
{
"id": 3,
"name": "Sushi",
"amount": 1,
"price": 22.99
},
{
"id": 6,
"name": "Green Bowl",
"amount": 1,
"price": 18.99
}
],
"user": {
"city": "Guatemala",
"email": "dher@example.com",
"name": "danny"
}
}
Once is received I tried to validate in Marshmallow with OrderSchema nested Schema
from marshmallow import validate, validates, validates_schema, \
ValidationError, post_dump, validates
from api import ma
class MealsOrderSchema(ma.Schema):
id= ma.Integer(required=True)
amount= ma.Integer(required=True)
price= ma.Float()
name= ma.String()
class UserDataSchema(ma.Schema):
name= ma.String(required=True)
email= ma.String(required=True, validate=[validate.Email()])
city= ma.String()
class OrderSchema(ma.Schema):
orderedItems= ma.Nested(MealsOrderSchema(many=True))
userData= ma.Nested(UserDataSchema)
Validation is not pass for Object List or object neither, I checked in similar question or other blogs for workaround
{
"code": 400,
"description": "The server found one or more errors in the information that you sent.",
"errors": {
"json": {
"orderedItems": [
"Unknown field."
],
"user": [
"Unknown field."
]
}
},
"message": "Validation Error"
}
Appreciate your hints, thanks
|
[
"If you need to manage a list of objects you must use the List class.\nWhile if the json field name is different from the property name in python, you have to specify which field to load via the attribute parameter\nfrom marshmallow import Schema, fields\n\nclass OrderSchema(Schema):\n orderedItems: fields.List(fields.Nested(MealsOrderSchema))\n userData: fields.Nested(UserDataSchema, attribute='user')\n\n"
] |
[
1
] |
[] |
[] |
[
"flask",
"marshmallow",
"python"
] |
stackoverflow_0074596324_flask_marshmallow_python.txt
|
Q:
C# class to Python class
I have an class defined in C# as Servicing and i need to convert this code to Python. So how do i convert the Servicing class to a list datatype in python and then use it in Adjusted class?
class Servicing
{
public long StatementName{ get; set; }
public string City{ get; set; }
}
Now this class is used in another class Adjusted
class Adjusted
{
public List<Servicing> Services{ get; set; }
}
For Servicing class I can define the constructor like this and then have its setter and getter defined too.
class Servicing:
def __init__(self):
self._StatementName=0.0
self._City= ""
But how do I use this Servicing class in a similar way how it is used in Adjusted class?
A:
class Adjusted:
def __init__(self):
self.Services = None
class Servicing:
def __init__(self):
self.StatementName = 0
self.City = None
Don't know much about Python, But it should be something like the above.
Please ignore compilation errors, As I am not from Pythn background.
A:
Python doesn't need setters or getters since all class attributes and methods are public. Instead values are given as parameters to __init__ and class is initialized that way. List are almost as easy, just add new values with .append or extend with another iterable with .extend
class Servicing:
# Default values for class
def __init__(self, StatementName=0.0, City=""):
self.StatementName = StatementName
self.City = City
class Adjusted:
def __init__(self, Services=[]):
self.Services = []
self.Services.extend(Services)
def add_service(self, StatementName=0.0, City=""):
new_servicing = Servicing(StatementName, City)
self.Services.append(new_servicing)
a_servicing = Servicing(1.2, "London")
print(Servicing.City)
a_servicing.City = "Boston"
print(Servicing.City)
When you get more familiar with python, there is a way to implement getter/setter with the @property decorator.
|
C# class to Python class
|
I have an class defined in C# as Servicing and i need to convert this code to Python. So how do i convert the Servicing class to a list datatype in python and then use it in Adjusted class?
class Servicing
{
public long StatementName{ get; set; }
public string City{ get; set; }
}
Now this class is used in another class Adjusted
class Adjusted
{
public List<Servicing> Services{ get; set; }
}
For Servicing class I can define the constructor like this and then have its setter and getter defined too.
class Servicing:
def __init__(self):
self._StatementName=0.0
self._City= ""
But how do I use this Servicing class in a similar way how it is used in Adjusted class?
|
[
"class Adjusted:\n\n def __init__(self):\n self.Services = None\n\n\nclass Servicing:\n\n def __init__(self):\n self.StatementName = 0\n self.City = None\n\nDon't know much about Python, But it should be something like the above.\nPlease ignore compilation errors, As I am not from Pythn background.\n",
"Python doesn't need setters or getters since all class attributes and methods are public. Instead values are given as parameters to __init__ and class is initialized that way. List are almost as easy, just add new values with .append or extend with another iterable with .extend\nclass Servicing:\n # Default values for class\n def __init__(self, StatementName=0.0, City=\"\"):\n self.StatementName = StatementName\n self.City = City\n\nclass Adjusted:\n def __init__(self, Services=[]):\n self.Services = []\n self.Services.extend(Services)\n\n def add_service(self, StatementName=0.0, City=\"\"):\n new_servicing = Servicing(StatementName, City)\n self.Services.append(new_servicing)\n\na_servicing = Servicing(1.2, \"London\")\nprint(Servicing.City)\na_servicing.City = \"Boston\"\nprint(Servicing.City)\n\nWhen you get more familiar with python, there is a way to implement getter/setter with the @property decorator.\n"
] |
[
0,
0
] |
[] |
[] |
[
"c#",
"python"
] |
stackoverflow_0074597041_c#_python.txt
|
Q:
Using RandomizedSearchCV to find the best number of neurons and activation function
I used the below code to find the best number of neurons in the two hidden layers and the best activation function.
def binary_nn_builder(units,activation):
model = Sequential()
model.add(Input(shape=x_train_norm.shape[1]))
model.add(Dense(units, kernel_initializer='normal', activation=activation))
model.add(Dense(units, kernel_initializer='normal', activation=activation))
model.add(Dense(1, kernel_initializer='normal', activation=activation))
if activation =='tanh':
activation = keras.activations.tanh(x)
elif activation =='relu':
activation = keras.activations.relu(x)
optimizer = keras.optimizers.Adam(lr=0.01)
model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy'])
return model
classifier_search=KerasClassifier(build_fn=binary_nn_builder,batch_size=22)
parameters={
"activation": ['tanh','relu'],
"units": np.arange(4,20,1).tolist()
}
x_train_norm = np.asarray(x_train_norm).astype(np.float32)
y_train = np.asarray(y_train).astype(np.float32)
rnd_search_cv=RandomizedSearchCV(estimator=classifier_search,param_distributions=parameters,n_iter=20,cv=3,verbose=0,n_jobs=-1)
rnd_search_cv.fit(x_train_norm, y_train,verbose=0,epochs=100)
However I got an error:
Failed to convert a NumPy array to a Tensor (Unsupported object type float).
A:
I believe the issue is somewhere within your data being fitted.
I was able to get it running with some generated data:
import tensorflow as tf
from tensorflow import keras
from keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.layers import Input, Dense
from sklearn.datasets import make_classification
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
def binary_nn_builder(units,activation):
model = keras.Sequential()
model.add(Input(shape=X.shape[1]))
model.add(Dense(units, kernel_initializer='normal', activation=activation))
model.add(Dense(units, kernel_initializer='normal', activation=activation))
model.add(Dense(1, kernel_initializer='normal', activation=activation))
if activation =='tanh':
activation = keras.activations.tanh(X)
elif activation =='relu':
activation = keras.activations.relu(X)
optimizer = keras.optimizers.Adam(lr=0.01)
model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy'])
return model
classifier_search=KerasClassifier(build_fn=binary_nn_builder,batch_size=22)
parameters={
"activation": ['tanh','relu'],
"units": np.arange(4,20,1).tolist()
}
X, y = make_classification(n_samples=100, n_features=4, n_redundant=0, n_informative=2,
n_clusters_per_class=1, random_state=14)
X = np.asarray(X).astype(np.float32)
y = np.asarray(y).astype(np.float32)
rnd_search_cv=RandomizedSearchCV(estimator=classifier_search,param_distributions=parameters,n_iter=2,cv=3,verbose=0,n_jobs=1)
rnd_search_cv.fit(X, y,verbose=0,epochs=100)
|
Using RandomizedSearchCV to find the best number of neurons and activation function
|
I used the below code to find the best number of neurons in the two hidden layers and the best activation function.
def binary_nn_builder(units,activation):
model = Sequential()
model.add(Input(shape=x_train_norm.shape[1]))
model.add(Dense(units, kernel_initializer='normal', activation=activation))
model.add(Dense(units, kernel_initializer='normal', activation=activation))
model.add(Dense(1, kernel_initializer='normal', activation=activation))
if activation =='tanh':
activation = keras.activations.tanh(x)
elif activation =='relu':
activation = keras.activations.relu(x)
optimizer = keras.optimizers.Adam(lr=0.01)
model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy'])
return model
classifier_search=KerasClassifier(build_fn=binary_nn_builder,batch_size=22)
parameters={
"activation": ['tanh','relu'],
"units": np.arange(4,20,1).tolist()
}
x_train_norm = np.asarray(x_train_norm).astype(np.float32)
y_train = np.asarray(y_train).astype(np.float32)
rnd_search_cv=RandomizedSearchCV(estimator=classifier_search,param_distributions=parameters,n_iter=20,cv=3,verbose=0,n_jobs=-1)
rnd_search_cv.fit(x_train_norm, y_train,verbose=0,epochs=100)
However I got an error:
Failed to convert a NumPy array to a Tensor (Unsupported object type float).
|
[
"I believe the issue is somewhere within your data being fitted.\nI was able to get it running with some generated data:\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom tensorflow.keras.layers import Input, Dense\nfrom sklearn.datasets import make_classification\nimport numpy as np\nfrom sklearn.model_selection import RandomizedSearchCV\n\ndef binary_nn_builder(units,activation):\n model = keras.Sequential()\n model.add(Input(shape=X.shape[1]))\n model.add(Dense(units, kernel_initializer='normal', activation=activation))\n model.add(Dense(units, kernel_initializer='normal', activation=activation))\n model.add(Dense(1, kernel_initializer='normal', activation=activation))\n if activation =='tanh':\n activation = keras.activations.tanh(X)\n elif activation =='relu':\n activation = keras.activations.relu(X)\n optimizer = keras.optimizers.Adam(lr=0.01)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model\n\nclassifier_search=KerasClassifier(build_fn=binary_nn_builder,batch_size=22)\n\nparameters={\n \"activation\": ['tanh','relu'],\n \"units\": np.arange(4,20,1).tolist()\n}\n\nX, y = make_classification(n_samples=100, n_features=4, n_redundant=0, n_informative=2,\n n_clusters_per_class=1, random_state=14)\n\nX = np.asarray(X).astype(np.float32)\ny = np.asarray(y).astype(np.float32)\n\nrnd_search_cv=RandomizedSearchCV(estimator=classifier_search,param_distributions=parameters,n_iter=2,cv=3,verbose=0,n_jobs=1)\n\nrnd_search_cv.fit(X, y,verbose=0,epochs=100)\n\n"
] |
[
0
] |
[] |
[] |
[
"keras",
"machine_learning",
"neural_network",
"python",
"scikit_learn"
] |
stackoverflow_0074593981_keras_machine_learning_neural_network_python_scikit_learn.txt
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.