Dataset schema (one row per Stack Overflow thread):
content: string (85 to 101k chars)
title: string (0 to 150 chars)
question: string (15 to 48k chars)
answers: list
answers_scores: list
non_answers: list
non_answers_scores: list
tags: list
name: string (35 to 137 chars)
Q: Display JSON data on a page as an expandable/collapsible list I need help with displaying JSON data on a page as an expandable/collapsible list. Here is a valid JSON I've made by converting from XML with Python: JSON Data And to display it I'm using this: <!DOCTYPE HTML> <head> <title>JSON Tree View</title> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js" type="text/javascript"></script> <script src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js" type="text/javascript"></script> </head> <script> function json_tree(object){ var json="<ul>"; for(prop in object){ var value = object[prop]; switch (typeof(value)){ case "object": var token = Math.random().toString(36).substr(2,16); json += "<li><a class='label' href='#"+token+"' data-toggle='collapse'>"+prop+"="+value+"</a><div id='"+token+"' class='collapse'>"+json_tree(value)+"</div></li>"; break; default: json += "<li>"+prop+"="+value+"</li>"; } } return json+"</ul>"; } </script> <body style="margin: 40px;"> <h3>Paste JSON Into The Textarea Below and Click 'Build Tree'</h3> <textarea id="json" style="width: 100%;min-height:300px;"> </textarea> <button onclick="$('#output').html(json_tree(JSON.parse($('#json').val())));">Build Tree</button> <div id="output"> </div> </body> </html> This is what I get: Image I need help "filtering" (or merging with the upper node) those "0" and "1", and also, how to show only the values of the attributes without the names (or, if you have some better idea, how else I could display this list)? A: a good-looking, compact, collapsible tree view pgrabovets' json-view is amazingly clean and well designed. Check out the demo A: If you can consider using JS libraries, consider using JSON Formatter or Render JSON. Both these libraries offer configuration options like themes, maximum depth and sorting. To display a simple JSON string in a collapsible form using Render JSON, you can use <script> document.getElementById("test").appendChild( renderjson({ hello: [1,2,3,4], there: { a:1, b:2, c:["hello", null] } }) ); </script> A: Some of the links in the question are no longer accessible. I assume you are looking for how to make a collapsible JSON view. TL;DR You can jump to Full code. The code is very short (under 200 lines, including JSDoc, comments, and test code) and should inspire you on how to solve the problem. In some respects this question is very much like how to make a table of contents (TOC). First of all, JSON data is like an object. All we need to do is to add some more attributes (key, depth, children, ...) for each item. Once these actions are done, all that's left is to render, and here is the pseudo-code for rendering. render(node) { const divFlag = document.createRange().createContextualFragment(`<div style="margin-left:${node.depth * 18}px"></div>`) const divElem = divFlag.querySelector("div") const spanFlag = document.createRange().createContextualFragment( `<span class="ms-2">${node.key} : ${node.value}</span>` ) node.children.forEach(subNode => { const subElem = render(subNode) spanFlag.append(subElem) }) divElem.append(spanFlag) return divElem } Full code Both CSS links are optional: 
bootstrap: ms-2 (margin start) fontawesome: fa-caret-right, fa-caret-down if you don't want to use it, you can use ▸(25B8)▶(25B6)▾(25BE)▼(25BC) as the before contents <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossOrigin="anonymous"> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.4/css/all.min.css" integrity="sha512-1ycn6IcaQQ40/MKBW2W4Rhis/DbILU74C1vSrLJxCq57o941Ym01SwNsOMqvEBFlcgUa6xLiPY/NS5R+E6ztJQ==" crossOrigin="anonymous" referrerpolicy="no-referrer"/> <script type="module"> // main script {Node, Tree, JsonView} class Node { /** * @description Add more attributes to the item. * @param {*} item * @param {*} key * @param {Node} parent * */ constructor(item, key, parent) { this.key = key /** @param {string} */ this.type = Array.isArray(item) ? "array" : typeof item /** @param {Number} */ this.depth = parent ? parent.depth + 1 : 0 this.value = item this.parent = parent /** @param {[Node]} */ this.children = [] } } class Tree { /** * @description Given the root node, it will complete the children of it. * @param {Node} rootNode */ constructor(rootNode) { this.root = rootNode const obj = this.root.value if (!(obj instanceof Object)) { // Array is an Object too. return } Object.keys(obj).forEach(keyOrIdx => { const value = obj[keyOrIdx] const subNode = new Node(value, keyOrIdx, rootNode) const subTree = new Tree(subNode) rootNode.children.push(subTree.root) }) } /** * @param {string | Object} jsonData * @return {Tree} */ static CreateTree(jsonData) { jsonData = typeof jsonData === "string" ? JSON.parse(jsonData) : jsonData const rootNode = new Node(jsonData, "root", null) return new Tree(rootNode) } } class JsonView { static DefaultColorMap = { text: { string: "green", number: "#f9ae58", boolean: "#ca4ff8", array: "black", object: "black", }, bg: { object: undefined, // ... You can add more by yourself. They are like the text as above. } } static NewConfig() { return JSON.parse(JSON.stringify(JsonView.DefaultColorMap)) } static SEPARATOR = " : " /** @type {Tree} */ #tree /** * @param {Tree} tree * */ constructor(tree) { this.#tree = tree } /** * @param {Node} node * @param {Object} colorMap */ #render(node, colorMap = JsonView.DefaultColorMap) { /** * @param {Node} node * */ const getValue = (node) => { const typeName = node.type switch (typeName) { case "object": return `object {${Object.keys(node.value).length}}` case "array": return `array [${Object.keys(node.value).length}]` default: return node.value } } const arrowIcon = ["object", "array"].includes(node.type) ? `<i class="fas fa-caret-down"></i>` : "" const divFlag = document.createRange().createContextualFragment(`<div style="margin-left:${node.depth * 18}px">${arrowIcon}</div>`) const divElem = divFlag.querySelector("div") const textColor = colorMap.text[node.type] !== undefined ? `color:${colorMap.text[node.type]}` : "" const bgColor = colorMap.bg[node.type] !== undefined ? `background-color:${colorMap.bg[node.type]}` : "" const valueStyle = (textColor + bgColor).length > 0 ? `style=${[textColor, bgColor].join(";")}` : "" const keyName = node.depth !== 0 ? node.key + JsonView.SEPARATOR : "" // depth = 0 its key is "root" which is created by the system, so ignore it. 
const spanFlag = document.createRange().createContextualFragment( `<span class="ms-2">${keyName}<span ${valueStyle}>${getValue(node)}</span></span>` ) const isCollapsible = ["object", "array"].includes(node.type) node.children.forEach(subNode => { const subElem = this.#render(subNode, colorMap) if (isCollapsible) { divFlag.querySelector(`i`).addEventListener("click", (e) => { e.stopPropagation() subElem.dataset.toggle = subElem.dataset.toggle === undefined ? "none" : subElem.dataset.toggle === "none" ? "" : "none" e.target.className = subElem.dataset.toggle === "none" ? "fas fa-caret-right" : "fas fa-caret-down" // Change the icon to ▶ or ▼ subElem.querySelectorAll(`*`).forEach(e => e.style.display = subElem.dataset.toggle) }) } spanFlag.append(subElem) }) divElem.append(spanFlag) return divElem } /** * @param {Element} targetElem * @param {?Object} colorMap */ render(targetElem, colorMap = JsonView.DefaultColorMap) { targetElem.append(this.#render(this.#tree.root, colorMap)) } } // Below is test code. function main(outputElem) { const testObj = { db: { port: 1234, name: "My db", tables: [ {id: 1, name: "table 1"}, {id: 2, name: "table 2"}, ], }, options: { debug: false, ui: true, }, person: [ "Foo", "Bar" ] } const tree = Tree.CreateTree(testObj) const jsonView = new JsonView(tree) jsonView.render(outputElem) /* If you want to set the color by yourself, you can try as below const config = JsonView.NewConfig() config.bg.object = "red" jsonView.render(outputElem, config) */ } (() => { window.onload = () => { main(document.body) } })() </script> vanilla JavaScript A: I used Carson's reply and it works like a charm, but it has the null-object problem that woto mentions. Change this in the script: const getValue = (node) => { const typeName = node.type switch (typeName) { case "object": if (node.value !== null) return `object {${Object.keys(node.value).length}}` else return 'null' case "array": if (node.value !== null) return `array [${Object.keys(node.value).length}]` else return 'null' default: if (node.value !== null) return node.value else return 'null' } }
Display JSON data on a page as an expandable/collapsible list
I need help with displaying JSON data on a page as an expandable/collapsible list. Here is a valid JSON I've made by converting from XML with Python: JSON Data And to display it I'm using this: <!DOCTYPE HTML> <head> <title>JSON Tree View</title> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js" type="text/javascript"></script> <script src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.2/jquery-ui.min.js" type="text/javascript"></script> </head> <script> function json_tree(object){ var json="<ul>"; for(prop in object){ var value = object[prop]; switch (typeof(value)){ case "object": var token = Math.random().toString(36).substr(2,16); json += "<li><a class='label' href='#"+token+"' data-toggle='collapse'>"+prop+"="+value+"</a><div id='"+token+"' class='collapse'>"+json_tree(value)+"</div></li>"; break; default: json += "<li>"+prop+"="+value+"</li>"; } } return json+"</ul>"; } </script> <body style="margin: 40px;"> <h3>Paste JSON Into The Textarea Below and Click 'Build Tree'</h3> <textarea id="json" style="width: 100%;min-height:300px;"> </textarea> <button onclick="$('#output').html(json_tree(JSON.parse($('#json').val())));">Build Tree</button> <div id="output"> </div> </body> </html> This is what I get: Image I need help "filtering" (or merging with the upper node) those "0" and "1", and also, how to show only the values of the attributes without the names (or, if you have some better idea, how else I could display this list)?
[ "a good-looking, compact, collapsible tree view\npgrabovets' json-view is amazingly clean and well designed.\nCheck out the demo\n", "If you can consider using JS libraries , consider using JSON Formatter or Render JSON.\nBoth these libraries offer configuration options like themes, maximum depth and sorting.\nTo display simple JSON string in a collapsible form using Render JSON, you can use \n<script>\n document.getElementById(\"test\").appendChild(\n renderjson({ hello: [1,2,3,4], there: { a:1, b:2, c:[\"hello\", null] } })\n );\n</script>\n\n", "Some of the links to the questions are no longer accessible. I assume you are looking for how to make a collapsible JSON view.\n\nTL;DR\nyou can jump to Full code.\nthe code is very short (200 lines↓, including JSDoc, comment, test code.)\nInspire you on how to solve the problem.\nThis question in some skill is very much like how to make the table of contents. (TOC)\n\nFirst of all, JSON data is like an object. All we need to do is to add some more attributes (key, depth, children, ...) for each item.\n\nOnce these actions are done, all left is to render, and here is the pseudo-code for rendering.\nrender(node) {\n const divFlag = document.createRange().createContextualFragment(`<div style=\"margin-left:${node.depth * 18}px\"></div>`)\n const divElem = divFlag.querySelector(\"div\")\n const spanFlag = document.createRange().createContextualFragment(\n `<span class=\"ms-2\">${node.key} : ${node.value}</span>`\n )\n node.children.forEach(subNode => {\n const subElem = render(subNode)\n spanFlag.append(subElem)\n })\n divElem.append(spanFlag)\n return divElem\n}\n\n\n\nFull code\nboth CSS is not necessary.\n\nbootstrap: ms-2 (margin start)\nfontawesome: fa-caret-right, fa-caret-down if you don't want to use it, you can use ▸(25B8)▶(25B6)▾(25BE)▼(25BC) as the before contents\n\n\n\n<link href=\"https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css\" rel=\"stylesheet\"\n integrity=\"sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC\" crossOrigin=\"anonymous\">\n<link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.4/css/all.min.css\"\n integrity=\"sha512-1ycn6IcaQQ40/MKBW2W4Rhis/DbILU74C1vSrLJxCq57o941Ym01SwNsOMqvEBFlcgUa6xLiPY/NS5R+E6ztJQ==\"\n crossOrigin=\"anonymous\" referrerpolicy=\"no-referrer\"/>\n\n<script type=\"module\">\n // main script {Node, Tree, JsonView}\n class Node {\n /**\n * @description Add more attributes to the item.\n * @param {*} item\n * @param {*} key\n * @param {Node} parent\n * */\n constructor(item, key, parent) {\n this.key = key\n\n /** @param {string} */\n this.type = Array.isArray(item) ? \"array\" : typeof item\n\n /** @param {Number} */\n this.depth = parent ? parent.depth + 1 : 0\n this.value = item\n this.parent = parent\n\n /** @param {[Node]} */\n this.children = []\n }\n }\n\n class Tree {\n /**\n * @description Given the root node, it will complete the children of it.\n * @param {Node} rootNode\n */\n constructor(rootNode) {\n this.root = rootNode\n\n const obj = this.root.value\n if (!(obj instanceof Object)) { // Array is an Object too.\n return\n }\n Object.keys(obj).forEach(keyOrIdx => {\n const value = obj[keyOrIdx]\n const subNode = new Node(value, keyOrIdx, rootNode)\n const subTree = new Tree(subNode)\n rootNode.children.push(subTree.root)\n })\n }\n\n /**\n * @param {string | Object} jsonData\n * @return {Tree}\n */\n static CreateTree(jsonData) {\n jsonData = typeof jsonData === \"string\" ? 
JSON.parse(jsonData) : jsonData\n const rootNode = new Node(jsonData, \"root\", null)\n return new Tree(rootNode)\n }\n }\n\n class JsonView {\n static DefaultColorMap = {\n text: {\n string: \"green\",\n number: \"#f9ae58\",\n boolean: \"#ca4ff8\",\n array: \"black\",\n object: \"black\",\n },\n bg: {\n object: undefined,\n // ... You can add more by yourself. They are like the text as above.\n }\n }\n\n static NewConfig() {\n return JSON.parse(JSON.stringify(JsonView.DefaultColorMap))\n }\n\n static SEPARATOR = \" : \"\n\n /** @type {Tree} */\n #tree\n\n /**\n * @param {Tree} tree\n * */\n constructor(tree) {\n this.#tree = tree\n }\n\n /**\n * @param {Node} node\n * @param {Object} colorMap\n */\n #render(node, colorMap = JsonView.DefaultColorMap) {\n /**\n * @param {Node} node\n * */\n const getValue = (node) => {\n const typeName = node.type\n switch (typeName) {\n case \"object\":\n return `object {${Object.keys(node.value).length}}`\n case \"array\":\n return `array [${Object.keys(node.value).length}]`\n default:\n return node.value\n }\n }\n\n const arrowIcon = [\"object\", \"array\"].includes(node.type) ? `<i class=\"fas fa-caret-down\"></i>` : \"\"\n const divFlag = document.createRange().createContextualFragment(`<div style=\"margin-left:${node.depth * 18}px\">${arrowIcon}</div>`)\n const divElem = divFlag.querySelector(\"div\")\n\n const textColor = colorMap.text[node.type] !== undefined ? `color:${colorMap.text[node.type]}` : \"\"\n const bgColor = colorMap.bg[node.type] !== undefined ? `background-color:${colorMap.bg[node.type]}` : \"\"\n const valueStyle = (textColor + bgColor).length > 0 ? `style=${[textColor, bgColor].join(\";\")}` : \"\"\n\n const keyName = node.depth !== 0 ? node.key + JsonView.SEPARATOR : \"\" // depth = 0 its key is \"root\" which is created by the system, so ignore it.\n const spanFlag = document.createRange().createContextualFragment(\n `<span class=\"ms-2\">${keyName}<span ${valueStyle}>${getValue(node)}</span></span>`\n )\n\n const isCollapsible = [\"object\", \"array\"].includes(node.type)\n\n node.children.forEach(subNode => {\n const subElem = this.#render(subNode, colorMap)\n\n if (isCollapsible) {\n divFlag.querySelector(`i`).addEventListener(\"click\", (e) => {\n e.stopPropagation()\n subElem.dataset.toggle = subElem.dataset.toggle === undefined ? \"none\" :\n subElem.dataset.toggle === \"none\" ? \"\" : \"none\"\n\n e.target.className = subElem.dataset.toggle === \"none\" ? 
\"fas fa-caret-right\" : \"fas fa-caret-down\" // Change the icon to ▶ or ▼\n\n subElem.querySelectorAll(`*`).forEach(e => e.style.display = subElem.dataset.toggle)\n })\n }\n\n spanFlag.append(subElem)\n })\n divElem.append(spanFlag)\n return divElem\n }\n\n /**\n * @param {Element} targetElem\n * @param {?Object} colorMap\n */\n render(targetElem, colorMap = JsonView.DefaultColorMap) {\n targetElem.append(this.#render(this.#tree.root, colorMap))\n }\n }\n\n // Below is Test\n function main(outputElem) {\n const testObj = {\n db: {\n port: 1234,\n name: \"My db\",\n tables: [\n {id: 1, name: \"table 1\"},\n {id: 2, name: \"table 2\"},\n ],\n },\n options: {\n debug: false,\n ui: true,\n },\n person: [\n \"Foo\", \n \"Bar\"\n ]\n }\n const tree = Tree.CreateTree(testObj)\n const jsonView = new JsonView(tree)\n jsonView.render(outputElem)\n /* If you want to set the color by yourself, you can try as below\n const config = JsonView.NewConfig()\n config.bg.object = \"red\"\n jsonView.render(outputElem, config)\n */\n }\n\n (() => {\n window.onload = () => {\n main(document.body)\n }\n })()\n</script>\n\n\n\nvanilla JavaScript\n", "I used Carson Reply And it Work Like charm but null object problem as woto says.\nchange in script.\nconst getValue = (node) => {\n const typeName = node.type\n switch (typeName) {\n case \"object\":\n if (node.value !== null)\n return `object {${Object.keys(node.value).length}}`\n else\n return 'null'\n case \"array\":\n if (node.value !== null)\n return `array {${Object.keys(node.value).length}}`\n else\n return 'null'\n default:\n if (node.value !== null)\n return node.value\n else\n return 'null'\n }\n }\n\n" ]
[ 13, 9, 3, 0 ]
[]
[]
[ "javascript", "json", "list", "python", "tree" ]
stackoverflow_0032549518_javascript_json_list_python_tree.txt
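Since the asker already produced the JSON with Python, a minimal sketch (not from the thread above) is to render the tree server-side with native <details>/<summary> elements, which browsers collapse without any JavaScript. The function name json_to_details and the sample data are illustrative assumptions; list items reuse the parent's label, which handles the asker's "0"/"1" merge request, and leaves show only the value.

import html
import json

def json_to_details(value, key=None):
    # Render JSON as nested <details>/<summary> HTML; <details> is
    # natively collapsible, so no jQuery or Bootstrap is required.
    label = html.escape(str(key)) if key is not None else ""
    if isinstance(value, dict):
        inner = "".join(json_to_details(v, k) for k, v in value.items())
        return f"<details><summary>{label}</summary>{inner}</details>"
    if isinstance(value, list):
        # Merge list items into the parent: each item reuses the parent's
        # label instead of showing a bare "0"/"1" index node.
        return "".join(json_to_details(v, key) for v in value)
    # Leaf node: show only the value, not "name=value".
    return f"<div>{html.escape(str(value))}</div>"

data = json.loads('{"person": {"pets": [{"name": "Rex"}, {"name": "Tom"}]}}')
print(json_to_details(data, "root"))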
Q: How to merge multiple columns in Pandas with a single series? I have a dataframe (t) of codes for patients' surgeries. On any hospital admission they can have up to 5 surgeries or a combination of surgeries; the index in the left column is the individual patient. I want to add the text description for all 5 surgeries to individual new columns. | | OPERTN_01 | OPERTN_02 | OPERTN_03 | OPERTN_04 | OPERTN_05 | | ------ | --------- | --------- | --------- | --------- | --------- | | 85 | B041 | Y766 | Z943 | NaN | NaN | | 144 | B041 | Y766 | Y539 | NaN | NaN | | 260 | B041 | Y766 | NaN | NaN | NaN | | 276 | B041 | Y766 | NaN | NaN | NaN | | 345 | B041 | Y766 | NaN | NaN | NaN | | ... | ... | ... | ... | ... | ... | | 557445 | B041 | Y461 | L714 | Z954 | Z942 | | 557525 | B041 | Y766 | NaN | NaN | NaN | | 557533 | B041 | Y766 | E158 | Y766 | Y261 | | 557765 | B041 | Y766 | NaN | NaN | NaN | | 557832 | B041 | Y766 | U051 | Y973 | Y981 | I want to merge the code from all 5 columns (also handling the null values) with the text description from this dataframe (opcs_short) | | opcs_4.9str | Description | | 0 | A011 | A01.1: Hemispherectomy | | 1 | A012 | A01.2: Total lobectomy of brain | | 2 | A013 | A01.3: Partial lobectomy of brain | | 3 | A018 | A01.8: Other specified major excision of tissu... | | 4 | A019 | A01.9: Unspecified major excision of tissue of... | | ... | ... | ... | | 9673 | O439 | O43.9: Part of heart NEC | | 9674 | O451 | O45.1: Bifurcation of aorta | | 9675 | O452 | O45.2: Juxtarenal abdominal aorta | | 9676 | O458 | O45.8: Specified other aorta NEC | | 9677 | O459 | O45.9: Other aorta NEC | I tried to do this using this code: t2 = t.merge(opcs_short, how = 'left', left_on =['OPERTN_01','OPERTN_02', 'OPERTN_03', 'OPERTN_04', 'OPERTN_05'], right_on =['opcs_4.9str','opcs_4.9str','opcs_4.9str','opcs_4.9str','opcs_4.9str']) This produces | | OPERTN_01 | OPERTN_02 | OPERTN_03 | OPERTN_04 | OPERTN_05 | opcs_4.9str | Description | | 0 | B041 | Y766 | Z943 | NaN | NaN | NaN | NaN | | 1 | B041 | Y766 | Y539 | NaN | NaN | NaN | NaN | | 2 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 3 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 4 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | ... | ... | ... | ... | ... | ... | ... | ... | | 6410 | B041 | Y461 | L714 | Z954 | Z942 | NaN | NaN | | 6411 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 6412 | B041 | Y766 | E158 | Y766 | Y261 | NaN | NaN | | 6413 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 6414 | B041 | Y766 | U051 | Y973 | Y981 | NaN | NaN | So nothing has merged. I am not sure why, but I know I haven't handled the null values. Some patients have only one simple surgery and the rest of the columns are empty, so I don't want to drop them. TBH I am not sure using merge is the right approach here, but I don't have enough knowledge to know if e.g. a dictionary technique would be a better way. The code-description dataframe, though, has 19,000 records. A: I think a dictionary-based replace method is what you're after. Does the following achieve your desired result? code_map = {i[1]: i[2] for i in opcs_short.to_records()} for col in t.columns: t[col + " Description"] = t[col].replace(code_map)
How to merge multiple columns in Pandas with a single series?
I have a dataframe (t) of codes for patients' surgeries. On any hospital admission they can have up to 5 surgeries or a combination of surgeries; the index in the left column is the individual patient. I want to add the text description for all 5 surgeries to individual new columns. | | OPERTN_01 | OPERTN_02 | OPERTN_03 | OPERTN_04 | OPERTN_05 | | ------ | --------- | --------- | --------- | --------- | --------- | | 85 | B041 | Y766 | Z943 | NaN | NaN | | 144 | B041 | Y766 | Y539 | NaN | NaN | | 260 | B041 | Y766 | NaN | NaN | NaN | | 276 | B041 | Y766 | NaN | NaN | NaN | | 345 | B041 | Y766 | NaN | NaN | NaN | | ... | ... | ... | ... | ... | ... | | 557445 | B041 | Y461 | L714 | Z954 | Z942 | | 557525 | B041 | Y766 | NaN | NaN | NaN | | 557533 | B041 | Y766 | E158 | Y766 | Y261 | | 557765 | B041 | Y766 | NaN | NaN | NaN | | 557832 | B041 | Y766 | U051 | Y973 | Y981 | I want to merge the code from all 5 columns (also handling the null values) with the text description from this dataframe (opcs_short) | | opcs_4.9str | Description | | 0 | A011 | A01.1: Hemispherectomy | | 1 | A012 | A01.2: Total lobectomy of brain | | 2 | A013 | A01.3: Partial lobectomy of brain | | 3 | A018 | A01.8: Other specified major excision of tissu... | | 4 | A019 | A01.9: Unspecified major excision of tissue of... | | ... | ... | ... | | 9673 | O439 | O43.9: Part of heart NEC | | 9674 | O451 | O45.1: Bifurcation of aorta | | 9675 | O452 | O45.2: Juxtarenal abdominal aorta | | 9676 | O458 | O45.8: Specified other aorta NEC | | 9677 | O459 | O45.9: Other aorta NEC | I tried to do this using this code: t2 = t.merge(opcs_short, how = 'left', left_on =['OPERTN_01','OPERTN_02', 'OPERTN_03', 'OPERTN_04', 'OPERTN_05'], right_on =['opcs_4.9str','opcs_4.9str','opcs_4.9str','opcs_4.9str','opcs_4.9str']) This produces | | OPERTN_01 | OPERTN_02 | OPERTN_03 | OPERTN_04 | OPERTN_05 | opcs_4.9str | Description | | 0 | B041 | Y766 | Z943 | NaN | NaN | NaN | NaN | | 1 | B041 | Y766 | Y539 | NaN | NaN | NaN | NaN | | 2 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 3 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 4 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | ... | ... | ... | ... | ... | ... | ... | ... | | 6410 | B041 | Y461 | L714 | Z954 | Z942 | NaN | NaN | | 6411 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 6412 | B041 | Y766 | E158 | Y766 | Y261 | NaN | NaN | | 6413 | B041 | Y766 | NaN | NaN | NaN | NaN | NaN | | 6414 | B041 | Y766 | U051 | Y973 | Y981 | NaN | NaN | So nothing has merged. I am not sure why, but I know I haven't handled the null values. Some patients have only one simple surgery and the rest of the columns are empty, so I don't want to drop them. TBH I am not sure using merge is the right approach here, but I don't have enough knowledge to know if e.g. a dictionary technique would be a better way. The code-description dataframe, though, has 19,000 records.
[ "I think a dictionary-based replace method is what you're after. Does the following achieve your desired result?\ncode_map = {i[1]: i[2] for i in opcs_short.to_records()}\n\nfor col in t.columns:\n t[col + \" Description\"] = t[col].replace(code_map)\n\n" ]
[ 2 ]
[]
[]
[ "merge", "pandas", "python" ]
stackoverflow_0074588410_merge_pandas_python.txt
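A runnable sketch of the accepted answer's dictionary idea (sample rows are made up; the column and frame names follow the question): building the lookup as a Series and using .map() per column keeps NaN slots as NaN, so patients with fewer than 5 surgeries need no special handling. Note that .map() is a slight variation on the answer's .replace(): replace leaves unknown codes unchanged, while map turns them into NaN.

import pandas as pd

t = pd.DataFrame({
    "OPERTN_01": ["B041", "B041"],
    "OPERTN_02": ["Y766", None],
})
# Two made-up rows standing in for the 19,000-row lookup table.
opcs_short = pd.DataFrame({
    "opcs_4.9str": ["B041", "Y766"],
    "Description": ["B04.1: (made-up description)", "Y76.6: (made-up description)"],
})

# Build the code -> description lookup once.
code_map = opcs_short.set_index("opcs_4.9str")["Description"]

# list() snapshots the columns so the loop isn't affected by the new ones.
for col in list(t.columns):
    # .map() leaves NaN (no surgery) and unknown codes as NaN, so empty
    # surgery slots survive untouched.
    t[col + " Description"] = t[col].map(code_map)

print(t)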
Q: Does `await` in Python yield to the event loop? I was wondering what exactly happens when we await a coroutine in async Python code, for example: await send_message(string) (1) send_message is added to the event loop, and the calling coroutine gives up control to the event loop, or (2) We jump directly into send_message Most explanations I read point to (1), as they describe the calling coroutine as exiting. But my own experiments suggest (2) is the case: I tried to have a coroutine run after the caller but before the callee and could not achieve this. A: Disclaimer: Open to correction (particularly as to details and correct terminology) since I arrived here looking for the answer to this myself. Nevertheless, the research below points to a pretty decisive "main point" conclusion: Correct OP answer: No, await (per se) does not yield to the event loop, yield yields to the event loop, hence for the case given: "(2) We jump directly into send_message". In particular, certain yield expressions are the only points, at bottom, where async tasks can actually be switched out (in terms of nailing down the precise spot where Python code execution can be suspended). To be proven and demonstrated: 1) by theory/documentation, 2) by implementation code, 3) by example. By theory/documentation PEP 492: Coroutines with async and await syntax While the PEP is not tied to any specific Event Loop implementation, it is relevant only to the kind of coroutine that uses yield as a signal to the scheduler, indicating that the coroutine will be waiting until an event (such as IO) is completed. ... [await] uses the yield from implementation [with an extra step of validating its argument.] ... Any yield from chain of calls ends with a yield. This is a fundamental mechanism of how Futures are implemented. Since, internally, coroutines are a special kind of generators, every await is suspended by a yield somewhere down the chain of await calls (please refer to PEP 3156 for a detailed explanation). ... Coroutines are based on generators internally, thus they share the implementation. Similarly to generator objects, coroutines have throw(), send() and close() methods. ... The vision behind existing generator-based coroutines and this proposal is to make it easy for users to see where the code might be suspended. In context, "easy for users to see where the code might be suspended" seems to refer to the fact that in synchronous code yield is the place where execution can be "suspended" within a routine allowing other code to run, and that principle now extends perfectly to the async context wherein a yield (if its value is not consumed within the running task but is propagated up to the scheduler) is the "signal to the scheduler" to switch out tasks. More succinctly: where does a generator yield control? At a yield. Coroutines (including those using async and await syntax) are generators, hence likewise. And it is not merely an analogy, in implementation (see below) the actual mechanism by which a task gets "into" and "out of" coroutines is not anything new, magical, or unique to the async world, but simply by calling the coro's <generator>.send() method. That was (as I understand the text) part of the "vision" behind PEP 492: async and await would provide no novel mechanism for code suspension but just pour async-sugar on Python's already well-beloved and powerful generators. 
And PEP 3156: The "asyncio" module The loop.slow_callback_duration attribute controls the maximum execution time allowed between two yield points before a slow callback is reported [emphasis in original]. That is, an uninterrupted segment of code (from the async perspective) is demarcated as that between two successive yield points (whose values reached up to the running Task level (via an await/yield from tunnel) without being consumed within it). And this: The scheduler has no public interface. You interact with it by using yield from future and yield from task. Objection: "That says 'yield from', but you're trying to argue that the task can only switch out at a yield itself! yield from and yield are different things, my friend, and yield from itself doesn't suspend code!" Ans: Not a contradiction. The PEP is saying you interact with the scheduler by using yield from future/task. But as noted above in PEP 492, any chain of yield from (~aka await) ultimately reaches a yield (the "bottom turtle"). In particular (see below), yield from future does in fact yield that same future after some wrapper work, and that yield is the actual "switch out point" where another task takes over. But it is incorrect for your code to directly yield a Future up to the current Task because you would bypass the necessary wrapper. The objection having been answered, and its practical coding considerations being noted, the point I wish to make from the above quote remains: that a suitable yield in Python async code is ultimately the one thing which, having suspended code execution in the standard way that any other yield would do, now further engages the scheduler to bring about a possible task switch. By implementation code asyncio/futures.py class Future: ... def __await__(self): if not self.done(): self._asyncio_future_blocking = True yield self # This tells Task to wait for completion. if not self.done(): raise RuntimeError("await wasn't used with future") return self.result() # May raise too. __iter__ = __await__ # make compatible with 'yield from'. Paraphrase: The line yield self is what tells the running task to sit out for now and let other tasks run, coming back to this one sometime after self is done. Almost all of your awaitables in asyncio world are (multiple layers of) wrappers around a Future. The event loop remains utterly blind to all higher level await awaitable expressions until the code execution trickles down to an await future or yield from future and then (as seen here) calls yield self, which yielded self is then "caught" by none other than the Task under which the present coroutine stack is running thereby signaling to the task to take a break. Possibly the one and only exception to the above "code suspends at yield self within await future" rule, in an asyncio context, is the potential use of a bare yield such as in asyncio.sleep(0). And since the sleep function is a topic of discourse in the comments of this post, let's look at that. asyncio/tasks.py @types.coroutine def __sleep0(): """Skip one event loop run cycle. This is a private helper for 'asyncio.sleep()', used when the 'delay' is set to 0. It uses a bare 'yield' expression (which Task.__step knows how to handle) instead of creating a Future object. 
""" yield async def sleep(delay, result=None, *, loop=None): """Coroutine that completes after a given time (in seconds).""" if delay <= 0: await __sleep0() return result if loop is None: loop = events.get_running_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) future = loop.create_future() h = loop.call_later(delay, futures._set_result_unless_cancelled, future, result) try: return await future finally: h.cancel() Note: We have here the two interesting cases at which control can shift to the scheduler: (1) The bare yield in __sleep0 (when called via an await). (2) The yield self immediately within await future. The crucial line (for our purposes) in asyncio/tasks.py is when Task._step runs its top-level coroutine via result = self._coro.send(None) and recognizes fourish cases: (1) result = None is generated by the coro (which, again, is a generator): the task "relinquishes control for one event loop iteration". (2) result = future is generated within the coro, with further magic member field evidence that the future was yielded in a proper manner from out of Future.__iter__ == Future.__await__: the task relinquishes control to the event loop until the future is complete. (3) A StopIteration is raised by the coro indicating the coroutine completed (i.e. as a generator it exhausted all its yields): the final result of the task (which is itself a Future) is set to the coroutine return value. (4) Any other Exception occurs: the task's set_exception is set accordingly. Modulo details, the main point for our concern is that coroutine segments in an asyncio event loop ultimately run via coro.send(). Initial startup and final termination aside, send() proceeds precisely from the last yield value it generated to the next one. By example import asyncio import types def task_print(s): print(f"{asyncio.current_task().get_name()}: {s}") async def other_task(s): task_print(s) class AwaitableCls: def __await__(self): task_print(" 'Jumped straight into' another `await`; the act of `await awaitable` *itself* doesn't 'pause' anything") yield task_print(" We're back to our awaitable object because that other task completed") asyncio.create_task(other_task("The event loop gets control when `yield` points (from an iterable coroutine) propagate up to the `current_task` through a suitable chain of `await` or `yield from` statements")) async def coro(): task_print(" 'Jumped straight into' coro; the `await` keyword itself does nothing to 'pause' the current_task") await AwaitableCls() task_print(" 'Jumped straight back into' coro; we have another pending task, but leaving an `__await__` doesn't 'pause' the task any more than entering the `__await__` does") @types.coroutine def iterable_coro(context): task_print(f"`{context} iterable_coro`: pre-yield") yield None # None or a Future object are the only legitimate yields to the task in asyncio task_print(f"`{context} iterable_coro`: post-yield") async def original_task(): asyncio.create_task(other_task("Aha, but a (suitably unconsumed) *`yield`* DOES 'pause' the current_task allowing the event scheduler to `_wakeup` another task")) task_print("Original task") await coro() task_print("'Jumped straight out of' coro. 
Leaving a coro, as with leaving/entering any awaitable, doesn't give control to the event loop") res = await iterable_coro("await") assert res is None asyncio.create_task(other_task("This doesn't run until the very end because the generated None following the creation of this task is consumed by the `for` loop")) for y in iterable_coro("for y in"): task_print(f"But 'ordinary' `yield` points (those which are consumed by the `current_task` itself) behave as ordinary without relinquishing control at the async/task-level; `y={y}`") task_print("Done with original task") asyncio.get_event_loop().run_until_complete(original_task()) run in python3.8 produces Task-1: Original task Task-1: 'Jumped straight into' coro; the await keyword itself does nothing to 'pause' the current_task Task-1: 'Jumped straight into' another await; the act of await awaitable itself doesn't 'pause' anything Task-2: Aha, but a (suitably unconsumed) yield DOES 'pause' the current_task allowing the event scheduler to _wakeup another task Task-1: We're back to our awaitable object because that other task completed Task-1: 'Jumped straight back into' coro; we have another pending task, but leaving an __await__ doesn't 'pause' the task any more than entering the __await__ does Task-1: 'Jumped straight out of' coro. Leaving a coro, as with leaving/entering any awaitable, doesn't give control to the event loop Task-1: await iterable_coro: pre-yield Task-3: The event loop gets control when yield points (from an iterable coroutine) propagate up to the current_task through a suitable chain of await or yield from statements Task-1: await iterable_coro: post-yield Task-1: for y in iterable_coro: pre-yield Task-1: But 'ordinary' yield points (those which are consumed by the current_task itself) behave as ordinary without relinquishing control at the async/task-level; y=None Task-1: for y in iterable_coro: post-yield Task-1: Done with original task Task-4: This doesn't run until the very end because the generated None following the creation of this task is consumed by the for loop Indeed, exercises such as the following can help one's mind to decouple the functionality of async/await from notion of "event loops" and such. The former is conducive to nice implementations and usages of the latter, but you can use async and await just as specially syntaxed generator stuff without any "loop" (whether asyncio or otherwise) whatsoever: import types # no asyncio, nor any other loop framework async def f1(): print(1) print(await f2(),'= await f2()') return 8 @types.coroutine def f2(): print(2) print((yield 3),'= yield 3') return 7 class F3: def __await__(self): print(4) print((yield 5),'= yield 5') print(10) return 11 task1 = f1() task2 = F3().__await__() """ You could say calls to send() represent our "manual task management" in this script. """ print(task1.send(None), '= task1.send(None)') print(task2.send(None), '= task2.send(None)') try: print(task1.send(6), 'try task1.send(6)') except StopIteration as e: print(e.value, '= except task1.send(6)') try: print(task2.send(9), 'try task2.send(9)') except StopIteration as e: print(e.value, '= except task2.send(9)') produces 1 2 3 = task1.send(None) 4 5 = task2.send(None) 6 = yield 3 7 = await f2() 8 = except task1.send(6) 9 = yield 5 10 11 = except task2.send(9) A: Yes, await passes control back to the asyncio eventloop, and allows it to schedule other async functions. Another way is await asyncio.sleep(0)
Does `await` in Python yield to the event loop?
I was wondering what exactly happens when we await a coroutine in async Python code, for example: await send_message(string) (1) send_message is added to the event loop, and the calling coroutine gives up control to the event loop, or (2) We jump directly into send_message Most explanations I read point to (1), as they describe the calling coroutine as exiting. But my own experiments suggest (2) is the case: I tried to have a coroutine run after the caller but before the callee and could not achieve this.
[ "Disclaimer: Open to correction (particularly as to details and correct terminology) since I arrived here looking for the answer to this myself. Nevertheless, the research below points to a pretty decisive \"main point\" conclusion:\nCorrect OP answer: No, await (per se) does not yield to the event loop, yield yields to the event loop, hence for the case given: \"(2) We jump directly into send_message\". In particular, certain yield expressions are the only points, at bottom, where async tasks can actually be switched out (in terms of nailing down the precise spot where Python code execution can be suspended).\nTo be proven and demonstrated: 1) by theory/documentation, 2) by implementation code, 3) by example.\nBy theory/documentation\nPEP 492: Coroutines with async and await syntax\n\nWhile the PEP is not tied to any specific Event Loop implementation, it is relevant only to the kind of coroutine that uses yield as a signal to the scheduler, indicating that the coroutine will be waiting until an event (such as IO) is completed. ...\n[await] uses the yield from implementation [with an extra step of validating its argument.] ...\nAny yield from chain of calls ends with a yield. This is a fundamental mechanism of how Futures are implemented. Since, internally, coroutines are a special kind of generators, every await is suspended by a yield somewhere down the chain of await calls (please refer to PEP 3156 for a detailed explanation). ...\nCoroutines are based on generators internally, thus they share the implementation. Similarly to generator objects, coroutines have throw(), send() and close() methods. ...\nThe vision behind existing generator-based coroutines and this proposal is to make it easy for users to see where the code might be suspended.\n\nIn context, \"easy for users to see where the code might be suspended\" seems to refer to the fact that in synchronous code yield is the place where execution can be \"suspended\" within a routine allowing other code to run, and that principle now extends perfectly to the async context wherein a yield (if its value is not consumed within the running task but is propagated up to the scheduler) is the \"signal to the scheduler\" to switch out tasks.\nMore succinctly: where does a generator yield control? At a yield. Coroutines (including those using async and await syntax) are generators, hence likewise.\nAnd it is not merely an analogy, in implementation (see below) the actual mechanism by which a task gets \"into\" and \"out of\" coroutines is not anything new, magical, or unique to the async world, but simply by calling the coro's <generator>.send() method. That was (as I understand the text) part of the \"vision\" behind PEP 492: async and await would provide no novel mechanism for code suspension but just pour async-sugar on Python's already well-beloved and powerful generators.\nAnd\nPEP 3156: The \"asyncio\" module\n\nThe loop.slow_callback_duration attribute controls the maximum execution time allowed between two yield points before a slow callback is reported [emphasis in original].\n\nThat is, an uninterrupted segment of code (from the async perspective) is demarcated as that between two successive yield points (whose values reached up to the running Task level (via an await/yield from tunnel) without being consumed within it).\nAnd this:\n\nThe scheduler has no public interface. 
You interact with it by using yield from future and yield from task.\n\nObjection: \"That says 'yield from', but you're trying to argue that the task can only switch out at a yield itself! yield from and yield are different things, my friend, and yield from itself doesn't suspend code!\"\nAns: Not a contradiction. The PEP is saying you interact with the scheduler by using yield from future/task. But as noted above in PEP 492, any chain of yield from (~aka await) ultimately reaches a yield (the \"bottom turtle\"). In particular (see below), yield from future does in fact yield that same future after some wrapper work, and that yield is the actual \"switch out point\" where another task takes over. But it is incorrect for your code to directly yield a Future up to the current Task because you would bypass the necessary wrapper.\nThe objection having been answered, and its practical coding considerations being noted, the point I wish to make from the above quote remains: that a suitable yield in Python async code is ultimately the one thing which, having suspended code execution in the standard way that any other yield would do, now futher engages the scheduler to bring about a possible task switch.\nBy implementation code\nasyncio/futures.py\nclass Future:\n...\n def __await__(self):\n if not self.done():\n self._asyncio_future_blocking = True\n yield self # This tells Task to wait for completion.\n if not self.done():\n raise RuntimeError(\"await wasn't used with future\")\n return self.result() # May raise too.\n\n __iter__ = __await__ # make compatible with 'yield from'.\n\nParaphrase: The line yield self is what tells the running task to sit out for now and let other tasks run, coming back to this one sometime after self is done.\nAlmost all of your awaitables in asyncio world are (multiple layers of) wrappers around a Future. The event loop remains utterly blind to all higher level await awaitable expressions until the code execution trickles down to an await future or yield from future and then (as seen here) calls yield self, which yielded self is then \"caught\" by none other than the Task under which the present coroutine stack is running thereby signaling to the task to take a break.\nPossibly the one and only exception to the above \"code suspends at yield self within await future\" rule, in an asyncio context, is the potential use of a bare yield such as in asyncio.sleep(0). And since the sleep function is a topic of discourse in the comments of this post, let's look at that.\nasyncio/tasks.py\n@types.coroutine\ndef __sleep0():\n \"\"\"Skip one event loop run cycle.\n This is a private helper for 'asyncio.sleep()', used\n when the 'delay' is set to 0. 
It uses a bare 'yield'\n expression (which Task.__step knows how to handle)\n instead of creating a Future object.\n \"\"\"\n yield\n\n\nasync def sleep(delay, result=None, *, loop=None):\n \"\"\"Coroutine that completes after a given time (in seconds).\"\"\"\n if delay <= 0:\n await __sleep0()\n return result\n\n if loop is None:\n loop = events.get_running_loop()\n else:\n warnings.warn(\"The loop argument is deprecated since Python 3.8, \"\n \"and scheduled for removal in Python 3.10.\",\n DeprecationWarning, stacklevel=2)\n\n future = loop.create_future()\n h = loop.call_later(delay,\n futures._set_result_unless_cancelled,\n future, result)\n try:\n return await future\n\n finally:\n h.cancel()\n\nNote: We have here the two interesting cases at which control can shift to the scheduler:\n(1) The bare yield in __sleep0 (when called via an await).\n(2) The yield self immediately within await future.\nThe crucial line (for our purposes) in asyncio/tasks.py is when Task._step runs its top-level coroutine via result = self._coro.send(None) and recognizes fourish cases:\n(1) result = None is generated by the coro (which, again, is a generator): the task \"relinquishes control for one event loop iteration\".\n(2) result = future is generated within the coro, with further magic member field evidence that the future was yielded in a proper manner from out of Future.__iter__ == Future.__await__: the task relinquishes control to the event loop until the future is complete.\n(3) A StopIteration is raised by the coro indicating the coroutine completed (i.e. as a generator it exhausted all its yields): the final result of the task (which is itself a Future) is set to the coroutine return value.\n(4) Any other Exception occurs: the task's set_exception is set accordingly.\nModulo details, the main point for our concern is that coroutine segments in an asyncio event loop ultimately run via coro.send(). 
Initial startup and final termination aside, send() proceeds precisely from the last yield value it generated to the next one.\nBy example\nimport asyncio\nimport types\n\ndef task_print(s):\n print(f\"{asyncio.current_task().get_name()}: {s}\")\n\nasync def other_task(s):\n task_print(s)\n\nclass AwaitableCls:\n def __await__(self):\n task_print(\" 'Jumped straight into' another `await`; the act of `await awaitable` *itself* doesn't 'pause' anything\")\n yield\n task_print(\" We're back to our awaitable object because that other task completed\")\n asyncio.create_task(other_task(\"The event loop gets control when `yield` points (from an iterable coroutine) propagate up to the `current_task` through a suitable chain of `await` or `yield from` statements\"))\n\nasync def coro():\n task_print(\" 'Jumped straight into' coro; the `await` keyword itself does nothing to 'pause' the current_task\")\n await AwaitableCls()\n task_print(\" 'Jumped straight back into' coro; we have another pending task, but leaving an `__await__` doesn't 'pause' the task any more than entering the `__await__` does\")\n\n@types.coroutine\ndef iterable_coro(context):\n task_print(f\"`{context} iterable_coro`: pre-yield\")\n yield None # None or a Future object are the only legitimate yields to the task in asyncio\n task_print(f\"`{context} iterable_coro`: post-yield\")\n\nasync def original_task():\n asyncio.create_task(other_task(\"Aha, but a (suitably unconsumed) *`yield`* DOES 'pause' the current_task allowing the event scheduler to `_wakeup` another task\"))\n\n task_print(\"Original task\")\n await coro()\n task_print(\"'Jumped straight out of' coro. Leaving a coro, as with leaving/entering any awaitable, doesn't give control to the event loop\")\n res = await iterable_coro(\"await\")\n assert res is None\n asyncio.create_task(other_task(\"This doesn't run until the very end because the generated None following the creation of this task is consumed by the `for` loop\"))\n for y in iterable_coro(\"for y in\"):\n task_print(f\"But 'ordinary' `yield` points (those which are consumed by the `current_task` itself) behave as ordinary without relinquishing control at the async/task-level; `y={y}`\")\n task_print(\"Done with original task\")\n\nasyncio.get_event_loop().run_until_complete(original_task())\n\nrun in python3.8 produces\n\nTask-1: Original task\nTask-1: 'Jumped straight into' coro; the await keyword itself does nothing to 'pause' the current_task\nTask-1: 'Jumped straight into' another await; the act of await awaitable itself doesn't 'pause' anything\nTask-2: Aha, but a (suitably unconsumed) yield DOES 'pause' the current_task allowing the event scheduler to _wakeup another task\nTask-1: We're back to our awaitable object because that other task completed\nTask-1: 'Jumped straight back into' coro; we have another pending task, but leaving an __await__ doesn't 'pause' the task any more than entering the __await__ does\nTask-1: 'Jumped straight out of' coro. 
Leaving a coro, as with leaving/entering any awaitable, doesn't give control to the event loop\nTask-1: await iterable_coro: pre-yield\nTask-3: The event loop gets control when yield points (from an iterable coroutine) propagate up to the current_task through a suitable chain of await or yield from statements\nTask-1: await iterable_coro: post-yield\nTask-1: for y in iterable_coro: pre-yield\nTask-1: But 'ordinary' yield points (those which are consumed by the current_task itself) behave as ordinary without relinquishing control at the async/task-level; y=None\nTask-1: for y in iterable_coro: post-yield\nTask-1: Done with original task\nTask-4: This doesn't run until the very end because the generated None following the creation of this task is consumed by the for loop\n\nIndeed, exercises such as the following can help one's mind to decouple the functionality of async/await from notion of \"event loops\" and such. The former is conducive to nice implementations and usages of the latter, but you can use async and await just as specially syntaxed generator stuff without any \"loop\" (whether asyncio or otherwise) whatsoever:\nimport types # no asyncio, nor any other loop framework\n\nasync def f1():\n print(1)\n print(await f2(),'= await f2()')\n return 8\n\n@types.coroutine\ndef f2():\n print(2)\n print((yield 3),'= yield 3')\n return 7\n\nclass F3:\n def __await__(self):\n print(4)\n print((yield 5),'= yield 5')\n print(10)\n return 11\n\ntask1 = f1()\ntask2 = F3().__await__()\n\"\"\" You could say calls to send() represent our\n \"manual task management\" in this script.\n\"\"\"\nprint(task1.send(None), '= task1.send(None)')\nprint(task2.send(None), '= task2.send(None)')\ntry:\n print(task1.send(6), 'try task1.send(6)')\nexcept StopIteration as e:\n print(e.value, '= except task1.send(6)')\ntry:\n print(task2.send(9), 'try task2.send(9)')\nexcept StopIteration as e:\n print(e.value, '= except task2.send(9)')\n\nproduces\n\n1\n2\n3 = task1.send(None)\n4\n5 = task2.send(None)\n6 = yield 3\n7 = await f2()\n8 = except task1.send(6)\n9 = yield 5\n10\n11 = except task2.send(9)\n\n", "Yes, await passes control back to the asyncio eventloop, and allows it to schedule other async functions.\nAnother way is\nawait asyncio.sleep(0)\n\n" ]
[ 38, 0 ]
[]
[]
[ "async_await", "asynchronous", "python", "python_3.x", "python_asyncio" ]
stackoverflow_0059586879_async_await_asynchronous_python_python_3.x_python_asyncio.txt
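The answers' central claim can be checked with a self-contained sketch (not from the thread): entering a coroutine via await does not by itself let other tasks run; a pending task only gets its turn at a real suspension point such as await asyncio.sleep(0).

import asyncio

async def other():
    print("3. other task runs only at a real suspension point")

async def inner():
    print("1. awaiting a coroutine jumps straight in; no task switch yet")

async def main():
    asyncio.create_task(other())
    await inner()                 # enters and returns without yielding
    print("2. still running, even though another task is pending")
    await asyncio.sleep(0)        # bare yield under the hood: switch here
    print("4. resumed after the pending task got its turn")

asyncio.run(main())
# Prints 1, 2, 3, 4 in that order.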
Q: Python freezes on smtplib.SMTP("smtp.gmail.com", 587) I am attempting to create a script that sends an email, using Gmail. However, my code freezes when the line below is run: smtplib.SMTP("smtp.gmail.com", 587) It is before my username and password are entered, so it has nothing to do with my Gmail account. Why is this happening? I am using Python 3.6.3 The full code is below: import smtplib # Specifying the from and to addresses fromaddr = 'XXX@gmail.com' toaddrs = 'YYY@gmail.com' # Writing the message (this message will appear in the email) msg = 'Enter your message here' # Gmail Login username = 'XXX@gmail.com' password = 'PPP' # Sending the mail server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(username,password) server.sendmail(fromaddr, toaddrs, msg) server.quit() A: It is most likely a firewall or similar issue. On the machine having the issue, try running this on the command line: ping smtp.gmail.com Assuming that works, then try: telnet smtp.gmail.com 587 I'm assuming a Linux machine with this command. You'll need to adapt for others. If that connects, type ehlo list and the command should show some info. Type quit to exit. If that doesn't work, then check your iptables. sudo iptables -L This will either show something like ACCEPT all under Chain INPUT or if not, you'll need to ensure that you are accepting established connections with something like: ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED The output chain is often open, but you should check that too. If you are on AWS, check your security group isn't blocking outgoing connections. A: If it's hanging in the call to smtplib.SMTP, and the server requires SSL, then most likely the issue is that you need to call smtplib.SMTP_SSL() (note the _SSL) instead of calling smtplib.SMTP() with a subsequent call to server.starttls() after the ehlo. See SMTPLib docs for SMTP_SSL for more details. This fixed the issue for me. A: Use server.ehlo() in your code. Code Snippet: server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() For authentication error: http://joequery.me/guides/python-smtp-authenticationerror/ Add the following code snippet and run again. try: server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() server.login(username,password) server.sendmail(fromaddr, toaddrs, msg) server.close() print('successfully sent the mail') except: print("failed to send mail") A: You don't need the ehlo call. These days, you do need an app password with Gmail. And crucially, you need the right server address. I stupidly copied smtp.google.com from some bad instructions, and the call hung. Changing to smtp.gmail.com fixed it. Duh.
Python freezes on smtplib.SMTP("smtp.gmail.com", 587)
I am attempting to create a script that sends an email, using Gmail. However, my code freezes when the line below is run: smtplib.SMTP("smtp.gmail.com", 587) It is before my username and password are entered, so it has nothing to do with my Gmail account. Why is this happening? I am using Python 3.6.3 The full code is below: import smtplib # Specifying the from and to addresses fromaddr = 'XXX@gmail.com' toaddrs = 'YYY@gmail.com' # Writing the message (this message will appear in the email) msg = 'Enter your message here' # Gmail Login username = 'XXX@gmail.com' password = 'PPP' # Sending the mail server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(username,password) server.sendmail(fromaddr, toaddrs, msg) server.quit()
[ "It is most likely a firewall or similar issue. On the machine having the issue, try running this on the command line:\nping smtp.gmail.com\n\nAssuming that works, then try:\ntelnet smtp.gmail.com 587\n\nI'm assuming a Linux machine with this command. You'll need to adapt for others. If that connects, type ehlo list and the command should show some info. Type quit to exit.\nIf that doesn't work, then check your iptables. \nsudo iptables -L\n\nThis will either show something like ACCEPT all under Chain INPUT or if not, you'll need to ensure that you are accepting established connections with something like:\nACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED\n\nThe output chain is often open, but you should check that too.\nIf you are on AWS, check your security group isn't blocking outgoing connections.\n", "If it's hanging in the call to smtplib.SMTP, and the server requires SSL, then most likely the issue is that you need to call smtplib.SMTP_SSL() (note the _SSL) instead of calling smtplib.SMTP() with a subsequent call to server.starttls() after the ehlo. See SMTPLib docs for SMTP_SSL for more details.\nThis fixed the issue for me.\n", "Use server.ehlo() in your code.\nCode Snippet:\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.ehlo()\nserver.starttls()\n\nFor authentication error:\nhttp://joequery.me/guides/python-smtp-authenticationerror/\nAdd following code snippet and run again.\ntry:\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.login(username,password)\n server.sendmail(fromaddr, toaddrs, msg)\n server.close()\n print 'successfully sent the mail'\nexcept:\n print \"failed to send mail\"\n\n", "You don't need the ehlo call. These days, you do need an app password with Gmail. And crucially, you need the right server address. I stupidly copied smtp.google.com from some bad instructions, and the call hung. Changing to smtp.gmail.com fixed it. Duh.\n" ]
[ 6, 4, 3, 0 ]
[]
[]
[ "python", "smtplib" ]
stackoverflow_0050624003_python_smtplib.txt
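A minimal runnable sketch pulling the advice above together -- pass a timeout so a blocked connection raises an error instead of freezing the script, and use a Gmail app password (the credentials, subject, and timeout value here are placeholders, not part of the original question):

import smtplib
from email.message import EmailMessage

USERNAME = "XXX@gmail.com"       # placeholder address
PASSWORD = "app-password-here"   # Gmail app password, not the account password

msg = EmailMessage()
msg["From"] = USERNAME
msg["To"] = "YYY@gmail.com"
msg["Subject"] = "Test"
msg.set_content("Enter your message here")

# timeout makes a firewalled/blocked connection fail fast instead of hanging
with smtplib.SMTP("smtp.gmail.com", 587, timeout=10) as server:
    server.starttls()
    server.login(USERNAME, PASSWORD)
    server.send_message(msg)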
Q: How to match complete words for acronym using regex? I want to only get complete words from acronyms with ( ) around them. For example, there is a sentence 'Lung cancer screening (LCS) reduces NSCLC mortality'; -> I want to get 'Lung cancer screening' as a result. How can I do it with regex? Original question: I want to remove repeated uppercase letters: "HIV acquired immunodeficiency syndrome are at a particularly high risk of cervical cancer" => " acquired immunodeficiency syndrome are at a particularly high risk of cervical cancer" A: Assuming you want to target 2 or more capital letters, I would use re.sub here: inp = "Lung cancer screening (LCS) reduces NSCLC mortality" output = re.sub(r'\s*(?:\([A-Z]+\)|[A-Z]{2,})\s*', ' ', inp).strip() print(output) # Lung cancer screening reduces mortality A: import re s = 'HIV acquired immunodeficiency syndrome are at a particularly high risk of cervical cancer' print(re.sub(r'([A-Z])', lambda pat:'', s).strip()) # Inline, according to @jensgram's answer
How to match complete words for acronym using regex?
I want to only get complete words from acronyms with ( ) around them. For example, there is a sentence 'Lung cancer screening (LCS) reduces NSCLC mortality'; -> I want to get 'Lung cancer screening' as a result. How can I do it with regex? Original question: I want to remove repeated uppercase letters: "HIV acquired immunodeficiency syndrome are at a particularly high risk of cervical cancer" => " acquired immunodeficiency syndrome are at a particularly high risk of cervical cancer"
[ "Assuming you want to target 2 or more capital letters, I would use re.sub here:\ninp = \"Lung cancer screening (LCS) reduces NSCLC mortality\"\noutput = re.sub(r'\\s*(?:\\([A-Z]+\\)|[A-Z]{2,})\\s*', ' ', inp).strip()\nprint(output) # Lung cancer screening reduces mortality\n\n", "import re\ns = 'HIV acquired immunodeficiency syndrome are at a particularly high risk of cervical cancer'\nprint(re.sub(r'([A-Z])', lambda pat:'', s).strip()) # Inline\n\naccording to @jensgram answer\n" ]
[ 0, 0 ]
[]
[]
[ "alphabet", "python", "regex" ]
stackoverflow_0074588659_alphabet_python_regex.txt
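To actually extract the expansion (rather than just strip the acronyms), here is a sketch built on the heuristic that each letter of a parenthesized acronym corresponds to one word immediately before it -- an assumption that holds for 'Lung cancer screening (LCS)' but not for every acronym:

import re

s = "Lung cancer screening (LCS) reduces NSCLC mortality"

for m in re.finditer(r'\(([A-Z]{2,})\)', s):     # each "(ABC)" group
    n = len(m.group(1))                          # letters in the acronym
    words = re.findall(r'\S+', s[:m.start()])    # words before the "("
    print(' '.join(words[-n:]))                  # -> Lung cancer screening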
Q: Using json_normalize function to create a relational data model? I have a nested Python dictionary that I want to convert into a relational model. I am struggling to parse the dictionary into two related tables: a "workspace" table and a "datasets" table, joined by the key workspace_id. simplified_dict ={ "workspaces":[ { "workspace_id":"d507422c", "workspace_name":"Workspace 1", "datasets":[ { "dataset_id":"e7e8a355", "dataset_name":"Dataset 1 in workspace 1" }, { "dataset_id":"bbe8a355", "dataset_name":"Dataset 2 in workspace 1" } ] }, { "workspace_id":"etyyy422c-8d6d", "workspace_name":"Workspace 2", "datasets":[ { "dataset_id":"89jke8a355", "dataset_name":"Dataset 3 in Workspace 2" }, { "dataset_id":"tyii8a355", "dataset_name":"Dataset 4 in workspace 2" } ] } ], "datasourceInstances":[ ] } I can create a table containing workspace information using the pandas json_normalize function. import pandas as pd df_workspaces = pd.json_normalize(simplified_dict, record_path=['workspaces']) df_workspaces However, when I try to create the second table "datasets" using the same function, I get a dataframe, but it doesn't have a workspace key that allows me to join the two tables. df_datasets_in_workspaces = pd.json_normalize(simplified_dict, record_path=['workspaces','datasets']) df_datasets_in_workspaces Is there a way to add the workspace key to this datasets table, to enable the join, while still using the json_normalize function? If possible I would prefer a solution using json_normalize, rather than using a loop or comprehension, as json_normalize allows me to easily convert any layer of my real data (with 5 levels of nesting) into a dataframe. With my real dataset, I will be looking to generate circa 15 tables, so a low-code, very intuitive approach is preferred. A copy of a Google Colab notebook with the code is accessible here. Any help would be appreciated. A: workspace_df = pd.json_normalize(data=simplified_dict, record_path=["workspaces"]).drop(columns="datasets") print(workspace_df) datasets_df = pd.json_normalize(data=simplified_dict["workspaces"], meta=["workspace_id"], record_path=["datasets"]) print(datasets_df) Alternative: datasets_df = pd.json_normalize( data=simplified_dict, meta=[["workspaces", "workspace_id"]], record_path=["workspaces", "datasets"] ) datasets_df.columns = datasets_df.columns.str.split(".").str[-1] Outputs: workspace_id workspace_name 0 d507422c Workspace 1 1 etyyy422c-8d6d Workspace 2 dataset_id dataset_name workspace_id 0 e7e8a355 Dataset 1 in workspace 1 d507422c 1 bbe8a355 Dataset 2 in workspace 1 d507422c 2 89jke8a355 Dataset 3 in Workspace 2 etyyy422c-8d6d 3 tyii8a355 Dataset 4 in workspace 2 etyyy422c-8d6d
Using json_normalize function to create a relational data model?
I have a nested Python dictionary that I want to convert into a relational model. I am struggling to parse the dictionary into two related tables: a "workspace" table and a "datasets" table - joined by the key workspace_id simplified_dict ={ "workspaces":[ { "workspace_id":"d507422c", "workspace_name":"Workspace 1", "datasets":[ { "dataset_id":"e7e8a355", "dataset_name":"Dataset 1 in workspace 1" }, { "dataset_id":"bbe8a355", "dataset_name":"Dataset 2 in workspace 1" } ] }, { "workspace_id":"etyyy422c-8d6d", "workspace_name":"Workspace 2", "datasets":[ { "dataset_id":"89jke8a355", "dataset_name":"Dataset 3 in Workspace 2" }, { "dataset_id":"tyii8a355", "dataset_name":"Dataset 4 in workspace 2" } ] } ], "datasourceInstances":[ ] } I can create a table containing workspace information using the pandas json_normalize function. import pandas as pd df_workspaces = pd.json_normalize(simplified_dict, record_path=['workspaces']) df_workspaces However, when I try and create the second table "datasets" using the same function, I get a dataframe, but it doesn't have a workspace key, that allows me to join the two tables. df_datasets_in_workspaces = pd.json_normalize(simplified_dict, record_path=['workspaces','datasets']) df_datasets_in_workspaces Is there a way to add the workspace key to the this datasets table, to enable the join, while still using the json_normalize function? If possible I would prefer a solution using json_normalize, rather than using a loop or comprehension, as the json_normalize allows me to easily convert any layer of my real data (with 5 levels of nesting) into a dataframe. With my real datset, I will be looking to generate circa 15 tables, so a low code, very intuitive approach is prefered. Copy of a google colab notebook with the code is accessible here Any help would be appreciated.
[ "workspace_df = pd.json_normalize(data=simplified_dict, record_path=[\"workspaces\"]).drop(columns=\"datasets\")\nprint(workspace_df)\n\ndatasets_df = pd.json_normalize(data=simplified_dict[\"workspaces\"], meta=[\"workspace_id\"], record_path=[\"datasets\"])\nprint(datasets_df)\n\nAlternative:\ndatasets_df = pd.json_normalize(\n data=simplified_dict,\n meta=[[\"workspaces\", \"workspace_id\"]],\n record_path=[\"workspaces\", \"datasets\"]\n)\ndatasets_df.columns = datasets_df.columns.str.split(\".\").str[-1]\n\nOutputs:\n workspace_id workspace_name\n0 d507422c Workspace 1\n1 etyyy422c-8d6d Workspace 2\n\n dataset_id dataset_name workspace_id\n0 e7e8a355 Dataset 1 in workspace 1 d507422c\n1 bbe8a355 Dataset 2 in workspace 1 d507422c\n2 89jke8a355 Dataset 3 in Workspace 2 etyyy422c-8d6d\n3 tyii8a355 Dataset 4 in workspace 2 etyyy422c-8d6d\n\n" ]
[ 2 ]
[]
[]
[ "dictionary", "json_normalize", "jsonparser", "pandas", "python" ]
stackoverflow_0074588672_dictionary_json_normalize_jsonparser_pandas_python.txt
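As a quick sanity check that the two frames really behave as a relational model, they can be joined back together on the shared key (workspace_df and datasets_df are the frames produced in the answer above):

# many datasets per workspace, exactly one workspace per dataset
merged = datasets_df.merge(
    workspace_df,
    on="workspace_id",
    how="left",
    validate="many_to_one",  # raises if workspace_id is duplicated in workspace_df
)
print(merged)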
Q: flask-migrate / alembic: How to add a postgresql identity column to an existing table? I’m trying to create a migration to add a new identity column to an existing table. The table should eventually become the new primary key of that table. class Action(db.Model): id = db.Column(db.Integer(), db.Identity(), primary_key=True) # new primary key uuid = db.Column(sqlalchemy_utils.UUIDType, index=True, nullable=False) # old primary key When I generate the migration using flask db migrate I only get a new integer column: def upgrade(): """Upgrade from previous version.""" op.add_column("actions", sa.Column("id", sa.Integer(), nullable=False)) For a new sequence based id-column I know that I would need to create the sequence explicitly to use it as server_default (eg as described here: https://sqlalchemy-alembic.narkive.com/U6p0nSGQ/adding-an-auto-increment-column-to-an-existing-table): def upgrade(): """Upgrade from previous version.""" op.execute(sa.schema.CreateSequence(sa.Sequence("actions_id_seq"))) op.add_column("actions", sa.Column("id", sa.Integer(), server_default=sa.text("nextval('actions_id_seq'::regclass)"), nullable=False)) This is using sqlalchemy 1.4.42 and alembic 1.8.1. I couldn’t find any way to add an identity column with GENERATED BY DEFAULT AS IDENTITY. Any suggestions on how to accomplish this? A: It seems it was as simple as adding sa.Identity(): def upgrade(): """Upgrade from previous version.""" op.add_column("actions", sa.Column("id", sa.Integer(), sa.Identity(), nullable=False))
flask-migrate / alembic: How to add a postgresql identity column to an existing table?
I’m trying to create a migration to add a new identity column to an existing table. The table should eventually become the new primary key of that table. class Action(db.Model): id = db.Column(db.Integer(), db.Identity(), primary_key=True) # new primary key uuid = db.Column(sqlalchemy_utils.UUIDType, index=True, nullable=False) # old primary key When I generate the migration using flask db migrate I only get a new integer column: def upgrade(): """Upgrade from previous version.""" op.add_column("actions", sa.Column("id", sa.Integer(), nullable=False)) For a new sequence based id-column I know that I would need to create the sequence explicitly to use it as server_default (eg as described here: https://sqlalchemy-alembic.narkive.com/U6p0nSGQ/adding-an-auto-increment-column-to-an-existing-table): def upgrade(): """Upgrade from previous version.""" op.execute(sa.schema.CreateSequence(sa.Sequence("actions_id_seq"))) op.add_column("actions", sa.Column("id", sa.Integer(), server_default=sa.text("nextval('actions_id_seq'::regclass)"), nullable=False)) This is using sqlalchemy 1.4.42 and alembic 1.8.1. I couldn’t find any way to add an identity column with GENERATED BY DEFAULT AS IDENTITY. Any suggestions on how to accomplish this?
[ "It seems it was as simple as adding sa.Identity():\ndef upgrade():\n \"\"\"Upgrade from previous version.\"\"\"\n op.add_column(\"actions\", sa.Column(\"id\", sa.Integer(), sa.Identity(), nullable=False))\n\n" ]
[ 0 ]
[]
[]
[ "alembic", "flask_migrate", "postgresql", "python" ]
stackoverflow_0074588719_alembic_flask_migrate_postgresql_python.txt
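For reference, a sketch of the accepted fix spelled out, including the later step of promoting the column to primary key (the constraint name actions_pkey is PostgreSQL's default naming convention and is an assumption here, not something stated in the question):

import sqlalchemy as sa
from alembic import op

def upgrade():
    """Add a GENERATED BY DEFAULT AS IDENTITY column."""
    # Identity(always=False) emits GENERATED BY DEFAULT AS IDENTITY
    op.add_column(
        "actions",
        sa.Column("id", sa.Integer(), sa.Identity(always=False), nullable=False),
    )
    # A follow-up migration could then swap the primary key:
    # op.drop_constraint("actions_pkey", "actions", type_="primary")
    # op.create_primary_key("actions_pkey", "actions", ["id"])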
Q: Remove rows in pandas dataframe after a certain value at max index I have a pandas dataframe with a rate column that looks like below: import numpy as np import pandas as pd num = np.repeat(12, 3) num1 = np.repeat(11, 3) num2 = np.repeat(7, 2) num3 = np.repeat(10, 2) num4 = np.repeat(7, 3) num5 = np.repeat(9, 5) num6 = np.repeat(3, 4) num7 = np.repeat(7, 4) df = pd.DataFrame(columns= ['rate']) df['rate'] = num df = pd.concat([df, pd.DataFrame(num1, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num2, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num3, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num4, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num5, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num6, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num7, columns=['rate'])]) df = df.reset_index(drop = True) values = (7,9) There can be more 7s or 9s. I would like to delete 2 rows after the end points (max index) of each run of 7 or 9. The expected result would look like below: num = np.repeat(12, 3) num1 = np.repeat(11, 3) num2 = np.repeat(7, 2) num3 = np.repeat(7, 3) num4 = np.repeat(9, 3) num5 = np.repeat(3, 2) num6 = np.repeat(7, 4) dd = pd.DataFrame(columns= ['rate']) dd['rate'] = num dd = pd.concat([dd, pd.DataFrame(num1, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num2, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num3, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num4, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num5, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num6, columns=['rate'])]) dd = dd.reset_index(drop = True) Any suggestion on how I can do that? Thank you for your time and effort! A: Here is one way to do it using the Pandas shift method: # Setup max_indices = df[(df["rate"] != df["rate"].shift(-1)) & (df["rate"].isin([7, 9]))].index index = df.index.to_list() new_index = [] start = 0 # Build new index for idx in max_indices: new_index = new_index + index[start: idx + 1] start = idx + 3 dd = df.loc[new_index, :].reset_index(drop=True) Then: print(dd) # Output rate 0 12 1 12 2 12 3 11 4 11 5 11 6 7 7 7 8 7 9 7 10 7 11 9 12 9 13 9 14 3 15 3 16 7 17 7 18 7 19 7
Remove rows in pandas dataframe after a certain value at max index
I have a pandas dataframe with rate look like below: import numpy as np import pandas as pd num = np.repeat(12, 3) num1 = np.repeat(11, 3) num2 = np.repeat(7, 2) num3 = np.repeat(10, 2) num4 = np.repeat(7, 3) num5 = np.repeat(9, 5) num6 = np.repeat(3, 4) num7 = np.repeat(7, 4) df = pd.DataFrame(columns= ['rate']) df['rate'] = num df = pd.concat([df, pd.DataFrame(num1, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num2, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num3, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num4, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num5, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num6, columns=['rate'])]) df = pd.concat([df, pd.DataFrame(num7, columns=['rate'])]) df = df.reset_index(drop = True) values = (7,9) There can be more 7s or 9s. I would like to delete 2 rows after the end points (max index) of each run of 7 or 9. The expected result would look like below: num = np.repeat(12, 3) num1 = np.repeat(11, 3) num2 = np.repeat(7, 2) num3 = np.repeat(7, 3) num4 = np.repeat(9, 3) num5 = np.repeat(3, 2) num6 = np.repeat(7, 4) dd = pd.DataFrame(columns= ['rate']) dd['rate'] = num dd = pd.concat([dd, pd.DataFrame(num1, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num2, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num3, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num4, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num5, columns=['rate'])]) dd = pd.concat([dd, pd.DataFrame(num6, columns=['rate'])]) dd = dd.reset_index(drop = True) Any suggestion how can I do that? Thank you for your time and effort!
[ "Here is one way to do it using Pandas shift method:\n# Setup\nmax_indices = df[(df[\"rate\"] != df[\"rate\"].shift(-1)) & (df[\"rate\"].isin([7, 9]))].index\nindex = df.index.to_list()\nnew_index = []\nstart = 0\n\n# Build new index\nfor idx in max_indices:\n new_index = new_index + index[start: idx + 1]\n start = idx + 3\n\ndd = df.loc[new_index, :].reset_index(drop=True)\n\nThen:\nprint(dd)\n# Output\n rate\n0 12\n1 12\n2 12\n3 11\n4 11\n5 11\n6 7\n7 7\n8 7\n9 7\n10 7\n11 9\n12 9\n13 9\n14 3\n15 3\n16 7\n17 7\n18 7\n19 7\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074553929_dataframe_pandas_python.txt
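An alternative sketch without the explicit index loop: label consecutive runs, take the last row of each 7/9 run, and drop the two rows that follow it (run boundaries are evaluated on the original frame, which matches the accepted answer's behaviour):

runs = (df["rate"] != df["rate"].shift()).cumsum()    # label consecutive runs
ends = df.groupby(runs).tail(1)                       # last row of each run
ends = ends[ends["rate"].isin([7, 9])].index          # ends of 7/9 runs only
to_drop = [i for e in ends for i in (e + 1, e + 2) if i in df.index]
dd = df.drop(index=to_drop).reset_index(drop=True)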
Q: How can we get weekly and monthly returns based on daily stock closing prices? All my data is in a dataframe My dataframe originally looks like this. index ticker Adj Close Date 2022-01-03 0 AXP 166.056259 2022-01-04 1 AXP 171.387100 2022-01-05 2 AXP 169.560791 2022-01-06 3 AXP 171.114563 2022-01-07 4 AXP 172.579315 Or, if I reset the index, it looks like this. Date ticker Adj Close 0 2022-01-03 AXP 166.056229 1 2022-01-04 AXP 171.387100 2 2022-01-05 AXP 169.560791 3 2022-01-06 AXP 171.114563 4 2022-01-07 AXP 172.579300 I have closing prices for 30 stocks, and the shape of the DF is (1769, 3). I can easily get returns and mean returns: returns = table.pct_change() mean_returns = returns.mean() What's the easiest way to get weekly and monthly returns? There must be some kind of df.groupby 'W' or 'M', then returns are automatically calculated, but when I Googled it, I didn't find a whole lot. Any thoughts on how to get this working for weeks and months? A: You can extract the month and the week as separate columns as below, and then you can use groupby with aggregates first and last which will allow you to compute the gain for the whole week (in case you hold the stock) import random import pandas as pd value = random.sample(range(1, 80), 79) begin_date = '2019-10-16' df = pd.DataFrame({'value': value, 'ticker':'tst', 'date':pd.date_range(begin_date, periods=len(value))}) df['year'] = df['date'].dt.isocalendar().year df['week'] = df['date'].dt.isocalendar().week df['month'] = df['date'].dt.month weekly_increase = df.groupby(['year', 'week']).agg(['first','last']) weekly_increase['increase'] = (weekly_increase[('value','last')] - weekly_increase[('value','first')]) / weekly_increase[('value','first')] print(weekly_increase) Results in value ticker date month increase first last first last first last first last year week 2019 42 5 59 tst tst 2019-10-16 2019-10-20 10 10 10.800000 43 79 3 tst tst 2019-10-21 2019-10-27 10 10 -0.962025 44 58 26 tst tst 2019-10-28 2019-11-03 10 11 -0.551724 45 70 31 tst tst 2019-11-04 2019-11-10 11 11 -0.557143 46 78 62 tst tst 2019-11-11 2019-11-17 11 11 -0.205128
How can we get weekly and monthly returns based on daily stock closing prices? All my data is in a dataframe
My dataframe originally looks like this. index ticker Adj Close Date 2022-01-03 0 AXP 166.056259 2022-01-04 1 AXP 171.387100 2022-01-05 2 AXP 169.560791 2022-01-06 3 AXP 171.114563 2022-01-07 4 AXP 172.579315 Or, if I reset the index, it looks like this. Date ticker Adj Close 0 2022-01-03 AXP 166.056229 1 2022-01-04 AXP 171.387100 2 2022-01-05 AXP 169.560791 3 2022-01-06 AXP 171.114563 4 2022-01-07 AXP 172.579300 I have closing prices for 30 stocks, and the shape of the DF is (1769, 3). I can easily get returns and mean returns: returns = table.pct_change() mean_returns = returns.mean() What's the easiest way to get weekly and monthly returns? There must be some kind of df.groupby 'W' or 'M', then returns are automatically calculated, but when I Googled it, I didn't find a whole lot. Any thoughts on how to get this working for weeks and months?
[ "You can extract the month and the week as separate columns as below, and then you can use groupby with aggregates first and last which will allow you to compute the gain for the whole week (in case you hold the stock)\nimport random\nimport pandas as pd\n\nvalue = random.sample(range(1, 80), 79)\nbegin_date = '2019-10-16'\n\ndf = pd.DataFrame({'value': value,\n 'ticker':'tst',\n 'date':pd.date_range(begin_date, periods=len(value))})\n\ndf['year'] = df['date'].dt.isocalendar().year\ndf['week'] = df['date'].dt.isocalendar().week\ndf['month'] = df['date'].dt.month\n\nweekly_increase = df.groupby(['year', 'week']).agg(['first','last'])\nweekly_increase['increase'] = (weekly_increase[('value','last')] - weekly_increase[('value','first')]) / weekly_increase[('value','first')]\n\nprint(weekly_increase)\n\n\nResults in\n value ticker date month increase\n first last first last first last first last \nyear week \n2019 42 5 59 tst tst 2019-10-16 2019-10-20 10 10 10.800000\n 43 79 3 tst tst 2019-10-21 2019-10-27 10 10 -0.962025\n 44 58 26 tst tst 2019-10-28 2019-11-03 10 11 -0.551724\n 45 70 31 tst tst 2019-11-04 2019-11-10 11 11 -0.557143\n 46 78 62 tst tst 2019-11-11 2019-11-17 11 11 -0.205128\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "python", "python_3.x" ]
stackoverflow_0074587121_dataframe_python_python_3.x.txt
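For the 'W'/'M' grouping the question asks about, resample is the usual tool. A sketch assuming the Date index and 'Adj Close' column shown in the question, one ticker at a time:

# last close of each period, then percent change between periods
weekly_returns = df["Adj Close"].resample("W").last().pct_change()
monthly_returns = df["Adj Close"].resample("M").last().pct_change()

# with all 30 tickers in one frame (still assuming a DatetimeIndex):
weekly_by_ticker = (df.groupby("ticker")["Adj Close"]
                      .resample("W").last()
                      .groupby(level="ticker").pct_change())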
Q: Creating and updating data from API answer to Django REST project (MySQL)? I have a Django REST project. I have models User, Store and Warehouse. And I have a module with a marketplace parser that gets data from the marketplace API. In this module there is a class Market and a method "get_warehouses_list". This method returns a JSON with a STORE's warehouse list. Example answer: { "result": [ { "warehouse_id": 1, "name": "name1", "is_rfbs": false }, { "warehouse_id": 2, "name": "name2", "is_rfbs": true } ] } What I have to do is to make creating and updating methods to set and update this warehouse list into my MySQL DB (with creating an endpoint for setting and updating this data). I don't know what is incorrect in my code, but when I send a POST request to my endpoint in urls.py router.register("", WarehouseApi, basename="warehouse") I get a 400 error instead of setting the warehouse list into my DB. My code: user/models.py class User(AbstractUser): username = models.CharField( max_length=150, unique=True, null=True) id = models.UUIDField( primary_key=True, default=uuid.uuid4, unique=True, editable=False) store/models.py class Store(models.Model): user = models.ForeignKey( User, on_delete=models.PROTECT) name = models.CharField(max_length=128, blank=True) type = models.PositiveSmallIntegerField( choices=MARKET, default=1, verbose_name="Type API") api_key = models.CharField(max_length=128) client_id = models.CharField(max_length=128) warehouses/models.py class Warehouse(models.Model): store = models.ForeignKey( Store, on_delete=models.CASCADE, null=True) warehouse_id = models.BigIntegerField( unique = True, null = True) name = models.CharField( max_length=150) is_rfbs = models.BooleanField(default=False) serializers.py class WarehouseSerializer(serializers.ModelSerializer): class Meta: model = Warehouse fields = '__all__' store = serializers.CharField(max_length=50) warehouse_id = serializers.IntegerField() name = serializers.CharField(max_length=100) is_rfbs = serializers.BooleanField() is_default_warehouse = serializers.BooleanField() class WarehouseUpdateSerializer(serializers.ModelSerializer): class Meta: model = Warehouse fields = ('name', 'is_rfbs',) def save(self, **kwargs): self.instance.name = self.validated_data["name"] self.instance.is_rfbs = self.validated_data["is_rfbs"] self.instance.save() return self.instance views.py class WarehouseApi(ModelViewSet): def get_queryset(self): return Warehouse.objects.filter( store__user_id=self.request.user.pk)\ .order_by('-warehouse_id') def get_serializer_class(self): if self.request.method in ("PUT", "PATCH"): return WarehouseUpdateSerializer return WarehouseSerializer def create(self, request, *args, **kwargs): st = Store.objects.filter(user=self.request.user, # getting all stores with marketplace type = 1 type=1) for e in st: api = Market(api_key=e.api_key, # call parser class Market client_id=e.client_id) data = api.get_warehouses_list() # call Market's method 'get_warehouses_list' if len(data['result']) > 0: for wh in data['result']: alreadyExists = Warehouse.objects.filter( # check if a warehouse with this ID already exists warehouse_id=wh.get( 'warehouse_id')).exists() if alreadyExists: return Response({'message':'Warehouse ID already exists'}) else: wh_data = { 'warehouse_id': wh.get('warehouse_id'), 'name': wh.get('name'), 'is_rfbs': wh.get('is_rfbs') } Warehouse.objects.create(**wh_data) warehouses = Warehouse.objects.filter( marketplace=1, store__in=st).order_by( '-warehouse_id') s = WarehouseSerializer(warehouses, many=True) return Response(status=200, data=s.data) else: return Response(status=400, data={"Error": "Store has no warehouses"}) def update(self, request, *args, **kwargs): partial = kwargs.pop('partial', False) instance = self.get_object() serializer = self.get_serializer(instance, data=request.data, partial=partial) serializer.is_valid(raise_exception=True) api = Market(api_key=instance.store.api_key, client_id=instance.store.client_id) warehouses = Warehouse.objects.filter(store__user_id=self.request.user, store__type=1) # get all the store's warehouses if warehouses: for warehouse in warehouses: r = api.get_warehouses_list() # call Market's method 'get_warehouses_list' if len(r['result']) > 0: for wh in r['result']: if serializer.validated_data["name"] != wh['name']: instance.name = wh['name'] instance.save(update_fields=['name']) if serializer.validated_data["is_rfbs"] != wh['is_rfbs']: instance.is_rfbs = wh['is_rfbs'] instance.save(update_fields=['is_rfbs']) serializer = WarehouseUpdateSerializer(instance) return Response(serializer.data, { 'status': status.HTTP_200_OK, 'message': 'Warehouse updated successfully' }) else: return Response({ 'message': 'Store has no warehouses' }) else: return Response({ 'message': 'There are no saved warehouses in DB' }) A: The 400 error might be caused by a number of reasons. Do you have any logging information you can provide? Either from the Python logs or using the browser developer tools? In your own code you are returning a 400 code if the variable data is empty. Are you sure you are not hitting this validation? If nothing turns up, I advise you to add some exception handling and logging capabilities to your own code, as it seems to be very unprotected, and post that information here if needed.
Creating and updating data from API answer to Django REST project (MySQL)?
I have a Django REST project. I have models User, Store and Warehouse. And I have a module with a marketplace parser that gets data from the marketplace API. In this module there is a class Market and a method "get_warehouses_list". This method returns a JSON with a STORE's warehouse list. Example answer: { "result": [ { "warehouse_id": 1, "name": "name1", "is_rfbs": false }, { "warehouse_id": 2, "name": "name2", "is_rfbs": true } ] } What I have to do is to make creating and updating methods to set and update this warehouse list into my MySQL DB (with creating an endpoint for setting and updating this data). I don't know what is incorrect in my code, but when I send a POST request to my endpoint in urls.py router.register("", WarehouseApi, basename="warehouse") I get a 400 error instead of setting the warehouse list into my DB. My code: user/models.py class User(AbstractUser): username = models.CharField( max_length=150, unique=True, null=True) id = models.UUIDField( primary_key=True, default=uuid.uuid4, unique=True, editable=False) store/models.py class Store(models.Model): user = models.ForeignKey( User, on_delete=models.PROTECT) name = models.CharField(max_length=128, blank=True) type = models.PositiveSmallIntegerField( choices=MARKET, default=1, verbose_name="Type API") api_key = models.CharField(max_length=128) client_id = models.CharField(max_length=128) warehouses/models.py class Warehouse(models.Model): store = models.ForeignKey( Store, on_delete=models.CASCADE, null=True) warehouse_id = models.BigIntegerField( unique = True, null = True) name = models.CharField( max_length=150) is_rfbs = models.BooleanField(default=False) serializers.py class WarehouseSerializer(serializers.ModelSerializer): class Meta: model = Warehouse fields = '__all__' store = serializers.CharField(max_length=50) warehouse_id = serializers.IntegerField() name = serializers.CharField(max_length=100) is_rfbs = serializers.BooleanField() is_default_warehouse = serializers.BooleanField() class WarehouseUpdateSerializer(serializers.ModelSerializer): class Meta: model = Warehouse fields = ('name', 'is_rfbs',) def save(self, **kwargs): self.instance.name = self.validated_data["name"] self.instance.is_rfbs = self.validated_data["is_rfbs"] self.instance.save() return self.instance views.py class WarehouseApi(ModelViewSet): def get_queryset(self): return Warehouse.objects.filter( store__user_id=self.request.user.pk)\ .order_by('-warehouse_id') def get_serializer_class(self): if self.request.method in ("PUT", "PATCH"): return WarehouseUpdateSerializer return WarehouseSerializer def create(self, request, *args, **kwargs): st = Store.objects.filter(user=self.request.user, # getting all stores with marketplace type = 1 type=1) for e in st: api = Market(api_key=e.api_key, # call parser class Market client_id=e.client_id) data = api.get_warehouses_list() # call Market's method 'get_warehouses_list' if len(data['result']) > 0: for wh in data['result']: alreadyExists = Warehouse.objects.filter( # check if a warehouse with this ID already exists warehouse_id=wh.get( 'warehouse_id')).exists() if alreadyExists: return Response({'message':'Warehouse ID already exists'}) else: wh_data = { 'warehouse_id': wh.get('warehouse_id'), 'name': wh.get('name'), 'is_rfbs': wh.get('is_rfbs') } Warehouse.objects.create(**wh_data) warehouses = Warehouse.objects.filter( marketplace=1, store__in=st).order_by( '-warehouse_id') s = WarehouseSerializer(warehouses, many=True) return Response(status=200, data=s.data) else: return Response(status=400, data={"Error": "Store has no warehouses"}) def update(self, request, *args, **kwargs): partial = kwargs.pop('partial', False) instance = self.get_object() serializer = self.get_serializer(instance, data=request.data, partial=partial) serializer.is_valid(raise_exception=True) api = Market(api_key=instance.store.api_key, client_id=instance.store.client_id) warehouses = Warehouse.objects.filter(store__user_id=self.request.user, store__type=1) # get all the store's warehouses if warehouses: for warehouse in warehouses: r = api.get_warehouses_list() # call Market's method 'get_warehouses_list' if len(r['result']) > 0: for wh in r['result']: if serializer.validated_data["name"] != wh['name']: instance.name = wh['name'] instance.save(update_fields=['name']) if serializer.validated_data["is_rfbs"] != wh['is_rfbs']: instance.is_rfbs = wh['is_rfbs'] instance.save(update_fields=['is_rfbs']) serializer = WarehouseUpdateSerializer(instance) return Response(serializer.data, { 'status': status.HTTP_200_OK, 'message': 'Warehouse updated successfully' }) else: return Response({ 'message': 'Store has no warehouses' }) else: return Response({ 'message': 'There are no saved warehouses in DB' })
[ "The 400 Error might be caused by a number of reasons. Do you have any logging information you can provide? Either from the Python logs or using the browser developer tools?\nIn your own code you are returning a 400 code if the variable data is empty. Are you sure you are not hitting this validation?\nIf nothing turns up, I advise you to add some exception dealing and logging capabilities to your own code as it seems to be very unprotected and post that information here if needed.\n" ]
[ 0 ]
[]
[]
[ "django", "django_rest_framework", "python" ]
stackoverflow_0074588744_django_django_rest_framework_python.txt
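A sketch of the logging the answer recommends, wrapped around the external call inside create() -- the names follow the question's code, and the 502 status is an illustrative choice:

import logging
logger = logging.getLogger(__name__)

# inside WarehouseApi.create, around the marketplace call:
try:
    data = api.get_warehouses_list()
except Exception:
    logger.exception("Marketplace API call failed for store %s", e.pk)
    return Response(status=502, data={"Error": "Upstream API failed"})
logger.info("Fetched %d warehouses", len(data.get("result", [])))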
Q: Set specific tick labels while deleting preexisting labels on an axis I need to set a specific tick label on a certain tick position, while deleting preexisting labels. Specifically, the labels on the x axis are dates for the value of a stock, and I want to delete those and set one for each month instead. Date Open High Low Close/Price Volume 6/24/2019 86.78 87.11 86.06 86.55 1507828 6/25/2019 86.63 87.23 84.81 85.06 2481284 6/26/2019 85.38 85.81 84.75 85.33 2034693 6/27/2019 85.65 86.86 85.13 86.43 1394847 6/28/2019 86.66 87.74 86.66 87.55 3025379 7/1/2019 88.84 89.72 87.77 88.45 4017249 7/2/2019 89.21 90 87.95 88.87 2237183 7/3/2019 89.14 91.08 89.14 90.67 1647124 import pandas as pd import matplotlib.pyplot as plt def main(): df = pd.read_excel('DatosUnited.xlsx') date = df['Date'] closePrice = df['Close/Price'] plt.xlabel('Tiempo') plt.ylabel('Valor de las acciones (USD) ') plt.title('Este mes') plt.plot(date,closePrice,'r') plt.show() main() I tried to delete all the tick labels and set a list of new ones, but failed to set them in the desired positions. A: How about something like this? import pandas as pd import datetime import matplotlib.dates as mdates import matplotlib.pyplot as plt df = pd.DataFrame({'Date': {0: '6/24/2019', 1: '6/25/2019', 2: '6/26/2019', 3: '6/27/2019', 4: '6/28/2019', 5: '7/1/2019', 6: '7/2/2019', 7: '7/3/2019'}, 'Open': {0: 86.78, 1: 86.63, 2: 85.38, 3: 85.65, 4: 86.66, 5: 88.84, 6: 89.21, 7: 89.14}, 'High': {0: 87.11, 1: 87.23, 2: 85.81, 3: 86.86, 4: 87.74, 5: 89.72, 6: 90.0, 7: 91.08}, 'Low': {0: 86.06, 1: 84.81, 2: 84.75, 3: 85.13, 4: 86.66, 5: 87.77, 6: 87.95, 7: 89.14}, 'Close/Price': {0: 86.55, 1: 85.06, 2: 85.33, 3: 86.43, 4: 87.55, 5: 88.45, 6: 88.87, 7: 90.67}, 'Volume': {0: 1507828, 1: 2481284, 2: 2034693, 3: 1394847, 4: 3025379, 5: 4017249, 6: 2237183, 7: 1647124}}) date = [datetime.datetime.strptime(d, '%m/%d/%Y') for d in df['Date']] closePrice = df['Close/Price'] fig, ax = plt.subplots() plt.xlabel('Tiempo') plt.ylabel('Valor de las acciones (USD) ') plt.title('Este mes') plt.plot(date,closePrice,'r') locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) plt.show() gives the following for your example data: You can replace AutoDateLocator() with MonthLocator() to just get months for certain, but this didn't look great for the example data. See this question for more info on matplotlib date locators.
Set specific tick labels while deleting preexisting labels on an axis
I need to set a specific tick label on a certain tick position, while deleting preexisting labels. Specifically, The labels on the x axis are dates for the value of a stock, and I want to delete those and set one for each month instead. Date Open High Low Close/Price Volume 6/24/2019 86.78 87.11 86.06 86.55 1507828 6/25/2019 86.63 87.23 84.81 85.06 2481284 6/26/2019 85.38 85.81 84.75 85.33 2034693 6/27/2019 85.65 86.86 85.13 86.43 1394847 6/28/2019 86.66 87.74 86.66 87.55 3025379 7/1/2019 88.84 89.72 87.77 88.45 4017249 7/2/2019 89.21 90 87.95 88.87 2237183 7/3/2019 89.14 91.08 89.14 90.67 1647124 import pandas as pd import matplotlib.pyplot as plt def main(): df = pd.read_excel('DatosUnited.xlsx') date = df['Date'] closePrice = df['Close/Price'] plt.xlabel('Tiempo') plt.ylabel('Valor de las acciones (USD) ') plt.title('Este mes') plt.plot(date,closePrice,'r') plt.show() main() I tried to delete all the tick labels, and set a list of new ones, but failed to set them in the desired position
[ "How about something like this?\nimport pandas as pd\nimport datetime\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\n\n\ndf = pd.DataFrame({'Date': {0: '6/24/2019', 1: '6/25/2019', 2: '6/26/2019', 3: '6/27/2019', 4: '6/28/2019', 5: '7/1/2019', 6: '7/2/2019', 7: '7/3/2019'}, 'Open': {0: 86.78, 1: 86.63, 2: 85.38, 3: 85.65, 4: 86.66, 5: 88.84, 6: 89.21, 7: 89.14}, 'High': {0: 87.11, 1: 87.23, 2: 85.81, 3: 86.86, 4: 87.74, 5: 89.72, 6: 90.0, 7: 91.08}, 'Low': {0: 86.06, 1: 84.81, 2: 84.75, 3: 85.13, 4: 86.66, 5: 87.77, 6: 87.95, 7: 89.14}, 'Close/Price': {0: 86.55, 1: 85.06, 2: 85.33, 3: 86.43, 4: 87.55, 5: 88.45, 6: 88.87, 7: 90.67}, 'Volume': {0: 1507828, 1: 2481284, 2: 2034693, 3: 1394847, 4: 3025379, 5: 4017249, 6: 2237183, 7: 1647124}})\n\ndate = [datetime.datetime.strptime(d, '%m/%d/%Y') for d in df['Date']]\nclosePrice = df['Close/Price']\n\nfig, ax = plt.subplots()\nplt.xlabel('Tiempo')\nplt.ylabel('Valor de las acciones (USD) ')\nplt.title('Este mes')\nplt.plot(date,closePrice,'r')\n\nlocator = mdates.AutoDateLocator()\nformatter = mdates.ConciseDateFormatter(locator)\nax.xaxis.set_major_locator(locator)\nax.xaxis.set_major_formatter(formatter)\n\nplt.show()\n\ngives the following for your example data:\n\nYou can replace AutoDateLocator() with MonthLocator() to just get months for certain, but this didn't look great for the example data. See this question for more info on matplotlib date locators.\n" ]
[ 0 ]
[]
[]
[ "matplotlib", "pandas", "python" ]
stackoverflow_0074587661_matplotlib_pandas_python.txt
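And for strictly one tick per month, as the question originally asks, the auto locator can be swapped out -- a sketch reusing the ax and mdates names from the answer (the date format string is a free choice):

ax.xaxis.set_major_locator(mdates.MonthLocator())            # one tick per month
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))  # e.g. "Jun 2019"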
Q: requests.Session() creating different session every time. How to reuse it? I am trying to initialize a requests.Session, to keep a connection with a webpage. However, I read that each time the session class is called, a new session is created. How is it possible to keep the connection alive? Because with my current code, it's giving me the webpage content after I call the login method (that's OK, it shows that I logged into the page and gives me the content I want), but when I call the update method, it gives me the content from the login page again, not from the page I actually want after login. import requests class LoginLogout: # creating session def __init__(self): self.s = requests.Session() # login method def login(self, user, password, server): payload_data = {'user': user, 'pass': password, 'server': server} print(self.s.post(LOGIN_LINK, payload_data)) # update method def update(self, updt_link): print(self.s.get(updt_link)) def logout(self): response = self.s.get('...some webpage/user/logout...') self.s.close() print(response) Here I am calling the objects: if switch_parameter == "login": login_var = LoginLogout() login_var.login(USER, PASSWORD, SERVER) print('IS IT OK ?', login_var.s.get('.../login...')) # <-OK it shows 200 result (but should I use there "s" too ?) elif switch_parameter == "start": start() elif switch_parameter == "stop": stop() elif switch_parameter == "update": update_prem = LoginLogout() update_prem.update('...different/page...') # (am I missing here "s" ?, or I shouldnt be using it here anyway) elif switch_parameter == "logout": logout() else: pass What am I doing wrong here? I just want to use login to log into the website and keep the session active, while calling update every time I need to get another page. Am I even on the right track or completely wrong? A: The whole point of requests.Session is to persist ephemeral constants (like cookies) between requests. In your code you initialize a new session object, when you initialize a LoginLogout object. You do that here: if switch_parameter == "login": login_var = LoginLogout() ... And you do that here: elif switch_parameter == "update": update_prem = LoginLogout() ... Now login_var and update_prem are obviously different objects and both have the s attribute, each holding a different requests.Session object. How do you expect the attributes of one session to be magically available to the other? If you want to use an existing session, use it. Don't create a new one. I don't know about your actual use case of course, but from what you have presented here, it seems you need to do something like this: scraper_obj = LoginLogout() scraper_obj.login(USER, PASSWORD, SERVER) ... scraper_obj.update('...') ... scraper_obj.logout() Since your created a wrapper around the actual requests.Session instance with LoginLogout, you should not ever need to deal with its s attribute directly, assuming you have methods on LoginLogout for every kind of request you want to make. You initialize it once and then use its methods to perform requests via its internal session object. PS You casually mentioned in a follow-up comment that you set this up as a script to be called repeatedly from the outside and depending on the parameter passed to the script, you want to either log into the site or scrape a specific page. This shows that you either don't understand how "logging in" even works or that you don't understand how processes work. Typically some session attribute (e.g. 
cookie) is created on the client so that it can present it to the server to show that it is already authenticated. When using requests as an HTTP client library, this data is stored inside a requests.Session object. When you call a Python script, you create a new process. Just because you start the same script twice in a row does not mean that one of those processes has any connection to the other. Calling the script to login once has absolutely no effect on what happens the next time you call that script to do something else. None of those earlier session attributes will be present in the second process. I hope this is clear now.
requests.Session() creating different session every time. How to reuse it?
I am trying to initialize a requests.Session, to keep a connection with a webpage. However, I read that each time the session class is called, a new session is created. How is it possible to keep the connection alive? Because with my current code, it's giving me the webpage content after I call the login method (that's OK, it shows that I logged into the page and gives me the content I want), but when I call the update method, it gives me the content from the login page again, not from the page I actually want after login. import requests class LoginLogout: # creating session def __init__(self): self.s = requests.Session() # login method def login(self, user, password, server): payload_data = {'user': user, 'pass': password, 'server': server} print(self.s.post(LOGIN_LINK, payload_data)) # update method def update(self, updt_link): print(self.s.get(updt_link)) def logout(self): response = self.s.get('...some webpage/user/logout...') self.s.close() print(response) Here I am calling the objects: if switch_parameter == "login": login_var = LoginLogout() login_var.login(USER, PASSWORD, SERVER) print('IS IT OK ?', login_var.s.get('.../login...')) # <-OK it shows 200 result (but should I use there "s" too ?) elif switch_parameter == "start": start() elif switch_parameter == "stop": stop() elif switch_parameter == "update": update_prem = LoginLogout() update_prem.update('...different/page...') # (am I missing here "s" ?, or I shouldnt be using it here anyway) elif switch_parameter == "logout": logout() else: pass What am I doing wrong here? I just want to use login to log into the website and keep the session active, while calling update every time I need to get another page. Am I even on the right track or completely wrong?
[ "The whole point of requests.Session is to persist ephemeral constants (like cookies) between requests. In your code you initialize a new session object, when you initialize a LoginLogout object.\nYou do that here:\nif switch_parameter == \"login\": \n login_var = LoginLogout()\n...\n\nAnd you do that here:\nelif switch_parameter == \"update\":\n update_prem = LoginLogout()\n...\n\nNow login_var and update_prem are obviously different objects and both have the s attribute, each holding a different requests.Session object. How do you expect the attributes of one session to be magically available to the other?\nIf you want to use an existing session, use it. Don't create a new one.\nI don't know about your actual use case of course, but from what you have presented here, it seems you need to do something like this:\nscraper_obj = LoginLogout()\nscraper_obj.login(USER, PASSWORD, SERVER)\n...\nscraper_obj.update('...')\n...\nscraper_obj.logout()\n\nSince your created a wrapper around the actual requests.Session instance with LoginLogout, you should not ever need to deal with its s attribute directly, assuming you have methods on LoginLogout for every kind of request you want to make. You initialize it once and then use its methods to perform requests via its internal session object.\n\nPS\nYou casually mentioned in a follow-up comment that you set this up as a script to be called repeatedly from the outside and depending on the parameter passed to the script, you want to either log into the site or scrape a specific page.\nThis shows that you either don't understand how \"logging in\" even works or that you don't understand how processes work. Typically some session attribute (e.g. cookie) is created on the client so that it can present it to the server to show that it is already authenticated. When using requests as an HTTP client library, this data is stored inside a requests.Session object.\nWhen you call a Python script, you create a new process. Just because you start the same script twice in a row does not mean that one of those processes has any connection to the other. Calling the script to login once has absolutely no effect on what happens the next time you call that script to do something else. None of those earlier session attributes will be present in the second process. I hope this is clear now.\n" ]
[ 1 ]
[]
[]
[ "python", "request", "session_cookies" ]
stackoverflow_0074588769_python_request_session_cookies.txt
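If the script really must run as separate processes, the session's cookies have to be persisted explicitly between invocations -- a sketch (the pickle file name is arbitrary, and this only helps for cookie-based logins):

import pickle
import requests

def load_session(path="cookies.pkl"):
    s = requests.Session()
    try:
        with open(path, "rb") as f:
            s.cookies.update(pickle.load(f))  # restore cookies from the last run
    except FileNotFoundError:
        pass                                  # first run: nothing saved yet
    return s

def save_session(s, path="cookies.pkl"):
    with open(path, "wb") as f:
        pickle.dump(s.cookies, f)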
Q: How to get temperature measure given location and time values in a dataframe? I have a pandas dataframe consisting of geo-locations and a time in the past. location_time = pd.read_csv(r'geo_time.csv') print (geo_time) > +---------+---------+---------+-------------------+ | latitude|longitude| altitude| start| +---------+---------+---------+-------------------+ | 48.2393| 11.5713| 520|2020-03-12 13:00:00| +---------+---------+---------+-------------------+ | 35.5426| 139.5975| 5|2020-07-31 18:00:00| +---------+---------+---------+-------------------+ | 49.2466|-123.2214| 5|2020-06-23 11:00:00| +---------+---------+---------+-------------------+ ... I want to add the temperatures at these locations and times in a new column, using the Meteostat library in Python. The library has the "Point" class. For a single location, it works like this: location = Point(40.416775, -3.703790, 660) You can now use this in the class "Hourly" that gives you a dataframe of different climatic variables. (Normally you pass "start" and "end" to get values for every hour in that range, but using "start" twice gives you only one row, for the desired time.) The output is just an example of how the dataframe looks. data = Hourly(location, start, start).fetch() print (data) > temp dwpt rhum prcp ... wpgt pres tsun coco time ... 2020-01-10 01:00:00 -15.9 -18.8 78.0 0.0 ... NaN 1028.0 NaN 0.0 What I want to do now is to use the values from the dataframe "geo_time" as parameters for the classes to get a temperature for every row. My stupid idea was the following: geo_time['location'] = Point(geo_time['latitude'], geo_time['longitude'], geo_time['altitude']) data = Hourly(geo_time['location'], geo_time['start'], geo_time['start']) Afterwards, I would add the "temp" column from "data" to "geo_time". Does someone have an idea how to solve this problem, or know if Meteostat is even capable of doing this? Thanks in advance! A: With the dataframe you provided: import pandas as pd from meteostat import Point, Hourly df = pd.DataFrame( { "latitude": [48.2393, 35.5426, 49.2466], "longitude": [11.5713, 139.5975, -123.2214], "altitude": [520, 5, 5], "start": ["2020-03-12 13:00:00", "2020-07-31 18:00:00", "2020-06-23 11:00:00"], } ) Here is one way to do it with the Pandas to_datetime and apply methods: df["start"] = pd.to_datetime(df["start"], format="%Y-%m-%d %H:%M:%S") df["temp"] = df.apply( lambda x: Hourly( Point(x["latitude"], x["longitude"], x["altitude"]), x["start"], x["start"], ) .fetch()["temp"] .values[0], axis=1, ) Then: print(df) # Output latitude longitude altitude start temp 0 48.2393 11.5713 520 2020-03-12 13:00:00 16.8 1 35.5426 139.5975 5 2020-07-31 18:00:00 24.3 2 49.2466 -123.2214 5 2020-06-23 11:00:00 14.9
How to get temperature measure given location and time values in a dataframe?
I have a pandas dataframe consisting of geo-locations and a time in the past. location_time = pd.read_csv(r'geo_time.csv') print (geo_time) > +---------+---------+---------+-------------------+ | latitude|longitude| altitude| start| +---------+---------+---------+-------------------+ | 48.2393| 11.5713| 520|2020-03-12 13:00:00| +---------+---------+---------+-------------------+ | 35.5426| 139.5975| 5|2020-07-31 18:00:00| +---------+---------+---------+-------------------+ | 49.2466|-123.2214| 5|2020-06-23 11:00:00| +---------+---------+---------+-------------------+ ... I want to add the temperatures at these locations and time in a new column from the Meteostat library in Python. The library has the "Point" class. For a single location, it works like this: location = Point(40.416775, -3.703790, 660) You can now use this in the class "Hourly" that gives you a dataframe of different climatic variables. (normally you use like "start" and "end" to get values for every hour in this range, but using "start" twice, gives you only one row for the desired time). The output is just an example how the dataframe looks like. data = Hourly(location, start, start).fetch() print (data) > temp dwpt rhum prcp ... wpgt pres tsun coco time ... 2020-01-10 01:00:00 -15.9 -18.8 78.0 0.0 ... NaN 1028.0 NaN 0.0 What I want to do now, is to use the values from the dataframe "geo_time" as parameters for the classes to get a temperature for every row. My stupid idea was the following: geo_time['location'] = Point(geo_time['latitude'], geo_time['longitude'], geo_time['altitude']) data = Hourly(geo_time['location'], geo_time['start'], geo_time['start']) Afterwards, I would add the "temp" column from "data" to "geo_time". Does someone have an idea how to solve this problem or knows if Meteostat is even capable doing this? Thanks in advance!
[ "With the dataframe you provided:\nimport pandas as pd\n\ndf = pd.DataFrame(\n {\n \"latitude\": [48.2393, 35.5426, 49.2466],\n \"longitude\": [11.5713, 139.5975, -123.2214],\n \"altitude\": [520, 5, 5],\n \"start\": [\"2020-03-12 13:00:00\", \"2020-07-31 18:00:00\", \"2020-06-23 11:00:00\"],\n }\n)\n\nHere is one way to do it with Pandas to_datetime and apply methods:\ndf[\"start\"] = pd.to_datetime(df[\"start\"], format=\"%Y-%m-%d %H:%M:%S\")\n\ndf[\"temp\"] = df.apply(\n lambda x: Hourly(\n Point(x[\"latitude\"], x[\"longitude\"], x[\"altitude\"]),\n x[\"start\"],\n x[\"start\"],\n )\n .fetch()[\"temp\"]\n .values[0],\n axis=1,\n)\n\nThen:\nprint(df)\n# Output\n latitude longitude altitude start temp\n0 48.2393 11.5713 520 2020-03-12 13:00:00 16.8\n1 35.5426 139.5975 5 2020-07-31 18:00:00 24.3\n2 49.2466 -123.2214 5 2020-06-23 11:00:00 14.9\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "meteostat", "pandas", "python", "weather" ]
stackoverflow_0074549899_dataframe_meteostat_pandas_python_weather.txt
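One caveat with the apply-based approach: Hourly(...).fetch() can come back empty when no observation exists for that hour, and values[0] then raises IndexError. A defensive sketch of the same idea:

from meteostat import Point, Hourly

def temp_at(row):
    hourly = Hourly(Point(row["latitude"], row["longitude"], row["altitude"]),
                    row["start"], row["start"]).fetch()
    return hourly["temp"].iloc[0] if not hourly.empty else None  # None when no data

df["temp"] = df.apply(temp_at, axis=1)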
Q: How to achieve this dynamic allocation in Pyspark I have a dataframe store_df :- store ID Div 637 4000000970 Pac 637 4000000435 Pac 637 4000055542 Pac 637 4000042206 Pac 637 4000014114 Pac I have another dataframe final_list :- Div ID Rank Category Pac 4000000970 1 A Pac 4000000432 2 A Pac 4000000405 3 A Pac 4000042431 4 A Pac 2200028596 5 B Pac 4000000032 6 A Pac 2200028594 7 B Pac 4000014114 8 B Pac 2230001789 9 D Pac 2200001789 10 C Pac 2200001787 11 D Yet another dataframe max_df :- store MAX_A MAX_B MAX_C MAX_D N 637 3 0 2 0 5 I want to add columns Add_A, Add_B, Add_C, Add_D in max_df such that these criteria are satisfied :- 1.) ID in store_df should be removed from final_list for that store based on Div 2.) Post removing, N IDs are to be picked from final_list in order of Rank such that it does not overshoot MAX_A, MAX_B, MAX_C, MAX_D for the respective Category, and this should happen dynamically for all stores in store_df. Note :- N will always be <= the sum of the MAX category columns. So we need to pick N from final_list so that for each category the count does not exceed MAX. In this example it's equal. For example above, post eliminating 4000000970 & 4000014114 and doing a group-by count of final_list we get 4 for A, 2 for B, 1 for C, 2 for D, but as we should not allocate above MAX_A, MAX_B, MAX_C, MAX_D respectively, we have to allocate this way:- Expected output for above max_df :- 1.)max_df store MAX_A MAX_B MAX_C MAX_D N Add_A Add_B Add_C Add_D 637 3 0 2 0 5 3 0 1 0 2.)A pyspark dataframe result_df with the IDs picked in order of Rank for all the stores:- store ID New_Rank Category 637 4000000432 1 A 637 4000000405 2 A 637 4000042431 3 A 637 2200001789 4 C =>(Example 2) :- final_list :- Div ID Rank Category Pac 4000000970 1 A Pac 4000000432 2 A Pac 4000000405 3 A Pac 4000042431 4 A Pac 2200028596 5 B Pac 4000000032 6 A Pac 2200028594 7 B Pac 4000014114 8 B Pac 2230001789 9 D Pac 2200001789 10 C Pac 2200001786 11 C Expected output :- store MAX_A MAX_B MAX_C MAX_D N Add_A Add_B Add_C Add_D 637 3 0 2 0 5 3 0 2 0 result_df :- store ID New_Rank Category 637 4000000432 1 A 637 4000000405 2 A 637 4000042431 3 A 637 2200001789 4 C 637 2200001786 5 C A: from pyspark.sql import SparkSession,Row from pyspark.sql.functions import mean, min, max,count,row_number,lit,udf,col from pyspark.sql import functions as F from pyspark.sql.types import IntegerType from pyspark.sql.window import Window # creating the session spark = SparkSession.builder.getOrCreate() # schema creation by passing list store_df = spark.createDataFrame([ Row(Store=637,ID=4000000970, Div='Pac'), Row(Store=637,ID=4000000435, Div='Pac'), Row(Store=637,ID=4000055542, Div='Pac'), Row(Store=637,ID=4000042206, Div='Pac'), Row(Store=637,ID=4000014114, Div='Pac'), ]) final_list = spark.createDataFrame([ Row(Div='Pac',ID=4000000970, Rank=1,Category='A'), Row(Div='Pac',ID=4000000432, Rank=2,Category='A'), Row(Div='Pac',ID=4000000405, Rank=3,Category='A'), Row(Div='Pac',ID=4000042431, Rank=4,Category='A'), Row(Div='Pac',ID=2200028596, Rank=5,Category='B'), Row(Div='Pac',ID=4000000032, Rank=6,Category='A'), Row(Div='Pac',ID=2200028594, Rank=7,Category='B'), Row(Div='Pac',ID=4000014114, Rank=8,Category='B'), Row(Div='Pac',ID=2230001789, Rank=9,Category='D'), Row(Div='Pac',ID=2200001789, Rank=10,Category='C'), Row(Div='Pac',ID=2200001787, Rank=11,Category='D'), ]) max_df = spark.createDataFrame([ Row(Store=637,MAX_A=3, MAX_B=0,MAX_C=2,MAX_D=0,N=5),]) Updated_final_list = final_list.join(store_df, ["Div","ID"], "left_anti") Updated_final_list.show() # Count
each Category Category_count = ( Updated_final_list.groupBy("Category") .pivot("Category") .count() ) Category_count = Category_count.fillna(value=0) Category_count = Category_count.agg({i:'max' for i in Category_count.columns[1:]}) for i in Category_count.columns: Category_count = Category_count.withColumnRenamed(i, i.split('(')[1][0]) # The following code is to create id to merge max df and Category_count max_df = max_df.withColumn("M_ID", lit(1)) Category_count = Category_count.withColumn("M_ID", lit(1)) max_df1 = max_df.join(Category_count,['M_ID'],'inner') # Calculate number of id needs to be created for each category @udf(returnType=IntegerType()) def max_id_cal(A,B): if B <=A: return B else: return B-(B-A) max_col = [i for i in max_df.columns if i.startswith('MAX')] for c in max_col: max_df1 = max_df1.withColumn(c.split('_')[1], max_id_cal(max_df1[c],c.split('_')[1])) # New Rank is created for each category windowDept = Window.partitionBy("Category").orderBy(col("Rank").asc()) Updated_final_list1=Updated_final_list.withColumn("Rank1",row_number() .over(windowDept)) # Filter number records for each category idx = 0 max_col = [i for i in max_df.columns if i.startswith('MAX')] for c in max_col: max_value = max_df1.select(c.split('_')[1]).collect()[0][0] if idx == 0: df=Updated_final_list1.filter((col("Category")==c.split('_')[1]) & (col("Rank1") <= max_value)) else: df2=Updated_final_list1.filter((col("Category")==c.split('_')[1]) & (col("Rank1") <= max_value)) df = df.union(df2) idx +=1 df = df.drop("Rank") df= df.withColumnRenamed('Rank1', 'Rank') df.show()
How to achieve this dynamic allocation in Pyspark
I have a dataframe store_df :- store ID Div 637 4000000970 Pac 637 4000000435 Pac 637 4000055542 Pac 637 4000042206 Pac 637 4000014114 Pac I have another dataframe final_list :- Div ID Rank Category Pac 4000000970 1 A Pac 4000000432 2 A Pac 4000000405 3 A Pac 4000042431 4 A Pac 2200028596 5 B Pac 4000000032 6 A Pac 2200028594 7 B Pac 4000014114 8 B Pac 2230001789 9 D Pac 2200001789 10 C Pac 2200001787 11 D Yet another dataframe max_df :- store MAX_A MAX_B MAX_C MAX_D N 637 3 0 2 0 5 I want to add columns Add_A, Add_B,Add_C, Add_D in max_df such that this criteria is satisfied :- 1.)ID in store_df should be removed from final_list for that store based on Div 2.)Post removing,Nnumber of ID is to be picked from final_list in order of Rank such that it does not overshoot MAX_A,MAX_B,MAX_C,MAX_D for respective Category and this should happen dynamically for all stores in store_df.Note :- N will always be <= sum of MAX category columns.So we need to pick N from final_list so that for each category the count does not exceed MAX .In this example its equal .For example above post eliminating 4000000970 & 4000014114 and do a group by count of final_list we get 4 for A,2 for B,1 for C,2 for D but as should not allocate above MAX_A,MAX_B,MAX_C,MAX_D respectively we have to allocate this way:- Expected output for above max_df :- 1.)max_df store MAX_A MAX_B MAX_C MAX_D N Add_A Add_B Add_C Add_D 637 3 0 2 0 5 3 0 1 0 2.)A pyspark dataframe result_df with the ID s picked in order of Rank for all the stores:- store ID New_Rank Category 637 4000000432 1 A 637 4000000405 2 A 637 4000042431 3 A 637 2200001789 4 C =>(Example 2) :- final_list :- Div ID Rank Category Pac 4000000970 1 A Pac 4000000432 2 A Pac 4000000405 3 A Pac 4000042431 4 A Pac 2200028596 5 B Pac 4000000032 6 A Pac 2200028594 7 B Pac 4000014114 8 B Pac 2230001789 9 D Pac 2200001789 10 C Pac 2200001786 11 C Expected output :- store MAX_A MAX_B MAX_C MAX_D N Add_A Add_B Add_C Add_D 637 3 0 2 0 5 3 0 2 0 result_df :- store ID New_Rank Category 637 4000000432 1 A 637 4000000405 2 A 637 4000042431 3 A 637 2200001789 4 C 637 2200001786 5 C
[ "from pyspark.sql import SparkSession,Row\nfrom pyspark.sql.functions import mean, min, max,count,row_number,lit,udf,col\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.sql.window import Window\n\n# creating the session\nspark = SparkSession.builder.getOrCreate()\n \n# schema creation by passing list\nstore_df = spark.createDataFrame([\n Row(Store=637,ID=4000000970, Div='Pac'),\n Row(Store=637,ID=4000000435, Div='Pac'),\n Row(Store=637,ID=4000055542, Div='Pac'),\n Row(Store=637,ID=4000042206, Div='Pac'),\n Row(Store=637,ID=4000014114, Div='Pac'),\n])\n\nfinal_list = spark.createDataFrame([\n Row(Div='Pac',ID=4000000970, Rank=1,Category='A'),\n Row(Div='Pac',ID=4000000432, Rank=2,Category='A'),\n Row(Div='Pac',ID=4000000405, Rank=3,Category='A'),\n Row(Div='Pac',ID=4000042431, Rank=4,Category='A'),\n Row(Div='Pac',ID=2200028596, Rank=5,Category='B'),\n Row(Div='Pac',ID=4000000032, Rank=6,Category='A'),\n Row(Div='Pac',ID=2200028594, Rank=7,Category='B'),\n Row(Div='Pac',ID=4000014114, Rank=8,Category='B'),\n Row(Div='Pac',ID=2230001789, Rank=9,Category='D'),\n Row(Div='Pac',ID=2200001789, Rank=10,Category='C'),\n Row(Div='Pac',ID=2200001787, Rank=11,Category='D'),\n\n])\n\nmax_df = spark.createDataFrame([\n Row(Store=637,MAX_A=3, MAX_B=0,MAX_C=2,MAX_D=0,N=5),])\n\nUpdated_final_list = final_list.join(store_df, [\"Div\",\"ID\"], \"left_anti\")\nUpdated_final_list.show()\n\n# Count each Category\nCategory_count = ( Updated_final_list.groupBy(\"Category\")\n .pivot(\"Category\")\n .count() )\nCategory_count = Category_count.fillna(value=0)\n\nCategory_count = Category_count.agg({i:'max' for i in \n Category_count.columns[1:]})\n\nfor i in Category_count.columns:\n Category_count = Category_count.withColumnRenamed(i, i.split('(')[1][0])\n\n# The following code is to create id to merge max df and Category_count\nmax_df = max_df.withColumn(\"M_ID\", lit(1))\nCategory_count = Category_count.withColumn(\"M_ID\", lit(1))\n\nmax_df1 = max_df.join(Category_count,['M_ID'],'inner')\n\n# Calculate number of id needs to be created for each category\n@udf(returnType=IntegerType())\n\ndef max_id_cal(A,B):\n if B <=A:\n return B\n else:\n return B-(B-A)\n\nmax_col = [i for i in max_df.columns if i.startswith('MAX')]\nfor c in max_col:\n max_df1 = max_df1.withColumn(c.split('_')[1], \n max_id_cal(max_df1[c],c.split('_')[1]))\n\n# New Rank is created for each category\nwindowDept = Window.partitionBy(\"Category\").orderBy(col(\"Rank\").asc())\nUpdated_final_list1=Updated_final_list.withColumn(\"Rank1\",row_number()\n .over(windowDept))\n\n# Filter number records for each category\nidx = 0\nmax_col = [i for i in max_df.columns if i.startswith('MAX')]\nfor c in max_col:\n max_value = max_df1.select(c.split('_')[1]).collect()[0][0]\n\n if idx == 0:\n df=Updated_final_list1.filter((col(\"Category\")==c.split('_')[1]) & \n (col(\"Rank1\") <= max_value))\n else:\n df2=Updated_final_list1.filter((col(\"Category\")==c.split('_')[1]) & \n (col(\"Rank1\") <= max_value))\n df = df.union(df2)\n \n idx +=1\n\n\ndf = df.drop(\"Rank\")\ndf= df.withColumnRenamed('Rank1', 'Rank')\n\ndf.show()\n\n" ]
[ 0 ]
[]
[]
[ "azure_databricks", "pyspark", "python" ]
stackoverflow_0074570168_azure_databricks_pyspark_python.txt
Q: I added a proxy to Selenium and now the page won't open [Python] My bot worked successfully on my local network, but after adding proxies it behaves as if it lost connection to the network... Here's my code:

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from time import sleep

# SMM Setup
VIDEO = 'https://www.youtube.com/watch?v=TjiUC8jeF1o'
VIEWS = 20

# PROBLEM SETUP
PROXIES = [
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000'
]

YOUTUBE_ACCEPT_COOKIES_A = '/html/body/ytd-app/ytd-consent-bump-v2-lightbox/tp-yt-paper-dialog/div[4]/div[2]/div[6]/div[1]/ytd-button-renderer[2]/a/tp-yt-paper-button'
YOUTUBE_ACCEPT_COOKIES_B = '/html/body/ytd-app/ytd-consent-bump-v2-lightbox/tp-yt-paper-dialog/div[4]/div[2]/div[6]/div[1]/ytd-button-renderer[2]/yt-button-shape/button'

# start - 153

# Init
global driver

# Create Views
for x in range(len(PROXIES)):
    options = Options()
    options.add_argument('--proxy-server=%s' % PROXIES[x])
    driver = webdriver.Chrome(executable_path='C:/Users/sasha/OneDrive/Desktop/CHROME_DRIVER/chromedriver.exe', chrome_options=options)
    driver.get(VIDEO)
    for y in range(VIEWS / PROXIES):
        driver.refresh()
        try:
            WebDriverWait(driver, 1).until(EC.element_to_be_clickable((By.XPATH, YOUTUBE_ACCEPT_COOKIES_A))).click()
        except TimeoutException:
            try:
                WebDriverWait(driver, 1).until(EC.element_to_be_clickable((By.XPATH, YOUTUBE_ACCEPT_COOKIES_B))).click()
            except:
                print('Any Cookie Accepts Not Found')
        sleep(2)
    driver.quit()  # Close All Tabs

driver.quit()

I tried to remove chrome_options = options. It worked. But I can't use my local network for some reason. A: Try to change the port. Example: 4444 to 12345 / 55555
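Two hedged observations beyond the port suggestion. First, independent of the proxy, range(VIEWS / PROXIES) divides an integer by a list and raises a TypeError; range(VIEWS // len(PROXIES)) is probably what was intended. Second, free proxies are often dead, so a quick liveness check before launching Chrome can rule that out. A minimal sketch (proxy_works is a hypothetical helper and httpbin.org is just an example endpoint):

import requests

def proxy_works(proxy, timeout=5):
    # True if the proxy can reach a known endpoint within the timeout.
    try:
        r = requests.get("https://httpbin.org/ip",
                         proxies={"http": f"http://{proxy}",
                                  "https": f"http://{proxy}"},
                         timeout=timeout)
        return r.ok
    except requests.RequestException:
        return False

live_proxies = [p for p in PROXIES if proxy_works(p)]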
I added a proxy to Selenium and now the page won't open [Python]
My bot worked successfully on my local network, but after adding proxies it behaves as if it lost connection to the network... Here's my code:

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from time import sleep

# SMM Setup
VIDEO = 'https://www.youtube.com/watch?v=TjiUC8jeF1o'
VIEWS = 20

# PROBLEM SETUP
PROXIES = [
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000',
    '0.0.0.0:0000'
]

YOUTUBE_ACCEPT_COOKIES_A = '/html/body/ytd-app/ytd-consent-bump-v2-lightbox/tp-yt-paper-dialog/div[4]/div[2]/div[6]/div[1]/ytd-button-renderer[2]/a/tp-yt-paper-button'
YOUTUBE_ACCEPT_COOKIES_B = '/html/body/ytd-app/ytd-consent-bump-v2-lightbox/tp-yt-paper-dialog/div[4]/div[2]/div[6]/div[1]/ytd-button-renderer[2]/yt-button-shape/button'

# start - 153

# Init
global driver

# Create Views
for x in range(len(PROXIES)):
    options = Options()
    options.add_argument('--proxy-server=%s' % PROXIES[x])
    driver = webdriver.Chrome(executable_path='C:/Users/sasha/OneDrive/Desktop/CHROME_DRIVER/chromedriver.exe', chrome_options=options)
    driver.get(VIDEO)
    for y in range(VIEWS / PROXIES):
        driver.refresh()
        try:
            WebDriverWait(driver, 1).until(EC.element_to_be_clickable((By.XPATH, YOUTUBE_ACCEPT_COOKIES_A))).click()
        except TimeoutException:
            try:
                WebDriverWait(driver, 1).until(EC.element_to_be_clickable((By.XPATH, YOUTUBE_ACCEPT_COOKIES_B))).click()
            except:
                print('Any Cookie Accepts Not Found')
        sleep(2)
    driver.quit()  # Close All Tabs

driver.quit()

I tried to remove chrome_options = options. It worked. But I can't use my local network for some reason.
[ "Try to change port\nExample: 4444 to 12345 / 55555\n" ]
[ 0 ]
[]
[]
[ "proxy", "python", "python_3.x", "selenium", "selenium_webdriver" ]
stackoverflow_0074206758_proxy_python_python_3.x_selenium_selenium_webdriver.txt
Q: Python Binance Futures - problem creating Take Profit Limit order -> (APIError(code=-2021): Order would immediately trigger.)

Trying to write a basic Binance trading bot in Python. I keep getting "APIError(code=-2021): Order would immediately trigger", even though it makes no sense when placing a limit order. At the time of writing, the ETH/BUSD exchange rate is at about 1210. I printed out my current price (1210.00) and the target price (1215.44) at which take profit is supposed to trigger. I can do this without any problems whatsoever via the Binance GUI, and the order is accepted and triggered. But via the API, even if I set my price over (or under) the current market price and the target price to something like 2000 (far above the market price), the order is not accepted and I get the same error. I think there is something wrong with my futures_create_order parameters, but I cannot figure it out from the documentation. Any help would be greatly appreciated. Here is my code:

from binance.client import Client

test_key = "xxx"
test_secret_key = "xxx"

client = Client(test_key, test_secret_key, testnet=True)

symbol = 'ETHBUSD'
tar_profit = 0.09  # take profit when ROE hits 9%
lev = 20           # leverage

ticker_data = client.futures_symbol_ticker(symbol=symbol)
current_price = float(ticker_data["price"])

cp_adder = 1 + float(tar_profit / lev)
tp_price = round(current_price * cp_adder, 2)

qty = 0.2

client.futures_create_order(
    symbol=symbol,
    side='BUY',              # 'SELL' or 'BUY'
    type='TAKE_PROFIT',
    timeInForce='GTC',       # good until cancelled
    price=current_price,
    quantity=qty,
    # isolated=True,
    stopPrice=tp_price,      # take-profit price
    workingType='CONTRACT_PRICE'  # or MARK_PRICE
)

A: Answering my own question because I figured it out. Ashamed to admit it, but I realized the take profit / stop loss orders are additional, separate orders you can send AFTER your first limit/market... order. This means you have to send 2 separate orders to Binance. If we take a look at my example: first I send a limit order to open a long/short position. Right after that I can send an equivalent but opposite take profit order (with the reduceOnly parameter set to True; important!) which will close my position once the criteria are met. So only after the first order is executed and a position is opened can the second order become "active" and close my position.

client.futures_create_order(
    symbol=symbol,
    side='BUY',  # 'SELL' or 'BUY'
    type='LIMIT',
    timeInForce='GTC',
    price=price,
    quantity=qty,
    # isolated=True,
    # stopPrice=stop_price,
    workingType='CONTRACT_PRICE'  # or MARK_PRICE
)

client.futures_create_order(
    symbol=symbol,
    side='SELL',  # 'SELL' or 'BUY'
    type='TAKE_PROFIT',
    timeInForce='GTC',
    price=price,
    reduceOnly=True,
    quantity=qty,
    # isolated=True,
    stopPrice=stop_price,
    workingType='CONTRACT_PRICE'  # or MARK_PRICE
)
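To make the two-order pattern from the self-answer reusable, here is a minimal sketch that wraps both calls in one helper. place_long_with_tp is a hypothetical name; the arguments mirror the answer's own parameters:

def place_long_with_tp(client, symbol, qty, entry_price, tp_price):
    # 1) Limit order that opens the long position.
    client.futures_create_order(
        symbol=symbol, side='BUY', type='LIMIT',
        timeInForce='GTC', price=entry_price, quantity=qty)
    # 2) Opposite take-profit order; reduceOnly means it can only close
    #    the position opened above, never open a new one.
    client.futures_create_order(
        symbol=symbol, side='SELL', type='TAKE_PROFIT',
        timeInForce='GTC', price=entry_price, stopPrice=tp_price,
        reduceOnly=True, quantity=qty)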
Python Binance Futures - problem creating Take Profit Limit order -> (APIError(code=-2021): Order would immediately trigger.)
Trying to write a basic Binance trading bot in Python. I keep getting "APIError(code=-2021): Order would immediately trigger", even though it makes no sense when placing a limit order. At the time of writing, the ETH/BUSD exchange rate is at about 1210. I printed out my current price (1210.00) and the target price (1215.44) at which take profit is supposed to trigger. I can do this without any problems whatsoever via the Binance GUI, and the order is accepted and triggered. But via the API, even if I set my price over (or under) the current market price and the target price to something like 2000 (far above the market price), the order is not accepted and I get the same error. I think there is something wrong with my futures_create_order parameters, but I cannot figure it out from the documentation. Any help would be greatly appreciated. Here is my code:

from binance.client import Client

test_key = "xxx"
test_secret_key = "xxx"

client = Client(test_key, test_secret_key, testnet=True)

symbol = 'ETHBUSD'
tar_profit = 0.09  # take profit when ROE hits 9%
lev = 20           # leverage

ticker_data = client.futures_symbol_ticker(symbol=symbol)
current_price = float(ticker_data["price"])

cp_adder = 1 + float(tar_profit / lev)
tp_price = round(current_price * cp_adder, 2)

qty = 0.2

client.futures_create_order(
    symbol=symbol,
    side='BUY',              # 'SELL' or 'BUY'
    type='TAKE_PROFIT',
    timeInForce='GTC',       # good until cancelled
    price=current_price,
    quantity=qty,
    # isolated=True,
    stopPrice=tp_price,      # take-profit price
    workingType='CONTRACT_PRICE'  # or MARK_PRICE
)
[ "Answering my own question because I figured it out.\nAshamed to admit it but I realized the take profit / stop loss orders are additional separate orders you can send AFTER your first limit/market... order. This means you have to send 2 separate orders to Bianance. If we take a look at my example:\nFirst I send a limit order to open a long/short position.\nRight after that I can send an equivalent but opposite take profit order (with reduceOnly parameter set to True; improtant!) which will close my position once the criteria are met.\nSo only after the first order is executed and a position is opened can the second order become \"active\" and close my position.\n client.futures_create_order(\n symbol=symbol,\n side='BUY', #'SELL' or 'BUY'\n type ='LIMIT',\n timeInForce='GTC',\n price = price,\n quantity = qty,\n #isolated=True,\n #stopPrice=stop_price,\n workingType='CONTRACT_PRICE' #or MARK PRICE\n )\n\n client.futures_create_order(\n symbol=symbol,\n side='SELL', #'SELL' or 'BUY'\n type ='TAKE_PROFIT',\n timeInForce='GTC',\n price = price,\n reduceOnly= True,\n quantity = qty,\n #isolated=True,\n stopPrice=stop_price,\n workingType='CONTRACT_PRICE' #or MARK PRICE\n )\n\n" ]
[ 0 ]
[]
[]
[ "binance", "cryptocurrency", "python", "trading" ]
stackoverflow_0074584665_binance_cryptocurrency_python_trading.txt
Q: Running Python code in parallel from Rust with rust-cpython

I'm trying to speed up a data pipeline using Rust. The pipeline contains bits of Python code that I don't want to modify, so I'm trying to run them as-is from Rust using rust-cpython and multiple threads. However, the performance is not what I expected; it's actually the same as running the Python code bits sequentially in a single thread.

Reading the documentation, I understand that when invoking the following, you actually get a pointer to a single Python interpreter that can only be created once, even if you run it from multiple threads separately.

let gil = Python::acquire_gil();
let py = gil.python();

If that's the case, it means the Python GIL is actually preventing all parallel execution in Rust as well. Is there a way to solve this problem? Here's the code of my test:

use cpython::Python;
use std::thread;
use std::sync::mpsc;
use std::time::Instant;

#[test]
fn python_test_parallel() {
    let start = Instant::now();
    let (tx_output, rx_output) = mpsc::channel();

    let tx_output_1 = mpsc::Sender::clone(&tx_output);
    thread::spawn(move || {
        let gil = Python::acquire_gil();
        let py = gil.python();
        let start_thread = Instant::now();
        py.run("j=0\nfor i in range(10000000): j=j+i;", None, None).unwrap();
        println!("{:27} : {:6.1} ms", "Run time thread 1, parallel", (Instant::now() - start_thread).as_secs_f64() * 1000f64);
        tx_output_1.send(()).unwrap();
    });

    let tx_output_2 = mpsc::Sender::clone(&tx_output);
    thread::spawn(move || {
        let gil = Python::acquire_gil();
        let py = gil.python();
        let start_thread = Instant::now();
        py.run("j=0\nfor i in range(10000000): j=j+i;", None, None).unwrap();
        println!("{:27} : {:6.1} ms", "Run time thread 2, parallel", (Instant::now() - start_thread).as_secs_f64() * 1000f64);
        tx_output_2.send(()).unwrap();
    });

    // Receivers to ensure all threads run
    let _output_1 = rx_output.recv().unwrap();
    let _output_2 = rx_output.recv().unwrap();

    println!("{:37} : {:6.1} ms", "Total time, parallel", (Instant::now() - start).as_secs_f64() * 1000f64);
}

A: The CPython implementation of Python does not allow executing Python bytecode in multiple threads at the same time. As you note yourself, the global interpreter lock (GIL) prevents this. We don't have any information on what exactly your Python code is doing, so I'll give a few general hints on how you could improve the performance of your code.

If your code is I/O-bound, e.g. reading from the network, you will generally get nice performance improvements from using multiple threads. Blocking I/O calls will release the GIL before blocking, so other threads can execute during that time.

Some libraries, e.g. NumPy, internally release the GIL during long-running library calls that don't need access to Python data structures. With these libraries, you can get performance improvements for multi-threaded, CPU-bound code even if you only write pure Python code using the library.

If your code is CPU-bound and spends most of its time executing Python bytecode, you can often use multiple processes rather than threads to achieve parallel execution. The multiprocessing module in the Python standard library helps with this.

If your code is CPU-bound, spends most of its time executing Python bytecode and can't be run in parallel processes because it accesses shared data, you can't run it in multiple threads in parallel; the GIL prevents this. However, even without the GIL, you can't just run sequential code in parallel without changes in any language. Since you have concurrent access to some data, you need to add locking and possibly make algorithmic changes to prevent data races; the details of how to do this depend on your use case. (And if you don't have concurrent data access, you should use processes instead of threads, as above.)

Beyond parallelism, a good way to speed up Python code with Rust is to profile your Python code, find the hot spots where most of the time is spent, and rewrite these bits as Rust functions that you call from your Python code. If this doesn't give you enough of a speedup, you can combine this approach with parallelism; preventing data races is generally easier to achieve in Rust than in most other languages.

A: If you use PyO3 bindings you can use the allow_threads method and callbacks to free the GIL for faster parallelism: https://pyo3.rs/v0.13.2/parallelism.html
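The second answer's pointer can be made concrete with a short sketch. This assumes the PyO3 crate (not rust-cpython), and heavy_sum is a hypothetical function name:

use pyo3::prelude::*;

/// Hypothetical CPU-bound function exposed to Python.
#[pyfunction]
fn heavy_sum(py: Python<'_>, n: u64) -> u64 {
    // allow_threads releases the GIL while the closure runs, so other
    // Python threads can execute concurrently with this Rust code.
    py.allow_threads(|| (0..n).sum())
}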
Running Python code in parallel from Rust with rust-cpython
I'm trying to speed up a data pipeline using Rust. The pipeline contains bits of Python code that I don't want to modify, so I'm trying to run them as-is from Rust using rust-cpython and multiple threads. However, the performance is not what I expected, it's actually the same as running the python code bits sequentially in a single thread.

Reading the documentation, I understand when invoking the following, you actually get a pointer to a single Python interpreter that can only be created once, even if you run it from multiple threads separately.

let gil = Python::acquire_gil();
let py = gil.python();

If that's the case, it means the Python GIL is actually preventing all parallel execution in Rust as well. Is there a way to solve this problem? Here's the code of my test:

use cpython::Python;
use std::thread;
use std::sync::mpsc;
use std::time::Instant;

#[test]
fn python_test_parallel() {
    let start = Instant::now();
    let (tx_output, rx_output) = mpsc::channel();

    let tx_output_1 = mpsc::Sender::clone(&tx_output);
    thread::spawn(move || {
        let gil = Python::acquire_gil();
        let py = gil.python();
        let start_thread = Instant::now();
        py.run("j=0\nfor i in range(10000000): j=j+i;", None, None).unwrap();
        println!("{:27} : {:6.1} ms", "Run time thread 1, parallel", (Instant::now() - start_thread).as_secs_f64() * 1000f64);
        tx_output_1.send(()).unwrap();
    });

    let tx_output_2 = mpsc::Sender::clone(&tx_output);
    thread::spawn(move || {
        let gil = Python::acquire_gil();
        let py = gil.python();
        let start_thread = Instant::now();
        py.run("j=0\nfor i in range(10000000): j=j+i;", None, None).unwrap();
        println!("{:27} : {:6.1} ms", "Run time thread 2, parallel", (Instant::now() - start_thread).as_secs_f64() * 1000f64);
        tx_output_2.send(()).unwrap();
    });

    // Receivers to ensure all threads run
    let _output_1 = rx_output.recv().unwrap();
    let _output_2 = rx_output.recv().unwrap();

    println!("{:37} : {:6.1} ms", "Total time, parallel", (Instant::now() - start).as_secs_f64() * 1000f64);
}
[ "The CPython implementation of Python does not allow executing Python bytecode in multiple threads at the same time. As you note yourself, the global interpreter lock (GIL) prevents this.\nWe don't have any information on what exactly your Python code is doing, so I'll give a few general hints how you could improve the performance of your code.\n\nIf your code is I/O-bound, e.g. reading from the network, you will generally get nice performance improvements from using multiple threads. Blocking I/O calls will release the GIL before blocking, so other threads can execute during that time.\nSome libraries, e.g. NumPy, internally release the GIL during long-running library calls that don't need access to Python data structures. With these libraries, you can get performance improvements for multi-threaded, CPU-bound code even if you only write pure Python code using the library.\nIf your code is CPU-bound and spends most of its time executing Python bytecode, you can often use multipe processes rather than threads to achieve parallel execution. The multiprocessing in the Python standard library helps with this.\nIf your code is CPU-bound, spends most of its time executing Python bytecode and can't be run in parallel processes because it accesses shared data, you can't run it in multiple threads in parallel – the GIL prevents this. However, even without the GIL, you can't just run sequential code in parallel without changes in any language. Since you have concurrent access to some data, you need to add locking and possibly make algorithmic changes to prevent data races; the details of how to do this depend on your use case. (And if you don't have concurrent data access, you should use processes instead of threads – see above.)\n\nBeyond parallelism, a good way to speed up Python code with Rust is to profile your Python code, find the hot spots where most of the time is spent, and rewrite these bits as Rust functions that you call from your Python code. If this doesn't give you enough of a speedup, you can combine this approach with parallelism – preventing data races is generally easier to achieve in Rust than in most other languages.\n", "If you use py03 bindings you can use the allow_threads method and callbacks to free the GIL for faster parralelism: https://pyo3.rs/v0.13.2/parallelism.html\n" ]
[ 2, 0 ]
[]
[]
[ "cpython", "python", "rust" ]
stackoverflow_0060148992_cpython_python_rust.txt
Q: Is there any way to solve "string index out of range"

My problem is to count words that have the letter 'a' at the second position in a string. E.g. for "banana in a bag" the output is: banana bag

for i in re:
    if i[1] == 'a':
        print(i)

It was showing an "index out of range" error due to the word "a" in the sentence. I want the output without the error. Can anyone solve it?

A: I think this is what you wanted, if I understood correctly! Please let me know!
Code:

string = "banana in a bag"
result = ""
for word in string.split():
    try:
        if word[1] == "a":
            result += word + " "
    except IndexError:
        continue
print(result)

Output:

banana bag

A: We can use the EAFP pattern here:

try:
    index = 1  # for example
    my_str = ''
    print(my_str[index])
except IndexError:
    pass  # handle your error here
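For the original counting task there is also a look-before-you-leap variant that avoids the exception entirely; a minimal sketch:

s = "banana in a bag"
# Keep only words long enough to have a second character, with 'a' there.
words = [w for w in s.split() if len(w) > 1 and w[1] == 'a']
print(len(words), words)  # 2 ['banana', 'bag']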
Is there any way to solve "string index out of range"
My problem is to count words that have the letter 'a' at the second position in a string. E.g. for "banana in a bag" the output is: banana bag

for i in re:
    if i[1] == 'a':
        print(i)

It was showing an "index out of range" error due to the word "a" in the sentence. I want the output without the error. Can anyone solve it?
[ "I think this is what you wanted If i am clear..!\nPlease let me know.!!\nCode:-\nstring=\"banana in a bag\"\nresult=\"\"\nfor word in string.split():\n try: \n if word[1]==\"a\":\n result+=word+\" \"\n except IndexError:\n continue\nprint(result)\n\nOutput:-\nbanana bag\n\n", "we can use EAFP pattern here\ntry\n index = 1 # for example\n my_str = '';\n print(my_str[index])\nexcept IndexError:\n # handle your error here. \n\n" ]
[ 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074588792_python.txt
Q: Mysql connector cannot be installed When I try to install MySQL python connector, it installs but at the last step it shows that the directory doesn't have those privileges. What do I do? I try it every alternate day. It should be installed. A: Run the Setup as Administrator
Mysql connector cannot be installed
When I try to install MySQL python connector, it installs but at the last step it shows that the directory doesn't have those privileges. What do I do? I try it every alternate day. It should be installed.
[ "Run the Setup as Administrator\n" ]
[ 0 ]
[]
[]
[ "mysql_connector_python", "python" ]
stackoverflow_0074587927_mysql_connector_python_python.txt
Q: Making an advent calendar in Python

I'm trying to create a Python script that prints a different statement on every day of December leading up to Christmas. Below is what I've tried so far as a test, but it doesn't work :/

from datetime import date

today = date.today()

nov_27 = 2022-11-27
nov_28 = 2022-11-28

if today == nov_27:
    print("words")
elif today == nov_28:
    print("no words")

A: Observe that

nov_27 = 2022-11-27
print(nov_27)

gives the output

1984

because 2022-11-27 is treated as arithmetic by Python. Use datetime.date to create a date object instance which you can compare with today, e.g.

from datetime import date
today = date.today()
nov_27 = date(2022, 11, 27)
nov_28 = date(2022, 11, 28)
if today == nov_27:
    print("today is nov_27")
if today == nov_28:
    print("today is nov_28")

gives (at 2022-11-27) the output

today is nov_27

A: Try it like that:

Code:

from datetime import date

today = date.today()

nov_27 = date(2022, 11, 27)
nov_28 = date(2022, 11, 28)

if today == nov_27:
    print("words")
elif today == nov_28:
    print("no words")

Output:

words
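Beyond fixing the comparison, the stated goal (a different message for every December day) is usually easier with a date-keyed lookup than a chain of elifs. A minimal sketch with hypothetical placeholder messages:

from datetime import date

# Hypothetical placeholder texts; replace with the real daily statements.
MESSAGES = {day: f"Message for December {day}" for day in range(1, 25)}

today = date.today()
if today.month == 12 and today.day in MESSAGES:
    print(MESSAGES[today.day])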
Making an advent calendar in Python
I'm trying to create a python script that prints a different statement on every day of December leading up to Christmas. Below is what I've tried so far as a test but it doesn't work :/

from datetime import date

today = date.today()

nov_27 = 2022-11-27
nov_28 = 2022-11-28

if today == nov_27:
    print("words")
elif today == nov_28:
    print("no words")
[ "Observe that\nnov_27 = 2022-11-27\nprint(nov_27)\n\ngives output\n1984\n\nas 2022-11-27 is treated as arithmetic by python, use datetime.date to create date object instance which you can compare with today e.g.\nfrom datetime import date\ntoday = date.today()\nnov_27 = date(2022,11,27)\nnov_28 = date(2022,11,28)\nif today == nov_27:\n print(\"today is nov_27\")\nif today == nov_28:\n print(\"today is nov_28\")\n\ngives (at 2022-11-27) output\ntoday is nov_27\n\n", "Try it like that:\nCode:\nfrom datetime import date\n\ntoday = date.today()\n\nnov_27 = date(2022, 11, 27)\nnov_28 = date(2022, 11, 28)\n\nif today == nov_27:\n print(\"words\")\nelif today == nov_28:\n print(\"no words\")\n\nOutput:\nwords\n\n" ]
[ 0, 0 ]
[]
[]
[ "datetime", "if_statement", "python" ]
stackoverflow_0074588927_datetime_if_statement_python.txt
Q: My program crashes after I hold the w key to move the turtle for too long in the turtle library

The code is as follows:

import turtle

width = 400
length = 300

wn = turtle.Screen()
wn.bgcolor("black")
wn.title("x")

drawer = turtle.Turtle()
drawer.speed(3)
drawer.begin_fill()
drawer.color("blue", "yellow")

def drawern():
    drawer.seth(90)
    drawer.fd(1)

def drawerw():
    drawer.seth(180)
    drawer.fd(1)

def drawers():
    drawer.seth(270)
    drawer.fd(1)

def drawere():
    drawer.seth(0)
    drawer.fd(1)

wn.onkeypress(drawern, "w")
wn.onkeypress(drawerw, "a")
wn.onkeypress(drawers, "s")
wn.onkeypress(drawere, "d")

wn.listen()
wn.mainloop()

It gives a stack overflow error. Does anyone know why this issue persists? It doesn't happen when I let go of the key once in a while.

A: I believe the stack overflow error is due to the repeated key events piling calls onto the stack. To prevent this, you can introduce a debounce; we will name our debounce move. A debounce, in simple terms, is a fail-safe that prevents an event from triggering again and again while keeping the rest of the code running.

Define the variable in global scope:

move = False

As for the function:

def drawern():
    global move  # To let the function know the variable is from global scope

    if not move:  # The key is pressed the first time
        drawer.seth(90)
        move = True  # Sets our debounce to True; it will not activate again until the key is released

    drawer.fd(1)

wn.onkeypress(drawern, "w")

We need another function, bound to the key-release event, to reset our debounce:

def reset_db():
    global move
    move = False  # Resets the debounce, signalling the key has been released

wn.onkeyrelease(reset_db, "w")  # Connects the event

I have demonstrated this for the w key only. You can duplicate it for the rest of the keys too.
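The four near-identical handlers in the question can also be generated from one factory, which makes adding the debounce in a single place easier. A minimal sketch reusing the question's drawer and wn objects:

def make_mover(heading):
    # Returns a handler that turns the turtle to `heading` and steps forward.
    def move():
        drawer.seth(heading)
        drawer.fd(1)
    return move

for key, heading in (("w", 90), ("a", 180), ("s", 270), ("d", 0)):
    wn.onkeypress(make_mover(heading), key)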
My program crashes after I hold the w key to move the turtle for too long in the turtle library
The code is as follows:

import turtle

width = 400
length = 300

wn = turtle.Screen()
wn.bgcolor("black")
wn.title("x")

drawer = turtle.Turtle()
drawer.speed(3)
drawer.begin_fill()
drawer.color("blue", "yellow")

def drawern():
    drawer.seth(90)
    drawer.fd(1)

def drawerw():
    drawer.seth(180)
    drawer.fd(1)

def drawers():
    drawer.seth(270)
    drawer.fd(1)

def drawere():
    drawer.seth(0)
    drawer.fd(1)

wn.onkeypress(drawern, "w")
wn.onkeypress(drawerw, "a")
wn.onkeypress(drawers, "s")
wn.onkeypress(drawere, "d")

wn.listen()
wn.mainloop()

It gives a stack overflow error. Does anyone know why this issue persists? It doesn't happen when I let go of the key once in a while.
[ "I believe the stack overflow error is due to repeated assigning of angle to the stack. To prevent this, you can introduce a debounce. We will name our debounce as move.\nA debounce, in simple terms, is fail-safe to prevent an event for triggering again and again while keeping the rest of the code running.\nDefine the variable in global space:\nmove = False\n\nAs for the function:\ndef drawern():\n global move #To let the function know the variable is from global scope\n\n if not move: #The key is pressed first time\n drawer.seth(90)\n move = True #Sets our debounce to True, it will not activate until the key is released\n \n drawer.fd(1)\n\nwn.onkeypress(drawern, \"w\")\n\nWe need to have another function with an event to reset our debounce:\ndef reset_db():\n global move\n move = False #Resets the debounce, referencing the key has been released\n\nwn.onkeyrelease(reset_db, \"w\") #Connects the event\n\nI have demonstrated for w key here only. You can duplicate it for the rest of the keys too.\n" ]
[ 0 ]
[]
[]
[ "python", "turtle_graphics" ]
stackoverflow_0074588827_python_turtle_graphics.txt
Q: pandas insert new column based on two column header value

I want to add a new column to see exam differences as a percentage value.

import pandas as pd

exam_1 = {
    'Name': ['Jonn', 'Tomas', 'Fran', 'Olga', 'Veronika', 'Stephan'],
    'Mat': [85, 75, 50, 93, 88, 90],
    'Science': [96, 97, 99, 87, 90, 88],
    'Reading': [80, 60, 72, 86, 84, 77],
    'Wiritng': [78, 82, 88, 78, 86, 82],
    'Lang': [77, 79, 77, 72, 90, 92],
}
exam_2 = {
    'Name': ['Jonn', 'Tomas', 'Fran', 'Olga', 'Veronika', 'Stephan'],
    'Mat': [80, 80, 90, 90, 85, 80],
    'Science': [50, 60, 85, 90, 66, 82],
    'Reading': [60, 75, 55, 90, 85, 60],
    'Wiritng': [56, 66, 90, 82, 60, 80],
    'Lang': [80, 78, 76, 90, 77, 66],
}

df_1 = pd.DataFrame(exam_1)
df_2 = pd.DataFrame(exam_2)

# cmp = pd.merge(df_1, df_2, how="outer", on=["Name"], suffixes=("_1", "_2"))
cmp = pd.merge(df_1, df_2, how="outer", on=["Name"],
               suffixes=("_1", "_2")).set_index("Name").sort_index(axis=1).reset_index()
print(cmp)

The output of the above code is like below:

  Name      Lang_1 Lang_2 Mat_1 Mat_2 Reading_1 Reading_2 Science_1 Science_2 Wiritng_1 Wiritng_2
0 Jonn      77     80     85    80    80        60        96        50        78        56
1 Tomas     79     78     75    80    60        75        97        60        82        66
2 Fran      77     76     50    90    72        55        99        85        88        90
3 Olga      72     90     93    90    86        90        87        90        78        82
4 Veronika  90     77     88    85    84        85        90        66        86        60
5 Stephan   92     66     90    80    77        60        88        82        82        80

What I want is to add a new result column after each pair of compared columns. Is there a built-in function for that? The constant section (like Name) can change; maybe 3 columns will be constant in the future, so I want to use a built-in function for reusability. I tried doing it manually, but it is not reusable. What I want exactly is below:

  Name      Lang_1 Lang_2 Lang_Res  Mat_1 Mat_2 Mat_Res  Reading_1 Reading_2 Reading_Res  Science_1 Science_2 Science_Res  Writing_1 Writing_2 Writing_Res
0 Jonn      77     80     Lang_data 85    80    Mat_data 80        60        Reading_data 96        50        Science_data 78        56        Writing_data
1 Tomas     79     78     Lang_data 75    80    Mat_data 60        75        Reading_data 97        60        Science_data 82        66        Writing_data
2 Fran      77     76     Lang_data 50    90    Mat_data 72        55        Reading_data 99        85        Science_data 88        90        Writing_data
3 Olga      72     90     Lang_data 93    90    Mat_data 86        90        Reading_data 87        90        Science_data 78        82        Writing_data
4 Veronika  90     77     Lang_data 88    85    Mat_data 84        85        Reading_data 90        66        Science_data 86        60        Writing_data
5 Stephan   92     66     Lang_data 90    80    Mat_data 77        60        Reading_data 88        82        Science_data 82        80        Writing_data

A: You can start by making a list with every column having the suffix _2 and then use pandas.DataFrame.insert with pandas.Index.get_loc on a list comprehension to insert the result columns where they should go. Try this:

edge_cols = cmp.columns.str.extractall("(\w+_2)")[0].tolist()

[cmp.insert(cmp.columns.get_loc(col)+1, col.split("_")[0]+"_Res", col.split("_")[0]+"_Data") for col in edge_cols]

# Output:
print(cmp.to_string())

  Name      Lang_1 Lang_2 Lang_Res  Mat_1 Mat_2 Mat_Res  Reading_1 Reading_2 Reading_Res  Science_1 Science_2 Science_Res  Wiritng_1 Wiritng_2 Wiritng_Res
0 Jonn      77     80     Lang_Data 85    80    Mat_Data 80        60        Reading_Data 96        50        Science_Data 78        56        Wiritng_Data
1 Tomas     79     78     Lang_Data 75    80    Mat_Data 60        75        Reading_Data 97        60        Science_Data 82        66        Wiritng_Data
2 Fran      77     76     Lang_Data 50    90    Mat_Data 72        55        Reading_Data 99        85        Science_Data 88        90        Wiritng_Data
3 Olga      72     90     Lang_Data 93    90    Mat_Data 86        90        Reading_Data 87        90        Science_Data 78        82        Wiritng_Data
4 Veronika  90     77     Lang_Data 88    85    Mat_Data 84        85        Reading_Data 90        66        Science_Data 86        60        Wiritng_Data
5 Stephan   92     66     Lang_Data 90    80    Mat_Data 77        60        Reading_Data 88        82        Science_Data 82        80        Wiritng_Data

A: If I understand correctly, you're hoping to compute a column from two other columns that are related. What I suggest is this.

Keep your basic column prefixes in a list:

prefixes = ['Lang', 'Mat', 'Reading', ...]

Use these prefixes to automate the lookup and calculation on each column. Let's say we want to store the average of items _1 and _2 for every prefix:

for prefix in prefixes:
    column1 = df[f"{prefix}_1"]
    column2 = df[f"{prefix}_2"]
    averaged = (column1 + column2) / 2
    df.loc[:, f"{prefix}_average"] = averaged

This will add an average column for every category you have a prefix for.
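A small style note on the first answer above: the list comprehension is used only for its side effects, which many linters flag. An explicit loop over the same edge_cols does the same thing more plainly; a minimal sketch:

# Same effect as the list comprehension, written as a plain loop.
for col in edge_cols:
    prefix = col.split("_")[0]
    cmp.insert(cmp.columns.get_loc(col) + 1, prefix + "_Res", prefix + "_Data")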
pandas insert new column based on two column header value
I want to add a new column to see exam differences as a percentage value.

import pandas as pd

exam_1 = {
    'Name': ['Jonn', 'Tomas', 'Fran', 'Olga', 'Veronika', 'Stephan'],
    'Mat': [85, 75, 50, 93, 88, 90],
    'Science': [96, 97, 99, 87, 90, 88],
    'Reading': [80, 60, 72, 86, 84, 77],
    'Wiritng': [78, 82, 88, 78, 86, 82],
    'Lang': [77, 79, 77, 72, 90, 92],
}
exam_2 = {
    'Name': ['Jonn', 'Tomas', 'Fran', 'Olga', 'Veronika', 'Stephan'],
    'Mat': [80, 80, 90, 90, 85, 80],
    'Science': [50, 60, 85, 90, 66, 82],
    'Reading': [60, 75, 55, 90, 85, 60],
    'Wiritng': [56, 66, 90, 82, 60, 80],
    'Lang': [80, 78, 76, 90, 77, 66],
}

df_1 = pd.DataFrame(exam_1)
df_2 = pd.DataFrame(exam_2)

# cmp = pd.merge(df_1, df_2, how="outer", on=["Name"], suffixes=("_1", "_2"))
cmp = pd.merge(df_1, df_2, how="outer", on=["Name"],
               suffixes=("_1", "_2")).set_index("Name").sort_index(axis=1).reset_index()
print(cmp)

The output of the above code is like below:

  Name      Lang_1 Lang_2 Mat_1 Mat_2 Reading_1 Reading_2 Science_1 Science_2 Wiritng_1 Wiritng_2
0 Jonn      77     80     85    80    80        60        96        50        78        56
1 Tomas     79     78     75    80    60        75        97        60        82        66
2 Fran      77     76     50    90    72        55        99        85        88        90
3 Olga      72     90     93    90    86        90        87        90        78        82
4 Veronika  90     77     88    85    84        85        90        66        86        60
5 Stephan   92     66     90    80    77        60        88        82        82        80

What I want is to add a new result column after each pair of compared columns. Is there a built-in function for that? The constant section (like Name) can change; maybe 3 columns will be constant in the future, so I want to use a built-in function for reusability. I tried doing it manually, but it is not reusable. What I want exactly is below:

  Name      Lang_1 Lang_2 Lang_Res  Mat_1 Mat_2 Mat_Res  Reading_1 Reading_2 Reading_Res  Science_1 Science_2 Science_Res  Writing_1 Writing_2 Writing_Res
0 Jonn      77     80     Lang_data 85    80    Mat_data 80        60        Reading_data 96        50        Science_data 78        56        Writing_data
1 Tomas     79     78     Lang_data 75    80    Mat_data 60        75        Reading_data 97        60        Science_data 82        66        Writing_data
2 Fran      77     76     Lang_data 50    90    Mat_data 72        55        Reading_data 99        85        Science_data 88        90        Writing_data
3 Olga      72     90     Lang_data 93    90    Mat_data 86        90        Reading_data 87        90        Science_data 78        82        Writing_data
4 Veronika  90     77     Lang_data 88    85    Mat_data 84        85        Reading_data 90        66        Science_data 86        60        Writing_data
5 Stephan   92     66     Lang_data 90    80    Mat_data 77        60        Reading_data 88        82        Science_data 82        80        Writing_data
[ "You can start by making a list with every column having the suffixe _2 and then use pandas.DataFrame.insert with pandas.Index.get_loc on a list comprehension to insert the result columns where they should.\nTry this :\nedge_cols= cmp.columns.str.extractall(\"(\\w+_2)\")[0].tolist()\n\n[cmp.insert(cmp.columns.get_loc(col)+1, col.split(\"_\")[0]+\"_Res\", col.split(\"_\")[0]+\"_Data\") for col in edge_cols]\n\n# Output :\nprint(cmp.to_string())\n\n Name Lang_1 Lang_2 Lang_Res Mat_1 Mat_2 Mat_Res Reading_1 Reading_2 Reading_Res Science_1 Science_2 Science_Res Wiritng_1 Wiritng_2 Wiritng_Res\n0 Jonn 77 80 Lang_Data 85 80 Mat_Data 80 60 Reading_Data 96 50 Science_Data 78 56 Wiritng_Data\n1 Tomas 79 78 Lang_Data 75 80 Mat_Data 60 75 Reading_Data 97 60 Science_Data 82 66 Wiritng_Data\n2 Fran 77 76 Lang_Data 50 90 Mat_Data 72 55 Reading_Data 99 85 Science_Data 88 90 Wiritng_Data\n3 Olga 72 90 Lang_Data 93 90 Mat_Data 86 90 Reading_Data 87 90 Science_Data 78 82 Wiritng_Data\n4 Veronika 90 77 Lang_Data 88 85 Mat_Data 84 85 Reading_Data 90 66 Science_Data 86 60 Wiritng_Data\n5 Stephan 92 66 Lang_Data 90 80 Mat_Data 77 60 Reading_Data 88 82 Science_Data 82 80 Wiritng_Data\n\n", "If I understand correctly, you're hoping to compute a column from two other columns that are related.\nWhat I suggest is this.\n\nKeep your basic column prefixes in a list.\n\nprefixes = ['Lang', 'Mat', 'Reading', ...]\n\n\nUse these prefixes to automate the lookup and calculation on each column. Let's say we want to store the average of items _1 and _2 for every prefix.\n\nfor prefix in prefixes:\n column1 = df[f\"{prefix}_1\"]\n column2 = df[f\"{prefix}_2\"]\n averaged = (column1 + column2) / 2\n df.loc[:, f\"{prefix}_average\"] = averaged\n\nThis will add an average column for every category you have a prefix for.\n" ]
[ 1, 0 ]
[]
[]
[ "merge", "pandas", "python", "sorting" ]
stackoverflow_0074588293_merge_pandas_python_sorting.txt
Q: How to fix PyQt6-tools installation error? I want to create my small GUI app with PyQt6. I've installed PyQt6, but I also have to install PyQt6-tools. So, when I tried to get it, I got this error: C:\Users\egorl>pip install pyqt6-tools Collecting pyqt6-tools Using cached pyqt6_tools-6.1.0.3.2-py3-none-any.whl (29 kB) Using cached pyqt6_tools-6.0.3.3.2-py3-none-any.whl (29 kB) Using cached pyqt6_tools-6.0.2.3.2-py3-none-any.whl (29 kB) Collecting pyqt6==6.0.2 Using cached PyQt6-6.0.2.tar.gz (940 kB) Installing build dependencies ... done Getting requirements to build wheel ... done Preparing metadata (pyproject.toml) ... error ERROR: Command errored out with exit status 1: command: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpjhw74rau' cwd: C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_1bb69b3deb294f56858c9f93a5b67097 Complete output (29 lines): Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 156, in prepare_metadata_for_build_wheel hook = backend.prepare_metadata_for_build_wheel AttributeError: module 'sipbuild.api' has no attribute 'prepare_metadata_for_build_wheel' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 363, in <module> main() File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 345, in main json_out['return_val'] = hook(**hook_input['kwargs']) File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 160, in prepare_metadata_for_build_wheel whl_basename = backend.build_wheel(metadata_directory, config_settings) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\api.py", line 51, in build_wheel project = AbstractProject.bootstrap('pep517') File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\abstract_project.py", line 83, in bootstrap project.setup(pyproject, tool, tool_description) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\project.py", line 587, in setup self.apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_1bb69b3deb294f56858c9f93a5b67097\project.py", line 60, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\pyqtbuild\project.py", line 70, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\project.py", line 234, in apply_user_defaults self.builder.apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\pyqtbuild\builder.py", line 67, in apply_user_defaults raise PyProjectOptionException('qmake', sipbuild.pyproject.PyProjectOptionException ---------------------------------------- WARNING: Discarding 
https://files.pythonhosted.org/packages/80/2a/70619beeebc44e0d554beaed5c6687e55d8c284d8f0fa0eeb66372ce15d0/PyQt6-6.0.2.tar.gz#sha256=aab1b58e287cd8c91d7f7b8c4f91d68b8980f07fcd2ad6d9adbf0bb75347fb9a (from https://pypi.org/simple/pyqt6/) (requires-python:>=3.6.1). Command errored out with exit status 1: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpjhw74rau' Check the logs for full command output. Collecting pyqt6-tools Using cached pyqt6_tools-6.0.1.3.2-py3-none-any.whl (29 kB) Collecting pyqt6==6.0.1 Using cached PyQt6-6.0.1.tar.gz (940 kB) Installing build dependencies ... done Getting requirements to build wheel ... done Preparing metadata (pyproject.toml) ... error ERROR: Command errored out with exit status 1: command: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpmzbc3d1o' cwd: C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_271acbd829354c02a24c2e3326134647 Complete output (29 lines): Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 156, in prepare_metadata_for_build_wheel hook = backend.prepare_metadata_for_build_wheel AttributeError: module 'sipbuild.api' has no attribute 'prepare_metadata_for_build_wheel' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 363, in <module> main() File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 345, in main json_out['return_val'] = hook(**hook_input['kwargs']) File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 160, in prepare_metadata_for_build_wheel whl_basename = backend.build_wheel(metadata_directory, config_settings) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\api.py", line 51, in build_wheel project = AbstractProject.bootstrap('pep517') File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\abstract_project.py", line 83, in bootstrap project.setup(pyproject, tool, tool_description) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\project.py", line 587, in setup self.apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_271acbd829354c02a24c2e3326134647\project.py", line 61, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\pyqtbuild\project.py", line 70, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\project.py", line 234, in apply_user_defaults self.builder.apply_user_defaults(tool) File 
"C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\pyqtbuild\builder.py", line 67, in apply_user_defaults raise PyProjectOptionException('qmake', sipbuild.pyproject.PyProjectOptionException ---------------------------------------- WARNING: Discarding https://files.pythonhosted.org/packages/f9/29/db2cb373ceac78c829def458fdfe72a637ed991d65f4e026997db5f3f200/PyQt6-6.0.1.tar.gz#sha256=313f0f4bf0158e7ce3b892f1ec16b3378e505e0f2458394c1245eb6807695c0d (from https://pypi.org/simple/pyqt6/) (requires-python:>=3.6.1). Command errored out with exit status 1: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpmzbc3d1o' Check the logs for full command output. ERROR: Cannot install pyqt6-tools==6.0.1.3.2, pyqt6-tools==6.0.2.3.2, pyqt6-tools==6.0.3.3.2 and pyqt6-tools==6.1.0.3.2 because these package versions have conflicting dependencies. The conflict is caused by: pyqt6-tools 6.1.0.3.2 depends on pyqt6-plugins<6.1.0.3 and >=6.1.0.2.2 pyqt6-tools 6.0.3.3.2 depends on pyqt6-plugins<6.0.3.3 and >=6.0.3.2.2 pyqt6-tools 6.0.2.3.2 depends on pyqt6==6.0.2 pyqt6-tools 6.0.1.3.2 depends on pyqt6==6.0.1 To fix this you could try to: 1. loosen the range of package versions you've specified 2. remove package versions to allow pip attempt to solve the dependency conflict ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies I understand, that there is problem with dependencies, but I don't have any experiance in pip, so I don't know how to solve this problem. OS: Windows 10, python 3.10 A: Had the same errors with python 3.10, but didnt want to give up and revert to older version 3.9 Installed the designer seperately from https://build-system.fman.io/qt-designer-download Its a small and quick install. A: As suggested by eyllanesc, using python3.9 worked out for me. <PathToPython3.9>\python.exe -m pip install pyqt6_tools A: For those who use Linux (Ubuntu for me) and don't want to revert to Python 3.9 use this method: Install flatpak if you don't have it sudo apt install flatpak Add flatpak repo: flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo Install Qt Designer: flatpak install flathub io.qt.Designer AFter installation complete: run designer in terminal. You can find more on: https://flathub.org/apps/details/io.qt.Designer
How to fix PyQt6-tools installation error?
I want to create my small GUI app with PyQt6. I've installed PyQt6, but I also have to install PyQt6-tools. So, when I tried to get it, I got this error: C:\Users\egorl>pip install pyqt6-tools Collecting pyqt6-tools Using cached pyqt6_tools-6.1.0.3.2-py3-none-any.whl (29 kB) Using cached pyqt6_tools-6.0.3.3.2-py3-none-any.whl (29 kB) Using cached pyqt6_tools-6.0.2.3.2-py3-none-any.whl (29 kB) Collecting pyqt6==6.0.2 Using cached PyQt6-6.0.2.tar.gz (940 kB) Installing build dependencies ... done Getting requirements to build wheel ... done Preparing metadata (pyproject.toml) ... error ERROR: Command errored out with exit status 1: command: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpjhw74rau' cwd: C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_1bb69b3deb294f56858c9f93a5b67097 Complete output (29 lines): Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 156, in prepare_metadata_for_build_wheel hook = backend.prepare_metadata_for_build_wheel AttributeError: module 'sipbuild.api' has no attribute 'prepare_metadata_for_build_wheel' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 363, in <module> main() File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 345, in main json_out['return_val'] = hook(**hook_input['kwargs']) File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 160, in prepare_metadata_for_build_wheel whl_basename = backend.build_wheel(metadata_directory, config_settings) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\api.py", line 51, in build_wheel project = AbstractProject.bootstrap('pep517') File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\abstract_project.py", line 83, in bootstrap project.setup(pyproject, tool, tool_description) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\project.py", line 587, in setup self.apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_1bb69b3deb294f56858c9f93a5b67097\project.py", line 60, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\pyqtbuild\project.py", line 70, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\sipbuild\project.py", line 234, in apply_user_defaults self.builder.apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-4moc33ew\overlay\Lib\site-packages\pyqtbuild\builder.py", line 67, in apply_user_defaults raise PyProjectOptionException('qmake', sipbuild.pyproject.PyProjectOptionException ---------------------------------------- WARNING: Discarding 
https://files.pythonhosted.org/packages/80/2a/70619beeebc44e0d554beaed5c6687e55d8c284d8f0fa0eeb66372ce15d0/PyQt6-6.0.2.tar.gz#sha256=aab1b58e287cd8c91d7f7b8c4f91d68b8980f07fcd2ad6d9adbf0bb75347fb9a (from https://pypi.org/simple/pyqt6/) (requires-python:>=3.6.1). Command errored out with exit status 1: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpjhw74rau' Check the logs for full command output. Collecting pyqt6-tools Using cached pyqt6_tools-6.0.1.3.2-py3-none-any.whl (29 kB) Collecting pyqt6==6.0.1 Using cached PyQt6-6.0.1.tar.gz (940 kB) Installing build dependencies ... done Getting requirements to build wheel ... done Preparing metadata (pyproject.toml) ... error ERROR: Command errored out with exit status 1: command: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpmzbc3d1o' cwd: C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_271acbd829354c02a24c2e3326134647 Complete output (29 lines): Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 156, in prepare_metadata_for_build_wheel hook = backend.prepare_metadata_for_build_wheel AttributeError: module 'sipbuild.api' has no attribute 'prepare_metadata_for_build_wheel' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 363, in <module> main() File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 345, in main json_out['return_val'] = hook(**hook_input['kwargs']) File "C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py", line 160, in prepare_metadata_for_build_wheel whl_basename = backend.build_wheel(metadata_directory, config_settings) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\api.py", line 51, in build_wheel project = AbstractProject.bootstrap('pep517') File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\abstract_project.py", line 83, in bootstrap project.setup(pyproject, tool, tool_description) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\project.py", line 587, in setup self.apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-install-yqby2el1\pyqt6_271acbd829354c02a24c2e3326134647\project.py", line 61, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\pyqtbuild\project.py", line 70, in apply_user_defaults super().apply_user_defaults(tool) File "C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\sipbuild\project.py", line 234, in apply_user_defaults self.builder.apply_user_defaults(tool) File 
"C:\Users\egorl\AppData\Local\Temp\pip-build-env-hk2owijm\overlay\Lib\site-packages\pyqtbuild\builder.py", line 67, in apply_user_defaults raise PyProjectOptionException('qmake', sipbuild.pyproject.PyProjectOptionException ---------------------------------------- WARNING: Discarding https://files.pythonhosted.org/packages/f9/29/db2cb373ceac78c829def458fdfe72a637ed991d65f4e026997db5f3f200/PyQt6-6.0.1.tar.gz#sha256=313f0f4bf0158e7ce3b892f1ec16b3378e505e0f2458394c1245eb6807695c0d (from https://pypi.org/simple/pyqt6/) (requires-python:>=3.6.1). Command errored out with exit status 1: 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\python.exe' 'C:\Users\egorl\AppData\Local\Programs\Python\Python310\lib\site-packages\pip\_vendor\pep517\in_process\_in_process.py' prepare_metadata_for_build_wheel 'C:\Users\egorl\AppData\Local\Temp\tmpmzbc3d1o' Check the logs for full command output. ERROR: Cannot install pyqt6-tools==6.0.1.3.2, pyqt6-tools==6.0.2.3.2, pyqt6-tools==6.0.3.3.2 and pyqt6-tools==6.1.0.3.2 because these package versions have conflicting dependencies. The conflict is caused by: pyqt6-tools 6.1.0.3.2 depends on pyqt6-plugins<6.1.0.3 and >=6.1.0.2.2 pyqt6-tools 6.0.3.3.2 depends on pyqt6-plugins<6.0.3.3 and >=6.0.3.2.2 pyqt6-tools 6.0.2.3.2 depends on pyqt6==6.0.2 pyqt6-tools 6.0.1.3.2 depends on pyqt6==6.0.1 To fix this you could try to: 1. loosen the range of package versions you've specified 2. remove package versions to allow pip attempt to solve the dependency conflict ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies I understand, that there is problem with dependencies, but I don't have any experiance in pip, so I don't know how to solve this problem. OS: Windows 10, python 3.10
[ "Had the same errors with python 3.10, but didnt want to give up and revert to older version 3.9\nInstalled the designer seperately from https://build-system.fman.io/qt-designer-download\nIts a small and quick install.\n", "As suggested by eyllanesc, using python3.9 worked out for me.\n<PathToPython3.9>\\python.exe -m pip install pyqt6_tools\n", "For those who use Linux (Ubuntu for me) and don't want to revert to Python 3.9 use this method:\n\nInstall flatpak if you don't have it sudo apt install flatpak\nAdd flatpak repo: flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo\nInstall Qt Designer: flatpak install flathub io.qt.Designer\nAFter installation complete: run designer in terminal.\n\nYou can find more on: https://flathub.org/apps/details/io.qt.Designer\n" ]
[ 2, 1, 0 ]
[]
[]
[ "pip", "pyqt", "pyqt6", "python" ]
stackoverflow_0069870103_pip_pyqt_pyqt6_python.txt
Q: How to delete an instantiated object in Python? I am relatively new to object oriented programming and I cannot figure out how to delete an instantiated object in Python.

if self.hit_paddle(pos) == True or self.hit_paddle2(pos) == True:
    bar = bar + 1
    if bar == 1:
        global barbox1
        barbox1 = barfill(canvas)
        barbox1.canvas.move(barbox1.id, 253, 367)
    if bar == 2:
        global barbox2
        barbox2 = barfill(canvas)
        barbox2.canvas.move(barbox5.id, 413, 367)
        bar = 0
        time.sleep(0.2)
        barbox1 = None
        barbox2 = None

That is the code; the main thing I was trying in order to delete the objects was barbox1 = None, but that doesn't seem to work.

A: object.__del__(self) is called when the instance is about to be destroyed.

>>> class Test:
...     def __del__(self):
...         print "deleted"
... 
>>> test = Test()
>>> del test
deleted

The object is not deleted unless all of its references are removed (as quoted by Ethan). Also, from the Python official doc reference:

del x doesn’t directly call x.__del__() — the former decrements the reference count for x by one, and the latter is only called when x‘s reference count reaches zero

A: What do you mean by delete? In Python, removing a reference (or a name) can be done with the del keyword, but if there are other names to the same object, that object will not be deleted.

--> test = 3
--> print(test)
3
--> del test
--> print(test)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
NameError: name 'test' is not defined

compared to:

--> test = 5
--> other = test
--> other is test  # check that both names refer to the exact same object
True
--> del test  # gets rid of test, but the object is still referenced by other
--> print(other)
5

A: Simple answer: del <variable>

Minimal Reproducible Example

# Create variable
num = 5
num
# 5

# Delete it
del num

# Confirm it's gone
num
# Traceback (most recent call last):
#   File "<stdin>", line 1, in <module>
# NameError: name 'num' is not defined
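For a runnable illustration of the reference-counting behaviour the answers describe, here is a small sketch (it assumes CPython, where refcounting destroys the object the moment the last strong reference disappears; weakref lets us observe that moment):

import weakref

class Ball:
    pass

b = Ball()
alias = b                # a second name for the same object
probe = weakref.ref(b)   # a weak reference does not keep the object alive

del b                    # removes one name only
print(probe() is alias)  # True - the object still exists via 'alias'

del alias                # last strong reference gone
print(probe())           # None - the object has now been destroyed

Assigning barbox1 = None, as in the question, drops a reference in exactly the same way del barbox1 does; the object only dies once no name refers to it any more.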
How to delete an instantiated object in Python?
I am relatively new to object oriented programming and I cannot figure out how to delete an instantiated object in Python. if self.hit_paddle(pos) == True or self.hit_paddle2(pos) == True: bar = bar + 1 if bar == 1: global barbox1 barbox1 = barfill(canvas) barbox1.canvas.move(barbox1.id, 253, 367) if bar == 2: global barbox2 barbox2 = barfill(canvas) barbox2.canvas.move(barbox5.id, 413, 367) bar = 0 time.sleep(0.2) barbox1 = None barbox2 = None That is the code, the main thing I was trying in order to delete the objects was barbox1 = None, but that doesn't seem to work.
[ "object.__del__(self) is called when the instance is about to be destroyed.\n>>> class Test:\n... def __del__(self):\n... print \"deleted\"\n... \n>>> test = Test()\n>>> del test\ndeleted\n\nObject is not deleted unless all of its references are removed(As quoted by ethan)\nAlso, From Python official doc reference:\n\ndel x doesn’t directly call x.__del__() — the former decrements the\nreference count for x by one, and the latter is only called when x‘s\nreference count reaches zero\n\n", "What do you mean by delete? In Python, removing a reference (or a name) can be done with the del keyword, but if there are other names to the same object that object will not be deleted.\n--> test = 3\n--> print(test)\n3\n--> del test\n--> print(test)\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nNameError: name 'test' is not defined\n\ncompared to:\n--> test = 5\n--> other is test # check that both name refer to the exact same object\nTrue\n--> del test # gets rid of test, but the object is still referenced by other\n--> print(other)\n5\n\n", "Simple answer: del <variable>\nMinimal Reproducible Example\n# Create variable\nnum = 5\nnum\n# 5\n\n# Delete it\ndel num\n\n# Confirm it's gone\nnum\n# Traceback (most recent call last):\n# File \"<stdin>\", line 1, in <module>\n# NameError: name 'num' is not defined\n\n" ]
[ 66, 26, 0 ]
[]
[]
[ "object", "python", "variables" ]
stackoverflow_0021514631_object_python_variables.txt
Q: How to create a repeating loop in python with sympy, without changing one variable in the loop

So basically I have encountered a problem where I have made my loop, but since one of the variables is defined before the actual assignment, the code stops working. the code. Another thing is that I'm working in Spyder, and I don't know why, but if I try to write the program so that it collects variables initially (which is essential for this purpose), the program encounters an internal issue. Any ideas on how to ask for user input in Spyder? Thanks to everyone in advance. Basically, what I want this to do is to differentiate functions, and then with these derivatives create a Maclaurin series. When the actual error ((r-j)/r) is smaller than the stopping variable s, I want the program to stop and display the result, but I don't know what is wrong. The second thing I want to achieve is to learn how to get input from the user in the program.

A: So, here is, in text form, your code (sorry, the plagiarism argument is not valid around here):

q = sympy.Function('q')
q = sympy.diff(f)

def main():
    a = sympy.Function('a')
    a = sympy.diff(q)
    j = sympy.function
    j = 1+(1/sympy.factorial(count))*q
    r = sympy.Function('r')
    r = j+(1/sympy.factorial(count+1))*a
    if ((r-j)/r)>s:
        count = count + 1
        q = sympy.diff(a)
        j = r+(1/sympy.factorial(count))*q
        r = j+(1/sympy.factorial(count+1))*a
        main()
    else:
        print(f"Answer{r}")

In the middle of an obviously very confused code, and an even more confused situation with that "plagiarism" stuff (don't worry: if you have working code at the end of your effort, there is no way it will look similar enough to your original to be detected as plagiarism, because I've posted it here :D), there is actually a good question, and I remember that a while ago I had to scratch my head to understand what was happening in a similar situation.

The problem you have here is that you can't compute sympy.diff(q) in the main function before assigning something to q in that same function later.

Because of the line q = sympy.diff(a) that you have later, q has to be a local variable (unless you say otherwise, see later). That's the default behavior in Python: a variable that you assign in a function is a local variable.

But because you are reading this variable before that, in the previous line (the one that triggers the error) a = sympy.diff(q), Python has a problem: the local variable q doesn't exist yet.

Without the future q = sympy.diff(a), q would have been "read-only" inside main, and Python would have done what you probably expect: read the global variable q.

So, your real problem is not really that line. Your real problem is the line q = sympy.diff(a). Because even without the current error, that line would not do what you expect it to do. You probably expect it to alter q so that in the subsequent recursive call of main, q has this new value from now on. Not to create a local value q, and have the next call of main start over with the global value of q.

You have several options here.

One would be to declare q as global. Just add the line global q at the beginning of your main, and q is now the global variable.

A cleaner way would be to pass q as an argument to main.

Note that you'll probably have the same problem with count. You should also either declare it as global, or, more neatly, pass it to main.

Note also that this would solve only the syntax errors (well, not even all of them: j = sympy.function is meaningless, for example). But there are many other errors here. The least serious being your strange habit of creating a Function and immediately discarding it by overwriting it with another function or expression.
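Building on the "pass state as arguments" advice, here is a minimal iterative sketch of the Maclaurin loop that sidesteps the local/global trap entirely by keeping every name local; the target function, the tolerance s and the term cap are illustrative assumptions, not the asker's exact values:

import sympy

x = sympy.Symbol('x')

def maclaurin(f, s=0.001, max_terms=20):
    term = f                   # will hold the n-th derivative of f
    series = f.subs(x, 0)      # 0th-order partial sum
    for n in range(1, max_terms):
        term = sympy.diff(term, x)
        new = series + term.subs(x, 0) / sympy.factorial(n) * x**n
        # the question's (r - j) / r stopping test, evaluated at x = 1
        r, j = new.subs(x, 1), series.subs(x, 1)
        if r != 0 and abs((r - j) / r) < s:
            return new
        series = new
    return series

print(maclaurin(sympy.exp(x)))   # 1 + x + x**2/2 + ... up to convergence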
How to create a repeating loop in python with sympy, without changing one variable in the loop
so basically I have encountered a problem where I have made my loop, but since one of the variables is defined before the actual assignment, the code stops working. the code. Another thing is that I'm working in Spyder, and I don't know why, but if I try to code so that the program collect variables initially (which is essential for this purpose), the program encounters an internal issue. Any ideas on how to ask for user input in Spyder? Thanks to everyone in advance Basically, what I want this to do is to differentiate functions, and the with these derivatives create a maclaurin series. When the actual error ((r-j)/r) is smaller than the stopping variable s, I want the program to stop and display the result, but I don't know what is wrong. The second thing I want to achieve is to get to know how to gain input from the user in the program.
[ "So, here is, in text form, your code (sorry, plagiarism argument is not valid around here)\nq = sympy.Function('q')\nq = sympy.diff(f)\n\ndef main():\n a = sympy.Function('a')\n a = sympy.diff(q)\n j = sympy.function\n j = 1+(1/sympy.factorial(count))*q\n r = sympy.Function('r')\n r = j+(1/sympy.factorial(count+1))*a\n if ((r-j)/r)>s:\n count = count + 1\n q = sympy.diff(a)\n j = r+(1/sympy.factorial(count))*q\n r = j+(1/sympy.factorial(count+1))*a\n main()\n else:\n print(f\"Answer{r}\")\n\nIn the middle of an obviously very confused code, and even more confused situation with that \"plagiarism\" stuff (don't worry, if you have a working code at the end of your effort, there is no way it will look similar enough to your original to be detected as plagiarism because I've posted it here:D) there is actually a good question, and I remember that I while ago I had to scratch my head to understand what was happening in a similar situation.\nThe problem you have here, is that you can't compute sympy.diff(q) in main function, before assigning something to q in that same function later.\nBecause of the line q = sympy.diff(a) that you have later, q has to be a local variable (unless you say otherwise, see later). That's default behavior in python: a variable that you assign in a function is a local variable.\nBut because you are reading this variable before, in the previous line (the one that triggers the error) a = sympy.diff(q), then python has a problem: the local variable a doesn't exist yet.\nWithout the future q = sympy.diff(a), q would have been \"read-only\" inside main, and python would have done what you probably expect: read the global variable q.\nSo, your real problem is not really that line. Your real problem is the line q = sympy.diff(a).\nBecause even without the current error, that line would not do what you expect it to do. You probably expect it to alter q so that in the subsequent recursive call of main, q has this new value for now on. Not to create a local value q, and have the next call of main start over with the global value of q.\nYou have several options here.\nOne would be to declare q has global. Just add line global q at the beginning of your main, and q is now the global variable.\nA cleaner way would be to pass q as an argument to main.\nNote that you'll have probably the same problem with count. You should also either declare it as global, or, neater way, pass it to main.\nNote also that this would solve only syntax errors (well, not even all. j=sympy.function is meaningless for example). But there are many other errors here. The least serious being your strange habit to create a Function and immediately delete it by overwriting it by another function or expression.\n" ]
[ 0 ]
[]
[]
[ "python", "spyder", "sympy" ]
stackoverflow_0074586228_python_spyder_sympy.txt
Q: Pandas numeric column considered as string if NaN is inside

I am starting to learn Python and I have an issue with a pandas data frame. In R, even if numeric columns have NaN values, R manages to infer the correct type of data in each column. In pandas this does not seem to be the case:

data = {
    "calories": ["NA", 380, 390],
    "duration": [50, 40, 45]
}

df = pd.DataFrame(data)
df.dtypes

How can I automatically detect the right type of data in each column? Thanks in advance

A: "NA" is a string, use np.nan or float('nan'):

data = {
    "calories": [float('nan'), 380, 390],
    "duration": [50, 40, 45]
}

df = pd.DataFrame(data)
print(df.dtypes)

calories    float64
duration      int64
dtype: object

Or:

import numpy as np
data = {
    "calories": [np.nan, 380, 390],
    "duration": [50, 40, 45]
}
df = pd.DataFrame(data)

Note that if you use read_csv, pandas can infer NA values (by default, '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null').
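If the frame already exists with the "NA" string in it, pd.to_numeric can coerce the column after the fact; a short sketch using the question's own data:

import pandas as pd

data = {
    "calories": ["NA", 380, 390],
    "duration": [50, 40, 45]
}
df = pd.DataFrame(data)

# Anything that cannot be parsed as a number (here the "NA" string)
# becomes a real NaN, and the column dtype becomes float64.
df["calories"] = pd.to_numeric(df["calories"], errors="coerce")
print(df.dtypes)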
Pandas numeric column considered as string if NaN is inside
I am starting to learn Python and I have an issue with pandas data frame. In R even if numeric columns have NaN values R manages to define the correct type of data in each column. In Pandas this does not seem to be the case: data = { "calories": ["NA", 380, 390], "duration": [50, 40, 45] } df = pd.DataFrame(data) df.dtypes How can I manage to automatically detect the right type of data in each column? Thanks in advance
[ "\"NA\" is a string, use np.nan or float('nan'):\ndata = {\n\"calories\": [float('nan'), 380, 390],\n\"duration\": [50, 40, 45]\n}\n\ndf = pd.DataFrame(data)\nprint(df.dtypes)\n\ncalories float64\nduration int64\ndtype: object\n\nOr:\nimport numpy as np\ndata = {\n\"calories\": [np.nan, 380, 390],\n\"duration\": [50, 40, 45]\n}\ndf = pd.DataFrame(data)\n\nNote that if you use read_csv, pandas can infer NA values (by default, '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null').\n" ]
[ 2 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074589088_pandas_python.txt
Q: entering int or float using input()

I need to enter different values into input(), sometimes integer, sometimes float. My code is:

number1 = input()
number2 = input()
Formula = (number1 + 20) * (10 + number2)

I know that input() returns a string, which is why I need to convert the numbers to float or int. But how can I enter a float or integer without using number1 = int(input()), for example? Because my input values are both floats and integers, I need code that accepts both somehow.

A: If your inputs are "sometimes" ints and "sometimes" floats then just wrap each input in a float(). You could make something more complex, but why would you?

A: You could check for the presence of a decimal point in your string to decide if you want to coerce it into a float or an int.

number = input()

if '.' in number:
    number = float(number)
else:
    number = int(float(number))

A: You can always just use float:

number1 = float(input())

If you'd like to cast any of your results to an integer, you can always easily do this:

int_res = int(res)  # res was float; int_res will be an integer

A: 
number1 = float(input('enter first value'))

number2 = float(input('enter second value'))

Formula = print((number1 + 20) * (10 + number2))
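A small helper combining the answers above keeps each value an int when it can be one and falls back to float otherwise; parse_number is a name introduced here purely for illustration:

def parse_number(text):
    try:
        return int(text)       # "42" -> 42
    except ValueError:
        return float(text)     # "3.5" -> 3.5 (still raises on non-numeric input)

number1 = parse_number(input())
number2 = parse_number(input())
print((number1 + 20) * (10 + number2))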
entering int or float using input()
I need to enter different values to input(), sometimes integer sometime float. My code is number1 = input() number2 = input() Formula = (number1 + 20) * (10 + number2) I know that input() returns a string which is why I need to convert the numbers to float or int. But how can I enter a float or integer without using number1 = int(input()) for example? because my input values are both floats and integers so I need a code that accepts both somehow.
[ "If your inputs are \"sometimes\" ints and \"sometimes\" floats then just wrap each input in a float(). You could make something more complex, but why would you?\n", "You could check for the presence of a decimal point in your string to decide if you want to coerce it into a float or an int.\nnumber = input()\n\nif '.' in number:\n number = float(number)\nelse:\n number = int(float(number))\n\n", "You can always just use float:\nnumber1 = float(input())\n\nIf you'd like to cast any of your result to integer you always can easily do this\nint_res = int(res) # res was float; int_res will be an integer\n\n", "number1 = float (input(‘ enter first value ‘) )\n\nnumber2 = float (input(‘ enter second value ‘) )\n\nFormula = print ( (number1 + 20) * (10 + number2) )\n\n" ]
[ 2, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0071789343_python.txt
Q: Launch default editor (like 'webbrowser' module) Is there a simple way to launch the system's default editor from a Python command-line tool, like the webbrowser module?

A: Under Windows you can simply "execute" the file and the default action will be taken:

os.system('c:/tmp/sample.txt')

For this example a default editor will spawn. Under UNIX there is an environment variable called EDITOR, so you need to use something like:

os.system('%s %s' % (os.getenv('EDITOR'), filename))

A: The modern Linux way to open a file is using xdg-open; however it does not guarantee that a text editor will open the file. Using $EDITOR is appropriate if your program is command-line oriented (and your users are too).

A: If you need to open a file for editing, you could be interested in this question.

A: You can actually use the webbrowser module to do this. All the answers given so far for both this and the linked question are just the same things the webbrowser module does under the hood. The ONLY difference is if they have $EDITOR set, which is rare. So perhaps a better flow would be:

editor = os.getenv('EDITOR')
if editor:
    os.system(editor + ' ' + filename)
else:
    webbrowser.open(filename)

OK, now that I've told you that, I should let you know that the webbrowser module does state that it does not support this case.

Note that on some platforms, trying to open a filename using this function, may work and start the operating system's associated program. However, this is neither supported nor portable.

So if it doesn't work, don't submit a bug report. But for most uses, it should work.

A: As the committer of the python Y-Principle generator I had the need to check the generated files against the original and wanted to call a diff-capable editor from Python. My search pointed me to this question, and the most upvoted answer had some comments and follow-up issues that I'd also want to address:

make sure the EDITOR env variable is used if set
make sure things work on MacOS (defaulting to Atom in my case)
make sure a text can be opened in a temporary file
make sure that if a URL is opened the HTML text is extracted by default

You'll find the solution at editor.py and the test case at test_editor.py in my project's repository.

Test Code

'''
Created on 2022-11-27
@author: wf
'''
from tests.basetest import Basetest
from yprinciple.editor import Editor

class TestEditor(Basetest):
    """
    test opening an editor
    """

    def test_Editor(self):
        """
        test the editor
        """
        if not self.inPublicCI():
            # open this source file
            Editor.open(__file__)
            Editor.open("https://stackoverflow.com/questions/1442841/lauch-default-editor-like-webbrowser-module")
            Editor.open_tmp_text("A sample text to be opened in a temporary file")

Screenshot

Source Code

'''
Created on 2022-11-27

@author: wf
'''
from sys import platform
import os
import tempfile
from urllib.request import urlopen
from bs4 import BeautifulSoup

class Editor:
    """
    helper class to open the system defined editor

    see https://stackoverflow.com/questions/1442841/lauch-default-editor-like-webbrowser-module
    """

    @classmethod
    def extract_text(cls, html_text: str) -> str:
        """
        extract the text from the given html_text

        Args:
            html_text(str): the input for the html text

        Returns:
            str: the plain text
        """
        soup = BeautifulSoup(html_text, features="html.parser")

        # kill all script and style elements
        for script in soup(["script", "style"]):
            script.extract()  # rip it out

        # get text
        text = soup.get_text()

        # break into lines and remove leading and trailing space on each
        lines = (line.strip() for line in text.splitlines())
        # break multi-headlines into a line each
        chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
        # drop blank lines
        text = '\n'.join(chunk for chunk in chunks if chunk)
        return text

    @classmethod
    def open(cls, file_source: str, extract_text: bool = True) -> str:
        """
        open an editor for the given file_source

        Args:
            file_source(str): the path to the file
            extract_text(bool): if True extract the text from html sources

        Returns:
            str: the path to the file e.g. a temporary file if the file_source points to an url
        """
        # handle urls
        # https://stackoverflow.com/a/45886824/1497139
        if file_source.startswith("http"):
            url_source = urlopen(file_source)
            # https://stackoverflow.com/a/19156107/1497139
            charset = url_source.headers.get_content_charset()
            # if charset fails here you might want to set it to utf-8 as a default!
            text = url_source.read().decode(charset)
            if extract_text:
                # https://stackoverflow.com/a/24618186/1497139
                text = cls.extract_text(text)

            return cls.open_tmp_text(text)

        editor_cmd = None
        editor_env = os.getenv('EDITOR')
        if editor_env:
            editor_cmd = editor_env
        if platform == "darwin":
            if not editor_env:
                # https://stackoverflow.com/questions/22390709/how-can-i-open-the-atom-editor-from-the-command-line-in-os-x
                editor_cmd = "/usr/local/bin/atom"
        os_cmd = f"{editor_cmd} {file_source}"
        os.system(os_cmd)
        return file_source

    @classmethod
    def open_tmp_text(cls, text: str) -> str:
        """
        open an editor for the given text in a newly created temporary file

        Args:
            text(str): the text to write to a temporary file and then open

        Returns:
            str: the path to the temp file
        """
        # see https://stackoverflow.com/a/8577226/1497139
        # https://stackoverflow.com/a/3924253/1497139
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            with open(tmp.name, "w") as tmp_file:
                tmp_file.write(text)
                tmp_file.close()
        return cls.open(tmp.name)

Stackoverflow answers applied

https://stackoverflow.com/a/45886824/1497139
https://stackoverflow.com/a/19156107/1497139
https://stackoverflow.com/a/24618186/1497139
How can I open the Atom editor from the command line in OS X?
https://stackoverflow.com/a/8577226/1497139
https://stackoverflow.com/a/3924253/1497139
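Folding the answers above into one function gives a compact, best-effort sketch; the per-platform fallbacks are assumptions about typical installs, not guarantees:

import os
import subprocess
import sys

def open_in_editor(path):
    editor = os.getenv("EDITOR")
    if editor:
        subprocess.run([editor, path])        # honour $EDITOR first
    elif sys.platform == "win32":
        os.startfile(path)                    # Windows default file association
    elif sys.platform == "darwin":
        subprocess.run(["open", path])        # macOS default handler
    else:
        subprocess.run(["xdg-open", path])    # freedesktop default handler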
Launch default editor (like 'webbrowser' module)
Is there a simple way to launch the system's default editor from a Python command-line tool, like the webbrowser module?
[ "Under windows you can simply \"execute\" the file and the default action will be taken: \nos.system('c:/tmp/sample.txt')\nFor this example a default editor will spawn. Under UNIX there is an environment variable called EDITOR, so you need to use something like: \nos.system('%s %s' % (os.getenv('EDITOR'), filename))\n", "The modern Linux way to open a file is using xdg-open; however it does not guarantee that a text editor will open the file. Using $EDITOR is appropriate if your program is command-line oriented (and your users).\n", "If you need to open a file for editing, you could be interested in this question.\n", "You can actually use the webbrowser module to do this. All the answers given so far for both this and the linked question are just the same things the webbrowser module does behind the hood. \nThe ONLY difference is if they have $EDITOR set, which is rare. So perhaps a better flow would be:\neditor = os.getenv('EDITOR')\nif editor:\n os.system(editor + ' ' + filename)\nelse:\n webbrowser.open(filename)\n\nOK, now that I’ve told you that, I should let you know that the webbrowser module does state that it does not support this case.\n\nNote that on some platforms, trying to open a filename using this function, may work and start the operating system's associated program. However, this is neither supported nor portable.\n\nSo if it doesn't work, don’t submit a bug report. But for most uses, it should work.\n", "As the committer of the python Y-Principle generator i had the need to check the generated files against the original and wanted to call a diff-capable editor from python.\nMy search pointed me to this questions and the most upvoted answer had some comments and follow-up issue that i'd also want to address:\n\nmake sure the EDITOR env variable is used if set\nmake sure things work on MacOS (defaulting to Atom in my case)\nmake sure a text can be opened in a temporary file\nmake sure that if an url is opened the html text is extracted by default\n\nYou'll find the solution at editory.py and the test case at test_editor.py in my project's repository.\nTest Code\n'''\nCreated on 2022-11-27\n@author: wf\n'''\nfrom tests.basetest import Basetest\nfrom yprinciple.editor import Editor\n\nclass TestEditor(Basetest):\n \"\"\"\n test opening an editor\n \"\"\"\n \n def test_Editor(self):\n \"\"\"\n test the editor\n \"\"\"\n if not self.inPublicCI():\n # open this source file\n Editor.open(__file__)\n Editor.open(\"https://stackoverflow.com/questions/1442841/lauch-default-editor-like-webbrowser-module\")\n Editor.open_tmp_text(\"A sample text to be opened in a temporary file\")\n\nScreenshot\n\nSource Code\n'''\nCreated on 2022-11-27\n\n@author: wf\n'''\nfrom sys import platform\nimport os\nimport tempfile\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nclass Editor:\n \"\"\"\n helper class to open the system defined editor\n \n see https://stackoverflow.com/questions/1442841/lauch-default-editor-like-webbrowser-module\n \"\"\"\n \n @classmethod\n def extract_text(cls,html_text:str)->str:\n \"\"\"\n extract the text from the given html_text\n \n Args:\n html_text(str): the input for the html text\n \n Returns:\n str: the plain text \n \"\"\"\n soup = BeautifulSoup(html_text, features=\"html.parser\")\n\n # kill all script and style elements\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n \n # get text\n text = soup.get_text()\n \n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for 
line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text = '\\n'.join(chunk for chunk in chunks if chunk)\n return text\n\n @classmethod\n def open(cls,file_source:str,extract_text:bool=True)->str:\n \"\"\"\n open an editor for the given file_source\n \n Args:\n file_source(str): the path to the file\n extract_text(bool): if True extract the text from html sources\n \n Returns:\n str: the path to the file e.g. a temporary file if the file_source points to an url\n \"\"\"\n # handle urls\n # https://stackoverflow.com/a/45886824/1497139\n if file_source.startswith(\"http\"):\n url_source = urlopen(file_source)\n #https://stackoverflow.com/a/19156107/1497139\n charset=url_source.headers.get_content_charset()\n # if charset fails here you might want to set it to utf-8 as a default!\n text = url_source.read().decode(charset)\n if extract_text:\n # https://stackoverflow.com/a/24618186/1497139\n text=cls.extract_text(text)\n \n return cls.open_tmp_text(text)\n \n editor_cmd=None\n editor_env=os.getenv('EDITOR')\n if editor_env:\n editor_cmd=editor_env\n if platform == \"darwin\":\n if not editor_env:\n # https://stackoverflow.com/questions/22390709/how-can-i-open-the-atom-editor-from-the-command-line-in-os-x\n editor_cmd=\"/usr/local/bin/atom\"\n os_cmd=f\"{editor_cmd} {file_source}\"\n os.system(os_cmd)\n return file_source\n \n @classmethod\n def open_tmp_text(cls,text:str)->str:\n \"\"\"\n open an editor for the given text in a newly created temporary file\n \n Args:\n text(str): the text to write to a temporary file and then open\n \n Returns:\n str: the path to the temp file\n \"\"\"\n # see https://stackoverflow.com/a/8577226/1497139\n # https://stackoverflow.com/a/3924253/1497139\n with tempfile.NamedTemporaryFile(delete=False) as tmp:\n with open(tmp.name,\"w\") as tmp_file:\n tmp_file.write(text)\n tmp_file.close()\n return cls.open(tmp.name)\n\nStackoverflow answers applied\n\nhttps://stackoverflow.com/a/45886824/1497139\nhttps://stackoverflow.com/a/19156107/1497139\nhttps://stackoverflow.com/a/24618186/1497139\nHow can I open the Atom editor from the command line in OS X?\nhttps://stackoverflow.com/a/8577226/1497139\nhttps://stackoverflow.com/a/3924253/1497139\n\n" ]
[ 18, 4, 3, 2, 0 ]
[]
[]
[ "command_line", "editor", "python" ]
stackoverflow_0001442841_command_line_editor_python.txt
Q: How to call a function in a Django template? I have a function in my views.py file that connects to a mail server and then appends the email addresses of the recipients to my Django model. The script works well. In Django, I'm displaying the model in a table, and I'd like to include a button that says "Get Emails", runs this function, and then reloads the page with the new data in the model / table.

This is my views.py:

class SubscriberListView(LoginRequiredMixin, SingleTableView):
    model = EmailMarketing
    table_class = EmailMarketingTable
    template_name = 'marketing/subscribers.html'

# Get emails from email server
# Connection settings
HOST = 'xXxxxxXxXx'
USERNAME = 'xXxxxxXxXx'
PASSWORD = "xXxxxxXxXx"

m = imaplib.IMAP4_SSL(HOST, 993)
m.login(USERNAME, PASSWORD)
m.select('INBOX')

def get_emails():
    result, data = m.uid('search', None, "ALL")
    if result == 'OK':
        for num in data[0].split():
            result, data = m.uid('fetch', num, '(RFC822)')
            if result == 'OK':
                email_message_raw = email.message_from_bytes(data[0][1])
                email_from = str(make_header(decode_header(email_message_raw['From'])))
                email_addr = email_from.replace('<', '>').split('>')
                if len(email_addr) > 1:
                    new_entry = EmailMarketing(email_address=email_addr[1])
                    new_entry.save()
                else:
                    new_entry = EmailMarketing(email_address=email_addr[0])
                    new_entry.save()
    # Close server connection
    m.close()
    m.logout()

My main urls.py:

urlpatterns = [
    path('marketing/', SubscriberListView.as_view(), name='marketing')
]

And this is what I tried in the app's urls.py:

from django.urls import path
from django.contrib.auth import views as auth_views
from . import views

urlpatterns = [
    path('', views.marketing, name='marketing'),
    path('/getemails', views.get_emails, name='getemails'),
]

And then in my subscribers.html I tried this:

<button type="submit" onclick="location.href='{% url 'getemails' %}'" class="btn btn-primary">Get Emails</button>

But I get an error: Reverse for 'getemails' not found. 'getemails' is not a valid view function or pattern name. How can I call this function defined in my views.py inside my template?

A: Django does not use app-specific urls.py files by default. You must include them in your main urls.py, for example:

from django.urls import include, path

urlpatterns = [
    path('marketing/', SubscriberListView.as_view(), name='marketing'),
    path('myapp/', include('myapp.urls')),
    ...
]

Assuming that your app name is myapp.

A: It should be {% url 'getemails' %}, so:

<button type="submit" onclick="location.href='{% url 'getemails' %}'" class="btn btn-primary">Get Emails</button>

Note: Always put / at the end of every route, so urls should be like path('getemails/'...).

Edit: It seems that you are using a <form> tag, so simply define it in the action attribute as:

<form method='POST' action="{% url 'getemails' %}">
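Once the URL is wired up, the view behind it still needs to accept a request and send the browser back to the table. A hedged sketch, with get_emails_view as a hypothetical wrapper around the question's get_emails and 'marketing' assumed to be the URL name from the question:

from django.shortcuts import redirect

def get_emails_view(request):      # hypothetical wrapper; the name is illustrative
    get_emails()                   # the mail-fetching function from the question
    return redirect('marketing')   # reload the page so the new rows show up

The urlpattern would then point at views.get_emails_view instead of the bare get_emails function, which does not take a request argument.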
How to call a function in a Django template?
I have a function on my views.py file that connects to a mail server and then appends to my Django model the email addresses of the recipients. The script works good. In Django, I'm displaying the model with a table, and I'd like to include a button that says Get Emails and runs this function and it then reloads the page with the new data in the model / table. This is my views.py: class SubscriberListView(LoginRequiredMixin, SingleTableView): model = EmailMarketing table_class = EmailMarketingTable template_name = 'marketing/subscribers.html' # Get emails from email server # Connection settings HOST = 'xXxxxxXxXx' USERNAME = 'xXxxxxXxXx' PASSWORD = "xXxxxxXxXx" m = imaplib.IMAP4_SSL(HOST, 993) m.login(USERNAME, PASSWORD) m.select('INBOX') def get_emails(): result, data = m.uid('search', None, "ALL") if result == 'OK': for num in data[0].split(): result, data = m.uid('fetch', num, '(RFC822)') if result == 'OK': email_message_raw = email.message_from_bytes(data[0][1]) email_from = str(make_header(decode_header(email_message_raw['From']))) email_addr = email_from.replace('<', '>').split('>') if len(email_addr) > 1: new_entry = EmailMarketing(email_address=email_addr[1]) new_entry.save() else: new_entry = EmailMarketing(email_address=email_addr[0]) new_entry.save() # Close server connection m.close() m.logout() My main urls.py: urlpatterns = [ path('marketing/', SubscriberListView.as_view(), name='marketing') ] And this is what I tried on the app urls.py: from django.urls import path from django.contrib.auth import views as auth_views from . import views urlpatterns = [ path('', views.marketing, name='marketing'), path('/getemails', views.get_emails, name='getemails'), ] And then on my subscribers.html I tried this: <button type="submit" onclick="location.href='{% url 'getemails' %}'" class="btn btn-primary">Get Emails</button> But I get an error: Reverse for 'getemails' not found. 'getemails' is not a valid view function or pattern name. How can I call this function defined on my views.py inside my template?
[ "Django does not use app-specific urls.py files by default. You must include them in your main urls.py, for example:\nfrom django.urls import include, path\n\nurlpatterns = [\n path('marketing/', SubscriberListView.as_view(), name='marketing'),\n path('myapp/', include('myapp.urls')),\n ...\n]\n\nAssuming that your app name is myapp.\n", "It should be {% url 'getemails' %} so:\n <button type=\"submit\" onclick=\"location.href='{% url 'getemails' %}'\" class=\"btn btn-primary\">Get Emails</button>\n\n\nNote: Always give / at the end of every route so urls should be like path('getemails/'...).\n\nEdit:\nIt seems that you are using <form> tag, so simply define in action attribute as:\n<form method='POST' action=\"{% url 'getemails' %}\">\n\n" ]
[ 1, 0 ]
[]
[]
[ "django", "function", "python", "templates" ]
stackoverflow_0074588972_django_function_python_templates.txt
Q: Why after TfidfVectorizer X has 24 features, but PassiveAggressiveClassifier is expecting 113905 features as input

I'm trying to use TfidfVectorizer on an array with one example and use it for model prediction, but after TfidfVectorizer I get:

<1x24 sparse matrix of type '<class 'numpy.float64'>' with 24 stored elements in Compressed Sparse Row format>

instead of 2x113905 like my x_test or x_train. That's what I did:

labels=df.Label #classify labels
x_train,x_test,y_train,y_test=train_test_split(df['Text'], labels, test_size=0.2, random_state=7) #split the data
print(len(x_train),"\t\t",len(x_test),"\t\t",len(y_train),"\t\t",len(y_test))

my_stopwords_list = stopwords.words('ukrainian')

test = ['Жінка пропагувала "руській мір" на весь вагон: скандал в електричці на Київщині. Інцидент стався в у приміській електричці сполученням "Святошин" - "Тетерів" у понеділок ввечері. Небайдужі пасажири рішуче відреагували й "висадили" жінку на найближчій станції.']
test = pd.Series(test,name="Text")

#DataFlair - Initialize a TfidfVectorizer
tfidf_vectorizer=TfidfVectorizer(stop_words=my_stopwords_list,smooth_idf=False)
#DataFlair - Fit and transform train set, transform test set
tfidf_train=tfidf_vectorizer.fit_transform(x_train.values.astype('U'))
tfidf_test=tfidf_vectorizer.transform(x_test.values.astype('U'))
tfidf_train1=tfidf_vectorizer.fit_transform(test.values.astype('U'))

But when I look at tfidf_test and tfidf_train1, I get: (https://i.stack.imgur.com/Xrzbf.png) and then can't use model.predict():

pred = model_PassiveAggressiveClassifier.predict(tfidf_train1)

***ValueError**: X has 24 features, but PassiveAggressiveClassifier is expecting 113905 features as input.*

I tried the same as in this Kaggle work, but it didn't work. I have only one clue: I use Ukrainian text, but I don't think that has a big impact.

A: I found a solution in this Stack Overflow question. The problem was that I fitted a new vocabulary, so my new "test example" ended up with only a few features. So I reran these kernels:

#DataFlair - Initialize a TfidfVectorizer
tfidf_vectorizer=TfidfVectorizer(stop_words=my_stopwords_list,smooth_idf=False)
#DataFlair - Fit and transform train set, transform test set
tfidf_train=tfidf_vectorizer.fit_transform(x_train.values.astype('U'))
tfidf_test=tfidf_vectorizer.transform(x_test.values.astype('U'))
tfidf_train1=tfidf_vectorizer.transform(test)

After that my "test example" was like this:

<1x113905 sparse matrix of type '<class 'numpy.float64'>' with 6 stored elements in Compressed Sparse Row format>
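The core of the fix is fit once, transform everywhere else; a tiny self-contained demonstration with stand-in English sentences:

from sklearn.feature_extraction.text import TfidfVectorizer

train = ["dog bites man", "man bites dog back"]
test = ["dog barks"]

vec = TfidfVectorizer()
X_train = vec.fit_transform(train)   # learns the vocabulary (4 terms here)
X_test = vec.transform(test)         # reuses it, so the feature counts match

print(X_train.shape[1] == X_test.shape[1])   # True

Calling fit_transform on the single test sentence instead would rebuild the vocabulary from that one sentence, which is exactly how the 24-feature matrix in the question arose.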
Why after TfidfVectorizer X has 24 features, but PassiveAggressiveClassifier is expecting 113905 features as input
I'm trying to use TfidfVectorizer on array with one example and use it for model prediction, but after TfidfVectorizer i get: <1x24 sparse matrix of type '<class 'numpy.float64'>' with 24 stored elements in Compressed Sparse Row format> insted of: 2x113905 like my x_test or x_train, thats what i did: labels=df.Label #clasify labels x_train,x_test,y_train,y_test=train_test_split(df['Text'], labels, test_size=0.2, random_state=7) #split a data print(len(x_train),"\t\t",len(x_test),"\t\t",len(y_train),"\t\t",len(y_test)) my_stopwords_list = stopwords.words('ukrainian') test = ['Жінка пропагувала "руській мір" на весь вагон: скандал в електричці на Київщині. Інцидент стався в у приміській електричці сполученням "Святошин" - "Тетерів" у понеділок ввечері. Небайдужі пасажири рішуче відреагували й "висадили" жінку на найближчій станції.'] test = pd.Series(test,name="Text") #DataFlair - Initialize a TfidfVectorizer tfidf_vectorizer=TfidfVectorizer(stop_words=my_stopwords_list,smooth_idf=False) #DataFlair - Fit and transform train set, transform test set tfidf_train=tfidf_vectorizer.fit_transform(x_train.values.astype('U')) tfidf_test=tfidf_vectorizer.transform(x_test.values.astype('U')) tfidf_train1=tfidf_vectorizer.fit_transform(test.values.astype('U')) but when i look on tfidf_test and tfidf_train1, i get: (https://i.stack.imgur.com/Xrzbf.png) and than can't use model.predict(): pred = model_PassiveAggressiveClassifier.predict(tfidf_train1) ***ValueError**: X has 24 features, but PassiveAggressiveClassifier is expecting 113905 features as input.* i tried same in this kaggle work, but it didn't work, i have only one clue: i use ukrainian text, but i don't think that it has big impact
[ "I found a solution in this stack problem was that I made new vocabulary and my new \"test example\" have had only few characters like that:\nSo i reruned this karnels:\n#DataFlair - Initialize a TfidfVectorizer\ntfidf_vectorizer=TfidfVectorizer(stop_words=my_stopwords_list,smooth_idf=False)\n#DataFlair - Fit and transform train set, transform test set\ntfidf_train=tfidf_vectorizer.fit_transform(x_train.values.astype('U')) \ntfidf_test=tfidf_vectorizer.transform(x_test.values.astype('U'))\ntfidf_train1=tfidf_vectorizer.transform(test)\n\nAfter that my \"test example\" was like that:\n<1x113905 sparse matrix of type '<class 'numpy.float64'>'\nwith 6 stored elements in Compressed Sparse Row format>\n\n" ]
[ 0 ]
[]
[]
[ "python", "tf_idf", "tfidfvectorizer", "vectorization" ]
stackoverflow_0074553178_python_tf_idf_tfidfvectorizer_vectorization.txt
Q: How to edit/delete slash command message after its buttons have been interacted with (discord py disnake api wrapper)

Here's a mega simple coin flip example I made for this question. Is there any way to:

- edit the original slash command message with the result of the coin flip, to get rid of the buttons
- delete the original slash command message

class CoinFlipButtons(disnake.ui.View):
    def __init__(self):
        super().__init__(timeout=60)

    @disnake.ui.button(label="Heads", style=ButtonStyle.green)
    async def first_button(self, button: disnake.ui.Button, inter: disnake.MessageInteraction):
        coin = random.randrange(1,2)
        if coin == 1:
            coin = "heads"
        else:
            coin = "tails"
        if coin == "heads":
            await inter.response.send_message(content=f"You were correct! It was {coin}")
        else:
            await inter.response.send_message(content=f"Oh no! The coin was {coin}")

    @disnake.ui.button(label="Tails", style=ButtonStyle.red)
    async def second_button(self, button: disnake.ui.Button, inter: disnake.MessageInteraction):
        coin = random.randrange(1,2)
        if coin == 1:
            coin = "heads"
        else:
            coin = "tails"
        if coin == "tails":
            await inter.response.send_message(f"You were correct! It was {coin}")
        else:
            await inter.response.send_message(f"Oh no! The coin was {coin}")

@bot.slash_command(description="starts coin flip game")
async def coinflip(inter):
    await inter.response.send_message("Coin has been flipped what shall you choose?", view=CoinFlipButtons())

I tried searching the docs for an "edit original response message" function or something of the sort, but those wouldn't work, because the interaction is redefined when I send the new response to the buttons. That's where I'm stumped, because I want to edit/delete the slash command interaction after the button interaction.

A: To edit:

await inter.response.edit_message()
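A hedged sketch of one button callback using the edit route: passing view=None along with edit_message should remove the buttons. Note that random.randrange(1, 2) in the question always returns 1 (the upper bound is exclusive), so random.choice is used here instead:

import random
import disnake
from disnake import ButtonStyle

class CoinFlipButtons(disnake.ui.View):
    def __init__(self):
        super().__init__(timeout=60)

    @disnake.ui.button(label="Heads", style=ButtonStyle.green)
    async def first_button(self, button: disnake.ui.Button, inter: disnake.MessageInteraction):
        coin = random.choice(["heads", "tails"])
        text = "You were correct!" if coin == "heads" else "Oh no!"
        # edit the original slash-command message in place, dropping the view
        await inter.response.edit_message(content=f"{text} It was {coin}", view=None)

For deletion instead of editing, disnake exposes a delete-original helper on the interaction (named delete_original_message, later delete_original_response, depending on the disnake version), which can be awaited after responding.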
How to edit/delete slash command message after its buttons have been interacted with (discord py disnake api wrapper)
here's a mega simple coin flip example i made for this question is there any way to -edit the original slash command message with the result of the coin flip to get rid of the buttons -delete the original slash command message class CoinFlipButtons(disnake.ui.View): def __init__(self): super().__init__(timeout=60) @bot.slash_command(description="starts coin flip game") async def coinflip(inter): await inter.response.send_message("Coin has been flipped what shall you choose?", view=CoinFlipButtons()) @disnake.ui.button(label="Heads", style=ButtonStyle.green) async def first_button(self, button: disnake.ui.Button, inter: disnake.MessageInteraction): coin = random.randrange(1,2) if coin == 1: coin = "heads" else: coin = "tails" if coin == "heads": await inter.response.send_message(content=f"You were correct! It was {coin}") else: await inter.response.send_message(content=f"Oh no! The coin was {coin}") @disnake.ui.button(label="Tails", style=ButtonStyle.red) async def second_button(self, button: disnake.ui.Button, inter: disnake.MessageInteraction): coin = random.randrange(1,2) if coin == 1: coin = "heads" else: coin = "tails" if coin == "tails": await inter.response.send_message(f"You were correct! It was {coin}") else: await inter.response.send_message(f"Oh no! The coin was {coin}") I tried searching the docs for a edit original response message function or something of the sorts but those wouldn't work because interaction is redefined when i send the new response to the buttons and that's where im stumped because i wanna edit/delete the slash command interaction after the button interaction
[ "to edit:\nawait inter.response.edit_message()\n\n" ]
[ 0 ]
[]
[]
[ "discord", "discord_buttons", "disnake", "python" ]
stackoverflow_0074577232_discord_discord_buttons_disnake_python.txt
Q: Using rdflib with anzograph

I am using the community edition of AnzoGraph. I have no problem using the SPARQL HTTP protocol, however when I try to use the graph store protocol via rdflib I get a result I don't understand. I am running the Docker image from the Anzo website and have mapped ports -p 80:8080 443:8443 7070:7070. Here is the snippet from a Jupyter notebook:

import rdflib
import rdflib.plugins.stores.sparqlstore as store

store = store.SPARQLStore("http://192.168.1.104:7070/rdf-graph-store")
graph = rdflib.ConjunctiveGraph(store=store)

graph.query("select (count(*) as ?c) {?s ?p ?o}")

This gives the message ValueError: You did something wrong formulating either the URI or your SPARQL query and an HTTP error 406 higher up in the stack. Is there anything obvious I should be changing?

A: Solved! In spite of what the documentation says, using /sparql (rather than /rdf-graph-store) together with the graph store port of 7070 works. So the correct snippet is:

import rdflib
import rdflib.plugins.stores.sparqlstore as store

store = store.SPARQLStore("http://192.168.1.104:7070/sparql")
graph = rdflib.ConjunctiveGraph(store=store)

graph.query("select (count(*) as ?c) {?s ?p ?o}")
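As a short follow-up, this is how the count can be read back out of the rdflib result set; it assumes the endpoint from the answer is reachable:

import rdflib
import rdflib.plugins.stores.sparqlstore as store

graph = rdflib.ConjunctiveGraph(
    store=store.SPARQLStore("http://192.168.1.104:7070/sparql")
)
for row in graph.query("select (count(*) as ?c) {?s ?p ?o}"):
    print(int(row.c))   # row.c is an rdflib Literal; int() unwraps the value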
Using rdflib with anzograph
I am using the community edition of anzograph. I have no problem using the sparql http protocol, however when I try to use the graph store protocol via rdflib I get a result I don't understand. I am running the docker image from the anzo website and have mapped ports -p 80:8080 443:8443 7070:7070. Here is the snippet from jupyter notbook import rdflib import rdflib.plugins.stores.sparqlstore as store store = store.SPARQLStore("http://192.168.1.104:7070/rdf-graph-store") graph = rdflib.ConjunctiveGraph(store=store) graph.query("select (count(*) as ?c) {?s ?p ?o}") This gives the message ValueError: You did something wrong formulating either the URI or your SPARQL query and an http error 406, higher up in the stack. Is there anything obvious I should be changing?
[ "Solved! In spite of what the documentation says, using /sparql (rather than /rdf-graph-store) together with the graph store port of 7070 works. So the correct snippet is:\nimport rdflib\nimport rdflib.plugins.stores.sparqlstore as store\n\nstore = store.SPARQLStore(\"http://192.168.1.104:7070/sparql\")\ngraph = rdflib.ConjunctiveGraph(store=store)\n\n\ngraph.query(\"select (count(*) as ?c) {?s ?p ?o}\")\n\n" ]
[ 0 ]
[]
[]
[ "anzograph", "graph_databases", "python", "rdflib" ]
stackoverflow_0074588464_anzograph_graph_databases_python_rdflib.txt
Q: How to make a dataframe from a list of tuples, where tuples are the values?

I have a time series dataset on which I am running the auto ARIMA model. The dataset has multiple columns that are independent of each other, so basically it's like multiple auto ARIMA analyses. The code I currently have loops through all the columns in the dataframe and stores the order values of p,d,q for each column in a list. What I want to achieve is to store the p,d,q values for each column in a dataframe, row wise.

Time Series Dataframe

date                 Col1  Col2  Col3  Col4  Col5  Col6  Col7  Col8  Col9
2022-01-02 10:30:00  24    24    24.8  24.8  25    25    25.5  26.3  26.9
2022-01-02 10:45:00  59    58    60    60.3  59.3  59.2  58.4  56.9  58.0
2022-01-02 11:00:00  43.7  43.9  48    48    48.1  48.9  49    49.5  49.5

Code

##Auto arima
# def arimamodel(series):
autoarima_results=[]
series = df.columns
for col in series:
    print("Auto Arima for : ", {col})
    ARIMA_model = pm.auto_arima(
        df[col],
        start_p=1,
        start_q=1,
        test="adf",
        max_p=5,
        max_q=5,
        d=None,
        trace=True,
        error_action="ignore",
        suppress_warnings=True,
        stepwise=True,
    )
    ARIMA_model.summary()
    autoarima_results.append(ARIMA_model.order)

This returns a list that looks like: [(1,1,0), (2,1,1), (1,1,1)]

For example, the orders of p,d,q suggested by auto ARIMA are Col1: 1,1,0, Col2: 2,1,1, Col3: 1,1,1 and so on. The final output should be a dataframe that looks like the one below, where every row represents one column and its p,d,q values:

Results  pdq_values
Col1     (1,1,0)
Col2     (2,1,1)
Col3     (1,1,1)

A: Let's say that:

df has three columns: "Col1", "Col2", "Col3"
and autoarima_results == [(1, 1, 0), (2, 1, 1), (1, 1, 1)]

Then, here is one way to do it:

new_df = (
    pd.DataFrame(autoarima_results, index=cols)
    .pipe(lambda df_: df_.assign(pdq_values=df_.apply(lambda x: tuple(x), axis=1)))[
        "pdq_values"
    ]
    .to_frame("pdq_values")
)

new_df.index.name = "Results"

print(new_df)
# Output
        pdq_values
Results
Col1     (1, 1, 0)
Col2     (2, 1, 1)
Col3     (1, 1, 1)

A: Starting with the output of your ARIMA model, here is a simple way to do this without using lambda functions:

#Your ARIMA output
l = [(1,1,0), (2,1,1), (1,1,1)]

#Convert the list of tuples into entries from a single column
df = pd.DataFrame({'pdq_values':l}) #<----
#df = pd.DataFrame([l]).T #Another way!

#Change the index by adding 1, setting column name and prepending 'Col'
df.index = 'Col' + (df.index.set_names('Results')+1).astype(str)

#Reset index to get the index as a column in the df
df = df.reset_index()

df

  Results pdq_values
0    Col1  (1, 1, 0)
1    Col2  (2, 1, 1)
2    Col3  (1, 1, 1)

Additional notes:

The trick is to pass it as a dictionary (key, value pair) where you can specify the column name as key {'pdq_values':l}. Pandas reads this as a single column where each tuple is the entry for each row respectively. Another way to force this behavior is to pass the list of tuples as a list of lists [l]. This would create a dataframe with n columns, and then you would need to do a transpose.

'Col' + (df.index.set_names('Results')+1).astype(str) does 4 things at once. It changes the name of the index column to Results, adds 1 to it, converts it to a string, and prepends it with Col. This turns 0,1,2.. into Col1, Col2, Col3..
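Since both columns are already known up front, the whole frame can also be built in one constructor call; a compact sketch with the values from the question (each tuple stays intact as a single cell value):

import pandas as pd

cols = ["Col1", "Col2", "Col3"]
autoarima_results = [(1, 1, 0), (2, 1, 1), (1, 1, 1)]

# One dict key per output column; the tuples land as whole cell values.
out = pd.DataFrame({"Results": cols, "pdq_values": autoarima_results})
print(out)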
How to make a dataframe from a list of tuples, where tuples are the values?
I have a time series dataset on which I am running the auto arima model. The dataset has multiple columns that are independent of each other, so basically it's like multiple auto arima analysis. The code I currently have loops through all the columns in the dataframe and stores the order values of p,d,q for each column in a list. What I want to achieve is : to store the p,d,q values for each column in a dataframe row wise. Time Series Dataframe date Col1 Col2 Col3 Col4 Col5 Col6 Col7 Col8 Col9 2022-01-02 10:30:00 24 24 24.8 24.8 25 25 25.5 26.3 26.9 2022-01-02 10:45:00 59 58 60 60.3 59.3 59.2 58.4 56.9 58.0 2022-01-02 11:00:00 43.7 43.9 48 48 48.1 48.9 49 49.5 49.5 Code ##Auto arima # def arimamodel(series): autoarima_results=[] series = df.columns for col in series: print("Auto Arima for : ", {col}) ARIMA_model = pm.auto_arima( df[col], start_p=1, start_q=1, test="adf", max_p=5, max_q=5, d=None, trace=True, error_action="ignore", suppress_warnings=True, stepwise=True, ) ARIMA_model.summary() autoarima_results.append(ARIMA_model.order) This returns a list that looks like : [(1,1,0), (2,1,1), (1,1,1)] For example, the orders of p,d,q suggested by auto arima are, Col1 : 1,1,0 , Col2 : 2,1,1 , Col3 : 1,1,1 and so on. The final output should be a dataframe that would look like is as below. Where every row represents one column and its p,d,q values: Results pdq_values Col1 (1,1,0) Col2 (2,1,1) Col3 (1,1,1)
[ "Let's say that:\n\ndf has three columns: \"Col1\", \"Col2\", \"Col3\"\nand autoarima_results == [(1, 1, 0), (2, 1, 1), (1, 1, 1)]\n\nThen, here is one way to do it:\nnew_df = (\n pd.DataFrame(autoarima_results, index=cols)\n .pipe(lambda df_: df_.assign(pdq_values=df_.apply(lambda x: tuple(x), axis=1)))[\n \"pdq_values\"\n ]\n .to_frame(\"pdq_values\")\n)\n\nnew_df.index.name = \"Results\"\n\nprint(new_df)\n# Output\n pdq_values\nResults\nCol1 (1, 1, 0)\nCol2 (2, 1, 1)\nCol3 (1, 1, 1)\n\n", "Starting with the output of your ARIMA model, here is a simple way to do this without using lambda functions -\n#Your ARIMA output\nl = [(1,1,0), (2,1,1), (1,1,1)]\n\n#Convert the list of tuples into entries from a single column\ndf = pd.DataFrame({'pdq_values':l}) #<----\n#df = pd.DataFrame([l]).T #Another way!\n\n#Change the index by adding 1, setting column name and prepending 'Col'\ndf.index = 'Col' + (df.index.set_names('Results')+1).astype(str)\n\n#Reset index to get the index as a column in the df\ndf = df.reset_index()\n\ndf\n\n Results pdq_values\n0 Col1 (1, 1, 0)\n1 Col2 (2, 1, 1)\n2 Col3 (1, 1, 1)\n\nAdditional notes:\n\nThe trick is to pass it as a dictionary (key, value pair) where you can specify the column name as key {'pdq_values':l}. Pandas reads this as a single column where each tuple is the entries for each row respectively. Another way to force this behavior is to pass the list of tuples as a list of lists [l]. This would create a dataframe with n columns and then you will need to do a transpose.\n'Col' + (df.index.set_names('Results')+1).astype(str) does 4 things at once. Changes the name of the index column to Results, adds 1 to it, converts it to a string, and prepends it with Col. This results in 0,1,2.. to become Col1, Col2, Col3..\n\n" ]
[ 0, 0 ]
[]
[]
[ "arima", "dataframe", "pandas", "python", "tuples" ]
stackoverflow_0074548631_arima_dataframe_pandas_python_tuples.txt
Q: How to update text in pysimplegui

I'm very new to Python, so I'm just experimenting with new GUIs and other things. I was wondering if you could open up a window using PySimpleGUI with a piece of text which says "0", and when you click a button the text changes to the previous number + 1. E.g. at first the text says "0", but when you click a button of some sort, the text changes to "1". Sorry if my explanation of my problem is bad, but I have not used Stack Overflow before.

import PySimpleGUI as sg

num = 0

layout = [
    [sg.Text(num)],
    [sg.Button("hi")]
]

window = sg.Window("the box", layout)

while True:
    event, values = window.read()
    if event == "hi":
        num = num+1
        layout = [
            [sg.Text(num)],
            [sg.Button("hi")]
        ]
        window = sg.Window("the box", layout)

I tried this code but it did not work. As you can probably see I am very new to all of this, so kindly do not type an answer with complicated/advanced code.

Thanks to Tim who gave me an answer, but I do not understand it; here is the new (still faulty) code:

import PySimpleGUI as sg

num = 0

layout = [
    [sg.Text(key='xxx')],
    [sg.Button("hi")]
]

window = sg.Window("the box", layout)

while True:
    event, values['xxx'] = window.read()
    if event == "hi":
        xxx = num
        num = num + 1

A: You should call the method window[element_key].update(value=new_value) to update the element window[element_key] with the new value new_value. A new layout and a new window are not required.

import PySimpleGUI as sg

num = 0

layout = [
    [sg.Text(num, key='xxx')],
    [sg.Button("hi")],
]

window = sg.Window("the box", layout)

while True:

    event, values = window.read()

    if event == sg.WIN_CLOSED:
        break
    elif event == "hi":
        num += 1
        window['xxx'].update(num)

window.close()
How to update text in pysimplegui
I'm very new to python so I'm just experimenting with new GUIs and other things. I was wondering if you could open up a window using pysimplegui and have a piece of text which says "0", and when you click a button the text changes to the previous number +1. For E.G: at first the text says "0", but when you click a button of some sort, the text changes to a "1". Sorry if my explanation of my problem is bad but I have not used stack overflow before. import PySimpleGUI as sg num = 0 layout = [ [sg.Text(num)], [sg.Button("hi")] ] window = sg.Window("the box", layout) while True: event, values = window.read() if event == "hi": num = num+1 layout = [ [sg.Text(num)], [sg.Button("hi")] ] window = sg.Window("the box", layout) I tried this code but it did not work. As you can probably see I am very new to all of this so kindly do not type an answer with complicated/advanced code. Thanks to tim who gave me an answer but i do not understand it, here is the new(still faulty) code: import PySimpleGUI as sg num = 0 layout = [ [sg.Text(key='xxx')], [sg.Button("hi")] ] window = sg.Window("the box", layout) while True: event, values['xxx'] = window.read() if event == "hi": xxx = num num = num + 1
[ "Should call method window[element_key].update(value=new_value) to update the element window[element_key] with new value new_value.\nNew layout and new window are not required.\nimport PySimpleGUI as sg\n\nnum = 0\n\nlayout = [\n [sg.Text(num, key='xxx')],\n [sg.Button(\"hi\")],\n]\n\nwindow = sg.Window(\"the box\", layout)\n\nwhile True:\n\n event, values = window.read()\n\n if event == sg.WIN_CLOSED:\n break\n elif event == \"hi\":\n num += 1\n window['xxx'].update(num)\n\nwindow.close()\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074578705_python.txt
Q: how to count the number of specific lines between time intervals

I have a dataset that contains two columns, time and source, and I want to count 192.168.1.128 within each second. I have this:

Time                 Source
2022-11-27 09:19:27  192.168.1.128
2022-11-27 09:19:27  152.199.19.161
2022-11-27 09:19:27  192.168.1.128
2022-11-27 09:19:27  192.168.1.128
2022-11-27 09:19:28  142.250.186.67
2022-11-27 09:19:29  192.168.1.128
2022-11-27 09:19:29  192.168.1.128
2022-11-27 09:19:30  192.168.1.128
2022-11-27 09:19:30  142.250.186.67

and I am trying to get it like this:

Time                 Count
2022-11-27 09:19:27  3
2022-11-27 09:19:28  0
2022-11-27 09:19:29  2
2022-11-27 09:19:30  1

A: You could use:

df['Source'].eq('192.168.1.128').groupby(df['Time']).sum().reset_index()
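A runnable check of the approach on the sample data: .sum() adds up the True matches per second, whereas .count() would tally every row in the group regardless of whether it matched.

import pandas as pd

df = pd.DataFrame({
    "Time": ["2022-11-27 09:19:27"] * 4 + ["2022-11-27 09:19:28"]
            + ["2022-11-27 09:19:29"] * 2 + ["2022-11-27 09:19:30"] * 2,
    "Source": ["192.168.1.128", "152.199.19.161", "192.168.1.128",
               "192.168.1.128", "142.250.186.67", "192.168.1.128",
               "192.168.1.128", "192.168.1.128", "142.250.186.67"],
})

counts = (df["Source"].eq("192.168.1.128")   # boolean match per row
            .groupby(df["Time"])
            .sum()                           # True counts as 1, False as 0
            .reset_index(name="Count"))
print(counts)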
how to count the number of specific lines between time intervals
I have a dataset that contains two columns: time and source and I want to count 192.168.1.128 between each second I have this: Time Source 2022-11-27 09:19:27 192.168.1.128 2022-11-27 09:19:27 152.199.19.161 2022-11-27 09:19:27 192.168.1.128 2022-11-27 09:19:27 192.168.1.128 2022-11-27 09:19:28 142.250.186.67 2022-11-27 09:19:29 192.168.1.128 2022-11-27 09:19:29 192.168.1.128 2022-11-27 09:19:30 192.168.1.128 2022-11-27 09:19:30 142.250.186.67 and trying to get it like this: Time Count 2022-11-27 09:19:27 3 2022-11-27 09:19:28 0 2022-11-27 09:19:29 2 2022-11-27 09:19:30 1
[ "You could use:\ndf['Source'].eq('192.168.1.128').groupby(df['Time']).count().reset_index()\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074589226_dataframe_pandas_python.txt
Q: How to create abstract properties in python abstract classes In the following code, I create a base abstract class Base. I want all the classes that inherit from Base to provide the name property, so I made this property an @abstractmethod. Then I created a subclass of Base, called Base_1, which is meant to supply some functionality, but still remain abstract. There is no name property in Base_1, but nevertheless Python instantiates an object of that class without an error. How does one create abstract properties? from abc import ABCMeta, abstractmethod class Base(object): __metaclass__ = ABCMeta def __init__(self, strDirConfig): self.strDirConfig = strDirConfig @abstractmethod def _doStuff(self, signals): pass @property @abstractmethod def name(self): # this property will be supplied by the inheriting classes # individually pass class Base_1(Base): __metaclass__ = ABCMeta # this class does not provide the name property, should raise an error def __init__(self, strDirConfig): super(Base_1, self).__init__(strDirConfig) def _doStuff(self, signals): print 'Base_1 does stuff' class C(Base_1): @property def name(self): return 'class C' if __name__ == '__main__': b1 = Base_1('abc') A: Since Python 3.3 a bug was fixed, meaning the property() decorator is now correctly identified as abstract when applied to an abstract method. Note: Order matters, you have to use @property above @abstractmethod Python 3.3+: (python docs): from abc import ABC, abstractmethod class C(ABC): @property @abstractmethod def my_abstract_property(self): ... Python 2: (python docs; note that abc.ABC only exists in Python 3.4+, so here you use ABCMeta directly) from abc import ABCMeta, abstractproperty class C(object): __metaclass__ = ABCMeta @abstractproperty def my_abstract_property(self): pass A: Until Python 3.3, you cannot nest @abstractmethod and @property. Use @abstractproperty to create abstract properties (docs). from abc import ABCMeta, abstractmethod, abstractproperty class Base(object): # ... @abstractproperty def name(self): pass The code now raises the correct exception: Traceback (most recent call last): File "foo.py", line 36, in b1 = Base_1('abc') TypeError: Can't instantiate abstract class Base_1 with abstract methods name A: Based on James's answer above import sys from abc import abstractmethod, abstractproperty def compatibleabstractproperty(func): if sys.version_info > (3, 3): return property(abstractmethod(func)) else: return abstractproperty(func) and use it as a decorator @compatibleabstractproperty def env(self): raise NotImplementedError() A: Using the @property decorator in the abstract class (as recommended in the answer by James) works if you want the required instance level attributes to use the property decorator as well. If you don't want to use the property decorator, you can use super(). I ended up using something like the __post_init__() from dataclasses and it gets the desired functionality for instance level attributes: import abc from typing import List class Abstract(abc.ABC): """An ABC with required attributes. Attributes: attr0 attr1 """ @abc.abstractmethod def __init__(self): """Forces you to implement __init__ in 'Concrete'. Make sure to call __post_init__() from inside 'Concrete'.""" def __post_init__(self): self._has_required_attributes() # You can also type check here if you want.
def _has_required_attributes(self): req_attrs: List[str] = ['attr0', 'attr1'] for attr in req_attrs: if not hasattr(self, attr): raise AttributeError(f"Missing attribute: '{attr}'") class Concrete(Abstract): def __init__(self, attr0, attr1): self.attr0 = attr0 self.attr1 = attr1 self.attr2 = "some value" # not required super().__post_init__() # Enforces the attribute requirement. A: In Python 3.6+, you can also annotate a variable without providing a default. I find this to be a more concise way to make it abstract. class Base(): name: str def print_name(self): print(self.name) # will raise an AttributeError at runtime if `name` isn't defined in subclass class Base_1(Base): name = "base one" It may also be used to force you to initialize the variable in the __new__ or __init__ methods. As another example, the following code will fail when you try to initialize the Base_1 class class Base(): name: str def __init__(self): self.print_name() class Base_1(Base): _nemo = "base one" b = Base_1() AttributeError: 'Base_1' object has no attribute 'name' A: For example, you can define the abstract getter, setter and deleter with @abstractmethod and @property, @name.setter or @name.deleter in Person abstract class as shown below. *@abstractmethod must be the innermost decorator, otherwise an error occurs: from abc import ABC, abstractmethod class Person(ABC): @property @abstractmethod # The innermost decorator def name(self): # Abstract getter pass @name.setter @abstractmethod # The innermost decorator def name(self, name): # Abstract setter pass @name.deleter @abstractmethod # The innermost decorator def name(self): # Abstract deleter pass Then, you can extend Person abstract class with Student class, override the abstract getter, setter and deleter in Student class, instantiate Student class and call the getter, setter and deleter as shown below: class Student(Person): def __init__(self, name): self._name = name @property def name(self): # Overrides abstract getter return self._name @name.setter def name(self, name): # Overrides abstract setter self._name = name @name.deleter def name(self): # Overrides abstract deleter del self._name obj = Student("John") # Instantiates "Student" class print(obj.name) # Getter obj.name = "Tom" # Setter print(obj.name) # Getter del obj.name # Deleter print(hasattr(obj, "name")) Output: John Tom False Actually, even if you don't override the abstract setter and deleter in Student class and instantiate Student class as shown below: class Student(Person): # Extends "Person" class def __init__(self, name): self._name = name @property def name(self): # Overrides only abstract getter return self._name # @name.setter # def name(self, name): # Overrides abstract setter # self._name = name # @name.deleter # def name(self): # Overrides abstract deleter # del self._name obj = Student("John") # Instantiates "Student" class # ... No error occurs as shown below: John Tom False But, if you don't override the abstract getter, setter and deleter in Student class and instantiate Student class as shown below: class Student(Person): # Extends "Person" class def __init__(self, name): self._name = name # @property # def name(self): # Overrides only abstract getter # return self._name # @name.setter # def name(self, name): # Overrides abstract setter # self._name = name # @name.deleter # def name(self): # Overrides abstract deleter # del self._name obj = Student("John") # Instantiates "Student" class # ...
The error below occurs: TypeError: Can't instantiate abstract class Student with abstract methods name And, if you don't override the abstract getter in Student class and instantiate Student class as shown below: class Student(Person): # Extends "Person" class def __init__(self, name): self._name = name # @property # def name(self): # Overrides only abstract getter # return self._name @name.setter def name(self, name): # Overrides abstract setter self._name = name @name.deleter def name(self): # Overrides abstract deleter del self._name obj = Student("John") # Instantiates "Student" class # ... The error below occurs: NameError: name 'name' is not defined And, if @abstractmethod is not the innermost decorator as shown below: from abc import ABC, abstractmethod class Person(ABC): @abstractmethod # Not the innermost decorator @property def name(self): # Abstract getter pass @name.setter @abstractmethod # The innermost decorator def name(self, name): # Abstract setter pass @name.deleter @abstractmethod # The innermost decorator def name(self): # Abstract deleter pass The error below occurs: AttributeError: attribute '__isabstractmethod__' of 'property' objects is not writable A: Another possible solution is to use metaclasses. A minimal example can look like this: class BaseMetaClass(type): def __new__(mcls, class_name, bases, attrs): if bases: # skip the check for the base class itself required_attrs = ('foo', 'bar') for attr in required_attrs: if attr not in attrs: raise RuntimeError(f"You need to set {attr} in {class_name}") return super().__new__(mcls, class_name, bases, attrs) class Base(metaclass=BaseMetaClass): foo: str bar: int One advantage of this approach is that the check will happen at definition time (not instantiation). Also, setting class attributes in child classes is a bit easier than declaring properties (as long as they are simple values known in advance) and your final classes will look more concise.
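A small runnable sketch (Python 3.3+) tying the accepted fix back to the original question: with @property stacked on top of @abstractmethod, the still-abstract middle class can no longer be instantiated, while the concrete subclass can. The class names follow the question; the bodies are trimmed to the essentials.

from abc import ABC, abstractmethod

class Base(ABC):
    @property
    @abstractmethod
    def name(self):
        """Each concrete subclass must supply this property."""

class Base_1(Base):
    # Still abstract: it does not provide `name`.
    def _doStuff(self, signals):
        print('Base_1 does stuff')

class C(Base_1):
    @property
    def name(self):
        return 'class C'

print(C().name)   # 'class C' -- the concrete class works
try:
    Base_1()      # instantiation now fails, as the question wanted
except TypeError as e:
    print(e)      # Can't instantiate abstract class Base_1 ...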
How to create abstract properties in python abstract classes
In the following code, I create a base abstract class Base. I want all the classes that inherit from Base to provide the name property, so I made this property an @abstractmethod. Then I created a subclass of Base, called Base_1, which is meant to supply some functionality, but still remain abstract. There is no name property in Base_1, but nevertheless Python instantiates an object of that class without an error. How does one create abstract properties? from abc import ABCMeta, abstractmethod class Base(object): __metaclass__ = ABCMeta def __init__(self, strDirConfig): self.strDirConfig = strDirConfig @abstractmethod def _doStuff(self, signals): pass @property @abstractmethod def name(self): # this property will be supplied by the inheriting classes # individually pass class Base_1(Base): __metaclass__ = ABCMeta # this class does not provide the name property, should raise an error def __init__(self, strDirConfig): super(Base_1, self).__init__(strDirConfig) def _doStuff(self, signals): print 'Base_1 does stuff' class C(Base_1): @property def name(self): return 'class C' if __name__ == '__main__': b1 = Base_1('abc')
[ "Since Python 3.3 a bug was fixed meaning the property() decorator is now correctly identified as abstract when applied to an abstract method.\nNote: Order matters, you have to use @property above @abstractmethod\nPython 3.3+: (python docs):\nfrom abc import ABC, abstractmethod\n\nclass C(ABC):\n @property\n @abstractmethod\n def my_abstract_property(self):\n ...\n\nPython 2: (python docs)\nfrom abc import ABC, abstractproperty\n\nclass C(ABC):\n @abstractproperty\n def my_abstract_property(self):\n ...\n\n", "Until Python 3.3, you cannot nest @abstractmethod and @property.\nUse @abstractproperty to create abstract properties (docs).\nfrom abc import ABCMeta, abstractmethod, abstractproperty\n\nclass Base(object):\n # ...\n @abstractproperty\n def name(self):\n pass\n\nThe code now raises the correct exception:\n\nTraceback (most recent call last):\n File \"foo.py\", line 36, in \n b1 = Base_1('abc') \nTypeError: Can't instantiate abstract class Base_1 with abstract methods name\n\n", "Based on James answer above \ndef compatibleabstractproperty(func):\n\n if sys.version_info > (3, 3): \n return property(abstractmethod(func))\n else:\n return abstractproperty(func)\n\nand use it as a decorator\n@compatibleabstractproperty\ndef env(self):\n raise NotImplementedError()\n\n", "Using the @property decorator in the abstract class (as recommended in the answer by James) works if you want the required instance level attributes to use the property decorator as well.\nIf you don't want to use the property decorator, you can use super(). I ended up using something like the __post_init__() from dataclasses and it gets the desired functionality for instance level attributes:\nimport abc\nfrom typing import List\n\nclass Abstract(abc.ABC):\n \"\"\"An ABC with required attributes.\n\n Attributes:\n attr0\n attr1 \n \"\"\"\n\n @abc.abstractmethod\n def __init__(self):\n \"\"\"Forces you to implement __init__ in 'Concrete'. \n Make sure to call __post_init__() from inside 'Concrete'.\"\"\"\n\n def __post_init__(self):\n self._has_required_attributes()\n # You can also type check here if you want.\n\n def _has_required_attributes(self):\n req_attrs: List[str] = ['attr0', 'attr1']\n for attr in req_attrs:\n if not hasattr(self, attr):\n raise AttributeError(f\"Missing attribute: '{attr}'\")\n\nclass Concrete(Abstract):\n\n def __init__(self, attr0, attr1):\n self.attr0 = attr0\n self.attr1 = attr1\n self.attr2 = \"some value\" # not required\n super().__post_init__() # Enforces the attribute requirement.\n\n", "In python 3.6+, you can also anotate a variable without providing a default. I find this to be a more concise way to make it abstract.\nclass Base():\n name: str\n \n def print_name(self):\n print(self.name) # will raise an Attribute error at runtime if `name` isn't defined in subclass\n\nclass Base_1(Base):\n name = \"base one\"\n\nit may also be used to force you to initialize the variable in the __new__ or __init__ methods\nAs another example, the following code will fail when you try to initialize the Base_1 class\n class Base():\n name: str\n\n def __init__(self):\n self.print_name()\n\n class Base_1(Base):\n _nemo = \"base one\"\n \n b = Base_1() \n\nAttributeError: 'Base_1' object has no attribute 'name'\n", "For example, you can define the abstract getter, setter and deleter with @abstractmethod and @property, @name.setter or @name.deleter in Person abstract class as shown below. 
*@abstractmethod must be the innermost decorator, otherwise an error occurs:\nfrom abc import ABC, abstractmethod\n\nclass Person(ABC):\n\n @property\n @abstractmethod # The innermost decorator\n def name(self): # Abstract getter\n pass\n\n @name.setter\n @abstractmethod # The innermost decorator\n def name(self, name): # Abstract setter\n pass\n\n @name.deleter\n @abstractmethod # The innermost decorator\n def name(self): # Abstract deleter\n pass\n\nThen, you can extend Person abstract class with Student class, override the abstract getter, setter and deleter in Student class, instantiate Student class and call the getter, setter and deleter as shown below:\nclass Student(Person):\n\n def __init__(self, name):\n self._name = name\n \n @property\n def name(self): # Overrides abstract getter\n return self._name\n \n @name.setter\n def name(self, name): # Overrides abstract setter\n self._name = name\n \n @name.deleter\n def name(self): # Overrides abstract deleter \n del self._name\n\nobj = Student(\"John\") # Instantiates \"Student\" class\nprint(obj.name) # Getter\nobj.name = \"Tom\" # Setter\nprint(obj.name) # Getter\ndel obj.name # Deleter\nprint(hasattr(obj, \"name\"))\n\nOutput:\nJohn\nTom\nFalse\n\nActually, even if you don't override the abstract setter and deleter in Student class and instantiate Student class as shown below:\nclass Student(Person): # Extends \"Person\" class\n \n def __init__(self, name):\n self._name = name\n \n @property\n def name(self): # Overrides only abstract getter\n return self._name\n\n # @name.setter\n # def name(self, name): # Overrides abstract setter\n # self._name = name\n \n # @name.deleter\n # def name(self): # Overrides abstract deleter \n # del self._name\n\nobj = Student(\"John\") # Instantiates \"Student\" class\n# ...\n\nNo error occurs as shown below:\nJohn\nTom\nFalse\n\nBut, if you don't override the abstract getter, setter and deleter in Student class and instantiate Student class as shown below:\nclass Student(Person): # Extends \"Person\" class\n \n def __init__(self, name):\n self._name = name\n \n # @property\n # def name(self): # Overrides only abstract getter\n # return self._name\n\n # @name.setter\n # def name(self, name): # Overrides abstract setter\n # self._name = name\n \n # @name.deleter\n # def name(self): # Overrides abstract deleter \n # del self._name\n\nobj = Student(\"John\") # Instantiates \"Student\" class\n# ...\n\nThe error below occurs:\n\nTypeError: Can't instantiate abstract class Student with abstract methods name\n\nAnd, if you don't override the abstract getter in Student class and instantiate Student class as shown below:\nclass Student(Person): # Extends \"Person\" class\n \n def __init__(self, name):\n self._name = name\n \n # @property\n # def name(self): # Overrides only abstract getter\n # return self._name\n\n @name.setter\n def name(self, name): # Overrides abstract setter\n self._name = name\n \n @name.deleter\n def name(self): # Overrides abstract deleter \n del self._name\n\nobj = Student(\"John\") # Instantiates \"Student\" class\n# ...\n\nThe error below occurs:\n\nNameError: name 'name' is not defined\n\nAnd, if @abstractmethod is not the innermost decorator as shown below:\nfrom abc import ABC, abstractmethod\n\nclass Person(ABC):\n\n @abstractmethod # Not the innermost decorator\n @property\n def name(self): # Abstract getter\n pass\n\n @name.setter\n @abstractmethod # The innermost decorator\n def name(self, name): # Abstract setter\n pass\n\n @name.deleter\n @abstractmethod # The innermost
decorator\n def name(self): # Abstract deleter\n pass\n\nThe error below occurs:\n\nAttributeError: attribute '__isabstractmethod__' of 'property' objects is not writable\n\n", "Another possible solution is to use metaclasses.\nA minimal example can look like this:\nclass BaseMetaClass(type):\n def __new__(mcls, class_name, bases, attrs):\n if bases: # skip the check for the base class itself\n required_attrs = ('foo', 'bar')\n for attr in required_attrs:\n if attr not in attrs:\n raise RuntimeError(f\"You need to set {attr} in {class_name}\")\n return super().__new__(mcls, class_name, bases, attrs)\n\n\nclass Base(metaclass=BaseMetaClass):\n foo: str\n bar: int\n\nOne advantage of this approach is that the check will happen at definition time (not instantiation).\nAlso, setting class attributes in child classes is a bit easier than declaring properties (as long as they are simple values known in advance) and your final classes will look more concise\n" ]
[ 255, 57, 10, 5, 3, 0, 0 ]
[]
[]
[ "abstract_class", "decorator", "properties", "python" ]
stackoverflow_0005960337_abstract_class_decorator_properties_python.txt
Q: How to get p-value and pearson's r for a list of columns in Pandas? I'm trying to make a multiindexed table (a matrix) of correlation coefficients and p-values. I'd prefer to use the scipy.stats tests. x = pd.DataFrame( list( zip( [1,2,3,4,5,6], [5, 7, 8, 4, 2, 8], [13, 16, 12, 11, 9, 10] ) ), columns= ['a', 'b', 'c'] ) # I've tried something like this for i in range(len(x.columns)): r,p = pearsonr(x[x.columns[i]], x[x.columns[i+1]]) print(f'{r}, {p}') Obviously the for loop won't work. What I want to end up with is: a b c a r 1.0 -.09 -.8 p .00 .87 .06 b r -.09 1 .42 p .87 .00 .41 c r -.8 .42 1 p .06 .41 .00 I had written code to solve this problem (with help from this community) years ago, but it only worked for an older version of spearmanr. Any help would be very much appreciated. A: Here is one way to do it using scipy pearsonr and Pandas corr methods: import pandas as pd from scipy.stats import pearsonr def pearsonr_pval(x, y): return pearsonr(x, y)[1] df = ( pd.concat( [ x.corr(method="pearson").reset_index().assign(value="r"), x.corr(method=pearsonr_pval).reset_index().assign(value="p"), ] ) .groupby(["index", "value"]) .agg(lambda x: list(x)[0]) ).sort_index(ascending=[True, False]) df.index.names = ["", ""] Then: print(df) # Output a b c a r 1.000000 -0.088273 -0.796421 p 1.000000 0.867934 0.057948 b r -0.088273 1.000000 0.421184 p 0.867934 1.000000 0.405583 c r -0.796421 0.421184 1.000000 p 0.057948 0.405583 1.000000
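An alternative sketch that builds the MultiIndex layout from the question directly with scipy's pearsonr, looping over column pairs; it assumes the same x DataFrame as in the question.

import pandas as pd
from scipy.stats import pearsonr

x = pd.DataFrame(
    {"a": [1, 2, 3, 4, 5, 6], "b": [5, 7, 8, 4, 2, 8], "c": [13, 16, 12, 11, 9, 10]}
)

rows = {}
for c1 in x.columns:
    r_row, p_row = {}, {}
    for c2 in x.columns:
        r, p = pearsonr(x[c1], x[c2])   # coefficient and p-value for each pair
        r_row[c2], p_row[c2] = r, p
    rows[(c1, "r")] = r_row
    rows[(c1, "p")] = p_row

result = pd.DataFrame.from_dict(rows, orient="index")
result.index = pd.MultiIndex.from_tuples(result.index)   # (column, statistic) levels
print(result.round(2))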
How to get p-value and pearson's r for a list of columns in Pandas?
I'm trying to make a multiindexed table (a matrix) of correlation coefficients and p-values. I'd prefer to use the scipy.stats tests. x = pd.DataFrame( list( zip( [1,2,3,4,5,6], [5, 7, 8, 4, 2, 8], [13, 16, 12, 11, 9, 10] ) ), columns= ['a', 'b', 'c'] ) # I've tried something like this for i in range(len(x.columns)): r,p = pearsonr(x[x.columns[i]], x[x.columns[i+1]]) print(f'{r}, {p}') Obviously the for loop won't work. What I want to end up with is: a b c a r 1.0 -.09 -.8 p .00 .87 .06 b r -.09 1 .42 p .87 .00 .41 c r -.8 .42 1 p .06 .41 .00 I had written code to solve this problem (with help from this community) years ago, but it only worked for an older version of spearmanr. Any help would be very much appreciated.
[ "Here is one way to do it using scipy pearsonr and Pandas corr methods:\nimport pandas as pd\nfrom scipy.stats import pearsonr\n\ndef pearsonr_pval(x, y):\n return pearsonr(x, y)[1]\n\n\ndf = (\n pd.concat(\n [\n x.corr(method=\"pearson\").reset_index().assign(value=\"r\"),\n x.corr(method=pearsonr_pval).reset_index().assign(value=\"p\"),\n ]\n )\n .groupby([\"index\", \"value\"])\n .agg(lambda x: list(x)[0])\n).sort_index(ascending=[True, False])\n\ndf.index.names = [\"\", \"\"]\n\nThen:\nprint(df)\n# Output\n a b c\n\na r 1.000000 -0.088273 -0.796421\n p 1.000000 0.867934 0.057948\nb r -0.088273 1.000000 0.421184\n p 0.867934 1.000000 0.405583\nc r -0.796421 0.421184 1.000000\n p 0.057948 0.405583 1.000000\n\n" ]
[ 1 ]
[]
[]
[ "correlation", "pandas", "python", "scipy" ]
stackoverflow_0074537135_correlation_pandas_python_scipy.txt
Q: Why this replace(), re.sub() or strip() do not work with this string? I'm using BeautifulSoup to get a result from a webpage. I've transformed the data object to a string and I'm not able to trim it. I've got the following string: text = '\n\n\n This product is not available.\n \n' I've tried three options to start removing the newline character: string=text.replace('\n','') string=text.strip('\n') import re string = re.sub('\n','', text) Why is the string output still the same as text in all cases? I haven't understood the logic yet. Does someone know what's happening? UPDATE: The whole program text, in case it helps to reproduce the issue: import requests from bs4 import BeautifulSoup import re resp = requests.get('https://soysuper.com/p/granola-con-avena-y-frutos-rojos-kellogg-s-special-k-320-g-320-g', headers={'User-Agent':'Chrome/44.0.2403.157','Accept-Language': 'es-ES, es;q=0.5'}) soup = BeautifulSoup(resp.content.decode('UTF-8'),'html.parser') data = [element.text for element in soup.find_all("section", {"class": "display display--coco"})] text=str(data) #option1 string=text.replace('\n',' ') #option2 string=text.strip('\n') #option3 string = re.sub('\n','', text) print(string) A: Just use .getText(strip=True). Here's how: import requests from bs4 import BeautifulSoup resp = requests.get('https://soysuper.com/p/granola-con-avena-y-frutos-rojos-kellogg-s-special-k-320-g-320-g', headers={'User-Agent':'Chrome/44.0.2403.157','Accept-Language': 'es-ES, es;q=0.5'}) soup = BeautifulSoup(resp.content.decode('UTF-8'),'html.parser') data = [element.getText(strip=True) for element in soup.find_all("section", {"class": "display display--coco"})] print(data) Output: ['Este producto no está disponible en ningún supermercado online.']
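A small sketch, independent of the website, showing why the replacements appeared to do nothing: calling str() on the list returns the repr of its elements, so each real newline becomes the two-character sequence backslash plus n, and the literal '\n' being searched for no longer exists in the text. Working on the strings themselves (or using getText as in the answer) avoids this.

data = ['\n\n\n        This product is not available.\n    \n']

text = str(data)        # repr of the list: newlines are shown as backslash-n
print(text)
print('\n' in text)     # False -- there is no actual newline left to replace

# Operate on the list's strings instead of the list's repr:
clean = ' '.join(s.strip() for s in data)
print(clean)            # This product is not available.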
Why this replace(), re.sub() or strip() do not work with this string?
I'm using BeautifulSoup to get a result from a webpage. I've transformed the data object to a string and I'm not able to trim it. I've got the following string: text = '\n\n\n This product is not available.\n \n' I've tried three options to start removing the newline character: string=text.replace('\n','') string=text.strip('\n') import re string = re.sub('\n','', text) Why is the string output still the same as text in all cases? I haven't understood the logic yet. Does someone know what's happening? UPDATE: The whole program text, in case it helps to reproduce the issue: import requests from bs4 import BeautifulSoup import re resp = requests.get('https://soysuper.com/p/granola-con-avena-y-frutos-rojos-kellogg-s-special-k-320-g-320-g', headers={'User-Agent':'Chrome/44.0.2403.157','Accept-Language': 'es-ES, es;q=0.5'}) soup = BeautifulSoup(resp.content.decode('UTF-8'),'html.parser') data = [element.text for element in soup.find_all("section", {"class": "display display--coco"})] text=str(data) #option1 string=text.replace('\n',' ') #option2 string=text.strip('\n') #option3 string = re.sub('\n','', text) print(string)
[ "Just use .getText(strip=True).\nHere's how:\nimport requests\nfrom bs4 import BeautifulSoup\n\nresp = requests.get('https://soysuper.com/p/granola-con-avena-y-frutos-rojos-kellogg-s-special-k-320-g-320-g', headers={'User-Agent':'Chrome/44.0.2403.157','Accept-Language': 'es-ES, es;q=0.5'})\nsoup = BeautifulSoup(resp.content.decode('UTF-8'),'html.parser')\n\ndata = [element.getText(strip=True) for element in soup.find_all(\"section\", {\"class\": \"display display--coco\"})]\nprint(data)\n\nOutput:\n['Este producto no está disponible en ningún supermercado online.']\n\n" ]
[ 1 ]
[]
[]
[ "beautifulsoup", "python", "replace", "strip", "trim" ]
stackoverflow_0074589213_beautifulsoup_python_replace_strip_trim.txt
Q: Why am I getting this error when I am trying to install pygame through cmd and pip? Microsoft Windows [Version 10.0.19044.2251] (c) Microsoft Corporation. All rights reserved. C:\Users\User>py -3.10 -m pip install pygame Collecting pygame Using cached pygame-2.1.2.tar.gz (10.1 MB) Preparing metadata (setup.py) ... error error: subprocess-exited-with-error × python setup.py egg_info did not run successfully. │ exit code: 1 ╰─> [77 lines of output] WARNING, No "Setup" File Exists, Running "buildconfig/config.py" Using WINDOWS configuration... output in image attached [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: metadata-generation-failed × Encountered error while generating package metadata. ╰─> See above for output. note: This is an issue with the package mentioned above, not pip. hint: See above for details. C:\Users\User> I also tried just using pip install pygame, but I keep getting the same error. Using Windows 10 Pro. Here is an image of all the errors I got: errors ->
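No accepted answer is recorded for this question, so the following is an assumption on my part rather than something from the thread: the log shows pip falling back to building the pygame 2.1.2 source tarball instead of using a prebuilt wheel, and a common workaround is to upgrade pip first and then explicitly refuse source builds so that a binary wheel must be used.

py -3.10 -m pip install --upgrade pip
py -3.10 -m pip install --only-binary :all: pygame

If no compatible wheel exists for your Python version, the second command fails fast instead of attempting the compile step that produced the error above.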
Why am I getting this error when I am trying to install pygame through cmd and pip?
Microsoft Windows [Version 10.0.19044.2251] (c) Microsoft Corporation. All rights reserved. C:\Users\User>py -3.10 -m pip install pygame Collecting pygame Using cached pygame-2.1.2.tar.gz (10.1 MB) Preparing metadata (setup.py) ... error error: subprocess-exited-with-error × python setup.py egg_info did not run successfully. │ exit code: 1 ╰─> [77 lines of output] WARNING, No "Setup" File Exists, Running "buildconfig/config.py" Using WINDOWS configuration... output in image attached [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: metadata-generation-failed × Encountered error while generating package metadata. ╰─> See above for output. note: This is an issue with the package mentioned above, not pip. hint: See above for details. C:\Users\User> I also tried just using pip install pygame, but I keep getting the same error. Using Windows 10 Pro. Here is an image of all the errors I got: errors ->
[]
[]
[ "just run this command its installs the pygame command on windows\npip install pygame\n\n" ]
[ -1 ]
[ "pygame", "python", "windows" ]
stackoverflow_0074589224_pygame_python_windows.txt
Q: can't install ta-lib on amazon sagemaker I can't manage to install TA-LIB on amazon sage maker; the error is very weird: Making install in src /bin/sh: line 17: cd: src: Not a directory make: *** [install-recursive] Error 1 bash-4.2$ ./configure checking for a BSD-compatible install... /usr/bin/install -c checking whether build environment is sane... yes checking for a thread-safe mkdir -p... /bin/mkdir -p checking for gawk... gawk checking whether make sets $(MAKE)... yes checking for gcc... gcc checking for C compiler default output file name... a.out checking whether the C compiler works... yes checking whether we are cross compiling... no checking for suffix of executables... checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether gcc accepts -g... yes checking for gcc option to accept ISO C89... none needed checking for style of include used by make... GNU checking dependency style of gcc... gcc3 checking build system type... x86_64-unknown-linux-gnu checking host system type... x86_64-unknown-linux-gnu checking for a sed that does not truncate output... /bin/sed checking for grep that handles long lines and -e... /bin/grep checking for egrep... /bin/grep -E checking for ld used by gcc... /usr/bin/ld checking if the linker (/usr/bin/ld) is GNU ld... yes checking for /usr/bin/ld option to reload object files... -r checking for BSD-compatible nm... /usr/bin/nm -B checking whether ln -s works... yes checking how to recognise dependent libraries... pass_all checking how to run the C preprocessor... gcc -E checking for ANSI C header files... yes checking for sys/types.h... yes checking for sys/stat.h... yes checking for stdlib.h... yes checking for string.h... yes checking for memory.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking dlfcn.h usability... yes checking dlfcn.h presence... yes checking for dlfcn.h... yes checking for g++... g++ checking whether we are using the GNU C++ compiler... yes checking whether g++ accepts -g... yes checking dependency style of g++... gcc3 checking how to run the C++ preprocessor... g++ -E checking for g77... no checking for xlf... no checking for f77... no checking for frt... no checking for pgf77... no checking for cf77... no checking for fort77... no checking for fl32... no checking for af77... no checking for xlf90... no checking for f90... no checking for pgf90... no checking for pghpf... no checking for epcf90... no checking for gfortran... gfortran checking whether we are using the GNU Fortran 77 compiler... yes checking whether gfortran accepts -g... yes checking the maximum length of command line arguments... 32768 checking command to parse /usr/bin/nm -B output from gcc object... ok checking for objdir... .libs checking for ar... ar checking for ranlib... ranlib checking for strip... strip checking if gcc supports -fno-rtti -fno-exceptions... no checking for gcc option to produce PIC... -fPIC checking if gcc PIC flag -fPIC works... yes checking if gcc static flag -static works... no checking if gcc supports -c -o file.o... yes checking whether the gcc linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking whether -lc should be explicitly linked in... no checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate checking whether stripping libraries is possible... 
yes checking if libtool supports shared libraries... yes checking whether to build shared libraries... yes checking whether to build static libraries... yes configure: creating libtool appending configuration tag "CXX" to libtool checking for ld used by g++... /usr/bin/ld -m elf_x86_64 checking if the linker (/usr/bin/ld -m elf_x86_64) is GNU ld... yes checking whether the g++ linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking for g++ option to produce PIC... -fPIC checking if g++ PIC flag -fPIC works... yes checking if g++ static flag -static works... no checking if g++ supports -c -o file.o... yes checking whether the g++ linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate appending configuration tag "F77" to libtool checking if libtool supports shared libraries... yes checking whether to build shared libraries... yes checking whether to build static libraries... yes checking for gfortran option to produce PIC... -fPIC checking if gfortran PIC flag -fPIC works... yes checking if gfortran static flag -static works... no checking if gfortran supports -c -o file.o... yes checking whether the gfortran linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate checking for dlopen in -ldl... yes checking for pthread_create in -lpthread... yes checking for ANSI C header files... (cached) yes checking float.h usability... yes checking float.h presence... yes checking for float.h... yes checking for inttypes.h... (cached) yes checking limits.h usability... yes checking limits.h presence... yes checking for limits.h... yes checking locale.h usability... yes checking locale.h presence... yes checking for locale.h... yes checking stddef.h usability... yes checking stddef.h presence... yes checking for stddef.h... yes checking for stdint.h... (cached) yes checking for stdlib.h... (cached) yes checking for string.h... (cached) yes checking for unistd.h... (cached) yes checking wchar.h usability... yes checking wchar.h presence... yes checking for wchar.h... yes checking wctype.h usability... yes checking wctype.h presence... yes checking for wctype.h... yes checking for an ANSI C-conforming const... yes checking for size_t... yes checking whether struct tm is in sys/time.h or time.h... time.h checking for working volatile... yes checking for ptrdiff_t... yes checking return type of signal handlers... void checking for working strcoll... yes checking for strftime... yes checking for working strtod... yes checking for vprintf... yes checking for _doprnt... no checking for floor... no checking for isascii... yes checking for localeconv... yes checking for mblen... yes checking for memmove... yes checking for memset... yes checking for modf... yes checking for pow... no checking for sqrt... no checking for strcasecmp... yes checking for strchr... yes checking for strerror... yes checking for strncasecmp... yes checking for strrchr... yes checking for strstr... yes checking for strtol... yes checking for strtoul... 
yes configure: creating ./config.status config.status: creating Makefile config.status: creating src/Makefile config.status: creating src/ta_abstract/Makefile config.status: creating src/ta_common/Makefile config.status: creating src/ta_func/Makefile config.status: creating src/tools/Makefile config.status: creating src/tools/gen_code/Makefile config.status: creating src/tools/ta_regtest/Makefile config.status: creating ta-lib-config config.status: creating ta-lib.spec config.status: creating ta-lib.dpkg config.status: creating include/ta_config.h config.status: include/ta_config.h is unchanged config.status: executing depfiles commands bash-4.2$ make Making all in src make[1]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' Making all in ta_abstract make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_abstract' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_abstract' Making all in ta_common make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_common' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_common' Making all in ta_func make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_func' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_func' make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' make[2]: Nothing to be done for `all-am'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' make[1]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' Making all in src/tools make[1]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' Making all in gen_code make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' make gen_code make[3]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' make[3]: `gen_code' is up to date. make[3]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' cp gen_code ../../../bin make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' Making all in ta_regtest make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/ta_regtest' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/ta_regtest' make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' make[2]: Nothing to be done for `all-am'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' make[1]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' make[1]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib' make[1]: Nothing to be done for `all-am'. 
make[1]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib' bash-4.2$ sudo make install Making install in src /bin/sh: line 17: cd: src: Not a directory make: *** [install-recursive] Error 1 A: As was discussed at TA-Lib's python wrapper project's issue pages, one may install ta-lib to a folder that's writable with the current user's permissions: mkdir ~/ta-lib-bin ./configure --prefix=~/ta-lib-bin make make install And then instruct the wrapper to search for TA-Lib headers and binaries in this folder: export TA_INCLUDE_PATH=~/ta-lib-bin/include export TA_LIBRARY_PATH=~/ta-lib-bin/lib pip install ta-lib This works around the problem. A: Follow the steps from https://github.com/mrjbq7/ta-lib $ tar -xzf ta-lib-0.4.0-src.tar.gz $ cd ta-lib/ $ ./configure --prefix=/usr $ make $ sudo make install Before running pip install, use whereis ta-lib to find the installed location, then navigate to the respective location. yum install python3-wheel.noarch yum install python3-devel.x86_64 Then python3 -m pip install ta-lib and it should work.
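A quick sanity check, my own sketch rather than part of either answer: once the wrapper installs, import it and run one indicator over dummy data. If the underlying C library cannot be found at runtime, the import itself fails, which separates wrapper problems from library problems.

import numpy as np
import talib

prices = np.random.random(100)            # dummy close prices (float64)
sma = talib.SMA(prices, timeperiod=10)    # simple moving average
print(sma[-3:])                           # last few values; leading entries are NaN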
can't install ta-lib on amazon sagemaker
I can't manage to install TA-LIB on amazon sage maker; the error is very weird: Making install in src /bin/sh: line 17: cd: src: Not a directory make: *** [install-recursive] Error 1 bash-4.2$ ./configure checking for a BSD-compatible install... /usr/bin/install -c checking whether build environment is sane... yes checking for a thread-safe mkdir -p... /bin/mkdir -p checking for gawk... gawk checking whether make sets $(MAKE)... yes checking for gcc... gcc checking for C compiler default output file name... a.out checking whether the C compiler works... yes checking whether we are cross compiling... no checking for suffix of executables... checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether gcc accepts -g... yes checking for gcc option to accept ISO C89... none needed checking for style of include used by make... GNU checking dependency style of gcc... gcc3 checking build system type... x86_64-unknown-linux-gnu checking host system type... x86_64-unknown-linux-gnu checking for a sed that does not truncate output... /bin/sed checking for grep that handles long lines and -e... /bin/grep checking for egrep... /bin/grep -E checking for ld used by gcc... /usr/bin/ld checking if the linker (/usr/bin/ld) is GNU ld... yes checking for /usr/bin/ld option to reload object files... -r checking for BSD-compatible nm... /usr/bin/nm -B checking whether ln -s works... yes checking how to recognise dependent libraries... pass_all checking how to run the C preprocessor... gcc -E checking for ANSI C header files... yes checking for sys/types.h... yes checking for sys/stat.h... yes checking for stdlib.h... yes checking for string.h... yes checking for memory.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking dlfcn.h usability... yes checking dlfcn.h presence... yes checking for dlfcn.h... yes checking for g++... g++ checking whether we are using the GNU C++ compiler... yes checking whether g++ accepts -g... yes checking dependency style of g++... gcc3 checking how to run the C++ preprocessor... g++ -E checking for g77... no checking for xlf... no checking for f77... no checking for frt... no checking for pgf77... no checking for cf77... no checking for fort77... no checking for fl32... no checking for af77... no checking for xlf90... no checking for f90... no checking for pgf90... no checking for pghpf... no checking for epcf90... no checking for gfortran... gfortran checking whether we are using the GNU Fortran 77 compiler... yes checking whether gfortran accepts -g... yes checking the maximum length of command line arguments... 32768 checking command to parse /usr/bin/nm -B output from gcc object... ok checking for objdir... .libs checking for ar... ar checking for ranlib... ranlib checking for strip... strip checking if gcc supports -fno-rtti -fno-exceptions... no checking for gcc option to produce PIC... -fPIC checking if gcc PIC flag -fPIC works... yes checking if gcc static flag -static works... no checking if gcc supports -c -o file.o... yes checking whether the gcc linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking whether -lc should be explicitly linked in... no checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate checking whether stripping libraries is possible... yes checking if libtool supports shared libraries... 
yes checking whether to build shared libraries... yes checking whether to build static libraries... yes configure: creating libtool appending configuration tag "CXX" to libtool checking for ld used by g++... /usr/bin/ld -m elf_x86_64 checking if the linker (/usr/bin/ld -m elf_x86_64) is GNU ld... yes checking whether the g++ linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking for g++ option to produce PIC... -fPIC checking if g++ PIC flag -fPIC works... yes checking if g++ static flag -static works... no checking if g++ supports -c -o file.o... yes checking whether the g++ linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate appending configuration tag "F77" to libtool checking if libtool supports shared libraries... yes checking whether to build shared libraries... yes checking whether to build static libraries... yes checking for gfortran option to produce PIC... -fPIC checking if gfortran PIC flag -fPIC works... yes checking if gfortran static flag -static works... no checking if gfortran supports -c -o file.o... yes checking whether the gfortran linker (/usr/bin/ld -m elf_x86_64) supports shared libraries... yes checking dynamic linker characteristics... GNU/Linux ld.so checking how to hardcode library paths into programs... immediate checking for dlopen in -ldl... yes checking for pthread_create in -lpthread... yes checking for ANSI C header files... (cached) yes checking float.h usability... yes checking float.h presence... yes checking for float.h... yes checking for inttypes.h... (cached) yes checking limits.h usability... yes checking limits.h presence... yes checking for limits.h... yes checking locale.h usability... yes checking locale.h presence... yes checking for locale.h... yes checking stddef.h usability... yes checking stddef.h presence... yes checking for stddef.h... yes checking for stdint.h... (cached) yes checking for stdlib.h... (cached) yes checking for string.h... (cached) yes checking for unistd.h... (cached) yes checking wchar.h usability... yes checking wchar.h presence... yes checking for wchar.h... yes checking wctype.h usability... yes checking wctype.h presence... yes checking for wctype.h... yes checking for an ANSI C-conforming const... yes checking for size_t... yes checking whether struct tm is in sys/time.h or time.h... time.h checking for working volatile... yes checking for ptrdiff_t... yes checking return type of signal handlers... void checking for working strcoll... yes checking for strftime... yes checking for working strtod... yes checking for vprintf... yes checking for _doprnt... no checking for floor... no checking for isascii... yes checking for localeconv... yes checking for mblen... yes checking for memmove... yes checking for memset... yes checking for modf... yes checking for pow... no checking for sqrt... no checking for strcasecmp... yes checking for strchr... yes checking for strerror... yes checking for strncasecmp... yes checking for strrchr... yes checking for strstr... yes checking for strtol... yes checking for strtoul... 
yes configure: creating ./config.status config.status: creating Makefile config.status: creating src/Makefile config.status: creating src/ta_abstract/Makefile config.status: creating src/ta_common/Makefile config.status: creating src/ta_func/Makefile config.status: creating src/tools/Makefile config.status: creating src/tools/gen_code/Makefile config.status: creating src/tools/ta_regtest/Makefile config.status: creating ta-lib-config config.status: creating ta-lib.spec config.status: creating ta-lib.dpkg config.status: creating include/ta_config.h config.status: include/ta_config.h is unchanged config.status: executing depfiles commands bash-4.2$ make Making all in src make[1]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' Making all in ta_abstract make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_abstract' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_abstract' Making all in ta_common make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_common' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_common' Making all in ta_func make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_func' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/ta_func' make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' make[2]: Nothing to be done for `all-am'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' make[1]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src' Making all in src/tools make[1]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' Making all in gen_code make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' make gen_code make[3]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' make[3]: `gen_code' is up to date. make[3]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' cp gen_code ../../../bin make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/gen_code' Making all in ta_regtest make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/ta_regtest' make[2]: Nothing to be done for `all'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools/ta_regtest' make[2]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' make[2]: Nothing to be done for `all-am'. make[2]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' make[1]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib/src/tools' make[1]: Entering directory `/home/sagemaker-user/cache/trading-strat/ta-lib' make[1]: Nothing to be done for `all-am'. make[1]: Leaving directory `/home/sagemaker-user/cache/trading-strat/ta-lib' bash-4.2$ sudo make install Making install in src /bin/sh: line 17: cd: src: Not a directory make: *** [install-recursive] Error 1
[ "As was discussed at TA-Lib's python wrapper project's issue pages one may install ta-lib to the folder that's allowed for writing with current user permissions:\nmkdir ~/ta-lib-bin\n./configure --prefix=~/ta-lib-bin\nmake\nmake install\n\nAnd then instruct the wrapper to search for TA-Lib headers and binaries in this folder:\nexport TA_INCLUDE_PATH=~/ta-lib-bin/include\nexport TA_LIBRARY_PATH=~/ta-lib-bin/lib\npip install ta-lib\n\nThis workarounds the problem.\n", "Follow the steps from https://github.com/mrjbq7/ta-lib\n$ tar -xzf ta-lib-0.4.0-src.tar.gz\n$ cd ta-lib/\n$ ./configure --prefix=/usr\n$ make\n$ sudo make install\n\n\nBefore pip install use whereis ta-lib to find the installed location navigate to the respective location.\nyum install python3-wheel.noarch\nyum install python3-devel.x86_64\n\nThen\npython3 -m pip install ta-lib\n\nit should work\n" ]
[ 1, 0 ]
[]
[]
[ "amazon", "python", "ta_lib" ]
stackoverflow_0070448069_amazon_python_ta_lib.txt
Q: Convert string into datetime.time object Given a string in the format "HH:MM", for example "03:55", that represents 3 hours and 55 minutes, I want to convert it to a datetime.time object for easier manipulation. What would be the easiest way to do that? A: Use datetime.datetime.strptime() and call .time() on the result: >>> datetime.datetime.strptime('03:55', '%H:%M').time() datetime.time(3, 55) The first argument to .strptime() is the string to parse, the second is the expected format. A: >>> datetime.time(*map(int, '03:55'.split(':'))) datetime.time(3, 55) A: It is perhaps less clear to future readers, but the *map method is more than 10 times faster. See below and make an informed decision in your code. If calling this check many times and speed matters, go with the generator ("map"). In [31]: timeit(datetime.strptime('15:00', '%H:%M').time()) 7.76 µs ± 111 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) In [28]: timeit(dtime(*map(int, SHUTDOWN_AT.split(':')))) 696 ns ± 11.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) A: Simply load it as ISO format: >>> from datetime import time >>> time.fromisoformat("03:55") datetime.time(3, 55)
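A compact sketch exercising the approaches from the answers side by side; all three parse the same "HH:MM" string and agree on the result (fromisoformat requires Python 3.7+).

import datetime

s = "03:55"

t1 = datetime.datetime.strptime(s, "%H:%M").time()   # strptime, then .time()
t2 = datetime.time(*map(int, s.split(":")))          # split, map to int, unpack
t3 = datetime.time.fromisoformat(s)                  # ISO parser

assert t1 == t2 == t3 == datetime.time(3, 55)
print(t1)   # 03:55:00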
Convert string into datetime.time object
Given a string in the format "HH:MM", for example "03:55", that represents 3 hours and 55 minutes, I want to convert it to a datetime.time object for easier manipulation. What would be the easiest way to do that?
[ "Use datetime.datetime.strptime() and call .time() on the result:\n>>> datetime.datetime.strptime('03:55', '%H:%M').time()\ndatetime.time(3, 55)\n\nThe first argument to .strptime() is the string to parse, the second is the expected format.\n", ">>> datetime.time(*map(int, '03:55'.split(':')))\ndatetime.time(3, 55)\n\n", "It is perhaps less clear to future readers, but the *map method is more than 10 times faster. See below and make an informed decision in your code. If calling this check many times and speed matters, go with the generator (\"map\").\nIn [31]: timeit(datetime.strptime('15:00', '%H:%M').time())\n7.76 µs ± 111 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n\n\nIn [28]: timeit(dtime(*map(int, SHUTDOWN_AT.split(':'))))\n696 ns ± 11.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n\n", "Simply load if as iso:\n>>> from datetime import time\n>>> time.fromisoformat(\"03:55\")\ndatetime.time(3, 55)\n\n" ]
[ 152, 17, 2, 0 ]
[]
[]
[ "python", "python_datetime", "time" ]
stackoverflow_0014295673_python_python_datetime_time.txt
Q: 'int' does not support indexing I am trying to make a loop for a string that contains 16 numbers; the idea is to multiply all the even-position digits by 2, but while doing that, I get a TypeError. I tried several ways but did not succeed. cardNumber = input("Enter a 16-digit card number:") cardNumber = int(cardNumber.replace(" ","")) #cardNumber = str(cardNumber) print(cardNumber) i = 0 for i in range(0, 16, 2): cardNumber[i] *= 2 print(cardNumber) Can you help me to understand this simple issue? I do not understand why it is not allowed. Thanks for the help. A: You convert cardNumber to an integer, e.g. 4137894711755904. Integers do not have a "digit position", thus cardNumber[i] cannot work. This indexing works on strings, but not on number types. You could convert the string to a list of integers, e.g. card_number= input("Enter a 16-digit card number:") digits = list(map(int, list(card_number.replace(" ","")))) # Line above is short for [int(i) for i in list(card_number.replace(" ",""))] Now indexing works as expected. To get an integer back you can join the list and convert to int again, e.g. number = int(''.join(map(str, digits))) A: The following may meet what you want :) cardNumber = input("Enter a 16-digit card number:").replace(" ","") numbers = [ int(x) for x in cardNumber ] i = 0 for i in range(0, 16, 2): numbers[i] *= 2 print(numbers) var = '' #iterate over the list elements for element in numbers: # converting integer to string and adding into variable var += str(element) # converting back into integer and printing the final result print(int(var))
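Putting the accepted fix together into one runnable sketch of the original goal, doubling every other digit of a 16-digit card number; the sample number is made up.

card_number = "4137 8947 1175 5904"        # made-up 16-digit number

# Strings are indexable, so convert to a list of digits first
digits = [int(ch) for ch in card_number.replace(" ", "")]

for i in range(0, 16, 2):                  # even positions: 0, 2, 4, ...
    digits[i] *= 2

print(digits)
print(int("".join(str(d) for d in digits)))   # back to a single integer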
'int' does not support indexing
I am trying to make a loop for a string that contains 16 numbers; the idea is to multiply all the even-position digits by 2, but while doing that, I get a TypeError. I tried several ways but did not succeed. cardNumber = input("Enter a 16-digit card number:") cardNumber = int(cardNumber.replace(" ","")) #cardNumber = str(cardNumber) print(cardNumber) i = 0 for i in range(0, 16, 2): cardNumber[i] *= 2 print(cardNumber) Can you help me to understand this simple issue? I do not understand why it is not allowed. Thanks for the help.
[ "You convert cardnumber to an integer, e.g. 4137894711755904. Integers do not have a \"digit position\", thus cardNumber[i] cannot work. This indexing works on strings, but not on number types.\nYou could convert the string to a list of integers, e.g.\ncard_number= input(\"Enter a 16-digit card number:\")\ndigits = list(map(int, list(card_number.replace(\" \",\"\")))) \n# Line above is short for [int(i) for i in list(card_number.replace(\" \",\"\"))]\n\nNow indexing works as exected.\nTo get an integer back you can join the list and convert to int again, e.g.\nnumber = int(''.join(map(str, digits)))\n\n", "the following maybe meet what you want :)\ncardNumber = input(\"Enter a 16-digit card number:\").replace(\" \",\"\")\nnumbers = [ int(x) for x in cardNumber ]\ni = 0\nfor i in range(0, 16, 2):\n numbers[i] *= 2\n \nprint(numbers)\n\nvar = '' \n#iterate over the list elements\nfor element in numbers: \n # converting integer to string and adding into variable\n var += str(element)\n \n# converting back into integer and printing the final result\nprint(int(var))\n\n" ]
[ 1, 1 ]
[]
[]
[ "integer", "python", "string" ]
stackoverflow_0074589198_integer_python_string.txt
Q: MicroPython on Wemos D1 esp8266:- TypeError: can't convert Pin to int Code: from machine import Pin from machine import ADC from time import sleep_ms x = ADC(Pin(4, Pin.IN)) y = ADC(Pin(5, Pin.IN)) x.atten(ADC.ATTN_11DB) y.atten(ADC.ATTN_11DB) while True: x_val = x.read() y_val = y.read() print('Current position:{},{}'.format(x_val,y_val)) sleep_ms(300) Error: Traceback (most recent call last): File "<stdin>", line 5, in <module> TypeError: can't convert Pin to int I want to know what my mistake is. I tried to concatenate str to int and int to str. It did not work. A: The TypeError means you are using an object (a variable) of a different type than expected. The error tells you that, in this case, on line 5, an int is expected, but a Pin was used in your code. Line 5 of your code is: x = ADC(Pin(4, Pin.IN)) and it contains a Pin indeed. Looking into the documentation of the machine.ADC class shows this should be fine: class machine.ADC(id, *, sample_ns, atten) Access the ADC associated with a source identified by id. This id may be an integer (usually specifying a channel number), a Pin object, or other value supported by the underlying machine. However, has the library always supported this? Browsing through older versions of the library shows this has not always been the case. Up to version 1.11 it expected an int: class machine.ADC(id=0, *, bits=12) Create an ADC object associated with the given pin. So I assume you are using an old version of the library. To fix the problem you should either update your library to the latest version (at least v1.12), or you should read the documentation of the version you are using and update your code accordingly. For older versions, you should write x = ADC(4) y = ADC(5)
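A MicroPython-only sketch (it needs the machine module, so it runs on the board rather than on desktop Python) of a construction that tolerates both API generations described in the answer; guarding the attenuation call is my own addition, since atten() only exists on some ports such as the ESP32.

from machine import Pin, ADC

def make_adc(pin_no):
    try:
        adc = ADC(Pin(pin_no))    # newer firmware accepts a Pin object
    except TypeError:
        adc = ADC(pin_no)         # older firmware (up to v1.11) wants an int id
    if hasattr(adc, 'atten'):     # atten() is port-specific (e.g. ESP32)
        adc.atten(ADC.ATTN_11DB)
    return adc

x = make_adc(4)
print(x.read())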
MicroPython on Wemos D1 esp8266:- TypeError: can't convert Pin to int
Code: from machine import Pin from machine import ADC from time import sleep_ms x = ADC(Pin(4, Pin.IN)) y = ADC(Pin(5, Pin.IN)) x.atten(ADC.ATTN_11DB) y.atten(ADC.ATTN_11DB) while True: x_val = x.read() y_val = y.read() print('Current position:{},{}'.format(x_val,y_val)) sleep_ms(300) Error: Traceback (most recent call last): File "<stdin>", line 5, in <module> TypeError: can't convert Pin to int I want to know what my mistake is. I tried to concatenate str to int and int to str. It did not work.
[ "The TypeError means you are using an object (a variable) of a different type then expected. The error tells you that, in this case, on line 5, an int is expected, but a Pin was used in your code.\nLine 5 of your code is:\nx = ADC(Pin(4, Pin.IN))\n\nand it contains a Pin indeed.\nLooking into the documentation of the machine.ADC class shows this should be fine:\n\nclass machine.ADC(id, *, sample_ns, atten)\nAccess the ADC associated with a source identified by id. This id may be an integer (usually specifying a channel number), a Pin object, or other value supported by the underlying machine.\n\nHowever, has the library always supported this?\nBrowsing through older versions of the library shows this has not always been the case. Up to version 1.11 it expected an int:\n\nclass machine.ADC(id=0, *, bits=12)\nCreate an ADC object associated with the given pin.\n\nSo I assume you are using an old version of the library. To fix the problem you should either update your library to the latest version (at least v1.12), or you should read the documentation of the version you are using and update your code accordingly. For older versions, you should write\nx = ADC(4)\ny = ADC(5)\n\n" ]
[ 2 ]
[]
[]
[ "esp8266", "micropython", "python", "typeerror" ]
stackoverflow_0074589133_esp8266_micropython_python_typeerror.txt
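A short sketch of the fallback suggested in the answer above for MicroPython ports whose machine.ADC constructor only accepts an integer id; the channel numbers 4 and 5 are carried over from the question and may need adapting to the board.
from machine import ADC
from time import sleep_ms

x = ADC(4)  # pass the channel number directly where ADC(Pin(...)) is unsupported
y = ADC(5)
while True:
    print('Current position:{},{}'.format(x.read(), y.read()))
    sleep_ms(300)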
Q: How do I understand AD9833 SPI communication using Python with my Raspberry Pi? Hi, I have an issue to discuss: I don't really understand how to send data over SPI with Python. I want to use my Raspberry Pi 4 ver. B to send data to my module, an AD9833 DDS. I found code on the internet, written in Python (source: https://ez.analog.com/dds/f/q-a/28431/ad9833-programming-in-raspberry-pi-using-python). This is the code: # The code written by SamMaster, Oct 21 2016 # importing library import time import spidev # activate spidev module and settings SPI spi = spidev.SpiDev() spi.open(0,1) spi.max_speed_hz = 976000 # initialize frequency and another value freq_out = 400 Two28 = 268435456 phase = 0 After the programmer imports the libraries and sets the values, he tries to define a function to send data. def send_data(input): tx_msb = input >> 8 tx_lsb = input & 0xFF spi.xfer([tx_msb,tx_lsb]) print(input) So that the AD9833 can read this frequency value, the frequency must be converted to a frequency word, so the programmer writes, freq_word = int(round(float(freq_out*Two28)/25000000)) and then the programmer defines the MSB and LSB MSB = (freq_word & 0xFFC000)>>14 LSB = (freq_word & 0x3FFF) LSB |= 0x4000 MSB |= 0x4000 phase|= 0xC000 and then the function that the programmer built is applied in this block of code send_data(LSB) send_data(MSB) send_data(phase) send_data(0x2000) It worked on my Raspberry Pi 4; this is the result on my device, Result for 400Hz Result for 500Hz When I change the frequency to freq_out = 500 there is no change; the value stays at approximately 400 Hz on my scope. So I tried this simple solution: I put the line send_data(0x2000) (0x2000 means Reset AD9833 according to the datasheet) above the send_data(LSB) line. So the code became, send_data(0x2000) send_data(LSB) send_data(MSB) send_data(phase) and this is the result, freq_out = 400 freq_out = 400 freq_out = 500 freq_out = 500 freq_out = 600 freq_out = 600 freq_out = 1000 freq_out = 1000 I don't know why, when I write freq_out = 600, the output frequency does not match what I input. Can anyone comment on or explain my issue? A: This problem can be split into a number of subtasks. Values to send Sequence values sent in How values sent over SPI As SamMaster pointed out there is an application note from Analog Devices that shows the sequence of values to send to set the frequency to 400 Hz https://www.analog.com/media/en/technical-documentation/application-notes/AN-1070.pdf They summarise the five values to send and in what order in the following table: If I look at the code that SamMaster has written it is writing the correct values in the correct order (I don't have the hardware but I can print the values). sending: [0x21, 0x00] sending: [0x50, 0xc7] sending: [0x40, 0x00] sending: [0xc0, 0x00] sending: [0x20, 0x00] That just leaves bullet 3 that is causing the problems. The fact that you get changes happening on the hardware suggests that some kind of communication is happening, just not the correct values. Looking at the limited documentation at https://pypi.org/project/spidev/ there are two likely commands that could be used: xfer or xfer2. The difference between the two is the value of the chip select pin between blocks. Figure 4 in the data sheet I think is saying that chip select should not be released between the two bytes.
https://www.analog.com/media/en/technical-documentation/data-sheets/ad9833.pdf That would suggest that xfer2 should be used to send the blocks and not xfer as SamMaster has done. Although SamMaster seems to suggest he got it working with xfer and you were able to set the value to 400 Hz successfully. You would need your scope/logic analyser to see if the GPIO is doing the right thing on the hardware. At some point in your development you seem to have changed the sequence of values to send. It should be: send_data(0x2100) # Start send_data(LSB) # Frequency 14 bits (LSB) send_data(MSB) # Frequency 14 bits (MSB) send_data(phase) # Phase value send_data(0x2000) # End This could be another source of your error. I looked at what the values should be that get sent for the different frequencies you tested. I calculated the values as follows: For Frequency: 400 Frequency setting: 4295 = 0x10c7 = 0001000011000111 send_data(0x2100) send_data(0x50c7) send_data(0x4000) send_data(0xc000) send_data(0x2000) For Frequency: 500 Frequency setting: 5369 = 0x14f9 = 0001010011111001 send_data(0x2100) send_data(0x54f9) send_data(0x4000) send_data(0xc000) send_data(0x2000) For Frequency: 600 Frequency setting: 6442 = 0x192a = 0001100100101010 send_data(0x2100) send_data(0x592a) send_data(0x4000) send_data(0xc000) send_data(0x2000) For Frequency: 1000 Frequency setting: 10737 = 0x29f1 = 0010100111110001 send_data(0x2100) send_data(0x69f1) send_data(0x4000) send_data(0xc000) send_data(0x2000) And finally, I refactored the code to make it easier for me to test the different parts of the code. I'm sharing it here for your information. I had to comment out any of the spi communication parts because I don't have the hardware. import time import spidev # activate spidev module and settings SPI bus = 0 device = 1 spi = spidev.SpiDev() spi.open(bus, device) spi.max_speed_hz = 976000 def output_freq(hz_value): return int(round((hz_value * 2**28) / 25e6)) def freq_change_start(): ctrl_reg = 0 ctrl_reg += 2**13 # set DB13 (28 bit freq reg) ctrl_reg += 2**8 # set DB8 (Reset) return ctrl_reg def freq_reg_lsb(freq_reg): fourteen_bit_mask = 0b0011111111111111 write_value = 0 write_value += 2**14 # set DB14 lsb = freq_reg & fourteen_bit_mask write_value += lsb return write_value def freq_reg_msb(freq_reg): fourteen_bit_mask = 0b0011111111111111 write_value = 0 write_value += 2**14 # set DB14 msb = freq_reg >> 14 & fourteen_bit_mask write_value += msb return write_value def phase_register(): # Currently always the same value write_value = 0 # Set phase register address write_value += 2 ** 15 # set DB15 write_value += 2 ** 14 # set DB14 return write_value def freq_change_end(): ctrl_reg = 0 ctrl_reg += 2**13 # set DB13 (28 bit freq reg) return ctrl_reg def word_split(word16): tx_msb = word16 >> 8 tx_lsb = word16 & 0xFF return tx_msb, tx_lsb def send_spi_sequence(sequence): for word16 in sequence: two_bytes = word_split(word16) print(f"\tsending:[{two_bytes[0]:#02x}, {two_bytes[1]:#02x}]") print(f"\tsend_data({word16:#06x})") spi.xfer(two_bytes) # spi.xfer2(two_bytes) def change_freq(freq_hz): # Calc values to send print("For Frequency:", freq_hz) freq_reg = output_freq(freq_hz) print(f"Frequency setting: {freq_reg} = {freq_reg:#04x} = {freq_reg:016b}") ctrl_start = freq_change_start() print(f"Control register write: {ctrl_start:#04x}") lsb_value = freq_reg_lsb(freq_reg) print(f"lsb value: {lsb_value:#04x}") msb_value = freq_reg_msb(freq_reg) print(f"msb value: {msb_value:#04x}") phase_reg = phase_register() print(f"Phase register
write: {phase_reg:#04x}") ctrl_end = freq_change_end() print(f"Control register write: {ctrl_end:#04x}") # Write values to spi send_spi_sequence([ctrl_start, lsb_value, msb_value, phase_reg, ctrl_end]) def main(): show_freq_for = 20 change_freq(400) time.sleep(show_freq_for) change_freq(500) time.sleep(show_freq_for) change_freq(600) time.sleep(show_freq_for) change_freq(1000) time.sleep(show_freq_for) if __name__ == '__main__': main()
How do I understand AD9833 SPI communication using Python with my Raspberry Pi?
Hi, I have an issue to discuss: I don't really understand how to send data over SPI with Python. I want to use my Raspberry Pi 4 ver. B to send data to my module, an AD9833 DDS. I found code on the internet, written in Python (source: https://ez.analog.com/dds/f/q-a/28431/ad9833-programming-in-raspberry-pi-using-python). This is the code: # The code written by SamMaster, Oct 21 2016 # importing library import time import spidev # activate spidev module and settings SPI spi = spidev.SpiDev() spi.open(0,1) spi.max_speed_hz = 976000 # initialize frequency and another value freq_out = 400 Two28 = 268435456 phase = 0 After the programmer imports the libraries and sets the values, he tries to define a function to send data. def send_data(input): tx_msb = input >> 8 tx_lsb = input & 0xFF spi.xfer([tx_msb,tx_lsb]) print(input) So that the AD9833 can read this frequency value, the frequency must be converted to a frequency word, so the programmer writes, freq_word = int(round(float(freq_out*Two28)/25000000)) and then the programmer defines the MSB and LSB MSB = (freq_word & 0xFFC000)>>14 LSB = (freq_word & 0x3FFF) LSB |= 0x4000 MSB |= 0x4000 phase|= 0xC000 and then the function that the programmer built is applied in this block of code send_data(LSB) send_data(MSB) send_data(phase) send_data(0x2000) It worked on my Raspberry Pi 4; this is the result on my device, Result for 400Hz Result for 500Hz When I change the frequency to freq_out = 500 there is no change; the value stays at approximately 400 Hz on my scope. So I tried this simple solution: I put the line send_data(0x2000) (0x2000 means Reset AD9833 according to the datasheet) above the send_data(LSB) line. So the code became, send_data(0x2000) send_data(LSB) send_data(MSB) send_data(phase) and this is the result, freq_out = 400 freq_out = 400 freq_out = 500 freq_out = 500 freq_out = 600 freq_out = 600 freq_out = 1000 freq_out = 1000 I don't know why, when I write freq_out = 600, the output frequency does not match what I input. Can anyone comment on or explain my issue?
[ "This problem can be split into a number of sub tasks.\n\nValues to send\nSequence values sent in\nHow values sent over SPI\n\nAs SamMaster pointed out there is an application note from Analog Devices that shows the sequence of values to send to set the frequency to 400 Hz\nhttps://www.analog.com/media/en/technical-documentation/application-notes/AN-1070.pdf\nThey summarise the five values to send and in what order in the following table:\n\nIf I look at the code that SamMaster has written it is writing the correct values in the correct order (I don't have the hardware but I can print the values).\nsending: [0x21, 0x00]\nsending: [0x50, 0xc7]\nsending: [0x40, 0x00]\nsending: [0xc0, 0x00]\nsending: [0x20, 0x00]\n\nThat just leaves bullet 3 that is causing the problems.\nThe fact that you get changes happening on the hardware suggests that some kind of communication is happening, just not the correct values.\nLooking at the limited documentation at https://pypi.org/project/spidev/ there are two likely commands that could be used: xfer or xfer2.\nThe difference between the two are the value of the chip select pin between blocks.\nFigure 4 in the data sheet I think is saying that chip select should not be released between the two bytes.\nhttps://www.analog.com/media/en/technical-documentation/data-sheets/ad9833.pdf\n\nThat would suggest that xfer2 should be to used to send the blocks and not xfer as SamMaster has done. Although SamMaster seems to suggest he got it working with xfer and you were able to set the value to 400 Hz successfully. You would need your scope/logic analyser to see if the GPIO is doing the right thing on the hardware.\nAt some point in your development you seem to have changed the sequence of values to send. It should be:\nsend_data(0x2100) # Start\nsend_data(LSB) # Frequency 14 bits (LSB)\nsend_data(MSB) # Frequency 14 bits (MSB)\nsend_data(phase) # Phase value\nsend_data(0x2000) # End\n\nThis could be another source of your error.\nI looked at what the values should be that get sent for the different frequency you tested. I calculated the values follows:\nFor Frequency: 400\nFrequency setting: 4295 = 0x10c7 = 0001000011000111\n send_data(0x2100)\n send_data(0x50c7)\n send_data(0x4000)\n send_data(0xc000)\n send_data(0x2000)\nFor Frequency: 500\nFrequency setting: 5369 = 0x14f9 = 0001010011111001\n send_data(0x2100)\n send_data(0x54f9)\n send_data(0x4000)\n send_data(0xc000)\n send_data(0x2000)\nFor Frequency: 600\nFrequency setting: 6442 = 0x192a = 0001100100101010\n send_data(0x2100)\n send_data(0x592a)\n send_data(0x4000)\n send_data(0xc000)\n send_data(0x2000)\nFor Frequency: 1000\nFrequency setting: 10737 = 0x29f1 = 0010100111110001\n send_data(0x2100)\n send_data(0x69f1)\n send_data(0x4000)\n send_data(0xc000)\n send_data(0x2000)\n\nAnd finally, I refactored the code to make it easier for me to test the different parts of the code. I'm sharing it here for your information. 
I had to comment out any of the spi communication parts because I don't have the hardware.\nimport time\nimport spidev\n\n# activate spidev module and settings SPI\nbus = 0\ndevice = 1\nspi = spidev.SpiDev()\nspi.open(bus, device)\nspi.max_speed_hz = 976000\n\n\ndef output_freq(hz_value):\n return int(round((hz_value * 2**28) / 25e6))\n\n\ndef freq_change_start():\n ctrl_reg = 0\n ctrl_reg += 2**13 # set DB13 (28 bit freq reg)\n ctrl_reg += 2**8 # set DB8 (Reset)\n return ctrl_reg\n\n\ndef freq_reg_lsb(freq_reg):\n fourteen_bit_mask = 0b0011111111111111\n write_value = 0\n write_value += 2**14 # set DB14\n lsb = freq_reg & fourteen_bit_mask\n write_value += lsb\n return write_value\n\n\ndef freq_reg_msb(freq_reg):\n fourteen_bit_mask = 0b0011111111111111\n write_value = 0\n write_value += 2**14 # set DB14\n msb = freq_reg >> 14 & fourteen_bit_mask\n write_value += msb\n return write_value\n\n\ndef phase_register():\n # Currently always the same value\n write_value = 0\n # Set phase register address\n write_value += 2 ** 15 # set DB15\n write_value += 2 ** 14 # set DB14\n return write_value\n\n\ndef freq_change_end():\n ctrl_reg = 0\n ctrl_reg += 2**13 # set DB13 (28 bit freq reg)\n return ctrl_reg\n\n\ndef word_split(word16):\n tx_msb = word16 >> 8\n tx_lsb = word16 & 0xFF\n return tx_msb, tx_lsb\n\n\ndef send_spi_sequence(sequence):\n for word16 in sequence:\n two_bytes = word_split(word16)\n print(f\"\\tsending:[{two_bytes[0]:#02x}, {two_bytes[1]:#02x}]\")\n print(f\"\\tsend_data({word16:#06x})\")\n spi.xfer(two_bytes)\n # spi.xfer2(two_bytes)\n\n\ndef change_freq(freq_hz):\n # Calc values to send\n print(\"For Frequency:\", freq_hz)\n freq_reg = output_freq(freq_hz)\n print(f\"Frequency setting: {freq_reg} = {freq_reg:#04x} = {freq_reg:016b}\")\n ctrl_start = freq_change_start()\n print(f\"Control register write: {ctrl_start:#04x}\")\n lsb_value = freq_reg_lsb(freq_reg)\n print(f\"lsb value: {lsb_value:#04x}\")\n msb_value = freq_reg_msb(freq_reg)\n print(f\"lsb value: {msb_value:#04x}\")\n phase_reg = phase_register()\n print(f\"Phase register write: {phase_reg:#04x}\")\n ctrl_end = freq_change_end()\n print(f\"Control register write: {ctrl_end:#04x}\")\n\n # Write values to spi\n send_spi_sequence([ctrl_start, lsb_value, msb_value, phase_reg, ctrl_end])\n\n\ndef main():\n show_freq_for = 20\n change_freq(400)\n time.sleep(show_freq_for)\n change_freq(500)\n time.sleep(show_freq_for)\n change_freq(600)\n time.sleep(show_freq_for)\n change_freq(1000)\n time.sleep(show_freq_for)\n\n\nif __name__ == '__main__':\n main()\n\n\n" ]
[ 0 ]
[]
[]
[ "interface", "python", "raspberry_pi", "spidev" ]
stackoverflow_0074545102_interface_python_raspberry_pi_spidev.txt
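A small desk-check sketch (plain CPython, no hardware or spidev required) that reproduces the register values quoted in the answer above, so the 28-bit frequency-word arithmetic can be verified independently; names are illustrative.
def ad9833_words(freq_hz, mclk=25_000_000):
    freq_word = round(freq_hz * 2**28 / mclk)    # 28-bit frequency word
    lsb = (freq_word & 0x3FFF) | 0x4000          # low 14 bits plus FREQ0 address bits
    msb = ((freq_word >> 14) & 0x3FFF) | 0x4000  # high 14 bits plus FREQ0 address bits
    return freq_word, lsb, msb

for hz in (400, 500, 600, 1000):
    word, lsb, msb = ad9833_words(hz)
    print(f"{hz} Hz -> word={word:#06x}, lsb={lsb:#06x}, msb={msb:#06x}")
# 400 Hz gives word=0x10c7, lsb=0x50c7, msb=0x4000, matching the answer's table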
Q: Morse Code Translator - Calling a function within an if statement using Python `I'm trying to write code that starts with a question asking the user if they want to encode or decode to/from morse. Based on their response (1 or 2), it runs through an if statement, and will call the required function(s). It will take a user's input via user_input() and either return it in morse code, or return it in English, based on their choice to encode or decode. The encoding aspect works, but I cannot get the decode_morse() function to work within the entire program. I'm getting an error on the calling of function encode_or_decode() at the bottom, and also 'TypeError: decode_morse() missing 1 required positional argument: 'data'' # Dictionary representing English to morse code chart ENG_TO_MORSE_DICT = {'a':'.-', 'b':'-...', 'c':'-.-.', 'd':'-..', 'e':'.', 'f':'..-.', 'g':'--.', 'h':'....', 'i':'..', 'j':'.---', 'k':'-.-', 'l':'.-..', 'm':'--', 'n':'-.', 'o':'---', 'p':'.--.', 'q':'--.-', 'r':'.-.', 's':'...', 't':'-', 'u':'..-', 'v':'...-', 'w':'.--', 'x':'-..-', 'y':'-.--', 'z':'--..', ' ':'/', 'A':'.-', 'B':'-...', 'C':'-.-.', 'D':'-..', 'E':'.', 'F':'..-.', 'G':'--.','H':'....', 'I':'..', 'J':'.---', 'K':'-.-', 'L':'.-..', 'M':'--', 'N':'-.', 'O':'---', 'P':'.--.', 'Q':'--.-', 'R':'.-.', 'S':'...', 'T':'-', 'U':'..-', 'V':'...-', 'W':'.--', 'X':'-..-', 'Y':'-.--', 'Z':'--..' } # Dictionary representing morse code to English MORSE_TO_ENG_DICT = { ".-": "A", "-...": "B", "-.-.": "C", "-..": "D", ".": "E", "..-.": "F", "--.": "G", "....": "H", "..": "I", ".---": "J", "-.-": "K", ".-..": "L", "--": "M", "-.": "N", "---": "O", ".--.": "P", " --.-": "Q", ".-.": "R", "...": "S", "-": "T", "..-": "U", "...-": "V", ".--": "W", "-..-": "X", "-.--": "Y", "--..": "Z", "/":' ' } def encode_or_decode(): choice = int(input("Please select 1 to encode to morse, or 2 to decode from morse ")) if choice == 1: message_to_encode() elif choice == 2: decode_morse() else: print("Please select option 1 or option 2") # Defining a global variable for user's input to be used within multiple functions def user_input(): global data data = str(input("What message do you want to translate using the Morse cipher? ")) def morse_encrypt(data): for letter in data: print(ENG_TO_MORSE_DICT[letter], end = ' ') # Defining a function for user-inputted data, using isalpha method to mandate only alphabet letters & spaces as input def message_to_encode(): user_input() if data.replace(' ', '').isalpha(): morse_encrypt(data) else: print("Only text allowed in message") def decode_morse(): results = [] for item in data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) return results.lower() def decode_morse(data): results = [] for item in data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) return results.lower() encode_or_decode() I've tried running a similar decode function in isolation with its own user input and this works fine... but I don't want to duplicate the user input function in the main program, so have tried to use the data variable from the user_input(_) function, which throws up errors. 
MORSE_TO_ENG_DICT = { ".-": "A", "-...": "B", "-.-.": "C", "-..": "D", ".": "E", "..-.": "F", "--.": "G", "....": "H", "..": "I", ".---": "J", "-.-": "K", ".-..": "L", "--": "M", "-.": "N", "---": "O", ".--.": "P", " --.-": "Q", ".-.": "R", "...": "S", "-": "T", "..-": "U", "...-": "V", ".--": "W", "-..-": "X", "-.--": "Y", "--..": "Z", "/":' ' } def decode_morse(morse_data): results = [] for item in morse_data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) return results.lower() morse_data = str(input("What morse message do you want to decode using the Morse cipher? ")) print(decode_morse(morse_data)) A: The function decode_morse() is defined twice: one with a parameter, and one without. Try to change the names of the functions. A: I managed to figure it out - I needed to print the results of the decode_morse() function and remove 'return results.lower()' as this stopped the print from executing. The correct code was: def decode_morse(data): results = [] for item in data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) print(results.lower()) encode_or_decode()
Morse Code Translator - Calling a function within an if statement using Python
`I'm trying to write code that starts with a question asking the user if they want to encode or decode to/from morse. Based on their response (1 or 2), it runs through an if statement, and will call the required function(s). It will take a user's input via user_input() and either return it in morse code, or return it in English, based on their choice to encode or decode. The encoding aspect works, but I cannot get the decode_morse() function to work within the entire program. I'm getting an error on the calling of function encode_or_decode() at the bottom, and also 'TypeError: decode_morse() missing 1 required positional argument: 'data'' # Dictionary representing English to morse code chart ENG_TO_MORSE_DICT = {'a':'.-', 'b':'-...', 'c':'-.-.', 'd':'-..', 'e':'.', 'f':'..-.', 'g':'--.', 'h':'....', 'i':'..', 'j':'.---', 'k':'-.-', 'l':'.-..', 'm':'--', 'n':'-.', 'o':'---', 'p':'.--.', 'q':'--.-', 'r':'.-.', 's':'...', 't':'-', 'u':'..-', 'v':'...-', 'w':'.--', 'x':'-..-', 'y':'-.--', 'z':'--..', ' ':'/', 'A':'.-', 'B':'-...', 'C':'-.-.', 'D':'-..', 'E':'.', 'F':'..-.', 'G':'--.','H':'....', 'I':'..', 'J':'.---', 'K':'-.-', 'L':'.-..', 'M':'--', 'N':'-.', 'O':'---', 'P':'.--.', 'Q':'--.-', 'R':'.-.', 'S':'...', 'T':'-', 'U':'..-', 'V':'...-', 'W':'.--', 'X':'-..-', 'Y':'-.--', 'Z':'--..' } # Dictionary representing morse code to English MORSE_TO_ENG_DICT = { ".-": "A", "-...": "B", "-.-.": "C", "-..": "D", ".": "E", "..-.": "F", "--.": "G", "....": "H", "..": "I", ".---": "J", "-.-": "K", ".-..": "L", "--": "M", "-.": "N", "---": "O", ".--.": "P", " --.-": "Q", ".-.": "R", "...": "S", "-": "T", "..-": "U", "...-": "V", ".--": "W", "-..-": "X", "-.--": "Y", "--..": "Z", "/":' ' } def encode_or_decode(): choice = int(input("Please select 1 to encode to morse, or 2 to decode from morse ")) if choice == 1: message_to_encode() elif choice == 2: decode_morse() else: print("Please select option 1 or option 2") # Defining a global variable for user's input to be used within multiple functions def user_input(): global data data = str(input("What message do you want to translate using the Morse cipher? ")) def morse_encrypt(data): for letter in data: print(ENG_TO_MORSE_DICT[letter], end = ' ') # Defining a function for user-inputted data, using isalpha method to mandate only alphabet letters & spaces as input def message_to_encode(): user_input() if data.replace(' ', '').isalpha(): morse_encrypt(data) else: print("Only text allowed in message") def decode_morse(): results = [] for item in data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) return results.lower() def decode_morse(data): results = [] for item in data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) return results.lower() encode_or_decode() I've tried running a similar decode function in isolation with its own user input and this works fine... but I don't want to duplicate the user input function in the main program, so have tried to use the data variable from the user_input(_) function, which throws up errors. 
MORSE_TO_ENG_DICT = { ".-": "A", "-...": "B", "-.-.": "C", "-..": "D", ".": "E", "..-.": "F", "--.": "G", "....": "H", "..": "I", ".---": "J", "-.-": "K", ".-..": "L", "--": "M", "-.": "N", "---": "O", ".--.": "P", " --.-": "Q", ".-.": "R", "...": "S", "-": "T", "..-": "U", "...-": "V", ".--": "W", "-..-": "X", "-.--": "Y", "--..": "Z", "/":' ' } def decode_morse(morse_data): results = [] for item in morse_data.split(' '): results.append(MORSE_TO_ENG_DICT.get(item)) results = ''.join(results) return results.lower() morse_data = str(input("What morse message do you want to decode using the Morse cipher? ")) print(decode_morse(morse_data))
[ "The function decode_morse() is defined twice: one with a parameter, and one without. Try to change the names of the functions.\n", "I managed to figure it out - I needed to print the results of the decode_morse() function and remove 'return results.lower()' as this stopped the print from executing. The correct code was:\ndef decode_morse(data):\n results = []\n for item in data.split(' '):\n results.append(MORSE_TO_ENG_DICT.get(item))\n results = ''.join(results)\n print(results.lower())\nencode_or_decode()\n\n" ]
[ 0, 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074588893_python_python_3.x.txt
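A compact sketch of the corrected flow from the answers above: decode_morse is defined once, takes its input explicitly, and the result is printed. MORSE_TO_ENG_DICT is assumed to be the dictionary from the question.
def decode_morse(data):
    # one definition only; a zero-argument duplicate would shadow this one
    letters = [MORSE_TO_ENG_DICT.get(token, "?") for token in data.split(" ")]
    return "".join(letters).lower()

morse = input("What morse message do you want to decode? ")
print(decode_morse(morse))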
Q: I have a problem adding a String to an Integer I tried everything I could, but somehow it still doesn't work; can anyone help?
I have a problem adding a String to an Integer
I tried everything I could, but somehow it still doesn't work; can anyone help?
[ "Properly write number_one as below:\non line one, it should look like this:\nnumber_one = int(input(\"Type number \"))\n\nYour print statements are having wrong concatenation, you can't add a str to an int so I recommend using f-string or changing the first + in all the print statements to ,. For example:\nprint(f\"The number = {number_one}\") \nprint(f\"The number before = {number_one - 1}\") \nprint(f\"The number after = {number_one + 1}\")\n\n", "We can add string with integer simply by ....\nUsing the str() Function. We can pass an int to the str() function it will be converted to a str : print(current_year_message + str( current_year ) ) ...\nUsing the % Interpolation Operator. We can pass values to a conversion specification with printf-style String Formatting: ...\nUsing the str. format() function\n", "When reading in input with the input function, it will return a string. To turn this string into a integer you can use the int() function as following:\nint(number_one)\n\nBut you have to be cautious, because this will produce an error when number_one is not a number, but a generic string like \"Hello!\"\n" ]
[ 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074589331_python.txt
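A short sketch combining the suggestions above: convert the input with int() and format the output with f-strings, so no str + int concatenation ever happens; the prompt text is illustrative.
number_one = int(input("Type number "))  # input() returns a str; int() converts it
print(f"The number = {number_one}")
print(f"The number before = {number_one - 1}")
print(f"The number after = {number_one + 1}")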
Q: How to get class name in python playwright? How to get a Class name using Playwright and Python? I tried that, but without success. It could also be the color that is contained in the CSS. page.locator('xpath=//*[@id="__next"]/div[1]/div/div[2]/div[1]/div[1]/div/div[1]').Class()
How to get class name in python playwright?
How to get a Class name using Playwright and Python? I tried that, but without success. It could also be the color that is contained in the CSS. page.locator('xpath=//*[@id="__next"]/div[1]/div/div[2]/div[1]/div[1]/div/div[1]').Class()
[ "Try this:\npage.locator('xpath=//*[@id=\"__next\"]/div[1]/div/div[2]/div[1]/div[1]/div/div[1]').get_attribute(\"class\")\n\nWith method get_attribute you can get any attribute from your html element.\n" ]
[ 0 ]
[]
[]
[ "playwright_python", "python", "web_scraping" ]
stackoverflow_0074586465_playwright_python_python_web_scraping.txt
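A sketch extending the answer above, assuming the synchronous Playwright API: get_attribute('class') reads the HTML attribute, and the computed CSS colour can be read by evaluating getComputedStyle in the page; the XPath is copied from the question.
locator = page.locator('xpath=//*[@id="__next"]/div[1]/div/div[2]/div[1]/div[1]/div/div[1]')
class_name = locator.get_attribute("class")                   # the class attribute
color = locator.evaluate("el => getComputedStyle(el).color")  # computed CSS colour
print(class_name, color)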
Q: Convert string to hexadecimal with python How to change a string to hexadecimal with Python? For example, I would like to do string "a2" -> 0xa2 And through this, I want to satisfy the following formula 0x12 ^ 0xa2 = 0xb0 A: There are 2 methods to get the hexadecimal value of a string: Method 1 str = '0xa2' convert_str = int(str, base=16) hex_value = hex(convert_str) Method 2 from ast import literal_eval str = '0xa2' convert_str = literal_eval(str) hex_value = hex(convert_str) Output: 0xa2
Convert string to hexadecimal with python
How to change a string to hexadecimal with Python? For example I would like to do string "a2" -> 0xa2 And through this, I want to satisfy the following formula 0x12 ^ 0xa2 = 0xb0
[ "There are 2 methods to get hexadecimal value of string:\nMethod 1\nstr = '0xa2'\nconvert_str = int(str, base=16)\nhex_value = hex(convert_str)\n\nMethod 2\nfrom ast import literal_eval\nstr = '0xa2'\nconvert_str = literal_eval(str)\nhex_value = hex(convert_str)\n\nOutput:\n0xa2\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074589254_python.txt
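A sketch tying the answer back to the XOR formula in the question: int(..., 16) parses the string (with or without the 0x prefix), the arithmetic then works on plain ints, and hex() is only needed for display.
value = int("a2", 16)            # 162
result = 0x12 ^ value
print(hex(value), hex(result))   # 0xa2 0xb0
assert result == 0xb0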
Q: What is a floating number and its purpose in terms of theory I am just learning python and making a calculator and the tutorial says float instead of int. What is a floating number? Why not just use int? A: A float is a number with digits after the decimal point, for example 12.56. It can also be called a real. An integer is a whole number, e.g. 12. If you use an integer in a calculator, then you wouldn't be able to calculate with anything other than whole numbers. For example, using int is fine if you do 12 + 12. However, if you enter 12.56 + 12.56, then you will get an error, as the int function assumes that the user enters a whole number A: Floats, contrary to int or integer data types, can store numerical data with much more precision. Integers lose information regarding the mantissa or decimal portion of the number. So in applications where precision is required, we use the float data type instead of int. A: welcome to Stackoverflow! This is a question on which you can dive deep, and I would absolutely google this. For example, the Wikipedia page on this will already give you a bunch of information. Now, in order not to overwhelm you with too much information, we can answer your question without too many details: A float is a type with which you can represent decimal numbers (numbers with a comma in there, for example 0.5). An int is a type with which you can only represent integers (1, 2, 50, ...). Don't hesitate to google around for more info!
What is a floating number and its purpose in terms of theory
I am just learning python and making a calculator and the tutorial says float instead of int. What is a floating number? Why not just use int?
[ "A float is a number with numbers after decimal points, for example 12.56 . It can also be called a real.\nAn integer is a whole number eg 12.\nIf you use an integer in a calculator, then you wouldn't be able to calculate with anything other than whole numbers.\nfor example using int is fine if you do 12 + 12. However, if you enter 12.56 + 12.56, then you will get an error, as the int function assumes that the user enters a whole number\n", "Floats, contrary to int or integer data types, can store numerical data with much more precision. Integers lose information regarding to the mantissa or decimal portion of the number. So in applications where precision is required, we use float data type instead of int.\n", "welcome to Stackoverflow!\nThis is a question on which you can dive deep, and I would absolutely google this. For example, the Wikipedia page on this will already give you a bunch of information.\nNow, in order not to overwhelm you with a too much information, we can answer your question without too many details:\nA float is a type with which you can represent decimal numbers (numbers with a comma in there, for example 0.5). An int is a type with which you can only represent integers (1, 2, 50, ...).\nDon't hesitate to google around for more info!\n" ]
[ 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074589392_python.txt
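A two-line illustration of the distinction drawn in the answers above, runnable as-is:
print(int("12") + int("12"))            # 24: whole numbers parse fine as int
print(float("12.56") + float("12.56"))  # 25.12: int("12.56") would raise ValueError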
Q: Selenium WebDriver Unable to Find ChromeDriver Recently I've been trying to do some webscraping, however I am utterly unable to run Selenium's webdriver. I am trying to run this basic boilerplate code: import pandas as pd import requests from bs4 import BeautifulSoup from selenium import webdriver import time web = webdriver.Chrome(service_args=["--verbose", "--log-path=D:\\qc1.log"]) url = 'https://www.google.com/' web.get(url) However this results in the following error: raise WebDriverException(f"Service {self.path} unexpectedly exited. Status code was: {return_code}") selenium.common.exceptions.WebDriverException: Message: Service chromedriver unexpectedly exited. Status code was: 1 From doing some research, this error occurs because ChromeDriver cannot be found. I can confirm that Chrome and Chromedriver are up to date: Chrome Version ChromeDriver Version I can also confirm that I have ChromeDriver successfully added as a PATH environment variable I have tried other solutions, such as using a path instead: import pandas as pd import requests from bs4 import BeautifulSoup from selenium import webdriver import time PATH = 'C:\webdrivers\chromedriver.exe' web = webdriver.Chrome(executable_path=PATH, service_args=["--verbose", "--log-path=D:\\qc1.log"]) url = 'https://www.google.com/' web.get(url) however the same error persists. I have also tried adding options to the WebDriver, but to no avail. When running without service_args added, the webpage will briefly open, before closing itself with no crash information A: You can try the other option of importing the Chromedriver through webdriver_manager like this: from webdriver_manager.chrome import ChromeDriverManager service = ChromeService(executable_path=ChromeDriverManager().install()) driver = webdriver.Chrome(service=service) A: Have you installed the libraries you are using in your project? pip install -U selenium pip install pandas pip install bs4 The rest would be automatically installed into your project if you are using PyCharm.
Selenium WebDriver Unable to Find ChromeDriver
Recently I've been trying to do some webscraping, however I am utterly unable to run Selenium's webdriver. I am trying to run this basic boilerplate code: import pandas as pd import requests from bs4 import BeautifulSoup from selenium import webdriver import time web = webdriver.Chrome(service_args=["--verbose", "--log-path=D:\\qc1.log"]) url = 'https://www.google.com/' web.get(url) However this results in the following error: raise WebDriverException(f"Service {self.path} unexpectedly exited. Status code was: {return_code}") selenium.common.exceptions.WebDriverException: Message: Service chromedriver unexpectedly exited. Status code was: 1 From doing some research, this error was because ChromeDriver was not being found I can confirm that Chrome and Chromedriver are up to date: Chrome Version ChromeDriver Version I can also confirm that I have ChromeDriver successfully added as a PATH environment variable I have tried other solutions, such as using a path instead: import pandas as pd import requests from bs4 import BeautifulSoup from selenium import webdriver import time PATH = 'C:\webdrivers\chromedriver.exe' web = webdriver.Chrome(executable_path=PATH, service_args=["--verbose", "--log-path=D:\\qc1.log"]) url = 'https://www.google.com/' web.get(url) however the same error persists. I have also tried adding options to the WebDriver, but to no avail. When running without service_args added, the webpage will briefly open, before closing itself with no crash information
[ "You can try the other option of importing the Chromedriver through webdriver_manager like this:\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nservice = ChromeService(executable_path=ChromeDriverManager().install())\ndriver = webdriver.Chrome(service=service)\n\n", "Have you install the libraries, you are using in your project.\npip install -u Selenium\npip install pandas\npip install bs4\n\nThe rest would be automatically installed into your project.\nIf you are using pycharm\n" ]
[ 0, 0 ]
[]
[]
[ "python", "selenium", "selenium_chromedriver", "selenium_webdriver" ]
stackoverflow_0074589046_python_selenium_selenium_chromedriver_selenium_webdriver.txt
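A fuller sketch of the webdriver_manager route from the first answer, with the imports it relies on spelled out (Selenium 4 style); webdriver_manager must be pip-installed first.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager

service = ChromeService(executable_path=ChromeDriverManager().install())
web = webdriver.Chrome(service=service)  # driver binary resolved automatically
web.get('https://www.google.com/')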
Q: How to prevent unwanted Matplotlib y-axis minor tick labels in log log plot (and keep x-axis minor ticks) In Matplotlib, a log-log plot generates unwanted y-axis tick labels for the minor ticks. I tried this code, which specifies the (major) y-axis ticks to be [1,1.2,1.4,1.6], expecting that any y-axis minor ticks will have no labels. # imports import numpy as np from matplotlib import pyplot as plt # data x = np.linspace(0,4,31) y1 = 1.7 - 0.3*x y1min = y1-0.05 y1max = y1+0.05 # ticks yticks = [1,1.2,1.4,1.6] yticklabels = [str(yt) for yt in yticks] # figure plt.figure() plt.plot(x,y1,color='r') plt.ylim([0.9,1.6]) plt.xlabel('x') plt.xscale('log') plt.yscale('log') plt.ylabel('y') plt.yticks(yticks) plt.gca().set_yticklabels(minor='off',labels=yticklabels) plt.show() which produces the following plot. Surprisingly the minor y-axis ticks have the labels specified for the major ones (i.e. with incorrect values), while the major y-axis ticks have the correct values, but in scientific (exponential) notation instead of the desired normal notation. How can I remove the minor tick labels on the y axis (hopefully without resorting to mticker), while keeping the x-axis minor tick labels, and have the major y-axis tick labels follow the values I specified with yticklabels? A: To disable the minor ticks of a log plot in matplotlib, we can use the minorticks_off() method.
How to prevent unwanted Matplotlib y-axis minor tick labels in log log plot (and keep x-axis minor ticks)
In Matplotlib, a log-log plot generates unwanted y-axis tick labels for the minor ticks. I tried this code, which specifies the (major) y-axis ticks to be [1,1.2,1.4,1.6], expecting that any y-axis minor ticks will have no labels. # imports import numpy as np from matplotlib import pyplot as plt # data x = np.linspace(0,4,31) y1 = 1.7 - 0.3*x y1min = y1-0.05 y1max = y1+0.05 # ticks yticks = [1,1.2,1.4,1.6] yticklabels = [str(yt) for yt in yticks] # figure plt.figure() plt.plot(x,y1,color='r') plt.ylim([0.9,1.6]) plt.xlabel('x') plt.xscale('log') plt.yscale('log') plt.ylabel('y') plt.yticks(yticks) plt.gca().set_yticklabels(minor='off',labels=yticklabels) plt.show() which produces the following plot. Surprisingly the minor y-axis ticks have the labels specified for the major ones (i.e. with incorrect values), while the major y-axis ticks have the correct values, but in scientific (exponential) notation instead of the desired normal notation. How can I remove the minor tick labels on the y axis (hopefully without resorting to mticker), while keeping the x-axis minor tick labels, and have the major y-axis tick labels follow the values I specified with yticklabels?
[ "To disable the minor ticks of a log plot in matplotlib, we can use minorticks_off() method.\n" ]
[ 0 ]
[]
[]
[ "matplotlib", "python" ]
stackoverflow_0074589426_matplotlib_python.txt
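Note that minorticks_off() disables minor ticks on both axes; to keep the x-axis minors, one option is to silence only the y-axis minor labels. A sketch continuing the question's code (yticks and yticklabels reused from it):
ax = plt.gca()
ax.tick_params(axis="y", which="minor", labelleft=False)  # hide y minor labels only
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)  # fixed labels replace the scientific notation
plt.show()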
Q: How to get percentage difference between two columns of different DataFrames? There are 2 DataFrames with coin pairs and float prices. I need to make a new DataFrame with the coin pairs and the price difference as a percentage. First DataFrame in txt Second DataFrame in txt I tried this function, but it didn't work: def get_diff(): for i in df2['askPrice']: for x in df3['Low price']: i = float(i) x = float(x) try: if i > x: res = (round(i) - round(x)) / round(x) * 100 print(round(res)) else: print('lower') except ZeroDivisionError: print(float('inf')) get_diff() A: Let's put the desired calculations in a new list, then transform it into a column within df3 diff = [((y-x)/x)*100 for (y, x) in zip(df2['askPrice'],df3['Low price'])] df3['diff'] = diff
How to get percentage difference between two columns of different DataFrames?
There are 2 DataFrames with coin pairs and float prices. I need to make a new DataFrame with the coin pairs and the price difference as a percentage. First DataFrame in txt Second DataFrame in txt I tried this function, but it didn't work: def get_diff(): for i in df2['askPrice']: for x in df3['Low price']: i = float(i) x = float(x) try: if i > x: res = (round(i) - round(x)) / round(x) * 100 print(round(res)) else: print('lower') except ZeroDivisionError: print(float('inf')) get_diff()
[ "Let's put the desired calcultaions in a new list then transform it into a column within df3\ndiff = [((y-x)/x)*100 for (y, x) in zip(df2['askPrice'],df3['Low price'])] \n\ndf3['diff'] = diff\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074589377_dataframe_pandas_python.txt
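The same calculation can also be done as a vectorised column operation rather than a Python-level list, assuming the two frames share an index and the price columns may arrive as strings:
ask = df2['askPrice'].astype(float)
low = df3['Low price'].astype(float)
df3['diff_pct'] = (ask - low) / low * 100  # row-wise percentage difference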
Q: requests_html render (async) not working on AWS Lambda Read-only file system I have a Python script that scrapes data from a webpage. It works locally but not on AWS Lambda, because Lambda only allows writing new files to the /tmp directory. I went through the requests_html render API and it seems it's not possible to change the file location; it uses the current working directory by default. It's causing the error below Error detail Read-only file system: '/home/sbx_user123' Thank you in advance. A: How about setting the environment variable below from the Lambda web UI? PYPPETEER_HOME=/tmp/ It may change the working directory of pyppeteer (a module running inside requests_html). Maybe this is a related question: Pyppeteer fails to download headless chrome when running on AWS Lambda
requests_html render (async) not working on AWS Lambda Read-only file system
I have a Python script that scrapes data from a webpage. It works locally but not on AWS Lambda, because Lambda only allows writing new files to the /tmp directory. I went through the requests_html render API and it seems it's not possible to change the file location; it uses the current working directory by default. It's causing the error below Error detail Read-only file system: '/home/sbx_user123' Thank you in advance.
[ "How's set an environment variable below from lambda web UI?\nPYPPETEER_HOME=/tmp/\n\nIt may change working directory of pyppeteer (a module running inside request_html).\nmaybe, it's a related question: Pyppeteer fails to download headless chrome when running on AWS Lambda\n" ]
[ 0 ]
[]
[]
[ "aws_lambda", "python", "python_requests_html" ]
stackoverflow_0074011801_aws_lambda_python_python_requests_html.txt
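A sketch of setting that variable from code instead of the Lambda console; it must happen before requests_html (and hence pyppeteer) is imported, and it assumes /tmp is the only writable path, as on Lambda:
import os
os.environ["PYPPETEER_HOME"] = "/tmp"        # pyppeteer stores its data here

from requests_html import AsyncHTMLSession   # import only after the variable is set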
Q: Why format string throws error on list but not tuple Why does passing a list to the following format method fail, but passing the same list, coerced to a tuple run without errors? From my test, the tuple is not (multiply) inheriting from Python's atomic numeric types (I assume not complex either) so overall I'm confused by what the interpreter is telling me, and why it can seemingly happily treat my tuple as containing values to format but not of my (ceteris paribus) list. #!/usr/bin/env python3 import math from multiprocessing import Pool def func(x): return math.sin(x) if __name__ == "__main__": with Pool(5) as p: x = p.map(func, range(1, 4)) x_tuple = tuple(x) try: print(", ".join([r"%2f"] * len(x)) % x) print(f"Print with list succeeded") except Exception as e: print(f"Print with list fails: {e}") try: print(", ".join([r"%2f"] * len(x_tuple)) % x_tuple) print(f"Print with tuple succeeded") except: print(f"Print with tuple fails: {e}") print(f"{isinstance(x, float) = }") print(f"{isinstance(x, int) = }") The above gives the following output: Print with list fails: must be real number, not list 0.841471, 0.909297, 0.141120 Print with tuple succeeded isinstance(x, float) = False isinstance(x, int) = False A: This is just how format % values works; as documented in the manual: If format requires a single argument, values may be a single non-tuple object. Otherwise, values must be a tuple with exactly the number of items specified by the format string, or a single mapping object (for example, a dictionary). (See: printf-style String Formatting) The reason it's done this way is to support simplified forms with just one value; things like "Hello %s" % name; when you use a list, it interprets that as being the simplified form; "Hello %s" % x would also work that way, formatting up the whole list using %s. However, a whole list can't be formatted up using %f, and if it could be, there would be no values for the remaining %f specifiers.
Why format string throws error on list but not tuple
Why does passing a list to the following format method fail, but passing the same list, coerced to a tuple run without errors? From my test, the tuple is not (multiply) inheriting from Python's atomic numeric types (I assume not complex either) so overall I'm confused by what the interpreter is telling me, and why it can seemingly happily treat my tuple as containing values to format but not of my (ceteris paribus) list. #!/usr/bin/env python3 import math from multiprocessing import Pool def func(x): return math.sin(x) if __name__ == "__main__": with Pool(5) as p: x = p.map(func, range(1, 4)) x_tuple = tuple(x) try: print(", ".join([r"%2f"] * len(x)) % x) print(f"Print with list succeeded") except Exception as e: print(f"Print with list fails: {e}") try: print(", ".join([r"%2f"] * len(x_tuple)) % x_tuple) print(f"Print with tuple succeeded") except: print(f"Print with tuple fails: {e}") print(f"{isinstance(x, float) = }") print(f"{isinstance(x, int) = }") The above gives the following output: Print with list fails: must be real number, not list 0.841471, 0.909297, 0.141120 Print with tuple succeeded isinstance(x, float) = False isinstance(x, int) = False
[ "This is just how format % values works; as documented in the manual:\n\nIf format requires a single argument, values may be a single non-tuple object. Otherwise, values must be a tuple with exactly the number of items specified by the format string, or a single mapping object (for example, a dictionary).\n\n(See: printf-style String Formatting)\nThe reason it's done this way is to support simplified forms with just one value; things like \"Hello %s\" % name; when you use a list, it interprets that as being the simplified form; \"Hello %s\" % x would also work that way, formatting up the whole list using %s. However, a whole list can't be formatted up using %f, and if it could be, there would be no values for the remaining %f specifiers.\n" ]
[ 2 ]
[]
[]
[ "python", "string", "tuples" ]
stackoverflow_0074589435_python_string_tuples.txt
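A stripped-down illustration of the rule quoted above, runnable without multiprocessing:
x = [0.841471, 0.909297, 0.141120]
fmt = ", ".join(["%2f"] * len(x))
print(fmt % tuple(x))  # works: the tuple supplies one value per %2f
# print(fmt % x)       # TypeError: must be real number, not list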
Q: Convert a python script which runs a bash script to an executable file with Pyinstaller I want to convert a python script which runs a local bash script to an executable file with Pyinstaller. My project structure is as follows: Project/ |-- bash_script/ | |-- script.sh |-- main.py The main.py contains a line which runs the script locally: output = subprocess.check_output('./bash_script/script.sh', shell=True).decode() Now, after converting main.py to an executable file on Linux, if I run it from a different location than where main.py is located, it won't find the script. I want to add the shell script to the python executable file, so it won't depend on the local script; I will just have the executable file and it will run on its own. I have tried using the --add-data flag in the pyinstaller conversion command; however, it didn't work. Thanks! Note: I am using the following command: pyinstaller --add-data "./bash_script/script.sh:." --onefile main.py and I get an error after running in the dist dir: /bin/sh: 1: ./bash_script/script.sh: not found A: In your main.py: import subprocess import os script = os.path.join(os.path.dirname(__file__),'bash_script','script.sh') output = subprocess.check_output(script, shell=True).decode() print(output) Then run: pyinstaller -F --add-data ./bash_script/script.sh:./bash_script main.py And Bob's your uncle! p.s. -F is the same as --onefile
Convert a python script which runs a bash script to an executable file with Pyinstaller
I want to convert a python script which runs a local bash script to an executable file with Pyinstaller. My project structure is as follows: Project/ |-- bash_script/ | |-- script.sh |-- main.py The main.py contains a line which runs the script locally: output = subprocess.check_output('./bash_script/script.sh', shell=True).decode() Now, after converting main.py to an executable file on Linux, if I run it from a different location than where main.py is located, it won't find the script. I want to add the shell script to the python executable file, so it won't depend on the local script; I will just have the executable file and it will run on its own. I have tried using the --add-data flag in the pyinstaller conversion command; however, it didn't work. Thanks! Note: I am using the following command: pyinstaller --add-data "./bash_script/script.sh:." --onefile main.py and I get an error after running in the dist dir: /bin/sh: 1: ./bash_script/script.sh: not found
[ "In your main.py:\nimport subprocess\nimport os\n\nscript = os.path.join(os.path.dirname(__file__),'bash_script','script.sh')\noutput = subprocess.check_output(script, shell=True).decode()\nprint(output)\n\nThen run:\npyinstaller -F --add-data ./bash_script/script.sh:./bash_script main.py\n\nAnd bobs your uncle!\np.s. -F is the same as --onefile\n" ]
[ 1 ]
[]
[]
[ "pyinstaller", "python" ]
stackoverflow_0074588616_pyinstaller_python.txt
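One caveat worth adding: with --onefile the bundle is unpacked to a temporary directory at run time, exposed as sys._MEIPASS, so a variant of the answer's lookup that works both frozen and unfrozen could look like this (a sketch, matching the answer's ./bash_script destination):
import os
import subprocess
import sys

base = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))
script = os.path.join(base, "bash_script", "script.sh")
output = subprocess.check_output(script, shell=True).decode()
print(output)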
Q: zsh: command not found: django-admin when starting a django project I use Ubuntu 15.10 and zsh (don't know if it can help) So I try to install django: pip install django Downloading/unpacking django Downloading Django-1.9.5-py2.py3-none-any.whl (6.6MB): 6.6MB downloaded Installing collected packages: django Successfully installed django Cleaning up... Everything works fine. When I do pip freeze I can see django is installed. then: django-admin startproject mysite But I got this issue: zsh: command not found: django-admin A: I found an alternative solution. With find / -name django-admin I found django-admin in myHome/.local/bin/django-admin. So instead of django-admin startproject mysite I use the full path myHome/.local/bin/django-admin startproject mysite thanks to @Evert, this is why I got the problem. his comment: This is likely because you either used the --user option to pip install, or you set up pip in such a way that it automatically does that. Hence, everything gets installed in $HOME/.local. You may want to add $HOME/.local/bin to your $PATH for the future. A: When I had the problem on my mac I just uninstall django and install it again but with root permissions. Now it's working good) pip3 uninstall django sudo pip3 install django A: I faced a similar issue on Mac OS but I moved in another way. I used Virtual Environments. First, create the virtual environment python3 -m venv django-env Then, use this environment source django-env/bin/activate Next, install django python -m pip install django Finally test django is working django-admin startproject mysite In my opinion, it is better to have environments isolated to avoid O.S. settings A: My adjango-admin is located in ~/Library/Python/3.7/bin/django-admin But I don't have it in my linked PATH which is looks like this /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/mysql/bin So I create a symlink to one of the bin I have in my PATH sudo ln -s ~/Library/Python/3.7/bin/django-admin /usr/local/bin It solves my issue A: I was facing the same problem after updating my Mac OS to Catalina, and shell from bash to zsh. None of the commands would work, as if all my paths were deleted. Looking at Brady Huang's answer, doing something similar worked for me. I made sure django was installed correctly by reinstalling it pip3 uninstall django pip3 install django I found django installed under: /Library/Frameworks/Python.framework/Versions/3.8/bin/django-admin by running: sudo ln -s /Library/Frameworks/Python.framework/Versions/3.8/bin/django-admin usr/local/bin I was able to run django-admin again. A: You may be having some dependency issues. So to avoid it happen again you need to create a virtual environment only for the current project you are doing. Like this you can avoid having issues and isolate your application. You can follow this question to create a virtual environment and add django in it. Then, as soon as you create the virtual environment and activated you can pip install django and check to see whether it is installed. A: Tried many stuffs but then installing the python version 3.11 and using a virtual env solved the issue. Earlier I was using 3.8.9. Hope this helps someone A: what worked for me! I just Added the following in my ~/.zshrc file( you can also add it in the ~/.bashrc file if you are not using zsh terminal) export PATH=$PATH:$HOME/.local/bin This solution will work not only for you django-admin command but all other python package commands that would behave in a similar way.
zsh: command not found: django-admin when starting a django project
I use Ubuntu 15.10 and zsh (don't know if it can help) So I try to install django: pip install django Downloading/unpacking django Downloading Django-1.9.5-py2.py3-none-any.whl (6.6MB): 6.6MB downloaded Installing collected packages: django Successfully installed django Cleaning up... Everything works fine. When I do pip freeze I can see django is installed. then: django-admin startproject mysite But I got this issue: zsh: command not found: django-admin
[ "I found an alternative solution.\nWith find / -name django-admin I found django-admin in myHome/.local/bin/django-admin.\nSo instead of django-admin startproject mysite I use the full path myHome/.local/bin/django-admin startproject mysite\nthanks to @Evert, this is why I got the problem.\nhis comment:\nThis is likely because you either used the --user option to pip \ninstall, or you set up pip in such a way that it automatically does \nthat. Hence, everything gets installed in $HOME/.local. You may want\nto add $HOME/.local/bin to your $PATH for the future.\n\n", "When I had the problem on my mac I just uninstall django and install it again but with root permissions. Now it's working good)\npip3 uninstall django\n\nsudo pip3 install django \n\n", "I faced a similar issue on Mac OS but I moved in another way. I used Virtual Environments.\nFirst, create the virtual environment\npython3 -m venv django-env\n\nThen, use this environment\nsource django-env/bin/activate\n\nNext, install django\npython -m pip install django\n\nFinally test django is working\ndjango-admin startproject mysite\n\nIn my opinion, it is better to have environments isolated to avoid O.S. settings\n", "My adjango-admin is located in \n~/Library/Python/3.7/bin/django-admin\n\nBut I don't have it in my linked PATH which is looks like this\n/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/mysql/bin\n\nSo I create a symlink to one of the bin I have in my PATH\nsudo ln -s ~/Library/Python/3.7/bin/django-admin /usr/local/bin\n\nIt solves my issue\n", "I was facing the same problem after updating my Mac OS to Catalina, and shell from bash to zsh.\nNone of the commands would work, as if all my paths were deleted.\nLooking at Brady Huang's answer, doing something similar worked for me.\nI made sure django was installed correctly by reinstalling it\npip3 uninstall django\npip3 install django \n\nI found django installed under:\n/Library/Frameworks/Python.framework/Versions/3.8/bin/django-admin\n\nby running:\nsudo ln -s /Library/Frameworks/Python.framework/Versions/3.8/bin/django-admin usr/local/bin \n\nI was able to run django-admin again.\n", "You may be having some dependency issues.\nSo to avoid it happen again you need to create a virtual environment only for the current project you are doing. Like this you can avoid having issues and isolate your application.\nYou can follow this question to create a virtual environment and add django in it.\nThen, as soon as you create the virtual environment and activated you can pip install django and check to see whether it is installed.\n", "Tried many stuffs but then installing the python version 3.11 and using a virtual env solved the issue. Earlier I was using 3.8.9. Hope this helps someone\n", "what worked for me!\n\nI just Added the following in my ~/.zshrc file( you can also add it in the ~/.bashrc file if you are not using zsh terminal)\n export PATH=$PATH:$HOME/.local/bin\n\n\n\nThis solution will work not only for you django-admin command but all other python package commands that would behave in a similar way.\n" ]
[ 9, 6, 4, 0, 0, 0, 0, 0 ]
[]
[]
[ "django", "pip", "python", "ubuntu_15.10" ]
stackoverflow_0036446599_django_pip_python_ubuntu_15.10.txt
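As a minimal sketch of what is going on above: pip --user installs put console scripts such as django-admin under the per-user base directory, which Python exposes as site.USER_BASE. The snippet below only prints that directory and whether it is already on PATH; it assumes a Unix-style layout where scripts live in its bin/ subdirectory.
import os
import site

# --user installs place console scripts like django-admin in USER_BASE/bin on Unix
user_bin = os.path.join(site.USER_BASE, "bin")
print("user scripts directory:", user_bin)
print("already on PATH:", user_bin in os.environ.get("PATH", "").split(os.pathsep))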
Q: Compute the greatest common divisor and least common multiple of two integers I need to write a Python program in which the user enters two numbers and receives the LCM and HCF of those numbers. I tried it, and my LCM was correct, but my HCF was not, so could anyone assist me in locating the HCF? Thank you! num1 = int(input('Enter your first number: ')) num2 = int(input('Enter your second number: ')) def compute_lcm(x, y): # choose the greater number if x > y: greater = x else: greater = y while(True): if((greater % x == 0) and (greater % y == 0)): lcm = greater break greater += 1 return lcm print("The L.C.M. is", compute_lcm(num1, num2)) A: You can use the Euclidean algorithm if you want to find the greatest common divisor or, in your terms, the highest common factor (HCF): here is the link to the article in FreeCodeCamp.org Here is the code you can use in Python for your case: """ finding HCF """ def hcfLoop(x : int, y : int) -> int: """ finding highest common factor using loop returns int """ while (x % y) > 0: remainder = x % y x = y y = remainder return y def hcfRecurs(x : int, y : int) -> int: """ find highest common factor using recursion """ if y == 0 : return x else: return hcfRecurs(y, x % y) x = 1220 y = 516 print(f"the HCF for {x} and {y} using loop: {hcfLoop(x,y)}") print(f"the HCF for {x} and {y} using recursion: {hcfRecurs(x,y)}") The answer: the HCF for 1220 and 516 using loop: 4 the HCF for 1220 and 516 using recursion: 4 A: num1 = 36 num2 = 60 hcf = 1 for i in range(1, min(num1, num2) + 1): if num1 % i == 0 and num2 % i == 0: hcf = i print("Hcf of", num1, "and", num2, "is", hcf) # HCF of 36 and 60 is 12
Compute the greatest common divisor and least common multiple of two integers
I need to write a Python program in which the user enters two numbers and receives the LCM and HCF of those numbers. I tried it, and my LCM was correct, but my HCF was not, so could anyone assist me in locating the HCF? Thank you! num1 = int(input('Enter your first number: ')) num2 = int(input('Enter your second number: ')) def compute_lcm(x, y): # choose the greater number if x > y: greater = x else: greater = y while(True): if((greater % x == 0) and (greater % y == 0)): lcm = greater break greater += 1 return lcm print("The L.C.M. is", compute_lcm(num1, num2))
[ "You can use Euclidian algorithm if you want to find greatest common divisor or in your terms highest common factor (HCF): here is the link to the article in FreeCodeCamp.org\nHere is the code you can use for python for your case:\n\"\"\" \nfinding HCF\n\"\"\"\n\ndef hcfLoop(x : int, y : int) -> int:\n \"\"\" \n finding hinghest common factor using loop\n returns int\n \"\"\"\n while (x % y) > 0:\n remainder = x % y\n x = y\n y = remainder\n \n return y\n\ndef hcfRecurs(x : int, y : int) -> int:\n \"\"\" \n find highest common factor using recursion\n \"\"\"\n if y == 0 :\n return x\n else:\n return hcfRecurs(y, x % y)\n\n\nx = 1220\ny = 516\nprint(f\"the HCF for {x} and {y} using loop: {hcfLoop(x,y)}\")\nprint(f\"the HCF for {x} and {y} using recursion: {hcfRecurs(x,y)}\")\n\nThe answer:\nthe HCF for 1220 and 516 using loop: 4\nthe HCF for 1220 and 516 using recursion: 4\n\n", "num1 = 36\nnum2 = 60\nhcf = 1\n\nfor i in range(1, min(num1, num2)):\n if num1 % i == 0 and num2 % i == 0:\n hcf = i\nprint(\"Hcf of\", num1, \"and\", num2, \"is\", hcf)\n# HCF of 36 and 60 is 12\n\n" ]
[ 2, 1 ]
[]
[]
[ "python" ]
stackoverflow_0074589299_python.txt
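For completeness, a shorter sketch than the loop-based answers above, assuming Python 3.5+ for math.gcd: the LCM follows from the identity lcm(a, b) = |a*b| // gcd(a, b), and Python 3.9+ even ships math.lcm directly.
import math

def lcm(x, y):
    # LCM via the GCD identity, no incremental search needed
    return abs(x * y) // math.gcd(x, y)

print("The H.C.F. is", math.gcd(36, 60))  # 12
print("The L.C.M. is", lcm(36, 60))       # 180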
Q: Python Selenium using loop to check keywords keywords = ['no stock','out of stock','not available'] n = 0 while True: n+=1 print(f'now check {n} times') for keyword in keywords: if keyword in driver.page_source: print(f'found {keyword}, refresh after 30 seconds') time.sleep(30) driver.get(url) else: print(f'could not find any of keyword') break Hi guys, hope you are doing well. I am trying to use selenium to check whether an item is available on a webpage. My idea is to put all keywords related to no stock in a list and loop every 30 seconds to check. However, if I put a break on it, the code only scans the 1st item in the list. Is there any way to break the loop when none of the three keywords is on the website? Thanks for your help. A: Maybe this is the logic you are looking for? keywords = ['no stock','out of stock','not available'] n = 0 while True: n+=1 print(f'now check {n} times') keywords_found = 0 for keyword in keywords: if keyword in driver.page_source: keywords_found += 1 print(f'found {keyword}, {keywords_found} of {len(keywords)}') if keywords_found == len(keywords): print(f"We found the {len(keywords)} items of the list in the page, so sleep and navigate") time.sleep(30) driver.get(url) else: print(f'could not find all {len(keywords)} keywords, break') break
Python Selenium using loop to check keywords
keywords = ['no stock','out of stock','not available'] n = 0 while True: n+=1 print(f'now check {n} times') for keyword in keywords: if keyword in driver.page_source: print(f'found {keyword}, refresh after 30 seconds') time.sleep(30) driver.get(url) else: print(f'could not find any of keyword') break Hi guys, hope you are doing well. I am trying to use selenium to check whether an item is available on a webpage. My idea is to put all keywords related to no stock in a list and loop every 30 seconds to check. However, if I put a break on it, the code only scans the 1st item in the list. Is there any way to break the loop when none of the three keywords is on the website? Thanks for your help.
[ "Maybe is this the logic you are looking for?\nkeywords = ['no stock','out of stock','not available']\n\nn = 0\nwhile True:\n n+=1\n print(f'now check {n} times')\n keywords_found = 0\n for keyword in keywords:\n if keyword in driver.page_source:\n keywords_found += 1\n print(f'found {keyword}, {keywords_found} of {len(keywords)}')\n if keywords_found == len(keywords):\n print(f\"We found the {len(keywords)} items of the list intot he page, so sleep and navigate\")\n time.sleep(30)\n driver.get(url)\n else:\n print(f'could not the {len(keywords)} keywords, break')\n break\n\n" ]
[ 0 ]
[]
[]
[ "loops", "python", "selenium" ]
stackoverflow_0074589110_loops_python_selenium.txt
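A minimal sketch of the check the asker describes, using the built-in any(); driver and url are assumed to already exist as in the question, and the loop stops as soon as none of the out-of-stock phrases appear.
import time

keywords = ['no stock', 'out of stock', 'not available']

while True:
    page = driver.page_source  # assumes a live Selenium driver, as in the question
    if not any(keyword in page for keyword in keywords):
        print('none of the keywords found, item seems available')
        break
    print('still out of stock, refreshing in 30 seconds')
    time.sleep(30)
    driver.get(url)  # url assumed from the question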
Q: Subtracting more than 2 numbers in Python I am a little bit new to programming. I am learning Python, version 3.6. print("1.+ \n2.-\n3.*\n4./") choice = int(input()) if choice == 1: sum = 0 print("How many numbers you want to sum?") numb = int(input()) for i in range(numb): a = int(input(str(i+1)+". number ")) sum+=a print("Result : "+str(sum)) To improve myself I am trying to build a calculator, but first I am asking how many numbers the user wants to calculate. You can see this in the code above, but when it comes to subtracting, dividing, or multiplying I have no idea what to do. My reason for doing it like this is that I want the calculator to work like real calculators. A: You can do the exact same thing you're already doing. Python has -=, *=, and /= operators that work the same way as the += you're already using. A: You can also use *args or **kwargs in order to subtract more than two numbers. If you define the *args keyword in a function, it lets you take as many arguments as you want. A: There are two methods to solve this: Method-1 Using the logic for subtraction a-b-c-… = ((a-b)-c)-… def subt1(*numbers): # defining a function subt1 and using a non-keyword argument *numbers so that a variable number of arguments can be provided by the user. All these arguments will be stored as a tuple. try: # using try-except to handle the errors. If numbers are given as arguments, then the statements in the try block will get executed. diff = numbers[0] # assigning the first element/number to the variable diff for i in range(1,len(numbers)): # iterating through all the given elements/ numbers of a tuple using a for loop diff = diff - numbers[i] # performing the subtraction operation for multiple numbers from left to right, e.g. a-b-c = (a-b)-c return diff # returning the final value of the above operation except: # if no arguments or non-numeric values are passed, then the statement in the except block will get executed return 'please enter numbers as arguments' subt1(10, 5, -7, 9, -1) ----> here subt1 performs 10-5-(-7)-9-(-1) and returns the value 4 subt1(25.5, 50.0, -100.25, 75) ----> here subt1 performs 25.5-50.0-(-100.25)-75 and returns the value 0.75 subt1(20j, 10, -50+100j, 150j) ----> here subt1 performs 20j-10-(-50+100j)-150j and returns the value (40-230j) subt1() ----> here the statement in the except block is returned because no input is passed 'please enter numbers as arguments' subt1('e', 1, 2.0, 3j) ---> here the statement in the except block is returned because the string 'e' is passed, which is not a number 'please enter numbers as arguments' Method-2 Using the logic for subtraction a-b-c-… = a-(b+c+…) = a-add(b,c,…) def subt2(*numbers): try: add = 0 # initializing a variable add with 0 for i in range(1,len(numbers)): add = add+ numbers[i] # performing the addition operation for the numbers starting from the index 1 return numbers[0]-add # returning the final value of subtraction of given numbers, logic : a-b-c = a-(b+c) = a-add(b,c) except: return 'please enter numbers as arguments' subt2(10, 5, -7, 9, -1) ----> here subt2 performs 10-5-(-7)-9-(-1) and returns the value 4 subt2(25.5, 50.0, -100.25, 75) ----> here subt2 performs 25.5-50.0-(-100.25)-75 and returns the value 0.75 subt2(20j, 10, -50+100j, 150j) ----> here subt2 performs 20j-10-(-50+100j)-150j and returns the value (40-230j) Note: All the above test cases have been tested in Jupyter notebooks.
Subtracting more than 2 numbers in Python
I am a little bit new to programming. I am learning Python, version 3.6. print("1.+ \n2.-\n3.*\n4./") choice = int(input()) if choice == 1: sum = 0 print("How many numbers you want to sum?") numb = int(input()) for i in range(numb): a = int(input(str(i+1)+". number ")) sum+=a print("Result : "+str(sum)) To improve myself I am trying to build a calculator, but first I am asking how many numbers the user wants to calculate. You can see this in the code above, but when it comes to subtracting, dividing, or multiplying I have no idea what to do. My reason for doing it like this is that I want the calculator to work like real calculators.
[ "You can do the exact same thing you're already doing. Python has -=, *=, and /= operators that work the same way as the += you're already using.\n", "You can also use the *args or *kargs in order to subtract more than two numbers. If you define *args keyword in a function then it will help you to take as many variables you want.\n", "There are two methods to solve this:\nMethod-1\nUsing the logic for subtraction a-b-c-… = ((a-b)-c)-…\ndef subt1(*numbers): # defining a function subt1 and using a non-keyword argument *numbers so that variable number of arguments can be provided by user. All these arguments will be stored as a tuple.\n\n try: # using try-except to handle the errors. If numbers are given as arguments, then the statements in the try block will get executed.\n\n diff = numbers[0] # assigning the first element/number to the variable diff\n\n for i in range(1,len(numbers)): # iterating through all the given elements/ numbers of a tuple using a for loop\n diff = diff - numbers[i] # performing the subtraction operation for multiple numbers from left to right, for eg, a-b-c = (a-b)-c\n return diff # returning the final value of the above operation\n\n except: # if no arguments OR more than one non-numbers are passed, then the statement in the except block will get executed\n return 'please enter numbers as arguments'\n\nsubt1(10, 5, -7, 9, -1) ----> here subt1 performs 10-5-(-7)-9-(-1) and returns the value\n\n4\n\nsubt1(25.5, 50.0, -100.25, 75) ----> here subt1 performs 25.5-50.0-(-100.25)-75 and returns the value\n\n0.75\n\nsubt1(20j, 10, -50+100j, 150j) ----> here subt1 performs 20j-10-(-50+100j)-150j and returns the value\n\n(40-230j)\n\nsubt1() ----> here the statement in the except block is returned as no input is passed\n\n'please enter numbers as arguments'\n\nsubt1('e', 1, 2.0, 3j) ---> here the statement in the except block is returned as a string 'e' is passed which is not a number\n\n'please enter numbers as arguments'\n\nMethod-2\nUsing the logic for subtraction a-b-c-… = a-(b+c+…) = a-add(b,c,…)\ndef subt2(*numbers):\n\n try:\n add = 0 # initializing a variable add with 0\n\n for i in range(1,len(numbers)):\n add = add+ numbers[i] # performing the addition operation for the numbers starting from the index 1\n return numbers[0]-add # returning the final value of subtraction of given numbers, logic : a-b-c = a-(b+c) = a-add(b,c)\n\n except:\n return 'please enter numbers as arguments'\n\nsubt2(10, 5, -7, 9, -1) ----> here subt2 performs 10-5-(-7)-9-(-1) and returns the value\n\n4\n\nsubt2(25.5, 50.0, -100.25, 75) ----> here subt2 performs 25.5-50.0-(-100.25)-75 and returns the value\n\n0.75\n\nsubt2(20j, 10, -50+100j, 150j) ----> here subt2 performs 20j-10-(-50+100j)-150j and returns the value\n\n(40-230j)\n\nNote : All the above test cases have been tested in Jupyter notebooks.\n" ]
[ 2, 1, 0 ]
[]
[]
[ "calculator", "python" ]
stackoverflow_0048816636_calculator_python.txt
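Since these operators chain left to right, a compact sketch with functools.reduce and operator.sub covers the subtraction case for any number of inputs (swap in operator.mul or operator.truediv for the other menu choices):
from functools import reduce
import operator

def subtract_all(*numbers):
    # folds left: ((a - b) - c) - ...
    return reduce(operator.sub, numbers)

print("Result :", subtract_all(10, 5, -7, 9, -1))  # 4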
Q: Create an integer timestamp that corresponds to a Pandas Timestamp's timezone Suppose we have a dataset with a UNIX timestamp in milliseconds: data = [ { "unix_ts": 1669291200000, "val": 10 }, { "unix_ts": 1669291260000, "val": 25 } ] Which we convert to a Pandas dataframe with a Pandas timestamp (datetime) set to US/Eastern: df = pd.DataFrame(data) df['ET'] = pd.to_datetime(df['unix_ts'], unit='ms').dt.tz_localize('UTC').dt.tz_convert('US/Eastern') unix_ts val ET 0 1669291200000 10 2022-11-24 07:00:00-05:00 1 1669291260000 25 2022-11-24 07:01:00-05:00 We can see that the ET time is 5 hours behind the UTC unix_ts Suppose we want a new integer column with a value that corresponds with that -5 hours difference. Naively, we could do this: df['adjusted_ts'] = df['unix_ts'] - (3600000 * 5) # Include column to allow us to check the result by eye. df['Check_ET'] = pd.to_datetime(df['adjusted_ts'], unit='ms').dt.tz_localize('UTC').dt.tz_convert('US/Eastern') Result: unix_ts val ET adjusted_ts Check_ET 0 1669291200000 10 2022-11-24 07:00:00-05:00 1669273200000 2022-11-24 02:00:00-05:00 1 1669291260000 25 2022-11-24 07:01:00-05:00 1669273260000 2022-11-24 02:01:00-05:00 We can see from the Check_ET column that this "works", but it won't when we get to the part of the year when US/Eastern is only 4 hours behind UTC. It would be handy to be able to do something like this: import numpy as np df['smart_adjusted_ts'] = (df['ET'].astype(np.int64) // 10 ** 9) * 1000 But, sadly, that's not so 'smart', as it results in a column that's identical to the original unix_ts (Regardless of the ET column's timezone, the underlying data (NumPy) is always stored as nanoseconds since the EPOCH in UTC.): unix_ts ... Check_ET smart_adjusted_ts 0 1669291200000 ... 2022-11-24 02:00:00-05:00 1669291200000 1 1669291260000 ... 2022-11-24 02:01:00-05:00 1669291260000 So, unless there is a special method to do this (Anyone?), my only thought is to go back to the original approach but dynamically extract the UTC offset (ideally as ints: 4 or 5) from the ET column. The problem is, I can't find how to do that either, but I'm hoping to achieve something like this: df['adjusted_ts'] = df['unix_ts'] - (3600000 * et_utc_abs_diff) Please be aware that a dataset could include dates with both (4 & 5 hour) differences, so it's important to get this difference on a row-by-row basis as opposed to having a master variable set to 4 or 5. Any ideas for an elegant solution, please? Edit I came up with the following, and it gets the right result, but I suspect there must be a better way using standard Pandas methods. df['adjusted_ts'] = df['unix_ts'] - (df['ET'].astype(str).str.slice(start=21, stop=22).astype(int) * 3600000) A: Here's a way to implement this by localizing to None, as I've described in the comments. import pandas as pd df = pd.DataFrame({"unix_ts": [1651363200000, 1669291260000], "val": [10, 25]}) df["ET"] = pd.to_datetime(df["unix_ts"], unit='ms', utc=True).dt.tz_convert("America/New_York") # df["ET"] # 0 2022-04-30 20:00:00-04:00 # 1 2022-11-24 07:01:00-05:00 # Name: ET, dtype: datetime64[ns, America/New_York] # we can remove the time zone to get naive datetime. 
pandas will treat this as UTC df["ET_naive"] = df["ET"].dt.tz_localize(None) # df # unix_ts val ET ET_naive # 0 1669291200000 10 2022-11-24 07:00:00-05:00 2022-11-24 07:00:00 # 1 1669291260000 25 2022-11-24 07:01:00-05:00 2022-11-24 07:01:00 # now we can convert back to units of time since the epoch, # only that the epoch is now eastern time: df["ET_epochtime"] = df["ET_naive"].astype("int64") / 1e6 # division gives milliseconds # df["ET_epochtime"] correctly accounts for DST offset: (df["unix_ts"]-df["ET_epochtime"])/3600000 # 0 4.0 # 1 5.0 # dtype: float64
Create an integer timestamp that corresponds to a Pandas Timestamp's timezone
Suppose we have a dataset with a UNIX timestamp in milliseconds: data = [ { "unix_ts": 1669291200000, "val": 10 }, { "unix_ts": 1669291260000, "val": 25 } ] Which we convert to a Pandas dataframe with a Pandas timestamp (datetime) set to US/Eastern: df = pd.DataFrame(data) df['ET'] = pd.to_datetime(df['unix_ts'], unit='ms').dt.tz_localize('UTC').dt.tz_convert('US/Eastern') unix_ts val ET 0 1669291200000 10 2022-11-24 07:00:00-05:00 1 1669291260000 25 2022-11-24 07:01:00-05:00 We can see that the ET time is 5 hours behind the UTC unix_ts Suppose we want a new integer column with a value that corresponds with that -5 hours difference. Naively, we could do this: df['adjusted_ts'] = df['unix_ts'] - (3600000 * 5) # Include column to allow us to check the result by eye. df['Check_ET'] = pd.to_datetime(df['adjusted_ts'], unit='ms').dt.tz_localize('UTC').dt.tz_convert('US/Eastern') Result: unix_ts val ET adjusted_ts Check_ET 0 1669291200000 10 2022-11-24 07:00:00-05:00 1669273200000 2022-11-24 02:00:00-05:00 1 1669291260000 25 2022-11-24 07:01:00-05:00 1669273260000 2022-11-24 02:01:00-05:00 We can see from the Check_ET column that this "works", but it won't when we get to the part of the year when US/Eastern is only 4 hours behind UTC. It would be handy to be able to do something like this: import numpy as np df['smart_adjusted_ts'] = (df['ET'].astype(np.int64) // 10 ** 9) * 1000 But, sadly, that's not so 'smart', as it results in a column that's identical to the original unix_ts (Regardless of the ET column's timezone, the underlying data (NumPy) is always stored as nanoseconds since the EPOCH in UTC.): unix_ts ... Check_ET smart_adjusted_ts 0 1669291200000 ... 2022-11-24 02:00:00-05:00 1669291200000 1 1669291260000 ... 2022-11-24 02:01:00-05:00 1669291260000 So, unless there is a special method to do this (Anyone?), my only thought is to go back to the original approach but dynamically extract the UTC offset (ideally as ints: 4 or 5) from the ET column. The problem is, I can't find how to do that either, but I'm hoping to achieve something like this: df['adjusted_ts'] = df['unix_ts'] - (3600000 * et_utc_abs_diff) Please be aware that a dataset could include dates with both (4 & 5 hour) differences, so it's important to get this difference on a row-by-row basis as opposed to having a master variable set to 4 or 5. Any ideas for an elegant solution, please? Edit I came up with the following, and it gets the right result, but I suspect there must be a better way using standard Pandas methods. df['adjusted_ts'] = df['unix_ts'] - (df['ET'].astype(str).str.slice(start=21, stop=22).astype(int) * 3600000)
[ "Here's a way to implement this by localizing to None, as I've described in the comments.\nimport pandas as pd\n\ndf = pd.DataFrame({\"unix_ts\": [1651363200000, 1669291260000],\n \"val\": [10, 25]})\n\ndf[\"ET\"] = pd.to_datetime(df[\"unix_ts\"], unit='ms', utc=True).dt.tz_convert(\"America/New_York\")\n# df[\"ET\"]\n# 0 2022-04-30 20:00:00-04:00\n# 1 2022-11-24 07:01:00-05:00\n# Name: ET, dtype: datetime64[ns, America/New_York]\n\n# we can remove the time zone to get naive datetime. pandas will treat this as UTC\ndf[\"ET_naive\"] = df[\"ET\"].dt.tz_localize(None)\n# df\n# unix_ts val ET ET_naive\n# 0 1669291200000 10 2022-11-24 07:00:00-05:00 2022-11-24 07:00:00\n# 1 1669291260000 25 2022-11-24 07:01:00-05:00 2022-11-24 07:01:00\n\n# now we can convert back to units of time since the epoch, \n# only that the epoch is now eastern time:\ndf[\"ET_epochtime\"] = df[\"ET_naive\"].astype(\"int64\") / 1e6 # division gives milliseconds\n\n# df[\"ET_epochtime\"] correctly accounts for DST offset:\n(df[\"unix_ts\"]-df[\"ET_epochtime\"])/3600000\n# 0 4.0\n# 1 5.0\n# dtype: float64\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "datetime", "pandas", "python", "unix_timestamp" ]
stackoverflow_0074576268_dataframe_datetime_pandas_python_unix_timestamp.txt
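A row-by-row sketch of the offset extraction the asker wanted, using the standard Timestamp.utcoffset() instead of string slicing; it rebuilds the ET column from the question's data and handles the 4/5 hour DST split automatically.
import pandas as pd

data = [{"unix_ts": 1669291200000, "val": 10}, {"unix_ts": 1669291260000, "val": 25}]
df = pd.DataFrame(data)
df["ET"] = pd.to_datetime(df["unix_ts"], unit="ms", utc=True).dt.tz_convert("US/Eastern")

# utcoffset() yields a timedelta per row, so DST is accounted for date by date
offset_ms = df["ET"].apply(lambda ts: ts.utcoffset().total_seconds() * 1000).astype("int64")
df["adjusted_ts"] = df["unix_ts"] + offset_ms  # offset is negative for US/Eastern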
Q: Selenium web scraping project I am new to using selenium. I previously wrote a scraper using Beautiful Soup and it was working fine until I ran into "accept cookie". enter image description here I attempted to use Selenium to click on the "X" button, and then I wanted to pass the page_source to Beautifulsoup to reuse my previous script. But my soup is still showing the page with the "accept cookie", so none of the classes can be found. This is the website I want to scrape: https://sturents.com/s/newcastle/newcastle?ne=54.9972%2C-1.5544&sw=54.9546%2C-1.6508 Here is the script: driver = webdriver.Chrome(PATH) driver.get(link) soup = BeautifulSoup(r_more_housing.text, 'html.parser') element = driver.find_element(By.CLASS_NAME, "new--icon-cross") element.click() time.sleep(10) driver.refresh() soup = BeautifulSoup(driver.page_source) rooms = soup.find_all('a', class_="new--listing-item js-listing-item") rooms would return empty string. I tried returning soup and it showed the page as if the button had not been clicked. A: But my soup is still showing the page with the "accept cookie", so none of the classes can be found But the listings should show up even without closing the cookies notice, and the part of your code to close the cookies notice looks fine anyway (maybe refreshing brings it back....?) You might have noticed that at first the page looks like So, I think you need to give some time for the listings to load after refreshing. (About that, why are you refreshing? If you don't refresh, the 10sec wait should be enough time to load...) # from selenium.webdriver.support.ui import WebDriverWait # from selenium.webdriver.support import expected_conditions as EC ## [ BEFORE BeautifulSoup ] ## maxWait = 10 # adjust as preferred wait = WebDriverWait(driver, maxWait) wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, 'js-listing-item'))) ## [ NOW YOU CAN PARSE WITH BeautifulSoup ] ## that should force the program to wait until the listings are loaded. Btw, .find_all('a', class_="new--listing-item js-listing-item") might not be the most reliable way to get rooms, since the new--listing-item class only shows up for wide screens; it might be better to use .find_all('a', {'class': 'js-listing-item'}) or .select('a.js-listing-item') instead. rooms would return empty string Are you sure? find_all is supposed to return a ResultSet, which is closer to a list... Also, [this isn't very essential, but] it's better that the identifiers passed to find_element are as unique as possible; there are at least 3 other elements with the new--icon-cross class (even though close-cookies is the first, so that bit of your code should work anyway), but only the close-cookies button has the js-cookie-close class.
Selenium web scraping project
I am new to using selenium. I previously wrote a scraper using Beautiful Soup and it was working fine until I ran into "accept cookie". enter image description here I attempted to use Selenium to click on the "X" button, and then I wanted to pass the page_source to Beautifulsoup to reuse my previous script. But my soup is still showing the page with the "accept cookie", so none of the classes can be found. This is the website I want to scrape: https://sturents.com/s/newcastle/newcastle?ne=54.9972%2C-1.5544&sw=54.9546%2C-1.6508 Here is the script: driver = webdriver.Chrome(PATH) driver.get(link) soup = BeautifulSoup(r_more_housing.text, 'html.parser') element = driver.find_element(By.CLASS_NAME, "new--icon-cross") element.click() time.sleep(10) driver.refresh() soup = BeautifulSoup(driver.page_source) rooms = soup.find_all('a', class_="new--listing-item js-listing-item") rooms would return empty string. I tried returning soup and it showed the page as if the button had not been clicked.
[ "\nBut my soup is still showing the page with the \"accept cookie\", resulting in none of the class to be able to be found\n\nBut the listings should show up even without closing cookies notice, and the part of your code to close the cookies notice looks fine anyway (maybe refreshing brings it back....?)\n\nYou might have noticed that at first the page looks like\n\nSo, I think you need to give some time for the listings to load after refreshing. (About that, why are you refreshing? If you don't refresh, the 10sec wait should be enough time to load...)\n# from selenium.webdriver.support.ui import WebDriverWait\n# from selenium.webdriver.support import expected_conditions as EC\n\n## [ BEFORE BeautifulSoup ] ##\n\nmaxWait = 10 # adjust as preferred\nwait = WebDriverWait(driver, maxWait)\nwait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, 'js-listing-item')))\n\n## [ NOW YOU CAN PARSE WITH BeautifulSoup ] ##\n\nthat should force the program to wait until the listings are loaded.\n\nBtw, .find_all('a', class_=\"new--listing-item js-listing-item\") might not be the most reliable way to get rooms, since the new--listing-item class only shows up for wide screens; it might be better to use .find_all('a', {'class': 'js-listing-item'}) or .select('a.js-listing-item') instead.\n\n\nrooms would return empty string\n\nAre you sure? find_all is supposed to return a ResultSet, which is closer to a list...\n\nAlso, [this isn't very essential, but] it's better that the identifiers passed to find_element are as unique as possible; there are at least 3 other elements with the new--icon-cross class (even though close-cookies is the first, so that bit of your code should work anyway), but only the close-cookies button has the js-cookie-close class.\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "python", "selenium", "web_scraping" ]
stackoverflow_0074588641_beautifulsoup_python_selenium_web_scraping.txt
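Putting the answer's pieces together, a condensed sketch of the whole flow: close the cookie notice, wait for the listings, then hand page_source to BeautifulSoup. The URL and class names are the ones from the question and answer.
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://sturents.com/s/newcastle/newcastle?ne=54.9972%2C-1.5544&sw=54.9546%2C-1.6508")

driver.find_element(By.CLASS_NAME, "js-cookie-close").click()  # class suggested in the answer
WebDriverWait(driver, 10).until(
    EC.visibility_of_all_elements_located((By.CLASS_NAME, "js-listing-item")))

# parse only after the listings exist in the DOM
soup = BeautifulSoup(driver.page_source, "html.parser")
rooms = soup.select("a.js-listing-item")
print(len(rooms))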
Q: TimeoutError: [WinError 10060] A connection attempt failed : Except block doesn't execute either In my program I have a try and except block like so try: if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry): closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry) except(TimeoutError) as e: print("Timeout error occurred re-trying...:{}".format(datetime.now())) time.sleep(1) if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry): closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry) the exitCriteria function checks for some data and then calls closeAllOpenAndPendingTrades() and in closeAllOpenAndPendingTrades() there is a call to the server. I noticed that I was hitting: Exception in thread Thread-1: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 174, in _new_conn conn = connection.create_connection( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 96, in create_connection raise err File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 86, in create_connection sock.connect(sa) TimeoutError: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond So I added the except block. But the except block doesn't execute at all. Is there anything I can do to retry once a timeout error is hit? Complete error log Exception in thread Thread-1: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 174, in _new_conn conn = connection.create_connection( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 96, in create_connection raise err File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 86, in create_connection sock.connect(sa) TimeoutError: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 699, in urlopen httplib_response = self._make_request( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request self._validate_conn(conn) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn conn.connect() File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 358, in connect conn = self._new_conn() File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 186, in _new_conn raise NewConnectionError( urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x000002219E8422B0>: Failed to establish a new connection: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond During handling of the above
exception, another exception occurred: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\adapters.py", line 440, in send resp = conn.urlopen( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 755, in urlopen retries = retries.increment( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\retry.py", line 574, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='norenapi.thefirstock.com', port=443): Max retries exceeded with url: /NorenWClientTP/PositionBook (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x000002219E8422B0>: Failed to establish a new connection: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond')) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\threading.py", line 932, in _bootstrap_inner self.run() File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\threading.py", line 870, in run self._target(*self._args, **self._kwargs) File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 1035, in workerThrd try: File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 540, in exitCritera File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 518, in maxProfitHit def maxProfitHit(ceLivePrice, peLivePrice,ceSellPrice,peSellPrice): File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 482, in tradeMaxedOut posBook = getPositionBook() File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 395, in getPositionBook posBook = client.orders.position_book(posBook) # This returns a string. 
File "C:\Users\cgs\Documents\Personal\Stocks\pycode\client\py_client\modules\orders\datasource.py", line 190, in position_book response_json = self._run_request(model, endpoints.POSITION_BOOK, key) File "C:\Users\cgs\Documents\Personal\Stocks\pycode\client\py_client\utils\datasources\noren\datasource.py", line 20, in _run_request return self.post(endpoint, f"jData={request_json}&jKey={key}") File "C:\Users\cgs\Documents\Personal\Stocks\pycode\client\py_client\utils\datasources\rest\datasource.py", line 68, in post response = requests.post( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\api.py", line 117, in post return request('post', url, data=data, json=json, **kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 529, in request resp = self.send(prep, **send_kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 645, in send r = adapter.send(request, **kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\adapters.py", line 519, in send raise ConnectionError(e, request=request) requests.exceptions.ConnectionError: HTTPSConnectionPool(host='norenapi.thefirstock.com', port=443): Max retries exceeded with url: /NorenWClientTP/P A: [WinError 10060] is a Windows Socket Error. Try catching it as an OSError, that worked for me in a very similar case. Change your code to the following: try: if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry): closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry) except OSError as e: print(e) print("Timeout error occured re-trying...:{}".format(datetime.now())) time.sleep(1) if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry): closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry)
TimeoutError: [WinError 10060] A connection attempt failed : Except block doesn't execute either
In my program I have a try and except block like so try: if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry): closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry) except(TimeoutError) as e: print("Timeout error occurred re-trying...:{}".format(datetime.now())) time.sleep(1) if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry): closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry) the exitCriteria function checks for some data and then calls closeAllOpenAndPendingTrades() and in closeAllOpenAndPendingTrades() there is a call to the server. I noticed that I was hitting: Exception in thread Thread-1: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 174, in _new_conn conn = connection.create_connection( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 96, in create_connection raise err File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 86, in create_connection sock.connect(sa) TimeoutError: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond So I added the except block. But the except block doesn't execute at all. Is there anything I can do to retry once a timeout error is hit? Complete error log Exception in thread Thread-1: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 174, in _new_conn conn = connection.create_connection( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 96, in create_connection raise err File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\connection.py", line 86, in create_connection sock.connect(sa) TimeoutError: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 699, in urlopen httplib_response = self._make_request( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request self._validate_conn(conn) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn conn.connect() File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 358, in connect conn = self._new_conn() File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connection.py", line 186, in _new_conn raise NewConnectionError( urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x000002219E8422B0>: Failed to establish a new connection: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond During handling of the above exception, another exception occurred: Traceback (most recent call last): File
"C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\adapters.py", line 440, in send resp = conn.urlopen( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\connectionpool.py", line 755, in urlopen retries = retries.increment( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\urllib3\util\retry.py", line 574, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='norenapi.thefirstock.com', port=443): Max retries exceeded with url: /NorenWClientTP/PositionBook (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x000002219E8422B0>: Failed to establish a new connection: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond')) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\threading.py", line 932, in _bootstrap_inner self.run() File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\threading.py", line 870, in run self._target(*self._args, **self._kwargs) File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 1035, in workerThrd try: File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 540, in exitCritera File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 518, in maxProfitHit def maxProfitHit(ceLivePrice, peLivePrice,ceSellPrice,peSellPrice): File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 482, in tradeMaxedOut posBook = getPositionBook() File "C:/Users/cgs/Documents/Personal/Stocks/pycode/client/py_client/test.py", line 395, in getPositionBook posBook = client.orders.position_book(posBook) # This returns a string. File "C:\Users\cgs\Documents\Personal\Stocks\pycode\client\py_client\modules\orders\datasource.py", line 190, in position_book response_json = self._run_request(model, endpoints.POSITION_BOOK, key) File "C:\Users\cgs\Documents\Personal\Stocks\pycode\client\py_client\utils\datasources\noren\datasource.py", line 20, in _run_request return self.post(endpoint, f"jData={request_json}&jKey={key}") File "C:\Users\cgs\Documents\Personal\Stocks\pycode\client\py_client\utils\datasources\rest\datasource.py", line 68, in post response = requests.post( File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\api.py", line 117, in post return request('post', url, data=data, json=json, **kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 529, in request resp = self.send(prep, **send_kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\sessions.py", line 645, in send r = adapter.send(request, **kwargs) File "C:\Users\cgs\AppData\Local\Programs\Python\Python38\lib\site-packages\requests\adapters.py", line 519, in send raise ConnectionError(e, request=request) requests.exceptions.ConnectionError: HTTPSConnectionPool(host='norenapi.thefirstock.com', port=443): Max retries exceeded with url: /NorenWClientTP/P
[ "[WinError 10060] is a Windows Socket Error. Try catching it as an OSError, that worked for me in a very similar case.\nChange your code to the following:\ntry:\n if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry):\n closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry)\nexcept OSError as e:\n print(e)\n print(\"Timeout error occured re-trying...:{}\".format(datetime.now()))\n time.sleep(1)\n if exitCritera(ceLiveprc,peLiveprc,ceEntry,peEntry):\n closeAllOpenAndPendingTrades(cTok,pTok,ceEntry,peEntry)\n\n" ]
[ 1 ]
[]
[]
[ "python", "python_requests", "timeoutexception" ]
stackoverflow_0072267807_python_python_requests_timeoutexception.txt
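Since the traceback bottoms out in requests.exceptions.ConnectionError (which itself derives from OSError), a bounded retry loop is one hedged way to extend the accepted answer; the function and variable names below are the question's own.
import time
import requests

for attempt in range(3):  # retry a bounded number of times
    try:
        if exitCritera(ceLiveprc, peLiveprc, ceEntry, peEntry):
            closeAllOpenAndPendingTrades(cTok, pTok, ceEntry, peEntry)
        break
    except (requests.exceptions.ConnectionError, OSError) as e:
        print("connection problem ({}), retry {}/3".format(e, attempt + 1))
        time.sleep(1)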
Q: splitting a text by a capital letter after a small letter, without losing the small letter I have the following type of strings: "CanadaUnited States", "GermanyEnglandSpain" I want to split them into the countries' names, i.e.: ['Canada', 'United States'] ['Germany', 'England', 'Spain'] I have tried using the following regex: text = "GermanyEnglandSpain" re.split('[a-z](?=[A-Z])', text) and I'm getting: ['German', 'Englan', 'Spain'] How can I not lose the last char in every word? Thanks! A: I would use re.findall here with a regex find all approach: inp = "CanadaUnited States" countries = re.findall(r'[A-Z][a-z]+(?: [A-Z][a-z]+)*', inp) print(countries) # ['Canada', 'United States'] The regex pattern used here says to match: [A-Z][a-z]+ match a leading uppercase word of a country name (?: [A-Z][a-z]+)* followed by space and another capital word, 0 or more times A: My answer is longer than Tim's because I wanted to cover more cases of the problem so that you can change it as you need. You can shorten it by using lambda functions and putting multiple regex into one Basic flow: add a space before every upper letter, replace multiple spaces with *, split on single spaces, and replace * with single space import re text = "GermanyUnited StatesEnglandUnited StatesSpain" text2=re.sub('([A-Z])', r' \1', text) #adds a single space before every upper letter print(text2) #Germany United States England United States Spain text3=re.sub('\s{2,}', '*', text2)#replaces 2 or more spaces with * so that we can replace later print(text3) #Germany United*States England United*States Spain text4=re.split(' ',text3)#splits the text into a list on every single space print(text4) #['', 'Germany', 'United*States', 'England', 'United*States', 'Spain'] text5=[] for i in text4: text5.append(re.sub('\*', ' ', i)) #replace every * with a single space text5=list(filter(None, text5)) #remove empty elements print(text5) #['Germany', 'United States', 'England', 'United States', 'Spain'] A: You can use re.split with capture groups like so, but then you will also need to filter out the empty delimiters: import re text = "GermanyEnglandSpain" res = re.split('([A-Z][a-z]*)', text) res = list(filter(None, res)) print(res)
splitting a text by a capital letter after a small letter, without losing the small letter
I have the following type of strings: "CanadaUnited States", "GermanyEnglandSpain" I want to split them into the countries' names, i.e.: ['Canada', 'United States'] ['Germany', 'England', 'Spain'] I have tried using the following regex: text = "GermanyEnglandSpain" re.split('[a-z](?=[A-Z])', text) and I'm getting: ['German', 'Englan', 'Spain'] How can I not lose the last char in every word? Thanks!
[ "I would use re.findall here with a regex find all approach:\ninp = \"CanadaUnited States\"\ncountries = re.findall(r'[A-Z][a-z]+(?: [A-Z][a-z]+)*', inp)\nprint(countries) # ['Canada', 'United States']\n\nThe regex pattern used here says to match:\n\n[A-Z][a-z]+ match a leading uppercase word of a country name\n(?: [A-Z][a-z]+)* followed by space and another capital word, 0 or more times\n\n", "My answer is longer than Tim's because I wanted to include more cases to the problem so that you can change it as you need it. You can shorten it by using lambda functions and putting multiple regex into one\nBasic flow: add a space before every upper letter, replace multiple spaces with *, split on single spaces, and replace * with single space\nimport re\ntext = \"GermanyUnited StatesEnglandUnited StatesSpain\"\ntext2=re.sub('([A-Z])', r' \\1', text) #adds a single space before every upper letter\nprint(text2) \n#Germany United States England United States Spain\ntext3=re.sub('\\s{2,}', '*', text2)#replaces 2 or more spaces with * so that we can replace later\nprint(text3)\n#Germany United*States England United*States Spain\ntext4=re.split(' ',text3)#splits the text into list on evert single space\nprint(text4)\n#['', 'Germany', 'United*States', 'England', 'United*States', 'Spain']\ntext5=[]\n\nfor i in text4:\n text5.append(re.sub('\\*', ' ', i)) #replace every * with a single space \ntext5=list(filter(None, text5)) #remove empty elements \n\nprint(text5)\n#['Germany', 'United States', 'England', 'United States', 'Spain']\n\n", "You can use re.split with capture groups like so, but then you will also need to filter out the empty delimeters:\nimport re\n\ntext = \"GermanyEnglandSpain\"\nres = re.split('([A-Z][a-z]*)', text)\nres = list(filter(None, res))\nprint(res)\n\n" ]
[ 2, 2, 1 ]
[]
[]
[ "python", "regex", "string" ]
stackoverflow_0074589171_python_regex_string.txt
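A zero-width split is another sketch worth knowing here, assuming Python 3.7+ (where re.split accepts empty matches): a lookbehind plus lookahead matches the boundary between a lowercase and an uppercase letter without consuming either character.
import re

# split at the empty position between a lowercase and an uppercase letter
pattern = r'(?<=[a-z])(?=[A-Z])'
print(re.split(pattern, "GermanyEnglandSpain"))  # ['Germany', 'England', 'Spain']
print(re.split(pattern, "CanadaUnited States"))  # ['Canada', 'United States']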
Q: What is the Python equivalent of Matlab's tic and toc functions? A: Apart from timeit which ThiefMaster mentioned, a simple way to do it is just (after importing time): t = time.time() # do stuff elapsed = time.time() - t I have a helper class I like to use: class Timer(object): def __init__(self, name=None): self.name = name def __enter__(self): self.tstart = time.time() def __exit__(self, type, value, traceback): if self.name: print('[%s]' % self.name,) print('Elapsed: %s' % (time.time() - self.tstart)) It can be used as a context manager: with Timer('foo_stuff'): # do some foo # do some stuff Sometimes I find this technique more convenient than timeit - it all depends on what you want to measure. A: I had the same question when I migrated to python from Matlab. With the help of this thread I was able to construct an exact analog of the Matlab tic() and toc() functions. Simply insert the following code at the top of your script. import time def TicTocGenerator(): # Generator that returns time differences ti = 0 # initial time tf = time.time() # final time while True: ti = tf tf = time.time() yield tf-ti # returns the time difference TicToc = TicTocGenerator() # create an instance of the TicTocGen generator # This will be the main function through which we define both tic() and toc() def toc(tempBool=True): # Prints the time difference yielded by generator instance TicToc tempTimeInterval = next(TicToc) if tempBool: print( "Elapsed time: %f seconds.\n" %tempTimeInterval ) def tic(): # Records a time in TicToc, marks the beginning of a time interval toc(False) That's it! Now we are ready to fully use tic() and toc() just as in Matlab. For example tic() time.sleep(5) toc() # returns "Elapsed time: 5.00 seconds." Actually, this is more versatile than the built-in Matlab functions. Here, you could create another instance of the TicTocGenerator to keep track of multiple operations, or just to time things differently. For instance, while timing a script, we can now time each piece of the script separately, as well as the entire script. (I will provide a concrete example) TicToc2 = TicTocGenerator() # create another instance of the TicTocGen generator def toc2(tempBool=True): # Prints the time difference yielded by generator instance TicToc2 tempTimeInterval = next(TicToc2) if tempBool: print( "Elapsed time 2: %f seconds.\n" %tempTimeInterval ) def tic2(): # Records a time in TicToc2, marks the beginning of a time interval toc2(False) Now you should be able to time two separate things: In the following example, we time the total script and parts of a script separately. tic() time.sleep(5) tic2() time.sleep(3) toc2() # returns "Elapsed time 2: 5.00 seconds." toc() # returns "Elapsed time: 8.00 seconds." Actually, you do not even need to use tic() each time. If you have a series of commands that you want to time, then you can write tic() time.sleep(1) toc() # returns "Elapsed time: 1.00 seconds." time.sleep(2) toc() # returns "Elapsed time: 2.00 seconds." time.sleep(3) toc() # returns "Elapsed time: 3.00 seconds." # and so on... I hope that this is helpful. A: The absolute best analog of tic and toc would be to simply define them in python. def tic(): #Homemade version of matlab tic and toc functions import time global startTime_for_tictoc startTime_for_tictoc = time.time() def toc(): import time if 'startTime_for_tictoc' in globals(): print "Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds."
else: print "Toc: start time not set" Then you can use them as: tic() # do stuff toc() A: Usually, IPython's %time, %timeit, %prun and %lprun (if one has line_profiler installed) satisfy my profiling needs quite well. However, a use case for tic-toc-like functionality arose when I tried to profile calculations that were interactively driven, i.e., by the user's mouse motion in a GUI. I felt like spamming tics and tocs in the sources while testing interactively would be the fastest way to reveal the bottlenecks. I went with Eli Bendersky's Timer class, but wasn't fully happy, since it required me to change the indentation of my code, which can be inconvenient in some editors and confuses the version control system. Moreover, there may be the need to measure the time between points in different functions, which wouldn't work with the with statement. After trying lots of Python cleverness, here is the simple solution that I found worked best: from time import time _tstart_stack = [] def tic(): _tstart_stack.append(time()) def toc(fmt="Elapsed: %s s"): print fmt % (time() - _tstart_stack.pop()) Since this works by pushing the starting times on a stack, it will work correctly for multiple levels of tics and tocs. It also allows one to change the format string of the toc statement to display additional information, which I liked about Eli's Timer class. For some reason I got concerned with the overhead of a pure Python implementation, so I tested a C extension module as well: #include <Python.h> #include <mach/mach_time.h> #define MAXDEPTH 100 uint64_t start[MAXDEPTH]; int lvl=0; static PyObject* tic(PyObject *self, PyObject *args) { start[lvl++] = mach_absolute_time(); Py_RETURN_NONE; } static PyObject* toc(PyObject *self, PyObject *args) { return PyFloat_FromDouble( (double)(mach_absolute_time() - start[--lvl]) / 1000000000L); } static PyObject* res(PyObject *self, PyObject *args) { return tic(NULL, NULL), toc(NULL, NULL); } static PyMethodDef methods[] = { {"tic", tic, METH_NOARGS, "Start timer"}, {"toc", toc, METH_NOARGS, "Stop timer"}, {"res", res, METH_NOARGS, "Test timer resolution"}, {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC inittictoc(void) { Py_InitModule("tictoc", methods); } This is for MacOSX, and I have omitted code to check if lvl is out of bounds for brevity. While tictoc.res() yields a resolution of about 50 nanoseconds on my system, I found that the jitter of measuring any Python statement is easily in the microsecond range (and much more when used from IPython). At this point, the overhead of the Python implementation becomes negligible, so that it can be used with the same confidence as the C implementation. I found that the usefulness of the tic-toc-approach is practically limited to code blocks that take more than 10 microseconds to execute. Below that, averaging strategies like in timeit are required to get a faithful measurement. A: You can use tic and toc from ttictoc. Install it with pip install ttictoc And just import them in your script as follow from ttictoc import tic,toc tic() # Some code print(toc()) A: I have just created a module [tictoc.py] for achieving nested tic tocs, which is what Matlab does. 
from time import time tics = [] def tic(): tics.append(time()) def toc(): if len(tics)==0: return None else: return time()-tics.pop() And it works this way: from tictoc import tic, toc # This keeps track of the whole process tic() # Timing a small portion of code (maybe a loop) tic() # -- Nested code here -- # End toc() # This returns the elapsed time (in seconds) since the last invocation of tic() toc() # This does the same for the first tic() I hope it helps. A: pip install easy-tictoc In the code: from tictoc import tic, toc tic() #Some code toc() Disclaimer: I'm the author of this library. A: Have a look at the timeit module. It's not really equivalent but if the code you want to time is inside a function you can easily use it. A: Updating Eli's answer to Python 3: class Timer(object): def __init__(self, name=None, filename=None): self.name = name self.filename = filename def __enter__(self): self.tstart = time.time() def __exit__(self, type, value, traceback): message = 'Elapsed: %.2f seconds' % (time.time() - self.tstart) if self.name: message = '[%s] ' % self.name + message print(message) if self.filename: with open(self.filename,'a') as file: print(str(datetime.datetime.now())+": ",message,file=file) Just like Eli's, it can be used as a context manager: import time with Timer('Count'): for i in range(0,10_000_000): pass Output: [Count] Elapsed: 0.27 seconds I have also updated it to print the units of time reported (seconds) and trim the number of digits as suggested by Can, and with the option of also appending to a log file. You must import datetime to use the logging feature: import time import datetime with Timer('Count', 'log.txt'): for i in range(0,10_000_000): pass A: I changed @Eli Bendersky's answer a little bit to use the ctor __init__() and dtor __del__() to do the timing, so that it can be used more conveniently without indenting the original code: class Timer(object): def __init__(self, name=None): self.name = name self.tstart = time.time() def __del__(self): if self.name: print '%s elapsed: %.2fs' % (self.name, time.time() - self.tstart) else: print 'Elapsed: %.2fs' % (time.time() - self.tstart) To use, simply put Timer("blahblah") at the beginning of some local scope. Elapsed time will be printed at the end of the scope: for i in xrange(5): timer = Timer("eigh()") x = numpy.random.random((4000,4000)); x = (x+x.T)/2 numpy.linalg.eigh(x) print i+1 timer = None It prints out: 1 eigh() elapsed: 10.13s 2 eigh() elapsed: 9.74s 3 eigh() elapsed: 10.70s 4 eigh() elapsed: 10.25s 5 eigh() elapsed: 11.28s A: This can also be done using a wrapper. A very general way of keeping time.
The wrapper in this example code wraps any function and prints the amount of time needed to execute the function: def timethis(f): import time def wrapped(*args, **kwargs): start = time.time() r = f(*args, **kwargs) print "Executing {0} took {1} seconds".format(f.func_name, time.time()-start) return r return wrapped @timethis def thistakestime(): for x in range(10000000): pass thistakestime() A: Building on Stefan and antonimmo's answers, I ended up putting def Tictoc(): start_stack = [] start_named = {} def tic(name=None): if name is None: start_stack.append(time()) else: start_named[name] = time() def toc(name=None): if name is None: start = start_stack.pop() else: start = start_named.pop(name) elapsed = time() - start return elapsed return tic, toc in a utils.py module, and I use it with a from utils import Tictoc tic, toc = Tictoc() This way you can simply use tic(), toc() and nest them like in Matlab alternatively, you can name them: tic(1), toc(1) or tic('very-important-block'), toc('very-important-block') and timers with different names won't interfere importing them this way prevents interference between modules using it. (here toc does not print the elapsed time, but returns it.) A: This solution works for my profiling needs: from time import time import inspect def tic(): tic.t = time() def toc(message=None): time_elapsed = time() - tic.t if message is None: message = inspect.currentframe().f_back.f_lineno print(message, time_elapsed) tic.t = time() return time_elapsed Then you can just paste a lot of toc()s in your code, and you have a pretty powerful profiler. (message defaults to the caller's code line in file)
What is the Python equivalent of Matlab's tic and toc functions?
What is the Python equivalent of Matlab's tic and toc functions?
[ "Apart from timeit which ThiefMaster mentioned, a simple way to do it is just (after importing time):\nt = time.time()\n# do stuff\nelapsed = time.time() - t\n\nI have a helper class I like to use:\nclass Timer(object):\n def __init__(self, name=None):\n self.name = name\n\n def __enter__(self):\n self.tstart = time.time()\n\n def __exit__(self, type, value, traceback):\n if self.name:\n print('[%s]' % self.name,)\n print('Elapsed: %s' % (time.time() - self.tstart))\n\nIt can be used as a context manager:\nwith Timer('foo_stuff'):\n # do some foo\n # do some stuff\n\nSometimes I find this technique more convenient than timeit - it all depends on what you want to measure.\n", "I had the same question when I migrated to python from Matlab. With the help of this thread I was able to construct an exact analog of the Matlab tic() and toc() functions. Simply insert the following code at the top of your script.\nimport time\n\ndef TicTocGenerator():\n # Generator that returns time differences\n ti = 0 # initial time\n tf = time.time() # final time\n while True:\n ti = tf\n tf = time.time()\n yield tf-ti # returns the time difference\n\nTicToc = TicTocGenerator() # create an instance of the TicTocGen generator\n\n# This will be the main function through which we define both tic() and toc()\ndef toc(tempBool=True):\n # Prints the time difference yielded by generator instance TicToc\n tempTimeInterval = next(TicToc)\n if tempBool:\n print( \"Elapsed time: %f seconds.\\n\" %tempTimeInterval )\n\ndef tic():\n # Records a time in TicToc, marks the beginning of a time interval\n toc(False)\n\nThat's it! Now we are ready to fully use tic() and toc() just as in Matlab. For example\ntic()\n\ntime.sleep(5)\n\ntoc() # returns \"Elapsed time: 5.00 seconds.\"\n\nActually, this is more versatile than the built-in Matlab functions. Here, you could create another instance of the TicTocGenerator to keep track of multiple operations, or just to time things differently. For instance, while timing a script, we can now time each piece of the script seperately, as well as the entire script. (I will provide a concrete example)\nTicToc2 = TicTocGenerator() # create another instance of the TicTocGen generator\n\ndef toc2(tempBool=True):\n # Prints the time difference yielded by generator instance TicToc2\n tempTimeInterval = next(TicToc2)\n if tempBool:\n print( \"Elapsed time 2: %f seconds.\\n\" %tempTimeInterval )\n\ndef tic2():\n # Records a time in TicToc2, marks the beginning of a time interval\n toc2(False)\n\nNow you should be able to time two separate things: In the following example, we time the total script and parts of a script separately.\ntic()\n\ntime.sleep(5)\n\ntic2()\n\ntime.sleep(3)\n\ntoc2() # returns \"Elapsed time 2: 5.00 seconds.\"\n\ntoc() # returns \"Elapsed time: 8.00 seconds.\"\n\nActually, you do not even need to use tic() each time. 
If you have a series of commands that you want to time, then you can write\ntic()\n\ntime.sleep(1)\n\ntoc() # returns \"Elapsed time: 1.00 seconds.\"\n\ntime.sleep(2)\n\ntoc() # returns \"Elapsed time: 2.00 seconds.\"\n\ntime.sleep(3)\n\ntoc() # returns \"Elapsed time: 3.00 seconds.\"\n\n# and so on...\n\nI hope that this is helpful.\n", "The absolute best analog of tic and toc would be to simply define them in python.\ndef tic():\n #Homemade version of matlab tic and toc functions\n import time\n global startTime_for_tictoc\n startTime_for_tictoc = time.time()\n\ndef toc():\n import time\n if 'startTime_for_tictoc' in globals():\n print \"Elapsed time is \" + str(time.time() - startTime_for_tictoc) + \" seconds.\"\n else:\n print \"Toc: start time not set\"\n\nThen you can use them as:\ntic()\n# do stuff\ntoc()\n\n", "Usually, IPython's %time, %timeit, %prun and %lprun (if one has line_profiler installed) satisfy my profiling needs quite well. However, a use case for tic-toc-like functionality arose when I tried to profile calculations that were interactively driven, i.e., by the user's mouse motion in a GUI. I felt like spamming tics and tocs in the sources while testing interactively would be the fastest way to reveal the bottlenecks. I went with Eli Bendersky's Timer class, but wasn't fully happy, since it required me to change the indentation of my code, which can be inconvenient in some editors and confuses the version control system. Moreover, there may be the need to measure the time between points in different functions, which wouldn't work with the with statement. After trying lots of Python cleverness, here is the simple solution that I found worked best:\nfrom time import time\n_tstart_stack = []\n\ndef tic():\n _tstart_stack.append(time())\n\ndef toc(fmt=\"Elapsed: %s s\"):\n print fmt % (time() - _tstart_stack.pop())\n\nSince this works by pushing the starting times on a stack, it will work correctly for multiple levels of tics and tocs. It also allows one to change the format string of the toc statement to display additional information, which I liked about Eli's Timer class.\nFor some reason I got concerned with the overhead of a pure Python implementation, so I tested a C extension module as well:\n#include <Python.h>\n#include <mach/mach_time.h>\n#define MAXDEPTH 100\n\nuint64_t start[MAXDEPTH];\nint lvl=0;\n\nstatic PyObject* tic(PyObject *self, PyObject *args) {\n start[lvl++] = mach_absolute_time();\n Py_RETURN_NONE;\n}\n\nstatic PyObject* toc(PyObject *self, PyObject *args) {\nreturn PyFloat_FromDouble(\n (double)(mach_absolute_time() - start[--lvl]) / 1000000000L);\n}\n\nstatic PyObject* res(PyObject *self, PyObject *args) {\n return tic(NULL, NULL), toc(NULL, NULL);\n}\n\nstatic PyMethodDef methods[] = {\n {\"tic\", tic, METH_NOARGS, \"Start timer\"},\n {\"toc\", toc, METH_NOARGS, \"Stop timer\"},\n {\"res\", res, METH_NOARGS, \"Test timer resolution\"},\n {NULL, NULL, 0, NULL}\n};\n\nPyMODINIT_FUNC\ninittictoc(void) {\n Py_InitModule(\"tictoc\", methods);\n}\n\nThis is for MacOSX, and I have omitted code to check if lvl is out of bounds for brevity. While tictoc.res() yields a resolution of about 50 nanoseconds on my system, I found that the jitter of measuring any Python statement is easily in the microsecond range (and much more when used from IPython). At this point, the overhead of the Python implementation becomes negligible, so that it can be used with the same confidence as the C implementation. 
\nI found that the usefulness of the tic-toc-approach is practically limited to code blocks that take more than 10 microseconds to execute. Below that, averaging strategies like in timeit are required to get a faithful measurement. \n", "You can use tic and toc from ttictoc. Install it with\npip install ttictoc\n\nAnd just import them in your script as follow\nfrom ttictoc import tic,toc\ntic()\n# Some code\nprint(toc())\n\n", "I have just created a module [tictoc.py] for achieving nested tic tocs, which is what Matlab does.\nfrom time import time\n\ntics = []\n\ndef tic():\n tics.append(time())\n\ndef toc():\n if len(tics)==0:\n return None\n else:\n return time()-tics.pop()\n\nAnd it works this way:\nfrom tictoc import tic, toc\n\n# This keeps track of the whole process\ntic()\n\n# Timing a small portion of code (maybe a loop)\ntic()\n\n# -- Nested code here --\n\n# End\ntoc() # This returns the elapse time (in seconds) since the last invocation of tic()\ntoc() # This does the same for the first tic()\n\nI hope it helps.\n", "pip install easy-tictoc\n\nIn the code:\nfrom tictoc import tic, toc\n\ntic()\n\n#Some code\n\ntoc()\n\n\nDisclaimer: I'm the author of this library.\n", "Have a look at the timeit module.\nIt's not really equivalent but if the code you want to time is inside a function you can easily use it.\n", "Updating Eli's answer to Python 3:\nclass Timer(object):\n def __init__(self, name=None, filename=None):\n self.name = name\n self.filename = filename\n\n def __enter__(self):\n self.tstart = time.time()\n\n def __exit__(self, type, value, traceback):\n message = 'Elapsed: %.2f seconds' % (time.time() - self.tstart)\n if self.name:\n message = '[%s] ' % self.name + message\n print(message)\n if self.filename:\n with open(self.filename,'a') as file:\n print(str(datetime.datetime.now())+\": \",message,file=file)\n\nJust like Eli's, it can be used as a context manager:\nimport time \nwith Timer('Count'):\n for i in range(0,10_000_000):\n pass\n\nOutput: \n[Count] Elapsed: 0.27 seconds\n\nI have also updated it to print the units of time reported (seconds) and trim the number of digits as suggested by Can, and with the option of also appending to a log file. You must import datetime to use the logging feature:\nimport time\nimport datetime \nwith Timer('Count', 'log.txt'): \n for i in range(0,10_000_000):\n pass\n\n", "I changed @Eli Bendersky's answer a little bit to use the ctor __init__() and dtor __del__() to do the timing, so that it can be used more conveniently without indenting the original code:\nclass Timer(object):\n def __init__(self, name=None):\n self.name = name\n self.tstart = time.time()\n\n def __del__(self):\n if self.name:\n print '%s elapsed: %.2fs' % (self.name, time.time() - self.tstart)\n else:\n print 'Elapsed: %.2fs' % (time.time() - self.tstart)\n\nTo use, simple put Timer(\"blahblah\") at the beginning of some local scope. Elapsed time will be printed at the end of the scope:\nfor i in xrange(5):\n timer = Timer(\"eigh()\")\n x = numpy.random.random((4000,4000));\n x = (x+x.T)/2\n numpy.linalg.eigh(x)\n print i+1\ntimer = None\n\nIt prints out:\n1\neigh() elapsed: 10.13s\n2\neigh() elapsed: 9.74s\n3\neigh() elapsed: 10.70s\n4\neigh() elapsed: 10.25s\n5\neigh() elapsed: 11.28s\n\n", "This can also be done using a wrapper. 
Very general way of keeping time.\nThe wrapper in this example code wraps any function and prints the amount of time needed to execute the function:\ndef timethis(f):\n import time\n\n def wrapped(*args, **kwargs):\n start = time.time()\n r = f(*args, **kwargs)\n print \"Executing {0} took {1} seconds\".format(f.func_name, time.time()-start)\n return r\n return wrapped\n\n@timethis\ndef thistakestime():\n for x in range(10000000):\n pass\n\nthistakestime()\n\n", "Building on Stefan and antonimmo's answers, I ended up putting\ndef Tictoc():\n start_stack = []\n start_named = {}\n\n def tic(name=None):\n if name is None:\n start_stack.append(time())\n else:\n start_named[name] = time()\n\n def toc(name=None):\n if name is None:\n start = start_stack.pop()\n else:\n start = start_named.pop(name)\n elapsed = time() - start\n return elapsed\n return tic, toc\n\nin a utils.py module, and I use it with a\nfrom utils import Tictoc\ntic, toc = Tictoc()\n\nThis way\n\nyou can simply use tic(), toc() and nest them like in Matlab\nalternatively, you can name them: tic(1), toc(1) or tic('very-important-block'), toc('very-important-block') and timers with different names won't interfere\nimporting them this way prevents interference between modules using it.\n\n(here toc does not print the elapsed time, but returns it.)\n", "This solution works for my profiling needs:\nfrom time import time\nimport inspect\n\ndef tic():\n tic.t = time()\n\ndef toc(message=None):\n time_elapsed = time() - tic.t\n if message is None:\n message = inspect.currentframe().f_back.f_lineno\n print(message, time_elapsed)\n tic.t = time()\n return time_elapsed\n\nThen you can just paste a lot of toc()s in your code, and you have a pretty powerful profiler. (message defaults to the caller's code line in file)\n" ]
[ 224, 50, 23, 18, 14, 8, 5, 4, 2, 1, 1, 1, 0 ]
[]
[]
[ "matlab", "python", "timing" ]
stackoverflow_0005849800_matlab_python_timing.txt
Q: How can I calculate the sum from a CSV file in Python? Hi guys, I have one problem with Python. I have just started to learn, and my teacher has given me a task in Python. The problem is how I can calculate the sum of a column in a CSV file in Python. Here is my try:
import pandas as pd

df = pd.read_csv('1.csv')
dfsum=sum(df)
print(dfsum)

My terminal complains that I am doing a sum with str on integers. Also, the CSV file is like this:
-12
-14
-76
-89
-98
-45
-26

A: So I don't know if your CSV file contains only negative numbers or if it's just the way you present it, but what I can say is that you can try this; it is going to work either way:
df = pd.read_csv("1.csv", names=["my_column"])
dfsum = df["my_column"].sum()

Assuming you have all negative numbers, the result is: -360
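For completeness, the same column sum can be computed with only the standard library; a minimal sketch, assuming 1.csv holds one integer per line with no header, as in the sample above:
import csv

with open("1.csv", newline="") as f:
    # each row is a list of strings; take the first field and skip blank lines
    total = sum(int(row[0]) for row in csv.reader(f) if row)

print(total)  # -360 for the sample data above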
How can I calculate the sum from a CSV file in Python?
Hy guys i have one problem with the python. I just start to learn and my teacher has give me the task with the python. So the problre is how i can calulate the sum from a column in csv file in pyhon. Here is my tries : import pandas as pd df = pd.read_csv('1.csv') dfsum=sum(df) print(dfsum) My termial is compley for i a do sum with str on integers. Also the csv file is like this : -12 -14 -76 -89 -98 -45 -26
[ "\nSo I dont know if your csv file contains only negative number or it's juste the way you present it, but what i can say is that you can try this it gonna work eitherway :\ndf = pd.read_csv(\"1.csv\", names=[\"my_column\"])\ndfsum = df[\"my_column\"].sum()\n\nassuming you have all negative numbers the result is : -360\n" ]
[ 1 ]
[]
[]
[ "csv", "python" ]
stackoverflow_0074589475_csv_python.txt
Q: webscraping returns not full info learning webscraping at the moment and decided to scrap telegram's web version so i choosed one of the chats (favorite one with yourself) and sent few voices there. My task is - i want to exract all voices lenght when i inspect page there is a div container <div class="audio-time">0:00</div> telegramURL = 'https://web.telegram.org/k/#@myname' audiolength = requests.get(telegramURL) soup2 = BeautifulSoup(audiolength.text, 'lxml') audio2 = soup.find_all('div', class_=re.compile('audio-time'))[0] print(audio2) all i get is an empty massive [] maybe i should use different parser? or url im using is wroing as it is not an official API or anything else also - while inspecting the page in the browser i can see this div container but when i get it with beauitful soup it seems like not full. <p class="browserupgrade">You are using an <strong>outdated</strong> browser. Please <a href="https://browsehappy.com/">upgrade your browser</a> to improve your experience and security.</p> <![endif]--><div class="whole" id="auth-pages" style="display:none"><div class="scrollable scrollable-y"><div class="tabs-container auth-pages__container" data-animation="tabs"><div class="tabs-tab page-signImport"><div class="container center-align"></div></div><div class="tabs-tab page-sign"><div class="container"><div class="auth-image"><svg class="sign-logo" height="160" viewbox="0 0 160 160" width="160" xmlns="http://www.w3.org/2000/svg"><use href="#logo"></use></svg></div></div></div><div class="tabs-tab page-signQR"><div class="container center-align"><div class="auth-image"></div></div></div><div class="tabs-tab page-authCode"><div class="container center-align"><div class="auth-image"></div><div class="phone-wrapper"><h4 class="phone"></h4><span class="phone-edit tgico-edit"></span></div><p class="subtitle sent-type"></p><div class="input-wrapper"></div></div></div><div class="tabs-tab page-password"></div><div class="tabs-tab page-signUp"></div></div></div></div><div class="whole page-chats" id="page-chats" style="display:none"><svg style="position:absolute;top:-10000px;left:-10000px"><defs id="svg-defs"><path d="M1.00002881,1.03679295e-14 L7,0 L7,17 C6.8069969,14.1607017 6.12380234,11.2332513 4.95041634,8.21764872 C4.04604748,5.89342034 2.50413132,3.73337411 0.324667862,1.73751004 L0.324652538,1.73752677 C-0.0826597201,1.36452676 -0.110475289,0.731958677 0.262524727,0.324646419 C0.451952959,0.117792698 0.719544377,1.0985861e-14 1.00002881,1.04360964e-14 Z" id="message-tail"></path><path d="M80,0 C124.18278,0 160,35.81722 160,80 C160,124.18278 124.18278,160 80,160 C35.81722,160 0,124.18278 0,80 C0,35.81722 35.81722,0 80,0 Z M114.262551,46.4516129 L114.123923,46.4516129 C111.089589,46.5056249 106.482806,48.0771432 85.1289541,56.93769 L81.4133571,58.4849956 C72.8664779,62.0684477 57.2607933,68.7965125 34.5963033,78.66919 C30.6591745,80.2345564 28.5967328,81.765936 28.4089783,83.2633288 C28.0626453,86.0254269 31.8703852,86.959903 36.7890378,88.5302703 L38.2642674,89.0045258 C42.3926354,90.314406 47.5534685,91.7248852 50.3250916,91.7847532 C52.9151948,91.8407003 55.7944784,90.8162976 58.9629426,88.7115451 L70.5121776,80.9327422 C85.6657026,70.7535853 93.6285785,65.5352892 94.4008055,65.277854 L94.6777873,65.216416 C95.1594319,65.1213105 95.7366278,65.0717596 96.1481181,65.4374337 C96.6344248,65.8695939 96.5866185,66.6880224 96.5351057,66.9075859 C96.127514,68.6448691 75.2839361,87.6143392 73.6629144,89.2417998 L73.312196,89.6016896 C68.7645143,94.2254793 63.9030972,97.1721503 
71.5637945,102.355193 L73.3593638,103.544598 C79.0660342,107.334968 82.9483395,110.083813 88.8107882,113.958377 L90.3875424,114.996094 C95.0654739,118.061953 98.7330313,121.697601 103.562866,121.253237 C105.740839,121.052855 107.989107,119.042224 109.175465,113.09692 L109.246762,112.727987 C112.002037,98.0012935 117.417883,66.09303 118.669527,52.9443975 C118.779187,51.7924073 118.641237,50.318088 118.530455,49.6708963 L118.474159,49.3781963 C118.341081,48.7651315 118.067967,48.0040758 117.346762,47.4189793 C116.412565,46.6610871 115.002114,46.4638844 114.262551,46.4516129 Z" fill-rule="evenodd" id="logo"></path><path d="M4.47,5.33v13.6c0,4.97,4.03,9,9,9h458.16" id="poll-line"></path><path d="M 4.7071 12.2929 l 5 5 c 0.3905 0.3905 1.0237 0.3905 1.4142 0 l 11 -11" fill="none" id="check"></path><path clip-rule="evenodd" d="m14.378741 1.509638 1.818245 1.818557c.365651.365716.861601.571194 1.378741.571259l2.574273.000312c1.01361.000117 1.846494.773578 1.940861 1.762436l.008905.187798-.000312 2.5727c-.000065.517322.205439 1.013454.571259 1.379222l1.819649 1.819337c.714441.713427.759174 1.843179.134563 2.609139l-.134797.148109-1.819181 1.8182502c-.365963.3657823-.571558.8620196-.571493 1.3794456l.000312 2.5737972c.000559 1.0136048-.772668 1.846676-1.7615 1.9412861l-.188266.0084786-2.573792-.0003107c-.517426-.0000624-1.013675.2055248-1.379456.5714956l-1.818245 1.8191823c-.71331.7145515-1.843049.7594886-2.609113.1349998l-.148135-.1347645-1.8193435-1.8196542c-.3657628-.3658252-.8618987-.5713214-1.3792103-.571259l-2.5727052.0003107c-1.0136048.0001222-1.846676-.7731321-1.9412861-1.761968l-.0089492-.1877967-.0003107-2.5742678c-.0000624-.5171478-.2055495-1.0130926-.571259-1.3787397l-1.8185622-1.8182515c-.7139886-.713869-.758706-1.843647-.1340846-2.609607l.1338493-.148109 1.8190328-1.81935c.3655665-.365625.5709613-.861471.5710237-1.378494l.0003107-2.573181c.0006006-1.076777.8734635-1.949636 1.9502353-1.950234l2.5731758-.000312c.5170321-.000065 1.0128768-.205452 1.3785044-.571025l1.8193448-1.819038c.761592-.761449 1.996254-.761345 2.757716.000247zm3.195309 8.047806c-.426556-.34125-1.032655-.306293-1.417455.060333l-.099151.108173-4.448444 5.55815-1.7460313-1.74707-.1104961-.096564c-.4229264-.32188-1.0291801-.289692-1.4154413.096564-.3862612.386269-.4184492.992511-.0965653 1.41544l.0965653.1105 2.5999987 2.5999987.109876.0961467c.419874.320359 1.015131.2873897 1.397071-.0773773l.098579-.107692 5.2-6.4999961.083772-.120484c.273208-.455884.174278-1.054885-.252278-1.396122z" fill-rule="evenodd" id="verified-icon-background"></path><path d="M8 8H18V18H8V8Z" id="verified-icon-check"></path><symbol id="message-tail-filled" viewbox="0 0 11 20"><g fill="inherit" fill-rule="evenodd" transform="translate(9 -14)"><path d="M-6 16h6v17c-.193-2.84-.876-5.767-2.05-8.782-.904-2.325-2.446-4.485-4.625-6.48A1 1 0 01-6 16z" fill="inherit" id="corner-fill" transform="matrix(1 0 0 -1 0 49)"></path></g></symbol><lineargradient id="g" x1="-300%" x2="-200%" y1="0" y2="0"><stop offset="-10%" stop-opacity=".1"></stop><stop offset="30%" stop-opacity=".07"></stop><stop offset="70%" stop-opacity=".07"></stop><stop offset="110%" stop-opacity=".1"></stop><animate attributename="x1" dur="3s" from="-300%" repeatcount="indefinite" to="1200%"></animate><animate attributename="x2" dur="3s" from="-200%" repeatcount="indefinite" to="1300%"></animate></lineargradient></defs></svg><div class="tabs-container" data-animation="navigation" id="main-columns"><div class="tabs-tab chatlist-container sidebar sidebar-left main-column" 
id="column-left"><div class="sidebar-slider tabs-container"><div class="tabs-tab sidebar-slider-item item-main"><div class="sidebar-header"><div class="sidebar-header__btn-container"><div class="animated-menu-icon"></div><div class="btn-icon sidebar-back-button"></div></div></div><div class="sidebar-content transition zoom-fade"><div class="transition-item active" id="chatlist-container"><div class="folders-tabs-scrollable menu-horizontal-scrollable hide"><nav class="menu-horizontal-div" id="folders-tabs"></nav></div><div class="tabs-container" id="folders-container"></div></div><div class="transition-item sidebar-search" id="search-container"></div></div></div></div></div><div class="tabs-tab main-column" id="column-center"></div><div class="tabs-tab sidebar sidebar-right main-column" id="column-right"><div class="sidebar-content sidebar-slider tabs-container"></div></div></div><div class="emoji-dropdown" id="emoji-dropdown" style="display:none"><div class="emoji-container"><div class="tabs-container"><div class="tabs-tab emoji-padding"><div class="menu-wrapper"><nav class="menu-horizontal-div no-stripe justify-start"></nav></div><div class="emoticons-content" id="content-emoji"></div></div><div class="tabs-tab stickers-padding"><div class="menu-wrapper"><nav class="menu-horizontal-div no-stripe justify-start"></nav></div><div class="emoticons-content" id="content-stickers"></div></div><div class="tabs-tab gifs-padding"><div class="emoticons-content" id="content-gifs"><div class="gifs-masonry"></div></div></div></div></div><div class="emoji-tabs menu-horizontal-div no-stripe"><button class="menu-horizontal-div-item emoji-tabs-search justify-self-start btn-icon tgico-search" data-tab="-1"></button> <button class="menu-horizontal-div-item emoji-tabs-emoji btn-icon tgico-smile" data-tab="0"></button> <button class="menu-horizontal-div-item emoji-tabs-stickers btn-icon tgico-stickers_face" data-tab="1"></button> <button class="menu-horizontal-div-item emoji-tabs-gifs btn-icon tgico-gifs" data-tab="2"></button> <button class="menu-horizontal-div-item emoji-tabs-delete justify-self-end btn-icon tgico-deleteleft" data-tab="-1"></button></div></div></div></body></html> i get this when i exract full page of a chat but when i do the same in the browser i see this div i needed A: The problem is not the parser, it's beautiful soup itself. Last time I've used it can only do static scraping which means it only returns the html to traverse the DOM and if it had any scripts in it wouldn't load. Static scraping ignores JavaScript. You have to switch to Selenium with a chrome webdriver. It has similar functionality as BeautifulSoup with a pageLoadStrategy in order to load the website as you'd expect.
webscraping returns not full info
learning webscraping at the moment and decided to scrap telegram's web version so i choosed one of the chats (favorite one with yourself) and sent few voices there. My task is - i want to exract all voices lenght when i inspect page there is a div container <div class="audio-time">0:00</div> telegramURL = 'https://web.telegram.org/k/#@myname' audiolength = requests.get(telegramURL) soup2 = BeautifulSoup(audiolength.text, 'lxml') audio2 = soup.find_all('div', class_=re.compile('audio-time'))[0] print(audio2) all i get is an empty massive [] maybe i should use different parser? or url im using is wroing as it is not an official API or anything else also - while inspecting the page in the browser i can see this div container but when i get it with beauitful soup it seems like not full. <p class="browserupgrade">You are using an <strong>outdated</strong> browser. Please <a href="https://browsehappy.com/">upgrade your browser</a> to improve your experience and security.</p> <![endif]--><div class="whole" id="auth-pages" style="display:none"><div class="scrollable scrollable-y"><div class="tabs-container auth-pages__container" data-animation="tabs"><div class="tabs-tab page-signImport"><div class="container center-align"></div></div><div class="tabs-tab page-sign"><div class="container"><div class="auth-image"><svg class="sign-logo" height="160" viewbox="0 0 160 160" width="160" xmlns="http://www.w3.org/2000/svg"><use href="#logo"></use></svg></div></div></div><div class="tabs-tab page-signQR"><div class="container center-align"><div class="auth-image"></div></div></div><div class="tabs-tab page-authCode"><div class="container center-align"><div class="auth-image"></div><div class="phone-wrapper"><h4 class="phone"></h4><span class="phone-edit tgico-edit"></span></div><p class="subtitle sent-type"></p><div class="input-wrapper"></div></div></div><div class="tabs-tab page-password"></div><div class="tabs-tab page-signUp"></div></div></div></div><div class="whole page-chats" id="page-chats" style="display:none"><svg style="position:absolute;top:-10000px;left:-10000px"><defs id="svg-defs"><path d="M1.00002881,1.03679295e-14 L7,0 L7,17 C6.8069969,14.1607017 6.12380234,11.2332513 4.95041634,8.21764872 C4.04604748,5.89342034 2.50413132,3.73337411 0.324667862,1.73751004 L0.324652538,1.73752677 C-0.0826597201,1.36452676 -0.110475289,0.731958677 0.262524727,0.324646419 C0.451952959,0.117792698 0.719544377,1.0985861e-14 1.00002881,1.04360964e-14 Z" id="message-tail"></path><path d="M80,0 C124.18278,0 160,35.81722 160,80 C160,124.18278 124.18278,160 80,160 C35.81722,160 0,124.18278 0,80 C0,35.81722 35.81722,0 80,0 Z M114.262551,46.4516129 L114.123923,46.4516129 C111.089589,46.5056249 106.482806,48.0771432 85.1289541,56.93769 L81.4133571,58.4849956 C72.8664779,62.0684477 57.2607933,68.7965125 34.5963033,78.66919 C30.6591745,80.2345564 28.5967328,81.765936 28.4089783,83.2633288 C28.0626453,86.0254269 31.8703852,86.959903 36.7890378,88.5302703 L38.2642674,89.0045258 C42.3926354,90.314406 47.5534685,91.7248852 50.3250916,91.7847532 C52.9151948,91.8407003 55.7944784,90.8162976 58.9629426,88.7115451 L70.5121776,80.9327422 C85.6657026,70.7535853 93.6285785,65.5352892 94.4008055,65.277854 L94.6777873,65.216416 C95.1594319,65.1213105 95.7366278,65.0717596 96.1481181,65.4374337 C96.6344248,65.8695939 96.5866185,66.6880224 96.5351057,66.9075859 C96.127514,68.6448691 75.2839361,87.6143392 73.6629144,89.2417998 L73.312196,89.6016896 C68.7645143,94.2254793 63.9030972,97.1721503 71.5637945,102.355193 
L73.3593638,103.544598 C79.0660342,107.334968 82.9483395,110.083813 88.8107882,113.958377 L90.3875424,114.996094 C95.0654739,118.061953 98.7330313,121.697601 103.562866,121.253237 C105.740839,121.052855 107.989107,119.042224 109.175465,113.09692 L109.246762,112.727987 C112.002037,98.0012935 117.417883,66.09303 118.669527,52.9443975 C118.779187,51.7924073 118.641237,50.318088 118.530455,49.6708963 L118.474159,49.3781963 C118.341081,48.7651315 118.067967,48.0040758 117.346762,47.4189793 C116.412565,46.6610871 115.002114,46.4638844 114.262551,46.4516129 Z" fill-rule="evenodd" id="logo"></path><path d="M4.47,5.33v13.6c0,4.97,4.03,9,9,9h458.16" id="poll-line"></path><path d="M 4.7071 12.2929 l 5 5 c 0.3905 0.3905 1.0237 0.3905 1.4142 0 l 11 -11" fill="none" id="check"></path><path clip-rule="evenodd" d="m14.378741 1.509638 1.818245 1.818557c.365651.365716.861601.571194 1.378741.571259l2.574273.000312c1.01361.000117 1.846494.773578 1.940861 1.762436l.008905.187798-.000312 2.5727c-.000065.517322.205439 1.013454.571259 1.379222l1.819649 1.819337c.714441.713427.759174 1.843179.134563 2.609139l-.134797.148109-1.819181 1.8182502c-.365963.3657823-.571558.8620196-.571493 1.3794456l.000312 2.5737972c.000559 1.0136048-.772668 1.846676-1.7615 1.9412861l-.188266.0084786-2.573792-.0003107c-.517426-.0000624-1.013675.2055248-1.379456.5714956l-1.818245 1.8191823c-.71331.7145515-1.843049.7594886-2.609113.1349998l-.148135-.1347645-1.8193435-1.8196542c-.3657628-.3658252-.8618987-.5713214-1.3792103-.571259l-2.5727052.0003107c-1.0136048.0001222-1.846676-.7731321-1.9412861-1.761968l-.0089492-.1877967-.0003107-2.5742678c-.0000624-.5171478-.2055495-1.0130926-.571259-1.3787397l-1.8185622-1.8182515c-.7139886-.713869-.758706-1.843647-.1340846-2.609607l.1338493-.148109 1.8190328-1.81935c.3655665-.365625.5709613-.861471.5710237-1.378494l.0003107-2.573181c.0006006-1.076777.8734635-1.949636 1.9502353-1.950234l2.5731758-.000312c.5170321-.000065 1.0128768-.205452 1.3785044-.571025l1.8193448-1.819038c.761592-.761449 1.996254-.761345 2.757716.000247zm3.195309 8.047806c-.426556-.34125-1.032655-.306293-1.417455.060333l-.099151.108173-4.448444 5.55815-1.7460313-1.74707-.1104961-.096564c-.4229264-.32188-1.0291801-.289692-1.4154413.096564-.3862612.386269-.4184492.992511-.0965653 1.41544l.0965653.1105 2.5999987 2.5999987.109876.0961467c.419874.320359 1.015131.2873897 1.397071-.0773773l.098579-.107692 5.2-6.4999961.083772-.120484c.273208-.455884.174278-1.054885-.252278-1.396122z" fill-rule="evenodd" id="verified-icon-background"></path><path d="M8 8H18V18H8V8Z" id="verified-icon-check"></path><symbol id="message-tail-filled" viewbox="0 0 11 20"><g fill="inherit" fill-rule="evenodd" transform="translate(9 -14)"><path d="M-6 16h6v17c-.193-2.84-.876-5.767-2.05-8.782-.904-2.325-2.446-4.485-4.625-6.48A1 1 0 01-6 16z" fill="inherit" id="corner-fill" transform="matrix(1 0 0 -1 0 49)"></path></g></symbol><lineargradient id="g" x1="-300%" x2="-200%" y1="0" y2="0"><stop offset="-10%" stop-opacity=".1"></stop><stop offset="30%" stop-opacity=".07"></stop><stop offset="70%" stop-opacity=".07"></stop><stop offset="110%" stop-opacity=".1"></stop><animate attributename="x1" dur="3s" from="-300%" repeatcount="indefinite" to="1200%"></animate><animate attributename="x2" dur="3s" from="-200%" repeatcount="indefinite" to="1300%"></animate></lineargradient></defs></svg><div class="tabs-container" data-animation="navigation" id="main-columns"><div class="tabs-tab chatlist-container sidebar sidebar-left main-column" id="column-left"><div 
class="sidebar-slider tabs-container"><div class="tabs-tab sidebar-slider-item item-main"><div class="sidebar-header"><div class="sidebar-header__btn-container"><div class="animated-menu-icon"></div><div class="btn-icon sidebar-back-button"></div></div></div><div class="sidebar-content transition zoom-fade"><div class="transition-item active" id="chatlist-container"><div class="folders-tabs-scrollable menu-horizontal-scrollable hide"><nav class="menu-horizontal-div" id="folders-tabs"></nav></div><div class="tabs-container" id="folders-container"></div></div><div class="transition-item sidebar-search" id="search-container"></div></div></div></div></div><div class="tabs-tab main-column" id="column-center"></div><div class="tabs-tab sidebar sidebar-right main-column" id="column-right"><div class="sidebar-content sidebar-slider tabs-container"></div></div></div><div class="emoji-dropdown" id="emoji-dropdown" style="display:none"><div class="emoji-container"><div class="tabs-container"><div class="tabs-tab emoji-padding"><div class="menu-wrapper"><nav class="menu-horizontal-div no-stripe justify-start"></nav></div><div class="emoticons-content" id="content-emoji"></div></div><div class="tabs-tab stickers-padding"><div class="menu-wrapper"><nav class="menu-horizontal-div no-stripe justify-start"></nav></div><div class="emoticons-content" id="content-stickers"></div></div><div class="tabs-tab gifs-padding"><div class="emoticons-content" id="content-gifs"><div class="gifs-masonry"></div></div></div></div></div><div class="emoji-tabs menu-horizontal-div no-stripe"><button class="menu-horizontal-div-item emoji-tabs-search justify-self-start btn-icon tgico-search" data-tab="-1"></button> <button class="menu-horizontal-div-item emoji-tabs-emoji btn-icon tgico-smile" data-tab="0"></button> <button class="menu-horizontal-div-item emoji-tabs-stickers btn-icon tgico-stickers_face" data-tab="1"></button> <button class="menu-horizontal-div-item emoji-tabs-gifs btn-icon tgico-gifs" data-tab="2"></button> <button class="menu-horizontal-div-item emoji-tabs-delete justify-self-end btn-icon tgico-deleteleft" data-tab="-1"></button></div></div></div></body></html> i get this when i exract full page of a chat but when i do the same in the browser i see this div i needed
[ "The problem is not the parser, it's beautiful soup itself. Last time I've used it can only do static scraping which means it only returns the html to traverse the DOM and if it had any scripts in it wouldn't load.\nStatic scraping ignores JavaScript.\nYou have to switch to Selenium with a chrome webdriver. It has similar functionality as BeautifulSoup with a pageLoadStrategy in order to load the website as you'd expect.\n" ]
[ 0 ]
[]
[]
[ "python", "telegram", "web_scraping" ]
stackoverflow_0074589410_python_telegram_web_scraping.txt
Q: I can't count the full list in my Discord bot (Python) I'm trying to count the entire embed list, but it always shows me the same number... It should appear as 1 2 3 4 ... I don't know how to solve this problem. This is the code:
@bot.command()
async def habbo(ctx):
    response = requests.get("https://images.habbo.com/habbo-web-leaderboards/hhes/visited-rooms/daily/latest.json")
    data = response.json()

    count=0
    for i in data:
        count=count+1
    content = f'\n\n{contar} - '.join(item['name'] for item in data)

    embed = discord.Embed(title=f"", description=f"{content}", color=discord.Colour.random())
    await ctx.send(embed=embed)

A: This should work. I just removed the .join() function and created the appending with a loop instead.
@bot.command()
async def habbo(ctx):
    response = requests.get("https://images.habbo.com/habbo-web-leaderboards/hhes/visited-rooms/daily/latest.json")
    data = response.json()

    count = 0
    content = ""

    for item in data:
        count = count + 1
        item = item["name"]
        content = content + f"\n{count} - {item}\n"

    embed = discord.Embed(title=f"", description=f"{content}", color=discord.Colour.random())
    await ctx.send(embed=embed)
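As a small aside, the manual counter in that fix can also be replaced by enumerate; a minimal sketch, assuming data is the parsed JSON list from the same request:
# enumerate() yields (index, item) pairs, starting the index at 1 here
content = "\n".join(
    f"{i} - {item['name']}" for i, item in enumerate(data, start=1)
)
embed = discord.Embed(description=content, color=discord.Colour.random())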
I can't count the full list in my Discord bot (Python)
I'm trying to count the entire embed list but it always shows me the same number... should appear 1 2 3 4 ... I don't know how to solve this problem this is the code @bot.command() async def habbo(ctx): response = requests.get("https://images.habbo.com/habbo-web-leaderboards/hhes/visited-rooms/daily/latest.json") data = response.json() count=0 for i in data: count=count+1 content = f'\n\n{contar} - '.join(item['name'] for item in data) embed = discord.Embed(title=f"", description=f"{content}", color=discord.Colour.random()) await ctx.send(embed=embed)
[ "This should work. I just removed the .join() function and created the appending with a loop instead.\n@bot.command()\nasync def habbo(ctx):\n response = requests.get(\"https://images.habbo.com/habbo-web-leaderboards/hhes/visited-rooms/daily/latest.json\")\n data = response.json()\n \n count = 0\n content = \"\"\n\n for item in data:\n count = count + 1\n item = item[\"name\"]\n content = content + f\"\\n{count} - {item}\\n\"\n\n embed = discord.Embed(title=f\"\", description=f\"{content}\", color=discord.Colour.random())\n await ctx.send(embed=embed)\n\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074589140_python.txt
Q: Could not find function xmlCheckVersion in library libxml2 while executing pip install uspto-opendata-python I have conda libxml2 installed. However, when I execute pip install uspto-opendata-python, I get the following message:
could not find function xmlCheckVersion in library libxml2. is libxml2 installed?

I noticed a similar question here - Could not find function xmlCheckVersion in library libxml2. Is libxml2 installed? - but the suggestions listed there are not resolving my issue. How do I resolve this issue? That's the error message I'm getting:
C:\Users\njind\AppData\Local\Temp\xmlXPathInitwd5ud9xh.c(1): fatal error C1083: Cannot open include file: 'libxml/xpath.h': No such file or directory
error: command 'C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Tools\MSVC\14.32.31326\bin\HostX86\x64\cl.exe' failed with exit code 2
Could not find function xmlCheckVersion in library libxml2. Is libxml2 installed?

A: I encountered the same error. You must download the file from Archived: Unofficial Windows Binaries for Python Extension Packages, then install it like this on Windows:
pip install C:\Users\USER\Downloads\lxml-4.9.0-cp311-cp311-win_amd64.whl
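Once a prebuilt lxml wheel is installed (so pip no longer tries to compile against the libxml2 headers), a quick sanity check that the bindings load is, as a minimal sketch:
# the import fails if the wheel did not install correctly
from lxml import etree

# version tuple of the libxml2 the wheel was built against, e.g. (2, 9, 12)
print(etree.LIBXML_VERSION)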
Could not find function xmlCheckVersion in library libxml2 while executing pip install uspto-opendata-python
I have conda xml2 installed. However, when I execute pip install uspto-opendata-python, I get the following message could not find function xmlCheckVersion in library libxml2. is libxml2 installed? I noticed a similar question here - Could not find function xmlCheckVersion in library libxml2. Is libxml2 installed? - but the suggestions listed there are not resolving my issue. how to resolve this issue? That's the error message i'm getting: C:\Users\njind\AppData\Local\Temp\xmlXPathInitwd5ud9xh.c(1): fatal error C1083: Cannot open include file: 'libxml/xpath.h': No such file or directory error: command 'C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Tools\MSVC\14.32.31326\bin\HostX86\x64\cl.exe' failed with exit code 2 Could not find function xmlCheckVersion in library libxml2. Is libxml2 installed?
[ "I encountered the same error. You must download the file from\nArchived: Unofficial Windows Binaries for Python Extension Packages, then install it like this on Windows:\npip install C:\\Users\\USER\\Downloads\\lxml-4.9.0-cp311-cp311-win_amd64.whl\n" ]
[ 0 ]
[]
[]
[ "libxml2", "python" ]
stackoverflow_0072550326_libxml2_python.txt
Q: python, itertools: 'list' object is not callable I'm trying to run this function, but I get the error: TypeError: 'list' object is not callable
import itertools

def get_all_pair_combinations(list):
    return list(itertools.combinations(list, 2))

pair_indexes = get_all_pair_combinations(list(range(len(features_vectors[0]))))

The features_vectors[0] that is passed in is a list of 1024 arrays. Any idea, please?
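The error comes from the parameter name: inside the function, list refers to the argument (a list object), shadowing the built-in list constructor, so list(...) tries to call a list. Renaming the parameter fixes it; a minimal sketch, with a small stand-in for features_vectors since the real data is not shown:
import itertools

def get_all_pair_combinations(items):  # do not name the parameter 'list'
    return list(itertools.combinations(items, 2))

features_vectors = [[0, 1, 2, 3]]      # stand-in for the real data
pair_indexes = get_all_pair_combinations(range(len(features_vectors[0])))
print(pair_indexes)  # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]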
python, itertools: 'list' object is not callable
I'm trying to run this function but I got the error: TypeError: 'list' object is not callable import itertools def get_all_pair_combinations(list): return list(itertools.combinations(list, 2)) pair_indexes = get_all_pair_combinations(list(range(len(features_vectors[0])))) the features_vectors[0] that is called is a list of 1024 arrays. any idea please?
[]
[]
[ "you can't pass list to a function, read about map function!\n" ]
[ -3 ]
[ "pandas", "python", "python_itertools" ]
stackoverflow_0074589578_pandas_python_python_itertools.txt
Q: How to transform a Pandas Dataframe with irregular coordinates into an xarray Dataset I'm working with a pandas DataFrame in Python, but in order to plot my data as a map I have to transform it into an xarray Dataset, since the library I'm using to plot (salem) works best for this class. The problem I'm having is that the grid of my data isn't regular, so I can't seem to be able to create the Dataset. My DataFrame has the latitude and longitude, as well as the value at each point:
              lon        lat      value
0     -104.936302 -51.339233   7.908411
1     -104.827377 -51.127686   7.969049
2     -104.719154 -50.915470   8.036676
3     -104.611641 -50.702595   8.096765
4     -104.504814 -50.489056   8.163690
...           ...        ...        ...
65995  -32.911377  15.359591  25.475702
65996  -32.957718  15.579139  25.443994
65997  -33.004040  15.798100  25.429346
65998  -33.050335  16.016472  25.408105
65999  -33.096611  16.234255  25.383844

[66000 rows x 3 columns]

In order to create the Dataset using lat and lon as coordinates and fill all of the missing values with NaN, I was trying the following:
ds = xr.Dataset({
    'ts': xr.DataArray(
        data = value,   # enter data here
        dims = ['lon','lat'],
        coords = {'lon': lon, 'lat':lat},
        attrs = {
            '_FillValue': np.nan,
            'units' : 'K'
            }
        )},
    attrs = {'attr': 'RegCM output'}
    )
ds

But I got the following error:
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Input In [41], in <cell line: 1>()
      1 ds = xr.Dataset({
----> 2     'ts': xr.DataArray(
      3         data = value,   # enter data here
      4         dims = ['lon','lat'],
      5         coords = {'lon': lon, 'lat':lat},
      6         attrs = {
      7             '_FillValue': np.nan,
      8             'units' : 'K'
      9             }
     10         )},
     11     attrs = {'example_attr': 'this is a global attribute'}
     12     )
     14 # ds = xr.Dataset(
     15 #     data_vars=dict(
     16 #         variable=(["lon", "lat"], value)
    (...)
     25 #     }
     26 # )
     27 ds

File ~\anaconda3\lib\site-packages\xarray\core\dataarray.py:406, in DataArray.__init__(self, data, coords, dims, name, attrs, indexes, fastpath)
    404 data = _check_data_shape(data, coords, dims)
    405 data = as_compatible_data(data)
--> 406 coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
    407 variable = Variable(dims, data, attrs, fastpath=True)
    408 indexes = dict(
    409     _extract_indexes_from_coords(coords)
    410 )  # needed for to_dataset

File ~\anaconda3\lib\site-packages\xarray\core\dataarray.py:123, in _infer_coords_and_dims(shape, coords, dims)
    121     dims = tuple(dims)
    122 elif len(dims) != len(shape):
--> 123     raise ValueError(
    124         "different number of dimensions on data "
    125         f"and dims: {len(shape)} vs {len(dims)}"
    126     )
    127 else:
    128     for d in dims:

ValueError: different number of dimensions on data and dims: 1 vs 2

I would really appreciate any insights to solve this.

A: If you really require a rectangularly gridded dataset you need to resample your data into a regular grid (rasterio, pyresample etc. provide useful functionalities for that). However, if you just want to plot the data, this is not necessary!
Not sure about salem (never used it so far), but I've tried my best to simplify plotting of irregularly sampled data in the visualization library I'm developing, EOmaps!
You could get a "contour-plot" like appearance if you use a "delaunay triangulation" to visualize the data:
import pandas as pd
df = pd.read_csv("... path-to df.csv ...", index_col=0)

from eomaps import Maps

m = Maps()
m.add_feature.preset.coastline()
m.set_data(df, x="lon", y="lat", crs=4326, parameter="value")
m.set_shape.delaunay_triangulation()
m.plot_map()
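If the regular grid really is required (for salem or anything else expecting rectangular coordinates), one possible route is to interpolate the scattered points onto a rectangular grid first. A minimal sketch using scipy, assuming df is the DataFrame from the question; the 200-point grid resolution is an arbitrary choice, and points outside the data's convex hull become NaN:
import numpy as np
import xarray as xr
from scipy.interpolate import griddata

# target regular grid spanning the data extent
lon_g = np.linspace(df["lon"].min(), df["lon"].max(), 200)
lat_g = np.linspace(df["lat"].min(), df["lat"].max(), 200)
lon2d, lat2d = np.meshgrid(lon_g, lat_g)

# linear interpolation of the scattered samples; NaN outside the convex hull
gridded = griddata((df["lon"], df["lat"]), df["value"], (lon2d, lat2d), method="linear")

ds = xr.Dataset(
    {"ts": (("lat", "lon"), gridded)},
    coords={"lat": lat_g, "lon": lon_g},
    attrs={"attr": "RegCM output"},
)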
How to transform a Pandas Dataframe with irregular coordinates into a xarray Dataset
I'm working with a pandas Dataframe on python, but in order to plot as a map my data I have to transform it into a xarray Dataset, since the library I'm using to plot (salem) works best for this class. The problem I'm having is that the grid of my data isn't regular so I can't seem to be able to create the Dataset. My Dataframe has the latitude and longitude, as well as the value in each point: lon lat value 0 -104.936302 -51.339233 7.908411 1 -104.827377 -51.127686 7.969049 2 -104.719154 -50.915470 8.036676 3 -104.611641 -50.702595 8.096765 4 -104.504814 -50.489056 8.163690 ... ... ... ... 65995 -32.911377 15.359591 25.475702 65996 -32.957718 15.579139 25.443994 65997 -33.004040 15.798100 25.429346 65998 -33.050335 16.016472 25.408105 65999 -33.096611 16.234255 25.383844 [66000 rows x 3 columns] In order to create the Dataset using lat and lon as coordinates and fill all of the missing values with NaN, I was trying the following: ds = xr.Dataset({ 'ts': xr.DataArray( data = value, # enter data here dims = ['lon','lat'], coords = {'lon': lon, 'lat':lat}, attrs = { '_FillValue': np.nan, 'units' : 'K' } )}, attrs = {'attr': 'RegCM output'} ) ds But I got the following error: --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Input In [41], in <cell line: 1>() 1 ds = xr.Dataset({ ----> 2 'ts': xr.DataArray( 3 data = value, # enter data here 4 dims = ['lon','lat'], 5 coords = {'lon': lon, 'lat':lat}, 6 attrs = { 7 '_FillValue': np.nan, 8 'units' : 'K' 9 } 10 )}, 11 attrs = {'example_attr': 'this is a global attribute'} 12 ) 14 # ds = xr.Dataset( 15 # data_vars=dict( 16 # variable=(["lon", "lat"], value) (...) 25 # } 26 # ) 27 ds File ~\anaconda3\lib\site-packages\xarray\core\dataarray.py:406, in DataArray.__init__(self, data, coords, dims, name, attrs, indexes, fastpath) 404 data = _check_data_shape(data, coords, dims) 405 data = as_compatible_data(data) --> 406 coords, dims = _infer_coords_and_dims(data.shape, coords, dims) 407 variable = Variable(dims, data, attrs, fastpath=True) 408 indexes = dict( 409 _extract_indexes_from_coords(coords) 410 ) # needed for to_dataset File ~\anaconda3\lib\site-packages\xarray\core\dataarray.py:123, in _infer_coords_and_dims(shape, coords, dims) 121 dims = tuple(dims) 122 elif len(dims) != len(shape): --> 123 raise ValueError( 124 "different number of dimensions on data " 125 f"and dims: {len(shape)} vs {len(dims)}" 126 ) 127 else: 128 for d in dims: ValueError: different number of dimensions on data and dims: 1 vs 2 I would really appreciate any insights to solve this.
[ "If you really require a rectangularly gridded dataset you need to resample your data into a regular grid... (rasterio, pyresample etc. provide useful functionalities for that). However if you just want to plot the data, this is not necessary!\nNot sure about salem (never used it so far), but I've tried my best to simplify plotting of irrelgularly sampled data in the visualization-library I'm developing EOmaps!\nYou could get a \"contour-plot\" like appearance if you use a \"delaunay triangulation\" to visualize the data:\nimport pandas as pd\ndf = pd.read_csv(\"... path-to df.csv ...\", index_col=0)\n\nfrom eomaps import Maps\n\nm = Maps()\nm.add_feature.preset.coastline()\nm.set_data(df, x=\"lon\", y=\"lat\", crs=4326, parameter=\"value\")\nm.set_shape.delaunay_triangulation()\nm.plot_map()\n\n\n" ]
[ 1 ]
[]
[]
[ "dataset", "pandas", "python", "python_xarray" ]
stackoverflow_0074561493_dataset_pandas_python_python_xarray.txt
Q: "Exception has occurred: TclError unknown option" I am making memory match on pyhton but I keep getting this error: "Exception has occurred: TclError unknown option" Here's the code. Any ideas? import random import time from tkinter import * from PIL import Image, ImageTk from turtle import * def show_symbol(x, y): global first global previousX, previousY buttons[x, y]["text"] = button_symbols[x, y] buttons[x, y].update_idletasks() if first: previousX = x previousY = y first = False elif previousX != x or previousY != y: if buttons[previousX, previousY]["text"] != buttons[x, y]["text"]: time.sleep(0.5) buttons[previousX, previousY]["text"] = "" buttons[x, y]["text"] = "" else: buttons[previousX, previousY]["command"] = DISABLED buttons[x, y]["command"] = DISABLED first = True root = Tk() root.title('Memory Match') root.resizable(width=False, height=False) buttons = {} first = True previousX = 0 previousY = 0 royaljello = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\StarEgg.gif")) treat = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\treat.gif")) star_treat = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\star_treat.gif")) strawberry = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Strawberry.gif")) blueberry = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Blueberry.gif")) pineapple = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Pineapple.gif")) seed = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\seed.gif")) star_jelly = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\StarJelly.gif")) silveregg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Silver_Egg.gif")) goldegg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Gold_Egg.gif")) deemondegg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Diamond_Egg.gif")) mythegg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Mythic_Egg.gif")) button_symbols = {} symbols = [royaljello, royaljello, treat, treat, star_treat, star_treat, strawberry, strawberry, blueberry, blueberry, pineapple, pineapple, seed, seed, star_jelly, star_jelly, silveregg, silveregg, goldegg, goldegg, deemondegg, deemondegg, mythegg, mythegg ] random.shuffle(symbols) for x in range(6) : for y in range(4) : button = Button(command=lambda x=x, y=y: show_symbol(x, y), window_height=3 window_width=3) button.grid(column=x, row=y) button[x, y] = button button_symbols[x, y] = symbols.pop() root.mainloop() # Credit to Onett for images Here's the syntax (pretend line 61 is on the end of line 60). Thank you in advance and I apologise for any amatuer errors I have made. File "C:\Users\Martin\AppData\Local\Programs\Python\Python310\lib\tkinter\__init__.py", line 2601, in __init__ self.tk.call( _tkinter.TclError: unknown option "-window_height" PS D:\Documents\Projects> A: Your line: button = Button(command=lambda x=x, y=y: show_symbol(x, y), window_height=3 window_width=3) has multiple related problems; the options window_height and window_width are simply unknown to the widget, as the error message says. Did you mean height and width, respectively?
"Exception has occurred: TclError unknown option"
I am making memory match on pyhton but I keep getting this error: "Exception has occurred: TclError unknown option" Here's the code. Any ideas? import random import time from tkinter import * from PIL import Image, ImageTk from turtle import * def show_symbol(x, y): global first global previousX, previousY buttons[x, y]["text"] = button_symbols[x, y] buttons[x, y].update_idletasks() if first: previousX = x previousY = y first = False elif previousX != x or previousY != y: if buttons[previousX, previousY]["text"] != buttons[x, y]["text"]: time.sleep(0.5) buttons[previousX, previousY]["text"] = "" buttons[x, y]["text"] = "" else: buttons[previousX, previousY]["command"] = DISABLED buttons[x, y]["command"] = DISABLED first = True root = Tk() root.title('Memory Match') root.resizable(width=False, height=False) buttons = {} first = True previousX = 0 previousY = 0 royaljello = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\StarEgg.gif")) treat = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\treat.gif")) star_treat = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\star_treat.gif")) strawberry = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Strawberry.gif")) blueberry = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Blueberry.gif")) pineapple = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Pineapple.gif")) seed = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\seed.gif")) star_jelly = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\StarJelly.gif")) silveregg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Silver_Egg.gif")) goldegg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Gold_Egg.gif")) deemondegg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Diamond_Egg.gif")) mythegg = PhotoImage(Image.open(r"D:\Documents\Projects\Memorysnap\Mythic_Egg.gif")) button_symbols = {} symbols = [royaljello, royaljello, treat, treat, star_treat, star_treat, strawberry, strawberry, blueberry, blueberry, pineapple, pineapple, seed, seed, star_jelly, star_jelly, silveregg, silveregg, goldegg, goldegg, deemondegg, deemondegg, mythegg, mythegg ] random.shuffle(symbols) for x in range(6) : for y in range(4) : button = Button(command=lambda x=x, y=y: show_symbol(x, y), window_height=3 window_width=3) button.grid(column=x, row=y) button[x, y] = button button_symbols[x, y] = symbols.pop() root.mainloop() # Credit to Onett for images Here's the syntax (pretend line 61 is on the end of line 60). Thank you in advance and I apologise for any amatuer errors I have made. File "C:\Users\Martin\AppData\Local\Programs\Python\Python310\lib\tkinter\__init__.py", line 2601, in __init__ self.tk.call( _tkinter.TclError: unknown option "-window_height" PS D:\Documents\Projects>
[ "Your line:\nbutton = Button(command=lambda x=x, y=y: show_symbol(x, y), window_height=3 window_width=3)\n\nhas multiple related problems; the options window_height and window_width are simply unknown to the widget, as the error message says. Did you mean height and width, respectively?\n" ]
[ 1 ]
[]
[]
[ "python", "tcl", "tkinter" ]
stackoverflow_0074585943_python_tcl_tkinter.txt
Q: re.sub(".*", ", "(replacement)", "text") doubles replacement on Python 3.7 On Python 3.7 (tested on Windows 64 bits), the replacement of a string using the RegEx .* gives the input string repeated twice! On Python 3.7.2: >>> import re >>> re.sub(".*", "(replacement)", "sample text") '(replacement)(replacement)' On Python 3.6.4: >>> import re >>> re.sub(".*", "(replacement)", "sample text") '(replacement)' On Python 2.7.5 (32 bits): >>> import re >>> re.sub(".*", "(replacement)", "sample text") '(replacement)' What is wrong? How to fix that? A: This is not a bug, but a bug fix in Python 3.7 from the commit fbb490fd2f38bd817d99c20c05121ad0168a38ee. In regex, a non-zero-width match moves the pointer position to the end of the match, so that the next assertion, zero-width or not, can continue to match from the position following the match. So in your example, after .* greedily matches and consumes the entire string, the fact that the pointer is then moved to the end of the string still actually leaves "room" for a zero-width match at that position, as can be evident from the following code, which behaves the same in Python 2.7, 3.6 and 3.7: >>> re.findall(".*", 'sample text') ['sample text', ''] So the bug fix, which is about replacement of a zero-width match right after a non-zero-width match, now correctly replaces both matches with the replacement text. A: This is a common regex issue, it affects a lot of regex flavors, see related language-agnostic : Why do some regex engines match .* twice in a single input string? java : String.replaceAll(regex) makes the same replacement twice There are several ways to fix the issue: Add anchors on both sides of .*: re.sub("^.*$", "(replacement)", "sample text") Since you want to only match a line once, add the count=1 argument: print( re.sub(".*", "(replacement)", "sample text", count=1) ) In case you want to replace any non-empty line, replace * with +: print( re.sub(".+", "(replacement)", "sample text") ) See the Python demo: import re # Adding anchors: print( re.sub("^.*$", "(replacement)", "sample text") ) # => (replacement) # Using the count=1 argument print( re.sub(".*", "(replacement)", "sample text", count=1) ) # => (replacement) # If you want to replace non-empty lines: print( re.sub(".+", "(replacement)", "sample text") ) # => (replacement)
re.sub(".*", ", "(replacement)", "text") doubles replacement on Python 3.7
On Python 3.7 (tested on Windows 64 bits), the replacement of a string using the RegEx .* gives the input string repeated twice! On Python 3.7.2: >>> import re >>> re.sub(".*", "(replacement)", "sample text") '(replacement)(replacement)' On Python 3.6.4: >>> import re >>> re.sub(".*", "(replacement)", "sample text") '(replacement)' On Python 2.7.5 (32 bits): >>> import re >>> re.sub(".*", "(replacement)", "sample text") '(replacement)' What is wrong? How to fix that?
[ "This is not a bug, but a bug fix in Python 3.7 from the commit fbb490fd2f38bd817d99c20c05121ad0168a38ee.\nIn regex, a non-zero-width match moves the pointer position to the end of the match, so that the next assertion, zero-width or not, can continue to match from the position following the match. So in your example, after .* greedily matches and consumes the entire string, the fact that the pointer is then moved to the end of the string still actually leaves \"room\" for a zero-width match at that position, as can be evident from the following code, which behaves the same in Python 2.7, 3.6 and 3.7:\n>>> re.findall(\".*\", 'sample text')\n['sample text', '']\n\nSo the bug fix, which is about replacement of a zero-width match right after a non-zero-width match, now correctly replaces both matches with the replacement text.\n", "This is a common regex issue, it affects a lot of regex flavors, see related\n\nlanguage-agnostic : Why do some regex engines match .* twice in a single input string?\njava : String.replaceAll(regex) makes the same replacement twice\n\nThere are several ways to fix the issue:\n\nAdd anchors on both sides of .*: re.sub(\"^.*$\", \"(replacement)\", \"sample text\")\nSince you want to only match a line once, add the count=1 argument: print( re.sub(\".*\", \"(replacement)\", \"sample text\", count=1) )\nIn case you want to replace any non-empty line, replace * with +: print( re.sub(\".+\", \"(replacement)\", \"sample text\") )\n\nSee the Python demo:\nimport re\n# Adding anchors:\nprint( re.sub(\"^.*$\", \"(replacement)\", \"sample text\") ) # => (replacement)\n# Using the count=1 argument\nprint( re.sub(\".*\", \"(replacement)\", \"sample text\", count=1) ) # => (replacement)\n# If you want to replace non-empty lines:\nprint( re.sub(\".+\", \"(replacement)\", \"sample text\") ) # => (replacement)\n\n" ]
[ 20, 0 ]
[]
[]
[ "python", "python_re" ]
stackoverflow_0054713570_python_python_re.txt
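For readers who want to verify the zero-width-match behavior and the three fixes above in one place, here is a minimal, self-contained sketch; the sample string is arbitrary and the commented outputs are what Python 3.7+ produces:

import re

text = "sample text"  # arbitrary sample input

# .* matches the whole string and then a trailing empty string,
# so an unanchored substitution replaces both matches on Python 3.7+.
print(re.findall(".*", text))                        # ['sample text', '']
print(re.sub(".*", "(replacement)", text))           # (replacement)(replacement)

# Fix 1: anchor the pattern so it can only match once per string.
print(re.sub("^.*$", "(replacement)", text))         # (replacement)

# Fix 2: cap the number of substitutions explicitly.
print(re.sub(".*", "(replacement)", text, count=1))  # (replacement)

# Fix 3: require at least one character, which rules out the empty match.
print(re.sub(".+", "(replacement)", text))           # (replacement)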
Q: What's pylint's TypeVar name specification? Pylint gives a warning whenever something like this happens: import typing SEQ_FR = typing.TypeVar("SEQ_FR") #^^^^^ gets underlined with the warning The warning is like this: Type variable name "SEQ_FR" doesn't conform to predefined naming style. pylint(invalid-name) I tried searching through Pylint's documentations with no luck on finding the exact regex / specifications used. Doesn't seem like I can pass a custom regex onto Pylint for this as well, unlike regular variables, methods, functions, classes, etc. What is the specification used by Pylint to flag TypeVar variables as valid or invalid names? A: You can find the rule used in the Pylint messages documentation; this error is named invalid-name, so the specific documentation can be found on the invalid-name / C0103 page, which has a TypeVar rule in the Predefined Naming Patterns section: Name type: typevar Good Names: T, _CallableT, _T_co, AnyStr, DeviceTypeT, IPAddressT Bad Names: DICT_T, CALLABLE_T, ENUM_T, DeviceType, _StrType, TAnyStr It doesn't document the exact regex rule here, but Pylint will actually include the regex used in the error message when you use the --include-naming-hint=y command-line switch *): Type variable name "SEQ_FR" doesn't conform to predefined naming style ('^_{0,2}(?!T[A-Z])(?:[A-Z]+|(?:[A-Z]+[a-z]+)+T?(?<!Type))(?:_co(?:ntra)?)?$' pattern) (invalid-name) Alternatively, you can find the regex for typevars in the source code. Breaking the pattern down, typevar names are compliant when following these rules: Optionally start with 0-2 underscores not starting with T<capital letter> either all capital letters and no underscores, or a PascalCaseWord ✝) optionally ending in T, no underscores, and not ending with Type with an optional _co or _contra ending. Put differently, typevar names must be either PascalCase ✝) or all-caps, are optionally protected (_) or private (__), are optionally marked as covariant (_co) or contravariant (_contra), and should not end in Type. A compliant name for your example could be SeqFr or SeqFrT; the T suffix is meant to make it clear the SnakeCaseT name is a typevar. Alternatively, you can specify your own regex with the --typevar-rgx=<regex> command-line switch *). Note: As Pierre Sassoulas (the maintainer of Pylint) pointed out in a comment: There is no PEP-8 convention (yet) for typevar naming; instead the PyLint team captured the rules observed in Python projects and the type hinting documentation. The exact rule is therefore still subject to change if an official convention were to be created. *) The Visual Studio Code settings for the Python extension include a python.linting.pylintArgs option that takes a list of command-line switches. ✝) PascalCase is also known as CapitalizedWords, UpperCamelCase or StudlyCase. Don't confuse this with camelCase (initial letter lowercase) or snake_case (all lowercase with underscores). I regularly do! When in doubt, Wikipedia has a handly table of multi-word formats.
What's pylint's TypeVar name specification?
Pylint gives a warning whenever something like this happens: import typing SEQ_FR = typing.TypeVar("SEQ_FR") #^^^^^ gets underlined with the warning The warning is like this: Type variable name "SEQ_FR" doesn't conform to predefined naming style. pylint(invalid-name) I tried searching through Pylint's documentation with no luck finding the exact regex / specification used. It doesn't seem like I can pass a custom regex to Pylint for this either, unlike for regular variables, methods, functions, classes, etc. What is the specification used by Pylint to flag TypeVar variables as valid or invalid names?
[ "You can find the rule used in the Pylint messages documentation; this error is named invalid-name, so the specific documentation can be found on the invalid-name / C0103 page, which has a TypeVar rule in the Predefined Naming Patterns section:\n\nName type: typevar\nGood Names: T, _CallableT, _T_co, AnyStr, DeviceTypeT, IPAddressT\nBad Names: DICT_T, CALLABLE_T, ENUM_T, DeviceType, _StrType, TAnyStr\n\nIt doesn't document the exact regex rule here, but Pylint will actually include the regex used in the error message when you use the --include-naming-hint=y command-line switch *):\n\nType variable name \"SEQ_FR\" doesn't conform to predefined naming style ('^_{0,2}(?!T[A-Z])(?:[A-Z]+|(?:[A-Z]+[a-z]+)+T?(?<!Type))(?:_co(?:ntra)?)?$' pattern) (invalid-name)\n\nAlternatively, you can find the regex for typevars in the source code.\nBreaking the pattern down, typevar names are compliant when following these rules:\n\nOptionally start with 0-2 underscores\nnot starting with T<capital letter>\neither\n\nall capital letters and no underscores,\nor a PascalCaseWord ✝) optionally ending in T, no underscores, and not ending with Type\n\n\nwith an optional _co or _contra ending.\n\nPut differently, typevar names must be either PascalCase ✝) or all-caps, are optionally protected (_) or private (__), are optionally marked as covariant (_co) or contravariant (_contra), and should not end in Type.\nA compliant name for your example could be SeqFr or SeqFrT; the T suffix is meant to make it clear the SnakeCaseT name is a typevar.\nAlternatively, you can specify your own regex with the --typevar-rgx=<regex> command-line switch *).\nNote: As Pierre Sassoulas (the maintainer of Pylint) pointed out in a comment: There is no PEP-8 convention (yet) for typevar naming; instead the PyLint team captured the rules observed in Python projects and the type hinting documentation. The exact rule is therefore still subject to change if an official convention were to be created.\n\n*) The Visual Studio Code settings for the Python extension include a python.linting.pylintArgs option that takes a list of command-line switches.\n✝) PascalCase is also known as CapitalizedWords, UpperCamelCase or StudlyCase. Don't confuse this with camelCase (initial letter lowercase) or snake_case (all lowercase with underscores). I regularly do! When in doubt, Wikipedia has a handly table of multi-word formats.\n" ]
[ 9 ]
[]
[]
[ "pylint", "python", "vscode_python" ]
stackoverflow_0074589610_pylint_python_vscode_python.txt
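As a quick way to experiment with the default rule quoted in the answer, the sketch below tests candidate names against that regex using the plain re module; the pattern is copied from the error message above, and the sample names mix the documented good and bad examples:

import re
import typing

# Default typevar pattern quoted by pylint's invalid-name message.
TYPEVAR_PATTERN = re.compile(
    r"^_{0,2}(?!T[A-Z])(?:[A-Z]+|(?:[A-Z]+[a-z]+)+T?(?<!Type))(?:_co(?:ntra)?)?$"
)

for name in ("SEQ_FR", "SeqFr", "SeqFrT", "_T_co", "DeviceType"):
    print(f"{name}: {'compliant' if TYPEVAR_PATTERN.match(name) else 'non-compliant'}")

# A compliant replacement for the typevar from the question.
SeqFrT = typing.TypeVar("SeqFrT")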
Q: error while working with fbprophet model using cutoffs I am facing an issue while trying to work with fbprophet cross_validation using cutoffs, trying to see the results for the last 7 months. My data ranges from 2017-01-01 to 2022-07-01 df_cv2 = cross_validation(model=m, cutoffs=cutoffs, horizon='30 days') ValueError Traceback (most recent call last) <ipython-input-323-75c1d39c86f3> in <module> ----> 1 df_cv2 = cross_validation(model=m, cutoffs=cutoffs, horizon='30 days') C:\ProgramData\Anaconda3\lib\site-packages\prophet\diagnostics.py in cross_validation(model, horizon, period, initial, parallel, cutoffs, disable_tqdm) 197 198 else: --> 199 predicts = [ 200 single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) 201 for cutoff in (tqdm(cutoffs) if not disable_tqdm else cutoffs) C:\ProgramData\Anaconda3\lib\site-packages\prophet\diagnostics.py in <listcomp>(.0) 198 else: 199 predicts = [ --> 200 single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) 201 for cutoff in (tqdm(cutoffs) if not disable_tqdm else cutoffs) 202 ] C:\ProgramData\Anaconda3\lib\site-packages\prophet\diagnostics.py in single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) 251 for props in m.seasonalities.values() 252 if props['condition_name'] is not None]) --> 253 yhat = m.predict(df[index_predicted][columns]) 254 # Merge yhat(predicts), y(df, original data) and cutoff 255 C:\ProgramData\Anaconda3\lib\site-packages\prophet\forecaster.py in predict(self, df) 1202 else: 1203 if df.shape[0] == 0: -> 1204 raise ValueError('Dataframe has no rows.') 1205 df = self.setup_dataframe(df.copy()) 1206 ValueError: Dataframe has no rows. My dataframe looks like this y ds 0 -1.0 2017-01-01 978 2.0 2017-02-01 1582 0.0 2017-03-01 2237 0.0 2017-04-01 2902 0.0 2017-05-01 I have created my model and applied the fit and these are my cutoffs cutoffs = pd.date_range(start='2021-01-01', end='2022-06-01', freq='1MS') resulting: DatetimeIndex(['2021-01-01', '2021-02-01', '2021-03-01', '2021-04-01', '2021-05-01', '2021-06-01', '2021-07-01', '2021-08-01', '2021-09-01', '2021-10-01', '2021-11-01', '2021-12-01', '2022-01-01', '2022-02-01', '2022-03-01', '2022-04-01', '2022-05-01', '2022-06-01'], dtype='datetime64[ns]', freq='MS') A: I faced the same problem and coped with it by setting horizon = '31 days' (i.e., the maximum number of days that can elapse between two monthly points). This is due to how index_predicted (used here: yhat = m.predict(df[index_predicted][columns])) is set: index_predicted = (df['ds'] > cutoff) & (df['ds'] <= cutoff + horizon), where cutoff is one of your cutoffs. It doesn't work with your arguments because, e.g., cutoff (2021-01-01) + horizon (30 days) = 2021-01-31 and in your df there isn't a ds that is greater than 2021-01-01 and less than or equal to 2021-01-31.
error while working with fbprophet model using cutoffs
I am facing an issue while trying to work with fbprophet cross_validation using cutoffs, trying to see the results for the last 7 months. My data ranges from 2017-01-01 to 2022-07-01 df_cv2 = cross_validation(model=m, cutoffs=cutoffs, horizon='30 days') ValueError Traceback (most recent call last) <ipython-input-323-75c1d39c86f3> in <module> ----> 1 df_cv2 = cross_validation(model=m, cutoffs=cutoffs, horizon='30 days') C:\ProgramData\Anaconda3\lib\site-packages\prophet\diagnostics.py in cross_validation(model, horizon, period, initial, parallel, cutoffs, disable_tqdm) 197 198 else: --> 199 predicts = [ 200 single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) 201 for cutoff in (tqdm(cutoffs) if not disable_tqdm else cutoffs) C:\ProgramData\Anaconda3\lib\site-packages\prophet\diagnostics.py in <listcomp>(.0) 198 else: 199 predicts = [ --> 200 single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) 201 for cutoff in (tqdm(cutoffs) if not disable_tqdm else cutoffs) 202 ] C:\ProgramData\Anaconda3\lib\site-packages\prophet\diagnostics.py in single_cutoff_forecast(df, model, cutoff, horizon, predict_columns) 251 for props in m.seasonalities.values() 252 if props['condition_name'] is not None]) --> 253 yhat = m.predict(df[index_predicted][columns]) 254 # Merge yhat(predicts), y(df, original data) and cutoff 255 C:\ProgramData\Anaconda3\lib\site-packages\prophet\forecaster.py in predict(self, df) 1202 else: 1203 if df.shape[0] == 0: -> 1204 raise ValueError('Dataframe has no rows.') 1205 df = self.setup_dataframe(df.copy()) 1206 ValueError: Dataframe has no rows. My dataframe looks like this y ds 0 -1.0 2017-01-01 978 2.0 2017-02-01 1582 0.0 2017-03-01 2237 0.0 2017-04-01 2902 0.0 2017-05-01 I have created my model and applied the fit and these are my cutoffs cutoffs = pd.date_range(start='2021-01-01', end='2022-06-01', freq='1MS') resulting: DatetimeIndex(['2021-01-01', '2021-02-01', '2021-03-01', '2021-04-01', '2021-05-01', '2021-06-01', '2021-07-01', '2021-08-01', '2021-09-01', '2021-10-01', '2021-11-01', '2021-12-01', '2022-01-01', '2022-02-01', '2022-03-01', '2022-04-01', '2022-05-01', '2022-06-01'], dtype='datetime64[ns]', freq='MS')
[ "I faced the same problem and I coped with it efficiently setting horizon = '31 days' (i.e., the maximum number that could elapse between two months).\nThis is due to how index_predicted (used here: yhat = m.predict(df[index_predicted][columns])), is set: index_predicted = (df['ds'] > cutoff) & (df['ds'] <= cutoff + horizon), where cutoff is one of your cutoffs.\nIt doesn't work with your arguments because, e.g., cutoff (2021-01-01) + horizon (30 days) = 2021-01-31 and in your df there isn't a ds that is greater than 2021-01-01 and less than or equal to 2021-01-31.\n" ]
[ 0 ]
[]
[]
[ "facebook_prophet", "forecasting", "pandas", "python", "time_series" ]
stackoverflow_0073656137_facebook_prophet_forecasting_pandas_python_time_series.txt
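A sketch of the accepted fix on synthetic monthly data; the import path assumes the renamed prophet package (fbprophet on older installs), and the series values are placeholders rather than the asker's real data:

import pandas as pd
from prophet import Prophet          # "from fbprophet import Prophet" on older installs
from prophet.diagnostics import cross_validation

# Synthetic monthly series covering the same 2017-01-01 .. 2022-07-01 range.
df = pd.DataFrame({
    "ds": pd.date_range("2017-01-01", "2022-07-01", freq="MS"),
    "y": range(67),
})

m = Prophet()
m.fit(df)

cutoffs = pd.date_range(start="2021-01-01", end="2022-06-01", freq="1MS")

# '31 days' guarantees cutoff + horizon always reaches the next monthly point,
# so df[index_predicted] is never empty.
df_cv = cross_validation(model=m, cutoffs=cutoffs, horizon="31 days")
print(df_cv.head())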
Q: Resize image by 50% using the least amount of lines I have the following code that resizes the image by a hardcoded number. I would like it to resize using the following formula - image_size / 2 f = r'C:\Users\elazar\bucket\PHOTO' for file in os.listdir(f): f_img = f+"/"+file img = Image.open(f_img).resize((540,540)).save(f_img) Is it possible to shorten this code to fewer lines and, instead of using something like 540,540, cut (divide) the original size by 2? I've tried following some other formulas that I couldn't fully understand here: Open CV Documentation A: ".size" gives the width and height of a picture as a tuple. You can replace your 4th line with the code below. Image.open(f_img).resize((int(Image.open(f_img).size[0] / 2), int(Image.open(f_img).size[1] / 2))).save(f_img) However, this one-line code is much less efficient than the code below it. It opens the image 3 times instead of once. image = Image.open(f_img) image.resize((int(image.size[0] / 2), int(image.size[1] / 2))).save(f_img)
Resize image by 50% using the least amount of lines
I have the following code that resizes the image by a hardcoded number. I would like it to resize using the following formula - image_size / 2 f = r'C:\Users\elazar\bucket\PHOTO' for file in os.listdir(f): f_img = f+"/"+file img = Image.open(f_img).resize((540,540)).save(f_img) Is it possible to shorten this code to fewer lines and, instead of using something like 540,540, cut (divide) the original size by 2? I've tried following some other formulas that I couldn't fully understand here: Open CV Documentation
[ "\".size\" gives the width and height of a picture as a tuple. You can replace the code below with your 4th line.\nImage.open(f_img).resize((int(Image.open(f_img).size[0] / 2), int(Image.open(f_img).size[1] / 2))).save(f_img)\n\nHowever, this one line code is much more inefficient than the code below. It opens the image 3 times instead of one time.\nimage = Image.open(f_img)\nimage.resize((int(image.size[0] / 2), int(image.size[1] / 2))).save(f_img)\n\n" ]
[ 3 ]
[]
[]
[ "python" ]
stackoverflow_0074589508_python.txt
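Putting the answer's two-line version back into the asker's loop gives the sketch below; Image is Pillow's, the folder path is the one from the question, and floor division keeps the dimensions as the integers resize() requires:

import os
from PIL import Image

f = r"C:\Users\elazar\bucket\PHOTO"

for file in os.listdir(f):
    f_img = os.path.join(f, file)
    img = Image.open(f_img)
    # img.width and img.height are shortcuts for img.size[0] and img.size[1].
    img.resize((img.width // 2, img.height // 2)).save(f_img)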
Q: Cannot catch requests.exceptions.ConnectionError with try except It feels like I am slowly losing my sanity. I am unable to catch a connection error in a REST-API request. I read at least 20 similar questions on stackoverflow, tried every possible except statement I could think of and simplified the code as much as I could to rule out certain other libraries. I am using Python 3.7 and requests 2.25.1. It is a very basic call to an API on my own server, which sometimes fails, but it only fails once in a while: try: response = requests.get(url, headers=api_headers, auth=HTTPBasicAuth(username, password)) except requests.exceptions.ConnectionError: print("Connection error!") I am sorry I cannot supply a full working example, as I am not connecting to an publicly accessible API, so I had to remove url, username and password. Even though I try to catch the connection error, the script fails with following traceback: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen chunked=chunked, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request self._validate_conn(conn) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn conn.connect() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 421, in connect tls_in_tls=tls_in_tls, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 429, in ssl_wrap_socket sock, context, tls_in_tls, server_hostname=server_hostname File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 472, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 412, in wrap_socket session=session File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 850, in _create self.do_handshake() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1108, in do_handshake self._sslobj.do_handshake() TimeoutError: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 449, in send timeout=timeout File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 756, in urlopen method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\retry.py", line 532, in increment raise six.reraise(type(error), error, _stacktrace) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\packages\six.py", line 734, in reraise raise value.with_traceback(tb) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen chunked=chunked, File 
"C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request self._validate_conn(conn) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn conn.connect() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 421, in connect tls_in_tls=tls_in_tls, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 429, in ssl_wrap_socket sock, context, tls_in_tls, server_hostname=server_hostname File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 472, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 412, in wrap_socket session=session File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 850, in _create self.do_handshake() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1108, in do_handshake self._sslobj.do_handshake() urllib3.exceptions.ProtocolError: ('Connection aborted.', TimeoutError(10060, 'Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat', None, 10060, None)) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 498, in send raise ConnectionError(err, request=request) requests.exceptions.ConnectionError: ('Connection aborted.', TimeoutError(10060, 'Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat', None, 10060, None)) I don't understand how it is possible for the script to fail with requests.exceptions.ConnectionError if I am catching that very error? If I understand that traceback correctly, the error is not thrown in my code, and therefore I am not able to catch it? All I see is python libraries like ssl.py and urllib and request, but not a line from my code. So how do I catch that? Any help is highly appreciated! EDIT (because this is not possible in a comment). @Thomas made a helpful comment to connect to httpstat.us:81 to debug. So I tried replacing my order_response = requests.get() call with response = requests.get("http://httpstat.us:81"). 
This is the exact block in my code: try: order_response = requests.get(order_access_url, headers=api_headers, auth=HTTPBasicAuth(username, password)) if order_response.status_code == 200: order_content = json.loads(order_response.text) else: order_content = "" except requests.exceptions.ConnectionError: print("Connection error!") If I am trying to connect to http://httpstat.us:81 it actually catches the error. If I intentionally not catch it, the error looks like it: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 170, in _new_conn (self._dns_host, self.port), self.timeout, **extra_kw File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\connection.py", line 96, in create_connection raise err File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\connection.py", line 86, in create_connection sock.connect(sa) TimeoutError: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen chunked=chunked, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 394, in _make_request conn.request(method, url, **httplib_request_kw) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 234, in request super(HTTPConnection, self).request(method, url, body=body, headers=headers) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1229, in request self._send_request(method, url, body, headers, encode_chunked) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1275, in _send_request self.endheaders(body, encode_chunked=encode_chunked) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1224, in endheaders self._send_output(message_body, encode_chunked=encode_chunked) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1016, in _send_output self.send(msg) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 956, in send self.connect() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 200, in connect conn = self._new_conn() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 182, in _new_conn self, "Failed to establish a new connection: %s" % e urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPConnection object at 0x00000223F9B42860>: Failed to establish a new connection: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 449, in send timeout=timeout File 
"C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 756, in urlopen method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\retry.py", line 574, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='httpstat.us', port=81): Max retries exceeded with url: / (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x00000223F9B42860>: Failed to establish a new connection: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat')) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Daten\cloud.bss-archery.com\BSS\_Twain\modules\order_extracts_api.py", line 50, in create_order_analysis response = requests.get("http://httpstat.us:81") File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\api.py", line 76, in get return request('get', url, params=params, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 516, in send raise ConnectionError(e, request=request) requests.exceptions.ConnectionError: HTTPConnectionPool(host='httpstat.us', port=81): Max retries exceeded with url: / (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x00000223F9B42860>: Failed to establish a new connection: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat')) So I am still very confused because the last entry in the traceback is in fact the same, requests.exceptions.ConnectionError but it is not caught in my real world application. It is, however, raised by a different line in \lib\site-packages\requests\adapters.py A: Okay, I could figure it out myself. Kind of. A huge problem was that the traceback doesn't point to the line of my code where the exception is raised. I still don't know why that is and if this should be considered a bug in requests or not. But in any case: requests raises a ConnectionError in adapters.py but the origin is a protocol or socket error. This is line 497 in adapters.py: except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) The TimeoutError: [WinError 10060] in the traceback actually points to a socket error. From https://hstechdocs.helpsystems.com/manuals/globalscape/archive/cuteftp8/Socket_errors_10060_10061_10064_10065.htm: A socket error in the 10060 range is a Winsock error. It is generally caused by either outgoing connection problems or connection problems on the host end. 
That is why I wasn't able to reproduce the error with httpstat.us. The solution was to catch it as an OSError: try: response = requests.get(url, headers=api_headers, auth=HTTPBasicAuth(username, password)) except OSError as e: print(e) It's a bit frustrating to be honest, as I still don't know why ProtocolError or socket.error in requests that raises a ConnectionError needs to be caught with "OSError" but at this point, I am just glad I could find ANY solution.
Cannot catch requests.exceptions.ConnectionError with try except
It feels like I am slowly losing my sanity. I am unable to catch a connection error in a REST-API request. I read at least 20 similar questions on stackoverflow, tried every possible except statement I could think of and simplified the code as much as I could to rule out certain other libraries. I am using Python 3.7 and requests 2.25.1. It is a very basic call to an API on my own server, which sometimes fails, but it only fails once in a while: try: response = requests.get(url, headers=api_headers, auth=HTTPBasicAuth(username, password)) except requests.exceptions.ConnectionError: print("Connection error!") I am sorry I cannot supply a full working example, as I am not connecting to an publicly accessible API, so I had to remove url, username and password. Even though I try to catch the connection error, the script fails with following traceback: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen chunked=chunked, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request self._validate_conn(conn) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn conn.connect() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 421, in connect tls_in_tls=tls_in_tls, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 429, in ssl_wrap_socket sock, context, tls_in_tls, server_hostname=server_hostname File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 472, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 412, in wrap_socket session=session File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 850, in _create self.do_handshake() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1108, in do_handshake self._sslobj.do_handshake() TimeoutError: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 449, in send timeout=timeout File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 756, in urlopen method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\retry.py", line 532, in increment raise six.reraise(type(error), error, _stacktrace) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\packages\six.py", line 734, in reraise raise value.with_traceback(tb) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen chunked=chunked, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 382, in _make_request 
self._validate_conn(conn) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 1010, in _validate_conn conn.connect() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 421, in connect tls_in_tls=tls_in_tls, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 429, in ssl_wrap_socket sock, context, tls_in_tls, server_hostname=server_hostname File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\ssl_.py", line 472, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 412, in wrap_socket session=session File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 850, in _create self.do_handshake() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1108, in do_handshake self._sslobj.do_handshake() urllib3.exceptions.ProtocolError: ('Connection aborted.', TimeoutError(10060, 'Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat', None, 10060, None)) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 498, in send raise ConnectionError(err, request=request) requests.exceptions.ConnectionError: ('Connection aborted.', TimeoutError(10060, 'Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat', None, 10060, None)) I don't understand how it is possible for the script to fail with requests.exceptions.ConnectionError if I am catching that very error? If I understand that traceback correctly, the error is not thrown in my code, and therefore I am not able to catch it? All I see is python libraries like ssl.py and urllib and request, but not a line from my code. So how do I catch that? Any help is highly appreciated! EDIT (because this is not possible in a comment). @Thomas made a helpful comment to connect to httpstat.us:81 to debug. So I tried replacing my order_response = requests.get() call with response = requests.get("http://httpstat.us:81"). This is the exact block in my code: try: order_response = requests.get(order_access_url, headers=api_headers, auth=HTTPBasicAuth(username, password)) if order_response.status_code == 200: order_content = json.loads(order_response.text) else: order_content = "" except requests.exceptions.ConnectionError: print("Connection error!") If I am trying to connect to http://httpstat.us:81 it actually catches the error. 
If I intentionally not catch it, the error looks like it: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 170, in _new_conn (self._dns_host, self.port), self.timeout, **extra_kw File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\connection.py", line 96, in create_connection raise err File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\connection.py", line 86, in create_connection sock.connect(sa) TimeoutError: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 706, in urlopen chunked=chunked, File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 394, in _make_request conn.request(method, url, **httplib_request_kw) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 234, in request super(HTTPConnection, self).request(method, url, body=body, headers=headers) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1229, in request self._send_request(method, url, body, headers, encode_chunked) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1275, in _send_request self.endheaders(body, encode_chunked=encode_chunked) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1224, in endheaders self._send_output(message_body, encode_chunked=encode_chunked) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1016, in _send_output self.send(msg) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 956, in send self.connect() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 200, in connect conn = self._new_conn() File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connection.py", line 182, in _new_conn self, "Failed to establish a new connection: %s" % e urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPConnection object at 0x00000223F9B42860>: Failed to establish a new connection: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 449, in send timeout=timeout File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 756, in urlopen method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\util\retry.py", line 574, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: 
HTTPConnectionPool(host='httpstat.us', port=81): Max retries exceeded with url: / (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x00000223F9B42860>: Failed to establish a new connection: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat')) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Daten\cloud.bss-archery.com\BSS\_Twain\modules\order_extracts_api.py", line 50, in create_order_analysis response = requests.get("http://httpstat.us:81") File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\api.py", line 76, in get return request('get', url, params=params, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\api.py", line 61, in request return session.request(method=method, url=url, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "C:\Users\Tilman\AppData\Local\Programs\Python\Python37\lib\site-packages\requests\adapters.py", line 516, in send raise ConnectionError(e, request=request) requests.exceptions.ConnectionError: HTTPConnectionPool(host='httpstat.us', port=81): Max retries exceeded with url: / (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x00000223F9B42860>: Failed to establish a new connection: [WinError 10060] Ein Verbindungsversuch ist fehlgeschlagen, da die Gegenstelle nach einer bestimmten Zeitspanne nicht richtig reagiert hat, oder die hergestellte Verbindung war fehlerhaft, da der verbundene Host nicht reagiert hat')) So I am still very confused because the last entry in the traceback is in fact the same, requests.exceptions.ConnectionError but it is not caught in my real world application. It is, however, raised by a different line in \lib\site-packages\requests\adapters.py
[ "Okay, I could figure it out myself. Kind of.\nA huge problem was that the traceback doesn't point to the line of my code where the exception is raised. I still don't know why that is and if this should be considered a bug in requests or not. But in any case: requests raises a ConnectionError in adapters.py but the origin is a protocol or socket error. This is line 497 in adapters.py:\nexcept (ProtocolError, socket.error) as err:\n raise ConnectionError(err, request=request)\n\nThe TimeoutError: [WinError 10060] in the traceback actually points to a socket error.\nFrom https://hstechdocs.helpsystems.com/manuals/globalscape/archive/cuteftp8/Socket_errors_10060_10061_10064_10065.htm:\n\nA socket error in the 10060 range is a Winsock error. It is generally caused by either outgoing connection problems or connection problems on the host end.\n\nThat is why I wasn't able to reproduce the error with httpstat.us.\nThe solution was to catch it as an OSError:\ntry:\n response = requests.get(url, headers=api_headers, auth=HTTPBasicAuth(username, password))\nexcept OSError as e:\n print(e)\n\nIt's a bit frustrating to be honest, as I still don't know why ProtocolError or socket.error in requests that raises a ConnectionError needs to be caught with \"OSError\" but at this point, I am just glad I could find ANY solution.\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x", "python_requests", "try_except" ]
stackoverflow_0074253820_python_python_3.x_python_requests_try_except.txt
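A distilled sketch of the accepted workaround; the URL and credentials are placeholders. It leans on the fact that requests' ConnectionError ultimately derives from IOError/OSError, and that the raw WinError 10060 surfaces as an OSError subclass (TimeoutError), so one except clause covers both layers:

import requests
from requests.auth import HTTPBasicAuth

url = "https://example.com/api"        # placeholder endpoint
username, password = "user", "secret"  # placeholder credentials

try:
    response = requests.get(url, auth=HTTPBasicAuth(username, password), timeout=30)
except OSError as e:
    # Catches requests.exceptions.ConnectionError and the underlying
    # socket-level TimeoutError alike.
    print(f"Connection problem: {e}")
else:
    print(response.status_code)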
Q: Temporarily change cursor using Python I am writing a script that intercepts touchpad output and sends it to Windows after some processing. So there is no GUI involved. I want to change the cursor temporarily when certain cursor behavior occurs. I have searched the web to the best of my abilities and found very few posts that talked about using win32api.SetCursor(), but this does not work at all. Most of the posts talk about changing the cursor using Tkinter or wxPython. Is there any other solution to change the cursor system-wide? A: Using the code below the cursor is changed system-wide, though I have to restore the arrow cursor before quitting the program. If there are better ways I would appreciate your response. from ctypes import * import win32con SetSystemCursor = windll.user32.SetSystemCursor #reference to function SetSystemCursor.restype = c_int #return SetSystemCursor.argtype = [c_int, c_int] #arguments LoadCursorFromFile = windll.user32.LoadCursorFromFileA #reference to function LoadCursorFromFile.restype = c_int #return LoadCursorFromFile.argtype = c_char_p #arguments CursorPath = "../cursor/MyCross.cur" NewCursor = LoadCursorFromFile(CursorPath) if NewCursor is None: print("Error loading the cursor") elif SetSystemCursor(NewCursor, win32con.IDC_ARROW) == 0: print("Error in setting the cursor") A: import win32con import win32api import win32gui import ctypes import time import atexit cursor = win32gui.LoadImage(0, 32512, win32con.IMAGE_CURSOR, 0, 0, win32con.LR_SHARED) save_system_cursor = ctypes.windll.user32.CopyImage(cursor, win32con.IMAGE_CURSOR, 0, 0, win32con.LR_COPYFROMRESOURCE) cursor = win32gui.LoadImage(0, "file.cur", win32con.IMAGE_CURSOR, 0, 0, win32con.LR_LOADFROMFILE) ctypes.windll.user32.SetSystemCursor(cursor, 32512) ctypes.windll.user32.DestroyCursor(cursor) I hope this helps
Temporarily change cursor using Python
I am writing a script that intercepts touchpad output and sends it to Windows after some processing. So there is no GUI involved. I want to change the cursor temporarily when certain cursor behavior occurs. I have searched the web to the best of my abilities and found very few posts that talked about using win32api.SetCursor(), but this does not work at all. Most of the posts talk about changing the cursor using Tkinter or wxPython. Is there any other solution to change the cursor system-wide?
[ "Using the code below the cursor is changed system-wide though I have to restored to the arrow cursor below quitting the program. If there are other better ways I would appreciate your response.\nfrom ctypes import *\nimport win32con\n\nSetSystemCursor = windll.user32.SetSystemCursor #reference to function\nSetSystemCursor.restype = c_int #return\nSetSystemCursor.argtype = [c_int, c_int] #arguments\n\nLoadCursorFromFile = windll.user32.LoadCursorFromFileA #reference to function\nLoadCursorFromFile.restype = c_int #return\nLoadCursorFromFile.argtype = c_char_p #arguments\n\nCursorPath = \"../cursor/MyCross.cur\"\n\nNewCursor = LoadCursorFromFile(CursorPath)\n\nif NewCursor is None:\n print \"Error loading the cursor\"\nelif SetSystemCursor(NewCursor, win32con.IDC_ARROW) == 0:\n print \"Error in setting the cursor\"\n\n", "import win32con\nimport win32api\nimport win32gui\nimport ctypes\nimport time\nimport atexit\n\n\ncursor = win32gui.LoadImage(0, 32512, win32con.IMAGE_CURSOR, \n 0, 0, win32con.LR_SHARED)\nsave_system_cursor = ctypes.windll.user32.CopyImage(cursor, win32con.IMAGE_CURSOR, \n 0, 0, win32con.LR_COPYFROMRESOURCE)\n\n\n\ncursor = win32gui.LoadImage(0, \"file.cur\", win32con.IMAGE_CURSOR, \n 0, 0, win32con.LR_LOADFROMFILE);\nctypes.windll.user32.SetSystemCursor(cursor, 32512)\nctypes.windll.user32.DestroyCursor(cursor);\n\nI hope this helps\n" ]
[ 1, 0 ]
[]
[]
[ "cursor", "python", "winapi" ]
stackoverflow_0007921307_cursor_python_winapi.txt
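To undo a SetSystemCursor swap on exit without juggling saved copies, one option (untested here, but a documented Windows API) is SystemParametersInfo with SPI_SETCURSORS, which reloads the user's cursor scheme from the registry; a sketch:

import atexit
import ctypes
import win32con

def restore_system_cursors():
    # SPI_SETCURSORS with null parameters reloads the stock cursor scheme,
    # reverting any SetSystemCursor replacement.
    ctypes.windll.user32.SystemParametersInfoW(win32con.SPI_SETCURSORS, 0, None, 0)

atexit.register(restore_system_cursors)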
Q: Space-delimited results, errors when concatenating 2 columns the code I'm running gives results that are space-delimited. This creates a problem with my sector column which gives a result of Communication Services. It creates 1 column for Communication and another column for Services where I need 1 column saying Communication Services. I have tried to concatenate the 2 columns into 1 but I'm getting attribute and str errors and don't know how to achieve this. Can anyone show how this can be done? Thanks Code import yfinance as yf import pandas as pd from concurrent.futures import ThreadPoolExecutor list_of_futures= [] def get_stats(ticker): info = yf.Tickers(ticker).tickers[ticker].info s= f"{ticker} {info['currentPrice']} {info['marketCap']} {info['sector']}" list_of_futures.append(s) ticker_list = ['AAPL', 'ORCL', 'GTBIF', 'META'] with ThreadPoolExecutor() as executor: executor.map(get_stats, ticker_list) ( pd.DataFrame(list_of_futures) [0].str.split(expand=True) .rename(columns={0: "Ticker", 1: "Price", 2: "Market Cap", 3: "Sector", 4: "Sector1"}) .to_excel("yahoo_futures.xlsx", index=False) ) Current Results Desired Results A: Let us reformulate the get_stats function to return a dictionary instead of a string. This way you can avoid the unnecessary step of splitting the strings to create a dataframe def get_stats(ticker): info = yf.Tickers(ticker).tickers[ticker].info cols = ['currentPrice', 'marketCap', 'sector'] return {'ticker': ticker, **{c: info[c] for c in cols}} tickers = ['AAPL', 'ORCL', 'GTBIF', 'META'] with ThreadPoolExecutor() as executor: result_iter = executor.map(get_stats, tickers) df = pd.DataFrame(result_iter) Result ticker currentPrice marketCap sector 0 AAPL 148.11 2356148699136 Technology 1 ORCL 82.72 223027183616 Technology 2 GTBIF 13.25 3190864896 Healthcare 3 META 111.41 295409188864 Communication Services
Space-delimited results, errors when concatenating 2 columns
the code I'm running gives results that are space-delimited. This creates a problem with my sector column which gives a result of Communication Services. It creates 1 column for Communication and another column for Services where I need 1 column saying Communication Services. I have tried to concatenate the 2 columns into 1 but I'm getting attribute and str errors and don't know how to achieve this. Can anyone show how this can be done? Thanks Code import yfinance as yf import pandas as pd from concurrent.futures import ThreadPoolExecutor list_of_futures= [] def get_stats(ticker): info = yf.Tickers(ticker).tickers[ticker].info s= f"{ticker} {info['currentPrice']} {info['marketCap']} {info['sector']}" list_of_futures.append(s) ticker_list = ['AAPL', 'ORCL', 'GTBIF', 'META'] with ThreadPoolExecutor() as executor: executor.map(get_stats, ticker_list) ( pd.DataFrame(list_of_futures) [0].str.split(expand=True) .rename(columns={0: "Ticker", 1: "Price", 2: "Market Cap", 3: "Sector", 4: "Sector1"}) .to_excel("yahoo_futures.xlsx", index=False) ) Current Results Desired Results
[ "Let us reformulate the get_stats function to return dictionary instead string. This way you can avoid the unnecessary step to split the strings to create a dataframe\ndef get_stats(ticker):\n info = yf.Tickers(ticker).tickers[ticker].info\n cols = ['currentPrice', 'marketCap', 'sector']\n return {'ticker': ticker, **{c: info[c] for c in cols}}\n\ntickers = ['AAPL', 'ORCL', 'GTBIF', 'META']\n\nwith ThreadPoolExecutor() as executor:\n result_iter = executor.map(get_stats, tickers)\n\ndf = pd.DataFrame(result_iter)\n\nResult\n ticker currentPrice marketCap sector\n0 AAPL 148.11 2356148699136 Technology\n1 ORCL 82.72 223027183616 Technology\n2 GTBIF 13.25 3190864896 Healthcare\n3 META 111.41 295409188864 Communication Services\n\n" ]
[ 1 ]
[]
[]
[ "concatenation", "pandas", "python" ]
stackoverflow_0074589387_concatenation_pandas_python.txt
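Extending the dictionary-based answer to also write the spreadsheet, a sketch with the question's tickers and desired column headers; info.get() is used so a missing field yields None instead of a KeyError:

import pandas as pd
import yfinance as yf
from concurrent.futures import ThreadPoolExecutor

def get_stats(ticker):
    info = yf.Tickers(ticker).tickers[ticker].info
    return {
        "Ticker": ticker,
        "Price": info.get("currentPrice"),
        "Market Cap": info.get("marketCap"),
        "Sector": info.get("sector"),  # stays one column: "Communication Services"
    }

ticker_list = ["AAPL", "ORCL", "GTBIF", "META"]
with ThreadPoolExecutor() as executor:
    df = pd.DataFrame(executor.map(get_stats, ticker_list))

df.to_excel("yahoo_futures.xlsx", index=False)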
Q: train, test, validation splits in tfds.load so I am asked to implement the split function parameter: 80% train, 10% validation, and 10% test. And I do not understand how to do it here. Please help. Thanks. def plot_example(x_raw, y_raw): fig, axes = plt.subplots(3, 3) i = 0 for i in range(3): for j in range(3): imgplot = axes[i,j].imshow(x_raw[i*3 + j], cmap = 'bone') axes[i,j].set_title(y_raw[i*3 + j]) axes[i,j].get_yaxis().set_visible(False) axes[i,j].get_xaxis().set_visible(False) fig.set_size_inches(18.5, 10.5, forward=True) ## TODO: Implement the split function parameter: 80% train, 10% validation, and 10% test. (ds_train, ds_val, ds_test), ds_info = tfds.load("colorectal_histology", split=[], as_supervised=True, with_info=True) df = tfds.as_dataframe(ds_train.shuffle(1000).take(1000), ds_info) plot_example(df['image'], df['label']) print(ds_info) Please explain A: tfds.load has a split argument. You can use this argument to load the dataset in your desired format. If you want 80% train, 10% val, 10% test, you can simply do tfds.load( "colorectal_histology", split=["train[20%:]", "train[0%:10%]", "train[10%:20%]"], as_supervised=True, with_info=True) Here the 1st argument in split, train[20%:], will return 80% of the dataset as training, train[0%:10%] will return 10% of the training data as validation, and train[10%:20%] will return the other 10 percent as the testing set. Though you can use the complete testing set, if you want an 80,10,10 split from training, this is what you can do. Read more here
train, test, validation splits in tfds.load
so I am asked to implement the split function parameter: 80% train, 10% validation, and 10% test. And I do not understand how to do it here. Please help. Thanks. def plot_example(x_raw, y_raw): fig, axes = plt.subplots(3, 3) i = 0 for i in range(3): for j in range(3): imgplot = axes[i,j].imshow(x_raw[i*3 + j], cmap = 'bone') axes[i,j].set_title(y_raw[i*3 + j]) axes[i,j].get_yaxis().set_visible(False) axes[i,j].get_xaxis().set_visible(False) fig.set_size_inches(18.5, 10.5, forward=True) ## TODO: Implement the split function parameter: 80% train, 10% validation, and 10% test. (ds_train, ds_val, ds_test), ds_info = tfds.load("colorectal_histology", split=[], as_supervised=True, with_info=True) df = tfds.as_dataframe(ds_train.shuffle(1000).take(1000), ds_info) plot_example(df['image'], df['label']) print(ds_info) Please explain
[ "The tfds.load has the argument of split. You can use this argument to load the dataset in your desired format. If you want 80% train, 10% val, 10% test, you can simply do\ntfds.load(\n colorectal_histology,\n split=[\"train[20%:]\", \"train[0%:10%]\", \"train[10%:20%\"],\n as_supervised=True, \n with_info=True)\n\nHere the 1st argument in split train[10%:] will return the 90% of dataset as training, train[0%:10%] will return the 10% dataset from training as validation, and train[10%:20%] will return the other 10 percent as testing set. Though you can use the complete testing set, but if you want a split as 80,10,10 from training, this is what you can do.\nRead more here\n" ]
[ 0 ]
[]
[]
[ "machine_learning", "python", "scikit_learn", "sklearn_pandas" ]
stackoverflow_0074587287_machine_learning_python_scikit_learn_sklearn_pandas.txt
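If the three subsets should also come out in plain 80/10/10 order (train first, then validation, then test), the equivalent slicing is, as a sketch:

import tensorflow_datasets as tfds

(ds_train, ds_val, ds_test), ds_info = tfds.load(
    "colorectal_histology",
    split=["train[:80%]", "train[80%:90%]", "train[90%:]"],
    as_supervised=True,
    with_info=True,
)
print(ds_info.splits)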
Q: How to use .env files and environment variables with Python and macOS So I am connecting to an RPC cloud node and trying to get the latest block from the Ethereum blockchain, along with all the block details, and have written some code in Python using web3.py. I have the code ready and, according to the official doc https://web3py.readthedocs.io/en/v5/troubleshooting.html, I am able to set up a virtual environment too. I only want to understand how to add environment variables and then retrieve them in my code. As far as I understand I will have to import os and then create a .env file and type username=xyz key=abc endpoint="example.com" Is that it? A: The easiest way to have environment variables on macOS is to use Bash shell environment files and the source command. Virtualenv does this internally when you run the command source venv/bin/activate. Note that there is no standard for the .env file format. Create an env file mac.env with the content: export USERNAME=xyz export KEY=abc export ENDPOINT="example.com" Then in your Bash shell you can import this file before running your Python application: source mac.env echo $USERNAME xyz Because the .env file is now loaded into the memory of your shell and exported, any Python application you run will automatically receive these environment variables. python myapplication.py Then in your Python code you can do: import os username = os.environ.get("USERNAME") if username is None: raise RuntimeError("USERNAME not set") else: print(f"Username is {username}") If you need to use different formats of env files, e.g. for Docker or JavaScript compatibility, there are tools called shdotenv and python-dotenv to deal with this.
How to use .env files and environment variables with Python and macOS
So I am connecting to an RPC cloud node and trying to get the latest block from the Ethereum blockchain, along with all the block details, and have written some code in Python using web3.py. I have the code ready and, according to the official doc https://web3py.readthedocs.io/en/v5/troubleshooting.html, I am able to set up a virtual environment too. I only want to understand how to add environment variables and then retrieve them in my code. As far as I understand I will have to import os and then create a .env file and type username=xyz key=abc endpoint="example.com" Is that it?
[ "The easiest way to have environment variables on macOS is to use Bash shell environment files and source command. Virtualenv does this internally when you run the command source venv/bin/activate.\nNote that there is no standard on .env file format.\nCreate an env file mac.env with the content:\nexport USERNAME=xyz\nexport KEY=abc\nexport ENDPOINT=\"example.com\"\n\nThen in your Bash shell you can import this file before running your Python application:\nsource mac.env\n\necho $USERNAME\n\nxyz\n\nBecause the .env file is now loaded into the memory of your shell and exported, any Python application you run will automatically receive these environment variables.\npython myapplication.py\n\nThen if your Python code you can do:\nimport os\n\nusername = os.environ.get(\"USERNAME\")\n\nif username is None:\n raise RuntimeError(\"USERNAME not set\")\nelse:\n print(f\"Username is {username}\")\n\n\nIf you need to use different formats of env files, e.g. for Docker or JavaScript compatibility, there are tools called shdotenv and python-dotenv to deal with this.\n" ]
[ 0 ]
[]
[]
[ "environment_variables", "ethereum", "python", "virtual_environment", "web3py" ]
stackoverflow_0074567136_environment_variables_ethereum_python_virtual_environment_web3py.txt
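A minimal runnable sketch of the python-dotenv approach the answer mentions, assuming pip install python-dotenv and a .env file next to the script; the variable names are taken from the question:

# .env contains:  USERNAME=xyz  KEY=abc  ENDPOINT=example.com
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory into os.environ

username = os.environ.get("USERNAME")
if username is None:
    raise RuntimeError("USERNAME not set")
print(f"Username is {username}, endpoint is {os.environ.get('ENDPOINT')}")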
Q: How to make a variable based on the content of another variable I have a CSV with info in it, the start of it is the profile name and I want to store the row under a variable whose name is the profile name value, like so: def GetShipping(): file = "profiles.csv" with open(file) as f: heading = next(f) reader = csv.reader(f) for row in reader: profile_name = row[0] # The profile name is John John = [] # The profile list needs to be callable with the variable John first_name = row[1] John.append(first_name) last_name = row[2] John.append(last_name) title = row[3] John.append(title) return John A: It's hard to understand your question. But the code below will give you some idea. import csv from typing import Tuple, Dict def GetShipping(): file = "profiles.csv" with open(file) as f: heading = next(f) reader = csv.reader(f) profile_map: Dict[str, Tuple[str, str, str]] = dict() for row in reader: profile_name = row[0] # The profile name is Something profile_tuple = (row[1], row[2], row[3]) profile_map[profile_name] = profile_tuple return profile_map
How to make a variable based on the content of another variable
I have a CSV with info in it, the start of it is the profile name and I want to store the row under a variable whose name is the profile name value, like so: def GetShipping(): file = "profiles.csv" with open(file) as f: heading = next(f) reader = csv.reader(f) for row in reader: profile_name = row[0] # The profile name is John John = [] # The profile list needs to be callable with the variable John first_name = row[1] John.append(first_name) last_name = row[2] John.append(last_name) title = row[3] John.append(title) return John
[ "It's hard to understand your question.\nBut, below code will give you some idea.\nimport csv\nfrom typing import Tuple, Dict\n\n\ndef GetShipping():\n file = \"profiles.csv\"\n with open(file) as f:\n heading = next(f)\n reader = csv.reader(f)\n\n profile_map: Dict[str, Tuple[str, str, str]] = dict()\n for row in reader:\n profile_name = row[0] # The profile name is Something\n profile_tuple = (row[1], row[2], row[3])\n profile_map[profile_name] = profile_tuple\n\n return profile_map\n\n" ]
[ 0 ]
[]
[]
[ "csv", "python" ]
stackoverflow_0074589586_csv_python.txt
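A hedged alternative sketch using csv.DictReader, which removes the index bookkeeping from the answer; the column names (profile, first_name, last_name, title) are assumptions about the hypothetical profiles.csv header row:

import csv

def get_shipping(path="profiles.csv"):
    profiles = {}
    with open(path, newline="") as f:
        for row in csv.DictReader(f):       # the header row supplies the keys
            profiles[row["profile"]] = row  # look profiles up by name, not by variable
    return profiles

# usage: profiles = get_shipping(); print(profiles["John"]["first_name"])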
Q: How to include xml prolog to xml files using python 3? I want to include the XML prolog in my XML file... I tried the following - ET.tostring(root, encoding='utf8', method='xml') But it works only while printing and not for writing to file. I have a small code where I am changing an attribute and modifying the XML file. But I want to add the XML prolog also. Any idea how to ? import xml.etree.ElementTree as ET tree = ET.parse('xyz.xml') root = tree.getroot() root[0].text = 'abc' ET.tostring(root, encoding='utf8', method='xml') tree.write('xyz.xml') A: In order to add prolog to XML you need to pass additional parameter (xml_declaration) to tree.write() method as follows: tree.write('xyz.xml', xml_declaration=True) The next-to-last code line (the stray ET.tostring call) is redundant and can be removed; the write call should also take one more parameter: tree.write('xyz.xml', encoding='UTF-8', xml_declaration=True) The corrected code will be as follows: import xml.etree.ElementTree as ET tree = ET.parse('xyz.xml') root = tree.getroot() root[0].text = 'abc' tree.write('xyz.xml', encoding='UTF-8', xml_declaration=True) P.S. As encoding and xml_declaration are the second and the third optional named parameters, call of write method can be simplified: tree.write('xyz.xml', 'UTF-8', True) A: Using lxml.etree does it: import lxml.etree xml = lxml.etree.parse('xyz.xml') root = xml.getroot() root[0].text = 'abc' with open("xyz2.xml", 'wb') as f: f.write(lxml.etree.tostring(root, xml_declaration=True, encoding="utf-8")) print(open("xyz2.xml", 'r').read()) Output: <?xml version='1.0' encoding='utf-8'?> <note> <to>abc</to> <from>Jani</from> <heading>Reminder</heading> <body>Don't forget me this weekend!</body> </note>
How to include xml prolog to xml files using python 3?
I want to include the XML prolog in my XML file... I tried the following - ET.tostring(root, encoding='utf8', method='xml') But it works only while printing and not for writing to file. I have a small code where I am changing an attribute and modifying the XML file. But I want to add the XML prolog also. Any idea how to ? import xml.etree.ElementTree as ET tree = ET.parse('xyz.xml') root = tree.getroot() root[0].text = 'abc' ET.tostring(root, encoding='utf8', method='xml') tree.write('xyz.xml')
[ "In order to add prolog to XML you need to pass additional parameter (xml_declaration) to tree.write() method as follows:\ntree.write('xyz.xml', xml_declaration=True)\n\nThe last but one code line is redundant, so our method should take one more parameter:\ntree.write('xyz.xml', encoding='UTF-8', xml_declaration=True)\n\nThe corrected code will be as follows:\nimport xml.etree.ElementTree as ET\n\ntree = ET.parse('xyz.xml')\nroot = tree.getroot()\nroot[0].text = 'abc'\ntree.write('xyz.xml', encoding='UTF-8', xml_declaration=True)\n\nP.S. As enconding and xml_declaration are the second and the third optional named parameters, call of write method can be simplified:\ntree.write('xyz.xml', 'UTF-8', True)\n\n", "Using lxml.etree does it:\nimport lxml.etree\n\nxml = lxml.etree.parse('xyz.xml')\nroot = xml.getroot()\nroot[0].text = 'abc'\n\nwith open(\"xyz2.xml\", 'wb') as f:\n f.write(lxml.etree.tostring(root, xml_declaration=True, encoding=\"utf-8\"))\n\nprint(open(\"xyz2.xml\", 'r').read())\n\nOutput:\n<?xml version='1.0' encoding='utf-8'?>\n<note>\n <to>abc</to>\n <from>Jani</from>\n <heading>Reminder</heading>\n <body>Don't forget me this weekend!</body>\n</note>\n\n" ]
[ 1, 0 ]
[]
[]
[ "elementtree", "lxml", "python", "python_3.x", "xml" ]
stackoverflow_0061949852_elementtree_lxml_python_python_3.x_xml.txt
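A self-contained sketch of the accepted ElementTree fix that builds a tiny document in memory, so it can run without an existing xyz.xml on disk:

import xml.etree.ElementTree as ET

root = ET.Element("note")
ET.SubElement(root, "to").text = "abc"
tree = ET.ElementTree(root)
tree.write("out.xml", encoding="UTF-8", xml_declaration=True)  # writes the prolog
print(open("out.xml").read())
# <?xml version='1.0' encoding='UTF-8'?><note><to>abc</to></note>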
Q: Python .toml and .cfg nonsense? I am having a hard time configure proper cfg and toml files.... I just want to understand these, I am coming from a Java background, and nothing clear from my researches. Why namespaces are so bad ? Simple python project would take the first folder / package as a name... if I call it SQLAlchemy it will override the real SQLAlchemy on pip as a base name... What is the use of .cfg file ? If I have a bunch of packages it should be a .cfg in each, not a global one as it retains 'version' for only one package, and have a 'package dir search and find' option... .cfg is useless look, i have this project : ZeProject | | -- src | | - Whatever | | - Package1 | | | - somefiles.py | | - Package2 | | | - somefiles.py So this should be 2 modules right ? So why do I have only 1 cfg file for a version ? While I have a toml for the project version ? Neither the namespace is taken in consideration... Let's see, this is my .toml : [build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" [project] name = "stfulama" version = "4" and this is my .cfg : [metadata] version = 666 [options] package-dir = = src namespace_packages = wow [options.packages.find] where = youwant It installs perfectly with pip install, with the project name 'stfulama', and version 4... nothing of the .cfg is taken in consideration, from the stupid stuff I wrote, I can import in an other project : import from whatever.package1 or whatever.package2 Can anyone explain me the use of these ? And why there is no namespace as se ? And why python library (pip) can be override with a simple package name ? I am so confused A: Why namespaces are so bad ? Package namespacing is a community decision with advantages and drawbacks. The python community has largely fallen on the side of not doing it, in part for social reasons and in part for technological reasons: the language historically did not support "namespace packages" and they were not easy to implement, and as they were opt-in easy to screw up. So only specific multi-project organisations would use namespaces in order to put all their stuff together. PEP 420 improved on that, but the habit remains, and as does the idea of "useless namespaces". It's not like calling your package org.sqlalchemy prevents somebody else from calling their package org.sqlalchemy. The package registry might do so, but the same will happen with a bare sqlalchemy. What is the use of .cfg file ? setup.cfg is used to perform declarative configuration of setuptools. Literally the second hit on google yields https://setuptools.pypa.io/en/latest/userguide/declarative_config.html It predates pyproject.toml, and as a result there are redundancies between the two, mostly in that the [metadata] table of setup.cfg and the [project] table of pyproject.toml are both declarative means of expressing package metadata. So this should be 2 modules right ? It's one package. So why do I have only 1 cfg file for a version ? Because it's one package. It installs perfectly with pip install, with the project name 'stfulama', and version 4... nothing of the .cfg is taken in consideration Try removing the setup.cfg and see what happens. And why python library (pip) can be override with a simple package name ? I have no idea what that means. And pip is not "python library", whatever that means as well.
Python .toml and .cfg nonsense?
I am having a hard time configure proper cfg and toml files.... I just want to understand these, I am coming from a Java background, and nothing clear from my researches. Why namespaces are so bad ? Simple python project would take the first folder / package as a name... if I call it SQLAlchemy it will override the real SQLAlchemy on pip as a base name... What is the use of .cfg file ? If I have a bunch of packages it should be a .cfg in each, not a global one as it retains 'version' for only one package, and have a 'package dir search and find' option... .cfg is useless look, i have this project : ZeProject | | -- src | | - Whatever | | - Package1 | | | - somefiles.py | | - Package2 | | | - somefiles.py So this should be 2 modules right ? So why do I have only 1 cfg file for a version ? While I have a toml for the project version ? Neither the namespace is taken in consideration... Let's see, this is my .toml : [build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" [project] name = "stfulama" version = "4" and this is my .cfg : [metadata] version = 666 [options] package-dir = = src namespace_packages = wow [options.packages.find] where = youwant It installs perfectly with pip install, with the project name 'stfulama', and version 4... nothing of the .cfg is taken in consideration, from the stupid stuff I wrote, I can import in an other project : import from whatever.package1 or whatever.package2 Can anyone explain me the use of these ? And why there is no namespace as se ? And why python library (pip) can be override with a simple package name ? I am so confused
[ "\nWhy namespaces are so bad ?\n\nPackage namespacing is a community decision with advantages and drawbacks. The python community has largely fallen on the side of not doing it, in part for social reasons and in part for technological reasons: the language historically did not support \"namespace packages\" and they were not easy to implement, and as they were opt-in easy to screw up. So only specific multi-project organisations would use namespaces in order to put all their stuff together.\nPEP 420 improved on that, but the habit remains, and as does the idea of \"useless namespaces\".\nIt's not like calling your package org.sqlalchemy prevents somebody else from calling their package org.sqlalchemy. The package registry might do so, but the same will happen with a bare sqlalchemy.\n\nWhat is the use of .cfg file ?\n\nsetup.cfg is used to perform declarative configuration of setuptools. Literally the second hit on google yields https://setuptools.pypa.io/en/latest/userguide/declarative_config.html\nIt predates pyproject.toml, and as a result there are redundancies between the two, mostly in that the [metadata] table of setup.cfg and the [project] table of pyproject.toml are both declarative means of expressing package metadata.\n\nSo this should be 2 modules right ?\n\nIt's one package.\n\nSo why do I have only 1 cfg file for a version ?\n\nBecause it's one package.\n\nIt installs perfectly with pip install, with the project name 'stfulama', and version 4... nothing of the .cfg is taken in consideration\n\nTry removing the setup.cfg and see what happens.\n\nAnd why python library (pip) can be override with a simple package name ?\n\nI have no idea what that means. And pip is not \"python library\", whatever that means as well.\n" ]
[ 0 ]
[]
[]
[ "cfg", "package", "python", "toml" ]
stackoverflow_0074589685_cfg_package_python_toml.txt
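Relatedly, the distinction the answer draws between the distribution name (from pyproject.toml) and the import name (the folder under src) can be inspected at runtime; a small sketch assuming Python 3.10+ and the question's names:

from importlib.metadata import version, packages_distributions

print(version("stfulama"))                       # "4", the [project] version
print(packages_distributions().get("whatever"))  # ["stfulama"]: import name -> distribution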
Q: Problem with Python code to send data from Mac to Arduino thanks in advance for any help on this. I am writing some code to send data from a Mac to an Arduino board so that I can program a flash memory device. I have a Python program which negotiates a link to the arduino board and then should send 256 byte chunks of data read from a file to the arduino. Code running on the Arduino programs the memory device in 256 byte pages using an SPI link. Here's the Python code: import serial, time, sys try: dataFile = open(sys.argv[1], "rb") except IOError: sys.exit("file cannot be opened") arduino = serial.Serial('/dev/cu.usbmodem2101', 19200, timeout=1) time.sleep(1) # give the connection a second to settle arduino.write(("WAKEUP").encode('ascii')) if( arduino.readline() != ("ACK").encode('ascii') ): sys.exit("no initial ACK from programmer") print("received initial ACK") for block in range(1, 131073): # 256Mb is 131072 x 256B blocks blockData = dataFile.read(256) checksum = 256 - (sum(blockData) % 256) # checksum when added to summed data should result in 0 arduino.write(blockData) # send the data arduino.write(checksum) # send the checksum if( arduino.readline() != ("ACK").encode('ascii') ): # wait for the block to be processed sys.exit("Failed to complete data transfer") print("Block = " + str(block) + " sent succesfully" ) arduino.close() dataFile.close() Instead of sending the data the program just sends 0x00 over and over. If I modify the code to read a single byte of data from the file at a time and send the data one byte at a time it works fine. Please can anyone advise me what I'm doing wrong with the code shown above? A: I worked on this some more to fix some checksum-related bugs and the following code works: import serial, time, sys try: dataFile = open(sys.argv[1], "rb") except IOError: sys.exit("file cannot be opened") arduino = serial.Serial('/dev/cu.usbmodem2101', 115200, timeout=1) time.sleep(1) # give the connection a second to settle arduino.write(("WAKEUP").encode('ascii')) if( arduino.readline() != ("ACK").encode('ascii') ): sys.exit("no initial ACK from programmer") print("received initial ACK") for block in range(0, 16384): # 256Mb is 16384 x 2048B blocks blockData = dataFile.read(2048) checksum = (256 - (sum(blockData) % 256)) % 256 # checksum when added to summed data should result in 0 arduino.write((blockData)) # send data if( checksum == 0) : arduino.write(bytearray([0])) else : arduino.write(checksum.to_bytes((checksum.bit_length() + 7) // 8, 'big')) # send the checksum if( arduino.readline() != ("ACK").encode('ascii') ): # wait for the block to be processed sys.exit("Failed to complete data transfer") print("Block = " + str(block) + " sent succesfully" ) arduino.close() dataFile.close() Don't really understand why it suddenly decided to work.
Problem with Python code to send data from Mac to Arduino
thanks in advance for any help on this. I am writing some code to send data from a Mac to an Arduino board so that I can program a flash memory device. I have a Python program which negotiates a link to the arduino board and then should send 256 byte chunks of data read from a file to the arduino. Code running on the Arduino programs the memory device in 256 byte pages using an SPI link. Here's the Python code: import serial, time, sys try: dataFile = open(sys.argv[1], "rb") except IOError: sys.exit("file cannot be opened") arduino = serial.Serial('/dev/cu.usbmodem2101', 19200, timeout=1) time.sleep(1) # give the connection a second to settle arduino.write(("WAKEUP").encode('ascii')) if( arduino.readline() != ("ACK").encode('ascii') ): sys.exit("no initial ACK from programmer") print("received initial ACK") for block in range(1, 131073): # 256Mb is 131072 x 256B blocks blockData = dataFile.read(256) checksum = 256 - (sum(blockData) % 256) # checksum when added to summed data should result in 0 arduino.write(blockData) # send the data arduino.write(checksum) # send the checksum if( arduino.readline() != ("ACK").encode('ascii') ): # wait for the block to be processed sys.exit("Failed to complete data transfer") print("Block = " + str(block) + " sent succesfully" ) arduino.close() dataFile.close() Instead of sending the data the program just sends 0x00 over and over. If I modify the code to read a single byte of data from the file at a time and send the data one byte at a time it works fine. Please can anyone advise me what I'm doing wrong with the code shown above?
[ "I worked on this some more to fix some checksum-related bugs and the following code works:\n import serial, time, sys\n\ntry:\n dataFile = open(sys.argv[1], \"rb\")\nexcept IOError:\n sys.exit(\"file cannot be opened\") \narduino = serial.Serial('/dev/cu.usbmodem2101', 115200, timeout=1)\ntime.sleep(1) # give the connection a second to settle\narduino.write((\"WAKEUP\").encode('ascii'))\nif( arduino.readline() != (\"ACK\").encode('ascii') ):\n sys.exit(\"no initial ACK from programmer\") \nprint(\"received initial ACK\")\nfor block in range(0, 16384): # 256Mb is 16384 x 2048B blocks\n blockData = dataFile.read(2048)\n checksum = (256 - (sum(blockData) % 256)) % 256 # checksum when added to summed data should result in 0\n arduino.write((blockData)) # send data\n if( checksum == 0) :\n arduino.write(bytearray([0]))\n else :\n arduino.write(checksum.to_bytes((checksum.bit_length() + 7) // 8, 'big')) # send the checksum\n if( arduino.readline() != (\"ACK\").encode('ascii') ): # wait for the block to be processed\n sys.exit(\"Failed to complete data transfer\")\n print(\"Block = \" + str(block) + \" sent succesfully\" )\narduino.close()\ndataFile.close()\n\nDon't really understand why it suddenly decided to work.\n" ]
[ 0 ]
[]
[]
[ "bytestream", "pyserial", "python", "serial_port" ]
stackoverflow_0074577735_bytestream_pyserial_python_serial_port.txt
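A small hedged sketch of the checksum handling that also sidesteps the to_bytes special-casing in the answer: (-s) % 256 already yields 0 when the block sum is a multiple of 256, and pyserial's write() expects a bytes-like object, not a plain int (which is likely why sending the raw checksum integer misbehaved):

def block_checksum(block: bytes) -> bytes:
    # two's-complement style: (sum(block) + checksum) % 256 == 0
    return bytes([(-sum(block)) % 256])

data = bytes([0x10, 0x20, 0xF0])
cs = block_checksum(data)
assert (sum(data) + cs[0]) % 256 == 0
# arduino.write(data); arduino.write(cs)   # send bytes, never an int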
Q: How to print out variable name instead of value? If I have a list of variables and each variable is assigned to an equation, how can I print the variable itself from the list not the result of the equation For example: x = 1 + 1 y = 2 + 2 z = 3 + 3 list = [x, y, z] print(list[0]) print(list[1]) print(list[2]) Should print out: x y z Instead of: 2 4 6 A: print() giving you a values of list[0], or list[1] or list[2] You gave them values 2,4,6 at the start of your app x = 1 + 1 y = 2 + 2 z = 3 + 3 if you want to get x,y,z try this: x = "x" y = "y" z = "z" A: in that case, you will need to change your list to a string, otherwise it will think that it is a variable and print the value assigned to it. to fix this you change line 5 to : list = ["x","y","z"] -> this way it will print out the string, not the value of the variable x A: The answers here are obviously correct, but perhaps they miss the real point. I presume you want to be able to show/use both the names and the values, for instance print something like "the sum of x, y, z is 12". If this is the case you may want to work with Python's builtin namespaces, but the simplest thing is to use a dictionary where your names will be the keys and your values... well, the values: my_dict = {} my_dict['x'] = 1 + 1 my_dict['y'] = 2 + 2 my_dict['z'] = 3 + 3 ','.join(my_dict.keys()) #'x,y,z' sum(my_dict.values()) # 12 A: From your first question i didn't understand well what do you need I think this can work: def dna_content(seq): A = ["A", (seq.count("A") / len(seq)) * 100] T = ["T", (seq.count("T") / len(seq)) * 100] G = ["G", (seq.count("G") / len(seq)) * 100] C = ["C", (seq.count("C") / len(seq)) * 100] bases = [A, T, G, C] for i in bases: print(str(i[0]) + " content is: " + str(i[1])) And... i couldn't find how to acces name of variable through it's value UPD: try this: x = 1 + 1 y = 2 + 2 z = 3 + 3 list = ["x", "y", "z"] for i in list: print(i, globals()[i])
How to print out variable name instead of value?
If I have a list of variables and each variable is assigned to an equation, how can I print the variable itself from the list not the result of the equation For example: x = 1 + 1 y = 2 + 2 z = 3 + 3 list = [x, y, z] print(list[0]) print(list[1]) print(list[2]) Should print out: x y z Instead of: 2 4 6
[ "print() giving you a values of list[0], or list[1] or list[2]\nYou gave them values 2,4,6 at the start of your app\nx = 1 + 1\ny = 2 + 2\nz = 3 + 3\n\nif you want to get x,y,z try this:\nx = \"x\"\ny = \"y\"\nz = \"z\"\n\n", "in that case, you will need to change your list to a string, otherwise it will think that it is a variable and print the value assigned to it. to fix this you change line 5 to : list = [\"x\",\"y\",\"z\"] -> this way it will print out the string, not the value of the variable x\n", "The answers here are obviously correct, but perhaps they miss the real point. I presume you want to be able to show/use both the names and the values, for instance print something like \"the sum of x, y, z is 12\".\nIf this is the case you may want to work with Python's builtin namespaces, but the simplest thing is to use a dictionary where your names will be the keys and your values... well, the values:\nmy_dict = {}\nmy_dict['x'] = 1 + 1\nmy_dict['y'] = 2 + 2\nmy_dict['z'] = 3 + 3\n\n','.join(my_dict.keys()) #'x,y,z'\nsum(my_dict.values()) # 12\n\n", "From your first question i didn't understand well what do you need\nI think this can work:\ndef dna_content(seq):\n A = [\"A\", (seq.count(\"A\") / len(seq)) * 100]\n\n T = [\"T\", (seq.count(\"T\") / len(seq)) * 100]\n G = [\"G\", (seq.count(\"G\") / len(seq)) * 100]\n C = [\"C\", (seq.count(\"C\") / len(seq)) * 100]\n bases = [A, T, G, C]\n for i in bases:\n print(str(i[0]) + \" content is: \" + str(i[1]))\n\nAnd... i couldn't find how to acces name of variable through it's value\nUPD:\ntry this:\nx = 1 + 1\ny = 2 + 2\nz = 3 + 3\nlist = [\"x\", \"y\", \"z\"]\n\n\nfor i in list:\n print(i, globals()[i])\n\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "debugging", "python" ]
stackoverflow_0074589346_debugging_python.txt
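For the debugging use case behind this question, Python 3.8+ f-strings can echo both the expression and its value; a short sketch alongside the dictionary approach from the answers:

x = 1 + 1
y = 2 + 2
print(f"{x=} {y=}")   # prints: x=2 y=4  (self-documenting expressions, 3.8+)

values = {"x": x, "y": y}   # explicit name -> value mapping, no globals() needed
for name, value in values.items():
    print(name, value)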
Q: python function question about a list of objects Calculus exam results are announced and your result is above the announced average. You want to have an idea about the letter grade you will get, and to do this you ask everyone you know about their exam results. You want to calculate the median of the results, which will give you some more idea about the distribution of the grades so that you can take a better guess about your letter grade. Write a function named find_median that takes a list of integers as the distribution of the grades and returns an integer as the median of this distribution. If the list has an even length, the function must return the higher of the two middle elements. Hint: You can use the built-in function sorted() to get the grades in the increasing order. thank you for your help in advance. A: You can test with doc test. def find_median(lists): """ >>> find_median([1]) 1 >>> find_median([3,1]) 3 >>> find_median([1,2,3]) 2 >>> find_median([1,6,3,5]) 5 >>> find_median([5,3,5]) 5 """ copied = lists[:] copied.sort() length = len(copied) if length % 2 == 0: return copied[int(length / 2)] else: return copied[int(length / 2)] A: First sort the list then get its length. If the length is even, return the higher of the two middle elements as stated in the question, else return the median. Solution def find_median(arr): arr.sort() size = len(arr) if size % 2 == 0: return arr[size // 2] return arr[(size - 1) // 2] Usage arr = [2, 1, 6, 5, 8] print(find_median(arr)) arr = [2, 1, 6, 4] print(find_median(arr)) Output 5 4
python function question about a list of objects
Calculus exam results are announced and your result is above the announced average. You want to have an idea about the letter grade you will get, and to do this you ask everyone you know about their exam results. You want to calculate the median of the results, which will give you some more idea about the distribution of the grades so that you can take a better guess about your letter grade. Write a function named find_median that takes a list of integers as the distribution of the grades and returns an integer as the median of this distribution. If the list has an even length, the function must return the higher of the two middle elements. Hint: You can use the built-in function sorted() to get the grades in the increasing order. thank you for your help in advance.
[ "You can test with doc test.\ndef find_median(lists):\n \"\"\"\n >>> find_median([1])\n 1\n >>> find_median([3,1])\n 3\n >>> find_median([1,2,3])\n 2\n >>> find_median([1,6,3,5])\n 5\n >>> find_median([5,3,5])\n 5\n \"\"\"\n copied = lists[:]\n copied.sort()\n length = len(copied)\n if length % 2 == 0:\n return copied[int(length / 2)]\n else:\n return copied[int(length / 2)]\n\n", "First sort the list then get its length. Check if the length is even if yes return the highest number(median) as stated in the question else return the median.\nSolution\ndef find_median(arr):\n arr.sort()\n size = len(arr)\n if size % 2 == 0:\n return arr[size // 2]\n return arr[(size - 1) // 2]\n\nUsage\narr = [2, 1, 6, 5, 8]\n\nprint(find_median(arr))\n\narr = [2, 1, 6, 4]\nprint(find_median(arr))\n\nOutput\n5\n4\n\n" ]
[ 0, 0 ]
[]
[]
[ "function", "list", "python" ]
stackoverflow_0074589728_function_list_python.txt
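Since (n - 1) // 2 equals n // 2 for odd n, both branches in the answers collapse to a single index; a minimal sketch, cross-checked against the statistics module for the odd case:

import statistics

def find_median(grades):
    # odd length: the true median; even length: the higher middle element
    return sorted(grades)[len(grades) // 2]

assert find_median([1, 2, 3]) == statistics.median([1, 2, 3]) == 2
assert find_median([1, 6, 3, 5]) == 5   # even: higher of the middle pair (3, 5)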
Q: Find how many times a string appears within elements of a list in Python? If I have a list of strings such as this: names = ["Alice", "Bob", "Charlie", "Darren"] How would I find how many of these strings contain the letter 'a'? I tried using the count function names.count("a") But this only outputs the number of elements that were 'a' rather than those that contained 'a'. A: A list comprehension can be used to determine the no. of elements with letter 'a' in them. print(len([x for x in names if 'a' in x])) O/P: 2 A: We can use a loop: names = ["Alice", "Bob", "Charlie", "Darren"] count=0 for i in range(len(names)): if 'a' in list(names[i]): count+=1 print(count) Output: >>> 2 A: Generally, if you want to count how many elements in a list satisfy a condition, you have to iterate the list, that is, check the condition for every element and count successful tries: names = ["Alice", "Bob", "Charlie", "Darren"] count = 0 for name in names: if 'a' in name: count += 1 print(count) On a more advanced note, recall that boolean values (like a in b) are actually integers in Python and that there's a built-in sum function to which you can pass a generator expression. Combining these ideas, the solution is as simple as: print(sum('a' in name for name in names))
Find how many times a string appears within elements of a list in Python?
If I have a list of strings such as this: names = ["Alice", "Bob", "Charlie", "Darren"] How would I find how many of these strings contain the letter 'a'? I tried using the count function names.count("a") But this only outputs the number of elements that were 'a' rather than those that contained 'a'.
[ "A list comprehension can be used to determine the no. of elements with letter 'a' in them.\nprint(len([x for x in names if 'a' in x]))\n\nO/P: 2\n", "We can use a loop:\nnames = [\"Alice\", \"Bob\", \"Charlie\", \"Darren\"]\n\ncount=0\n\nfor i in range(len(names)):\n if 'a' in list(names[i]):\n count+=1\n \nprint(count)\n\nOutput:\n>>> 2 \n\n", "Generally, if you want to count how many elements in a list satisfy a condition, you have to iterate the list, that is, check the condition for every element and count successful tries:\nnames = [\"Alice\", \"Bob\", \"Charlie\", \"Darren\"]\ncount = 0\n\nfor name in names:\n if 'a' in name:\n count += 1\n\nprint(count)\n\nOn a more advanced note, recall that boolean values (like a in b) are actually integers in Python and that there's a built-in sum function to which you can pass a generator expression. Combining these ideas, the solution is as simple as:\nprint(sum('a' in name for name in names))\n\n" ]
[ 1, 0, 0 ]
[]
[]
[ "list", "python" ]
stackoverflow_0074589773_list_python.txt
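One caveat worth noting about the answers: the membership test is case-sensitive, so "Alice" does not count for a lowercase 'a'. A sketch of both behaviours:

names = ["Alice", "Bob", "Charlie", "Darren"]
print(sum('a' in name for name in names))           # 2: Charlie, Darren
print(sum('a' in name.lower() for name in names))   # 3: Alice now matches too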
Q: Set a module's class method as an attribute of that module from outside that module The objective: I have a package with submodules that I would like to be accessible in the most straightforward way possible. The submodules contain classes to take advantage of the class structure, but don't need to be initialized (as they contain static and class methods). So, ideally, I would like to access them as follows: from myPackage.subModule import someMethod print (someMethod) from myPackage import subModule print (subModule.someMethod) import myPackage print(myPackage.subModule.someMethod) Here is the package structure: myPackage ─┐ __init__.py subModule subModule2 etc. Example of a typical submodule: # submodule.py class SomeClass(): someAttr = list(range(10)) @classmethod def someMethod(cls): pass @staticmethod def someMethod2(): pass Here is the code I have in my '__init __.py': In order to achieve the above; it attempts to set attributes for each class at the package level, and the same for it's methods at the sub-module level. # __init__.py def import_submodules(package, filetypes=('py', 'pyc', 'pyd'), ignoreStartingWith='_'): '''Import submodules to the given package, expose any classes at the package level and their respective class methods at submodule level. :Parameters: package (str)(obj) = A python package. filetypes (str)(tuple) = Filetype extension(s) to include. ignoreStartingWith (str)(tuple) = Ignore submodules starting with given chars. ''' if isinstance(package, str): package = sys.modules[package] if not package: return pkg_dir = os.path.dirname(os.path.abspath(package.__file__)) sys.path.append(pkg_dir) #append this dir to the system path. for mod_name in os.listdir(pkg_dir): if mod_name.startswith(ignoreStartingWith): continue elif os.path.isfile(os.path.join(pkg_dir, mod_name)): mod_name, *mod_ext = mod_name.rsplit('.', 1) if filetypes: if not mod_ext or mod_ext[0] not in filetypes: continue mod = importlib.import_module(mod_name) vars(package)[mod_name] = mod classes = inspect.getmembers(mod, inspect.isclass) for cls_name, clss in classes: vars(package)[cls_name] = clss methods = inspect.getmembers(clss, inspect.isfunction) for method_name, method in methods: vars(mod)[method_name] = method del mod_name import_submodules(__name__) At issue is this line: vars(mod)[method_name] = method Which ultimately results in: (indicating that the attribute was not set) from myPackage.subModule import someMethod ImportError: cannot import name 'someMethod' from 'myPackage.subModule' I am able to set the methods as attributes to the module within that module, but setting them from outside (ie. in the package __init __), isn't working as written. I understand this isn't ideal to begin with, but my current logic is; that the ease of use, outweighs any perceived issues with namespace pollution. I am, of course, always open to counter-arguments. A: I just checked it on my machine. Created a package myPackage with a module subModule that has a function someMethod. I run a python shell with working directory in the same directory that the myPackage is in, and to get these 3 import statements to work: from myPackage.subModule import someMethod from myPackage import subModule import myPackage All I had to do was to create an __init__.py with this line in it: from . 
import subModule A: Found a nice "hacky" solution - subModule.py: class myClass: @staticmethod def someMethod(): print("I have a bad feeling about this") myInstance = myClass() someMethod = myInstance.someMethod __init__.py is empty A: Still scratching my head about why I am unable to do this from the package __init__, but this solution works with the caveat it has to be called at the end of each submodule. Perhaps someone can, in the future, chime in as to why this wasn't working when completely contained in the __init__. def addMembers(module, ignoreStartingWith='_'): '''Expose class members at module level. :Parameters: module (str)(obj) = A python module. ignoreStartingWith (str)(tuple) = Ignore class members starting with given chars. ex. call: addMembers(__name__) ''' if isinstance(module, str): module = sys.modules[module] if not module: return classes = inspect.getmembers(module, inspect.isclass) for cls_name, clss in classes: cls_members = [(o, getattr(clss, o)) for o in dir(clss) if not o.startswith(ignoreStartingWith)] for name, mem in cls_members: vars(module)[name] = mem
Set a module's class method as an attribute of that module from outside that module
The objective: I have a package with submodules that I would like to be accessible in the most straightforward way possible. The submodules contain classes to take advantage of the class structure, but don't need to be initialized (as they contain static and class methods). So, ideally, I would like to access them as follows: from myPackage.subModule import someMethod print (someMethod) from myPackage import subModule print (subModule.someMethod) import myPackage print(myPackage.subModule.someMethod) Here is the package structure: myPackage ─┐ __init__.py subModule subModule2 etc. Example of a typical submodule: # submodule.py class SomeClass(): someAttr = list(range(10)) @classmethod def someMethod(cls): pass @staticmethod def someMethod2(): pass Here is the code I have in my '__init __.py': In order to achieve the above; it attempts to set attributes for each class at the package level, and the same for it's methods at the sub-module level. # __init__.py def import_submodules(package, filetypes=('py', 'pyc', 'pyd'), ignoreStartingWith='_'): '''Import submodules to the given package, expose any classes at the package level and their respective class methods at submodule level. :Parameters: package (str)(obj) = A python package. filetypes (str)(tuple) = Filetype extension(s) to include. ignoreStartingWith (str)(tuple) = Ignore submodules starting with given chars. ''' if isinstance(package, str): package = sys.modules[package] if not package: return pkg_dir = os.path.dirname(os.path.abspath(package.__file__)) sys.path.append(pkg_dir) #append this dir to the system path. for mod_name in os.listdir(pkg_dir): if mod_name.startswith(ignoreStartingWith): continue elif os.path.isfile(os.path.join(pkg_dir, mod_name)): mod_name, *mod_ext = mod_name.rsplit('.', 1) if filetypes: if not mod_ext or mod_ext[0] not in filetypes: continue mod = importlib.import_module(mod_name) vars(package)[mod_name] = mod classes = inspect.getmembers(mod, inspect.isclass) for cls_name, clss in classes: vars(package)[cls_name] = clss methods = inspect.getmembers(clss, inspect.isfunction) for method_name, method in methods: vars(mod)[method_name] = method del mod_name import_submodules(__name__) At issue is this line: vars(mod)[method_name] = method Which ultimately results in: (indicating that the attribute was not set) from myPackage.subModule import someMethod ImportError: cannot import name 'someMethod' from 'myPackage.subModule' I am able to set the methods as attributes to the module within that module, but setting them from outside (ie. in the package __init __), isn't working as written. I understand this isn't ideal to begin with, but my current logic is; that the ease of use, outweighs any perceived issues with namespace pollution. I am, of course, always open to counter-arguments.
[ "I just checked it on my machine.\nCreated a package myPackage with a module subModule that has a function someMethod.\nI run a python shell with working directory in the same directory that the myPackage is in, and to get these 3 import statements to work:\nfrom myPackage.subModule import someMethod\n\nfrom myPackage import subModule\n\nimport myPackage\n\nAll I had to do was to create an __init__.py with this line in it:\nfrom . import subModule\n\n", "Found a nice \"hacky\" solution -\nsubModule.py:\nclass myClass:\n\n @staticmethod\n def someMethod():\n print(\"I have a bad feeling about this\")\n\nmyInstance = myClass()\nsomeMethod = myInstance.someMethod\n\ninit.py is empty\n", "Still scratching my head of why I am unable to do this from the package __init __, but this solution works with the caveat it has to be called at the end of each submodule. Perhaps someone, in the future, someone can chime in as to why this wasn't working when completely contained in the __init __.\ndef addMembers(module, ignoreStartingWith='_'):\n '''Expose class members at module level.\n\n :Parameters:\n module (str)(obj) = A python module.\n ignoreStartingWith (str)(tuple) = Ignore class members starting with given chars.\n\n ex. call: addMembers(__name__)\n '''\n if isinstance(module, str):\n module = sys.modules[module]\n if not module:\n return\n\n classes = inspect.getmembers(module, inspect.isclass)\n\n for cls_name, clss in classes:\n cls_members = [(o, getattr(clss, o)) for o in dir(clss) if not o.startswith(ignoreStartingWith)]\n for name, mem in cls_members:\n vars(module)[name] = mem\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074547307_python_python_3.x.txt
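A hedged, reflection-free sketch of the second answer's idea: re-export the members explicitly inside each submodule, so all three import styles from the question work without any __init__.py magic:

# subModule.py
class SomeClass:
    someAttr = list(range(10))

    @classmethod
    def someMethod(cls):
        return cls.__name__

    @staticmethod
    def someMethod2():
        return "static"

# module-level aliases: `from myPackage.subModule import someMethod` now works
someMethod = SomeClass.someMethod
someMethod2 = SomeClass.someMethod2
__all__ = ["SomeClass", "someMethod", "someMethod2"]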
Q: Python Async Limit Concurrent coroutines per second My use case is the following : I’m using python 3.8 I have an async function analyse_doc that is a wrapper for a http request to a web service. I have approx 1000 docs to analyse as fast as possible. The service allows for 15 transaction per second (and not 15 concurrent request at any second). So first sec I can send 15, then 2nd sec I can send 15 again and so on. If I try to hit the service more than 15 times per sec I get 429 error msg or sometimes 503/504 error (server is busy…) My question is : is it possible to implement smt in python that effectively sends 15 requests per sec asynchronously then wait 1 sec then do it again until the queue is empty. Also some tasks might fail. Those failing tasks might need a rerun at some point. So far my code is the following (unbounded parallelism… not even a semaphore) but it handles retry. tasks = {asyncio.create_task(analyse_async(doc)): doc for doc in documents} pending = set(tasks) # Handle retry while pending: # backoff in case of 429 time.sleep(1) # concurrent call return_when all completed finished, pending = await asyncio.wait( pending, return_when=asyncio.ALL_COMPLETED ) # check if task has exception and register for new run. for task in finished: arg = tasks[task] if task.exception(): new_task = asyncio.create_task(analyze_doc(doc)) tasks[new_task] = doc pending.add(new_task) A: You could try adding another sleep tasks into the mix to drive the request generation. Something like this import asyncio import random ONE_SECOND = 1 CONCURRENT_TASK_LIMIT = 2 TASKS_TO_CREATE = 10 loop = asyncio.new_event_loop() work_todo = [] work_in_progress = [] # just creates arbitrary work to do def create_tasks(): for i in range(TASKS_TO_CREATE): work_todo.append(worker_task(i)) # muddle this up to see how drain works random.shuffle(work_todo) # represents the actual work async def worker_task(index): print(f"i am worker {index} and i am starting") await asyncio.sleep(index) print(f"i am worker {index} and i am done") # gets the next 'concurrent' workload segment (if there is one) def get_next_tasks(): todo = [] i = 0 while i < CONCURRENT_TASK_LIMIT and len(work_todo) > 0: todo.append(work_todo.pop()) i += 1 return todo # drains down any outstanding tasks and closes the loop async def are_we_done_yet(): print('draining') await asyncio.gather(*work_in_progress) loop.stop() # closes out the program print('done') # puts work on the queue every tick (1 second) async def work(): next_tasks = get_next_tasks() if len(next_tasks) > 0: print(f'found {len(next_tasks)} tasks to do') for task in next_tasks: # schedules the work, puts it in the in-progress pile work_in_progress.append(loop.create_task(task)) # this is the 'tick' or speed work gets scheduled on await asyncio.sleep(ONE_SECOND) # every 'tick' we add this tasks onto the loop again unless there isn't any more to do... loop.create_task(work()) else: # ... 
if there isn't any to do we just enter drain mode await are_we_done_yet() # bootstrap the process create_tasks() loop.create_task(work()) loop.run_forever() Updated version with a simulated exception import asyncio import random ONE_SECOND = 1 CONCURRENT_TASK_LIMIT = 2 TASKS_TO_CREATE = 10 loop = asyncio.new_event_loop() work_todo = [] work_in_progress = [] # just creates arbitrary work to do def create_tasks(): for i in range(TASKS_TO_CREATE): work_todo.append(worker_task(i)) # muddle this up to see how drain works random.shuffle(work_todo) # represents the actual work async def worker_task(index): try: print(f"i am worker {index} and i am starting") await asyncio.sleep(index) if index % 9 == 0: print('simulating error') raise NotImplementedError("some error happened") print(f"i am worker {index} and i am done") except: # put this work back on the pile (fudge the index so it doesn't throw this time) work_todo.append(worker_task(index + 1)) # gets the next 'concurrent' workload segment (if there is one) def get_next_tasks(): todo = [] i = 0 while i < CONCURRENT_TASK_LIMIT and len(work_todo) > 0: todo.append(work_todo.pop()) i += 1 return todo # drains down any outstanding tasks and closes the loop async def are_we_done_yet(): print('draining') await asyncio.gather(*work_in_progress) if (len(work_todo)) > 0: loop.create_task(work()) print('found some retries') else: loop.stop() # closes out the program print('done') # puts work on the queue every tick (1 second) async def work(): next_tasks = get_next_tasks() if len(next_tasks) > 0: print(f'found {len(next_tasks)} tasks to do') for task in next_tasks: # schedules the work, puts it in the in-progress pile work_in_progress.append(loop.create_task(task)) # this is the 'tick' or speed work gets scheduled on await asyncio.sleep(ONE_SECOND) # every 'tick' we add this tasks onto the loop again unless there isn't any more to do... loop.create_task(work()) else: # ... if there isn't any to do we just enter drain mode await are_we_done_yet() # bootstrap the process create_tasks() loop.create_task(work()) loop.run_forever() This just simulates something going wrong and re-queues the failed task. If the error happens after the main work method has finished it won't get re-queued so in the are-we-there-yet method it would need to check and rerun any failed tasks - this isn't particularly optimal as it'll wait to drain before checking everything else but gives you an idea of an implementation
Python Async Limit Concurrent coroutines per second
My use case is the following : I’m using python 3.8 I have an async function analyse_doc that is a wrapper for a http request to a web service. I have approx 1000 docs to analyse as fast as possible. The service allows for 15 transactions per second (and not 15 concurrent requests at any second). So first sec I can send 15, then 2nd sec I can send 15 again and so on. If I try to hit the service more than 15 times per sec I get 429 error msg or sometimes 503/504 error (server is busy…) My question is : is it possible to implement something in python that effectively sends 15 requests per sec asynchronously then wait 1 sec then do it again until the queue is empty. Also some tasks might fail. Those failing tasks might need a rerun at some point. So far my code is the following (unbounded parallelism… not even a semaphore) but it handles retry. tasks = {asyncio.create_task(analyse_async(doc)): doc for doc in documents} pending = set(tasks) # Handle retry while pending: # backoff in case of 429 time.sleep(1) # concurrent call return_when all completed finished, pending = await asyncio.wait( pending, return_when=asyncio.ALL_COMPLETED ) # check if task has exception and register for new run. for task in finished: arg = tasks[task] if task.exception(): new_task = asyncio.create_task(analyze_doc(doc)) tasks[new_task] = doc pending.add(new_task)
[ "You could try adding another sleep tasks into the mix to drive the request generation. Something like this\nimport asyncio\nimport random\n\nONE_SECOND = 1\nCONCURRENT_TASK_LIMIT = 2\nTASKS_TO_CREATE = 10\n\nloop = asyncio.new_event_loop()\n\nwork_todo = []\nwork_in_progress = []\n\n# just creates arbitrary work to do\ndef create_tasks():\n for i in range(TASKS_TO_CREATE):\n work_todo.append(worker_task(i))\n\n # muddle this up to see how drain works\n random.shuffle(work_todo)\n\n# represents the actual work\nasync def worker_task(index):\n print(f\"i am worker {index} and i am starting\")\n await asyncio.sleep(index)\n print(f\"i am worker {index} and i am done\")\n\n# gets the next 'concurrent' workload segment (if there is one)\ndef get_next_tasks():\n todo = []\n\n i = 0\n\n while i < CONCURRENT_TASK_LIMIT and len(work_todo) > 0:\n todo.append(work_todo.pop())\n i += 1\n\n return todo\n\n# drains down any outstanding tasks and closes the loop\nasync def are_we_done_yet():\n print('draining')\n \n await asyncio.gather(*work_in_progress)\n\n loop.stop()\n \n # closes out the program\n print('done')\n\n# puts work on the queue every tick (1 second)\nasync def work():\n next_tasks = get_next_tasks()\n if len(next_tasks) > 0:\n print(f'found {len(next_tasks)} tasks to do')\n for task in next_tasks:\n # schedules the work, puts it in the in-progress pile\n work_in_progress.append(loop.create_task(task))\n\n # this is the 'tick' or speed work gets scheduled on\n await asyncio.sleep(ONE_SECOND)\n \n # every 'tick' we add this tasks onto the loop again unless there isn't any more to do...\n loop.create_task(work())\n else:\n # ... if there isn't any to do we just enter drain mode\n await are_we_done_yet()\n\n# bootstrap the process\ncreate_tasks()\nloop.create_task(work())\nloop.run_forever()\n\n\n\nUpdated version with a simulated exception\nimport asyncio\nimport random\n\nONE_SECOND = 1\nCONCURRENT_TASK_LIMIT = 2\nTASKS_TO_CREATE = 10\n\nloop = asyncio.new_event_loop()\n\nwork_todo = []\nwork_in_progress = []\n\n# just creates arbitrary work to do\ndef create_tasks():\n for i in range(TASKS_TO_CREATE):\n work_todo.append(worker_task(i))\n\n # muddle this up to see how drain works\n random.shuffle(work_todo)\n\n# represents the actual work\nasync def worker_task(index):\n try:\n print(f\"i am worker {index} and i am starting\")\n await asyncio.sleep(index)\n\n if index % 9 == 0:\n print('simulating error')\n raise NotImplementedError(\"some error happened\")\n\n print(f\"i am worker {index} and i am done\")\n except:\n # put this work back on the pile (fudge the index so it doesn't throw this time)\n work_todo.append(worker_task(index + 1))\n \n\n# gets the next 'concurrent' workload segment (if there is one)\ndef get_next_tasks():\n todo = []\n\n i = 0\n\n while i < CONCURRENT_TASK_LIMIT and len(work_todo) > 0:\n todo.append(work_todo.pop())\n i += 1\n\n return todo\n\n# drains down any outstanding tasks and closes the loop\nasync def are_we_done_yet():\n print('draining')\n \n await asyncio.gather(*work_in_progress)\n\n if (len(work_todo)) > 0:\n loop.create_task(work())\n print('found some retries')\n else:\n loop.stop()\n # closes out the program\n print('done')\n \n \n\n# puts work on the queue every tick (1 second)\nasync def work():\n next_tasks = get_next_tasks()\n if len(next_tasks) > 0:\n print(f'found {len(next_tasks)} tasks to do')\n for task in next_tasks:\n # schedules the work, puts it in the in-progress pile\n work_in_progress.append(loop.create_task(task))\n\n # this is 
the 'tick' or speed work gets scheduled on\n await asyncio.sleep(ONE_SECOND)\n \n # every 'tick' we add this tasks onto the loop again unless there isn't any more to do...\n loop.create_task(work())\n else:\n # ... if there isn't any to do we just enter drain mode\n await are_we_done_yet()\n\n# bootstrap the process\ncreate_tasks()\nloop.create_task(work())\nloop.run_forever()\n\nThis just simulates something going wrong and re-queues the failed task. If the error happens after the main work method has finished it won't get re-queued so in the are-we-there-yet method it would need to check and rerun any failed tasks - this isn't particularly optimal as it'll wait to drain before checking everything else but gives you an idea of an implementation\n" ]
[ 1 ]
[]
[]
[ "asynchronous", "python", "python_3.x", "python_asyncio" ]
stackoverflow_0074585677_asynchronous_python_python_3.x_python_asyncio.txt
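A minimal sketch of the 15-per-second batching the question asks for, with failed documents folded back into the queue for a later second; note it uses await asyncio.sleep, since the time.sleep in the question's snippet would block the event loop. Here analyse is assumed to be the caller's coroutine (the analyse_doc wrapper):

import asyncio

RATE = 15   # transactions allowed per second

async def run_rate_limited(docs, analyse):
    results, pending = [], list(docs)
    while pending:
        batch, pending = pending[:RATE], pending[RATE:]
        started = asyncio.get_running_loop().time()
        outcomes = await asyncio.gather(*(analyse(d) for d in batch),
                                        return_exceptions=True)
        for doc, res in zip(batch, outcomes):
            if isinstance(res, Exception):
                pending.append(doc)        # failed -> retry in a later second
            else:
                results.append(res)
        elapsed = asyncio.get_running_loop().time() - started
        await asyncio.sleep(max(0.0, 1.0 - elapsed))   # never exceed RATE per second
    return results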
Q: How do I cross time series in pandas? Let's say I've got a dataframe with an integer index: pd.DataFrame([[4,5],[7,8],[9,10]],columns=['a','b']) a b 0 4 5 1 7 8 2 9 10 I'd like to create a matrix of ratios for each of a cross b, for each index, so I get a series of matrices of the form: a/a a/b b/a b/b for each index. Ultimately, I'll want to unfurl these into four columns. Is there an easy way? If there's an easy numpy solution doing this, that might be better. A: Easy way: pd.DataFrame({f'{x}/{y}': df[x] / df[y] for x in df for y in df}) Slightly complicated way (might be faster if you have large number of columns): a = df.values[None].T / df.values pd.DataFrame(np.hstack(a), columns=(f'{x}/{y}' for x in df for y in df)) Result a/a a/b b/a b/b 0 1.0 0.800 1.250000 1.0 1 1.0 0.875 1.142857 1.0 2 1.0 0.900 1.111111 1.0
How do I cross time series in pandas?
Let's say I've got a dataframe with an integer index: pd.DataFrame([[4,5],[7,8],[9,10]],columns=['a','b']) a b 0 4 5 1 7 8 2 9 10 I'd like to create a matrix of ratios for each of a cross b, for each index, so I get a series of matrices of the form: a/a a/b b/a b/b for each index. Ultimately, I'll want to unfurl these into four columns. Is there an easy way? If there's an easy numpy solution doing this, that might be better.
[ "Easy way:\npd.DataFrame({f'{x}/{y}': df[x] / df[y] for x in df for y in df})\n\nSlightly complicated way (might be faster if you have large number of columns):\na = df.values[None].T / df.values\npd.DataFrame(np.hstack(a), columns=(f'{x}/{y}' for x in df for y in df))\n\nResult\n a/a a/b b/a b/b\n0 1.0 0.800 1.250000 1.0\n1 1.0 0.875 1.142857 1.0\n2 1.0 0.900 1.111111 1.0\n\n" ]
[ 2 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074589778_pandas_python.txt
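The same result can also be computed with one broadcast division in NumPy; a sketch where mats[i] is the full 2x2 ratio matrix for row i before it is unfurled into the four columns:

import numpy as np
import pandas as pd

df = pd.DataFrame([[4, 5], [7, 8], [9, 10]], columns=['a', 'b'])
vals = df.to_numpy(dtype=float)              # shape (3, 2)
mats = vals[:, :, None] / vals[:, None, :]   # shape (3, 2, 2): mats[i][j][k] = col_j / col_k
print(mats[0])                               # [[1.0, 0.8], [1.25, 1.0]]
flat = pd.DataFrame(mats.reshape(len(df), -1),
                    columns=[f'{x}/{y}' for x in df.columns for y in df.columns])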
Q: Method that acts like a static method AND a regular method I was wondering if there is a way for a class to define a method that behaves like a static method (can be called without an instance variable) and a regular method (can be called with an instance variable). Im making an RSA module that would help me solve RSA problems, the initialization goes like this: class RSA: def __init__(self, n: int, e: int, c: int, p=None, q=None, phi=None): self.n = n self.e = e self.c = c self.p = p self.q = q assert p == None or gmpy2.is_prime(p), 'p must be prime' assert q == None or gmpy2.is_prime(q), 'q must be prime' self.phi = phi and in that class, there is a method that would factorize n into p and q which goes like this (the algorithm used is irrelevant so I wont bother explaining): def fermat_factorization(self, n=None): if n == None: n = self.n t_ = gmpy2.isqrt(n)+1 counter = 0 t = t_ + counter temp = gmpy2.isqrt((t * t) - n) while((temp * temp) != ((t * t) - n)): counter += 1 t = t_ + counter temp = gmpy2.isqrt((t * t) - n) s = temp p = t + s q = t - s return p, q that implementation does not work. What I wanted to do is for that method to be dynamic, i.e. can be called externally by simply p, q = RSA.fermat_factorization(n) # n is some large number yet can also be called on an instance like: s1 = RSA(n, 65537, c) # c and n is some large number p, q = s1.fermat_factorization() # without specifying n because it is already an instance attribute A: In python, you use modules for that kind of stuff, not classes: in rsa.py def fermat_factorization(n): """Ordinary function""" class RSA: def fermat_factorization(self): """Method""" return fermat_factorization(self.n) somewhere else: import rsa x = rsa.fermat_factorization(100) obj = rsa.RSA(...) y = obj.fermat_factorization() Having a single function that behaves some way or another depending on how it's called is a recipe for disaster. Don't do that.
Method that acts like a static method AND a regular method
I was wondering if there is a way for a class to define a method that behaves like a static method (can be called without an instance variable) and a regular method (can be called with an instance variable). Im making an RSA module that would help me solve RSA problems, the initialization goes like this: class RSA: def __init__(self, n: int, e: int, c: int, p=None, q=None, phi=None): self.n = n self.e = e self.c = c self.p = p self.q = q assert p == None or gmpy2.is_prime(p), 'p must be prime' assert q == None or gmpy2.is_prime(q), 'q must be prime' self.phi = phi and in that class, there is a method that would factorize n into p and q which goes like this (the algorithm used is irrelevant so I wont bother explaining): def fermat_factorization(self, n=None): if n == None: n = self.n t_ = gmpy2.isqrt(n)+1 counter = 0 t = t_ + counter temp = gmpy2.isqrt((t * t) - n) while((temp * temp) != ((t * t) - n)): counter += 1 t = t_ + counter temp = gmpy2.isqrt((t * t) - n) s = temp p = t + s q = t - s return p, q that implementation does not work. What I wanted to do is for that method to be dynamic, i.e. can be called externally by simply p, q = RSA.fermat_factorization(n) # n is some large number yet can also be called on an instance like: s1 = RSA(n, 65537, c) # c and n is some large number p, q = s1.fermat_factorization() # without specifying n because it is already an instance attribute
[ "In python, you use modules for that kind of stuff, not classes:\nin rsa.py\ndef fermat_factorization(n):\n \"\"\"Ordinary function\"\"\"\n\nclass RSA:\n def fermat_factorization(self):\n \"\"\"Method\"\"\"\n return fermat_factorization(self.n)\n\nsomewhere else:\n import rsa\n\n x = rsa.fermat_factorization(100)\n\n obj = rsa.RSA(...)\n y = obj.fermat_factorization()\n\nHaving a single function that behaves some way or another depending on how it's called is a recipe for disaster. Don't do that.\n" ]
[ 2 ]
[]
[]
[ "oop", "python", "python_3.x" ]
stackoverflow_0074589462_oop_python_python_3.x.txt
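If a single dual-mode callable is still wanted despite the answer's (sound) advice, Python's descriptor protocol can express it; a hypothetical hybridmethod sketch, not a standard-library feature:

import functools

class hybridmethod:
    """Binds to the instance when available, otherwise to the class."""
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        return functools.partial(self.func, obj if obj is not None else objtype)

class RSA:
    def __init__(self, n):
        self.n = n

    @hybridmethod
    def fermat_factorization(self_or_cls, n=None):
        if n is None:
            n = self_or_cls.n    # only valid when called on an instance
        return n                 # ... the actual factorization logic goes here

print(RSA.fermat_factorization(15))      # class call, n passed explicitly -> 15
print(RSA(15).fermat_factorization())    # instance call, falls back to self.n -> 15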
Q: Insertion sort python algorithm: Why do we subtract 1 from i? Here is the code: list_a = [3,2,5,7,4,1] def insertion_sort(list_a): indexing_length = range(1,len(list_a)) for i in indexing_length: value_to_sort = list_a[i] while list_a[i-1] > value_to_sort and i>0: list_a[i], list_a[i-1] = list_a[i-1], list_a[i] i = i - 1 return list_a I understand the logic to the rest of the algorithm but I can't seem to grasp the logic for doing i = i - 1. Can someone explain please? A: Hi and welcome to SO, range(a, b) in python is equivalent to [a, b[ in mathematics with a and b two floating numbers and a < b And range(b) is equivalent to [0, b[ in mathematics. A: in insertion sort, you select each value and go back ward to place in the corresponding place where it is smaller than right part and bigger than left part. probably this gif from wikimedia describes it well. if embedded gif not wokring look at the link : https://upload.wikimedia.org/wikipedia/commons/9/9c/Insertion-sort-example.gif for this reason you need the i = i -1 to go backwrd and place in the correct place. A: Consider an example: arr = [12, 11, 13, 5, 6] The array is virtually split into a sorted and an unsorted part. Values from the unsorted part are picked and placed at the correct position in the sorted part. First Pass: Starting with first two elements 12 11 13 5 6 Here, 12 is greater than 11 hence they are not in the ascending order and 12 is not at its correct position. Thus, swap 11 and 12. So, for now 11 is stored in a sorted sub-array. Second Pass: Now, move to the next two elements and compare them 11 12 13 5 6 Here, 13 is greater than 12, thus both elements seems to be in ascending order, hence, no swapping will occur. 12 also stored in a sorted sub-array along with 11. Third Pass: Now, Moving forward to the next two elements which are 13 and 5 11 12 13 5 6 Both 5 and 13 are not present at their correct place so swap them 11 12 5 13 6 After swapping, elements 12 and 5 are not sorted, thus swap again 11 5 12 13 6 Here, again 11 and 5 are not sorted, hence swap again 5 11 12 13 6 here, it is at its correct position Then we repeat same process in each pass. You can see from example when one pair of elements isn't in the correct order we keep swapping them from the index of current element backwards till they are in the correct order. that's why we set i = i - 1 in the algorithm code. A: Without,i=i-1 the adjacent elements are checked once. so, at the end [2,3,5,7,4,1] would become [2,3,5,4,1,7] . For this (without i = i-1) to work you need to implement double for loop like below and may replace while loop with if condition or leave as it is. def insertion_sort(list_a): indexing_length = range(1,len(list_a)) #this is double for-loop for _ in indexing_length: for i in indexing_length: # print(i) value_to_sort = list_a[i] if list_a[i-1] > value_to_sort and i>0: list_a[i], list_a[i-1] = list_a[i-1], list_a[i] print(list_a) return list_a Actually above method is easy to understand and taught everywhere where we check every two adjacent elements twice, But to reduce the number of comparisons checks we can use while loop with i = i-1.
Insertion sort python algorithm: Why do we subtract 1 from i?
Here is the code: list_a = [3,2,5,7,4,1] def insertion_sort(list_a): indexing_length = range(1,len(list_a)) for i in indexing_length: value_to_sort = list_a[i] while list_a[i-1] > value_to_sort and i>0: list_a[i], list_a[i-1] = list_a[i-1], list_a[i] i = i - 1 return list_a I understand the logic to the rest of the algorithm but I can't seem to grasp the logic for doing i = i - 1. Can someone explain please?
[ "Hi and welcome to SO,\nrange(a, b) in python is equivalent to [a, b[ in mathematics with a and b two floating numbers and a < b\nAnd range(b) is equivalent to [0, b[ in mathematics.\n", "in insertion sort, you select each value and go back ward to place in the corresponding place where it is smaller than right part and bigger than left part.\nprobably this gif from wikimedia describes it well. if embedded gif not wokring look at the link :\nhttps://upload.wikimedia.org/wikipedia/commons/9/9c/Insertion-sort-example.gif\n\nfor this reason you need the i = i -1 to go backwrd and place in the correct place.\n", "Consider an example: arr = [12, 11, 13, 5, 6]\nThe array is virtually split into a sorted and an unsorted part. Values from the unsorted part are picked and placed at the correct position in the sorted part.\nFirst Pass:\nStarting with first two elements\n 12 11 13 5 6 \n\nHere, 12 is greater than 11 hence they are not in the ascending order and 12 is not at its correct position. Thus, swap 11 and 12.\nSo, for now 11 is stored in a sorted sub-array.\nSecond Pass:\nNow, move to the next two elements and compare them\n 11 12 13 5 6 \n\nHere, 13 is greater than 12, thus both elements seems to be in ascending order, hence, no swapping will occur. 12 also stored in a sorted sub-array along with 11.\nThird Pass:\nNow, Moving forward to the next two elements which are 13 and 5\n 11 12 13 5 6 \n\nBoth 5 and 13 are not present at their correct place so swap them\n 11 12 5 13 6 \n\nAfter swapping, elements 12 and 5 are not sorted, thus swap again\n 11 5 12 13 6 \n\nHere, again 11 and 5 are not sorted, hence swap again\n 5 11 12 13 6 \n\nhere, it is at its correct position\nThen we repeat same process in each pass.\nYou can see from example when one pair of elements isn't in the correct order we keep swapping them from the index of current element backwards till they are in the correct order. that's why we set i = i - 1 in the algorithm code.\n", "Without,i=i-1 the adjacent elements are checked once. so, at the end [2,3,5,7,4,1] would become [2,3,5,4,1,7] . For this (without i = i-1) to work you need to implement double for loop like below and may replace while loop with if condition or leave as it is.\n\ndef insertion_sort(list_a):\n indexing_length = range(1,len(list_a))\n #this is double for-loop\n for _ in indexing_length:\n for i in indexing_length:\n # print(i)\n value_to_sort = list_a[i]\n if list_a[i-1] > value_to_sort and i>0:\n list_a[i], list_a[i-1] = list_a[i-1], list_a[i] \n print(list_a)\n \n return list_a\n\nActually above method is easy to understand and taught everywhere where we check every two adjacent elements twice,\nBut to reduce the number of comparisons checks we can use while loop with\ni = i-1.\n" ]
[ 0, 0, 0, 0 ]
[]
[]
[ "algorithm", "insertion_sort", "python" ]
stackoverflow_0074589512_algorithm_insertion_sort_python.txt
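A runnable version of the loop under discussion, with the i > 0 test moved before the index so position 0 is never compared against list_a[-1]; this is a sketch of the same algorithm, not a change to what the answers describe.

def insertion_sort(list_a):
    for i in range(1, len(list_a)):
        # i = i - 1 makes the comparison index follow the element
        # as it is swapped backward into the sorted left part
        while i > 0 and list_a[i - 1] > list_a[i]:
            list_a[i - 1], list_a[i] = list_a[i], list_a[i - 1]
            i = i - 1
    return list_a

print(insertion_sort([3, 2, 5, 7, 4, 1]))  # [1, 2, 3, 4, 5, 7]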
Q: django - DecimalField max_digits, decimal_places explained So I'm just starting out with Django and using it to store forex prices which are represented as 1.21242, 1.20641, etc... model.py from django.db import models # Create your models here. class ForexPrice(models.Model): openPrice = models.DecimalField(max_digits=6, decimal_places=6) highPrice = models.DecimalField(max_digits=6, decimal_places=6) lowPrice = models.DecimalField(max_digits=6, decimal_places=6) closePrice = models.DecimalField(max_digits=6, decimal_places=6) My Question Is: How do the max_digits and decimal_places attributes work? I'm using 6 for both fields assuming max_digits=6 will allow values to be stored up to 6 digits, i.e. 123456.000000, while decimal_places=6 will support values up to 000000.123456. Is this assumption correct or am I missing something? Getting the following error when I save the record to the DB: A field with precision 6, scale 6 must round to an absolute value less than 1. A: max_digits must be equal to, or higher than, decimal_places. If you were to have 123456.654321 you'd have to define max_digits=12, decimal_places=6. max_digits is INCLUDING decimal_places. A: "max_digits" represents the total number of digits in your numbers. "decimal_places" represents the number of digits to the right of the comma. For ex: to store numbers up to 999.99 with a resolution of 2 decimal places, you’d use: models.DecimalField(..., max_digits=5, decimal_places=2) A: Here is a simple analysis For the below expression in your model: number = models.DecimalField(max_digits=6, decimal_places=2) Here is the test & result => 9999.99 (correct) => 99999.99 (error: Ensure that there are no more than 6 digits in total) => 9999.999 (error: Ensure that there are no more than 6 digits in total) => 999.999 (error: Ensure that there are no more than 2 decimal places) => 99999.9 (error: Ensure that there are no more than 4 digits before the decimal point) So, the above example shows that if we use max_digits=6, you can enter 6 digits in total as a field value. As we have used decimal_places=2, we can store two decimal places as values, and the remaining 6-2 = 4 will be the number of digits before the point. In the case of: number = models.DecimalField(max_digits=5, decimal_places=2) We can store two decimal places as values, and the remaining 5-2 = 3 will be the number of digits before the point.
django - DecimalField max_digits, decimal_places explained
So I'm just starting out with Django and using it to store forex prices which are represented as 1.21242, 1.20641, etc... model.py from django.db import models # Create your models here. class ForexPrice(models.Model): openPrice = models.DecimalField(max_digits=6, decimal_places=6) highPrice = models.DecimalField(max_digits=6, decimal_places=6) lowPrice = models.DecimalField(max_digits=6, decimal_places=6) closePrice = models.DecimalField(max_digits=6, decimal_places=6) My Question Is: How do the max_digits and decimal_places attributes work? I'm using 6 for both fields assuming max_digits=6 will allow values to be stored up to 6 digits, i.e. 123456.000000, while decimal_places=6 will support values up to 000000.123456. Is this assumption correct or am I missing something? Getting the following error when I save the record to the DB: A field with precision 6, scale 6 must round to an absolute value less than 1.
[ "max_digits must be equal, or higher than decimal_places.\nIf you were to have 123456.654321 you'd have to define max_digits=12, decimal_places=6.\nmax_digits is INCLUDING decimal_places.\n", "\"max_digits\" represents the number of digits all of your numbers.\n\"decimal_places\" represents the number of digits to the right of the comma.\nFor ex: to store numbers up to 999.99 with a resolution of 2 decimal places, you’d use:\nmodels.DecimalField(..., max_digits=5, decimal_places=2)\n", "Here is a simple analysis\nFor the below expression in your model:\nnumber = models.DecimalField(max_digits=6, decimal_places=2)\n\nHere is the test & result\n=> 9999.99 (correct)\n=> 99999.99 (error: Ensure that there are no more than 6 digits in total) \n=> 9999.999 (error: Ensure that there are no more than 6 digits in total)\n=> 999.999 (error: Ensure that there are no more than 2 decimal places)\n=> 99999.9 (error: Ensure that there are no more than 4 digits before the decimal point)\n\nSo, the above example shows that if we use max_digits=6, you can enter total 6 digits as a field value.\nAs we have used decimal_places=2, we have to think that we can store two decimal places as values, and the rest 6-2 = 4 will be the number of digits before the point.\nIn the case of:\nnumber = models.DecimalField(max_digits=5, decimal_places=2)\n\nWe can store two decimal places as values, and the rest of 5-2 = 3 will be the number of digits before the point.\n" ]
[ 19, 0, 0 ]
[]
[]
[ "django", "django_models", "python" ]
stackoverflow_0067307399_django_django_models_python.txt
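A sketch of a field sized for quotes like 1.21242, assuming a working Django project around it; one digit before the point plus five after gives max_digits=6, decimal_places=5, unlike the 6/6 pair from the question, which leaves no digits before the point.

from django.db import models

class ForexPrice(models.Model):
    # 1 digit before the point + 5 after = 6 digits in total
    open_price = models.DecimalField(max_digits=6, decimal_places=5)
    # max_digits=6, decimal_places=6 would only admit values < 1,
    # which is exactly what the "must round to an absolute value
    # less than 1" error reports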
Q: What exactly happens when you create an alias of the Exception class? try: 0/0 except Exception as e: print(e) The above code prints division by zero as one would expect. But if we try to print without creating the alias: try: 0/0 except Exception: print(Exception) It simply prints <class 'Exception'>. What is happening here? The as keyword is used to create an "alias". If the error message "division by zero" is an attribute of the Exception class, then why does creating an alias make it equal to said attribute? Is it possible to print the error message without creating the alias? A: The documentation of Python states this: The except clause may specify a variable after the exception name. The variable is bound to the exception instance which typically has an args attribute that stores the arguments. For convenience, builtin exception types define str() to print all the arguments without explicitly accessing .args. It means that the variable that is passed after the name of Exception has additional attributes to display the specific exception that occurred.
What exactly happens when you create an alias of the Exception class?
try: 0/0 except Exception as e: print(e) The above code prints division by zero as one would expect. But if we try to print without creating the alias: try: 0/0 except Exception: print(Exception) It simply prints <class 'Exception'>. What is happening here? The as keyword is used to create an "alias". If the error message "division by zero" is an attribute of the Exception class, then why does creating an alias make it equal to said attribute? Is it possible to print the error message without creating the alias?
[ "The documentation of Python states this:\n\nThe except clause may specify a variable after the exception name. The variable is bound to the exception instance which typically has an args attribute that stores the arguments. For convenience, builtin exception types define str() to print all the arguments without explicitly accessing .args.\n\nIt means that the variable that is passed after the name of Exception has additional attributes to display the specific exception occured.\n" ]
[ 1 ]
[]
[]
[ "exception", "python" ]
stackoverflow_0074589925_exception_python.txt
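A small check of the behaviour the answer quotes: str(e) and e.args carry the message, while the class object itself does not.

try:
    0 / 0
except ZeroDivisionError as e:
    print(str(e))   # division by zero
    print(e.args)   # ('division by zero',)
    print(repr(e))  # ZeroDivisionError('division by zero')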
Q: Changing layout and adding titles to JSON file with python I'm trying to extract data from two different api endpoints and create a JSON file with said data. I wish to have titles for each object to distinguish the different data. My code is below: import requests import json headers = { 'accept-language': 'en-US,en;q=0.9', 'origin': 'https://www.nasdaq.com/', 'referer': 'https://www.nasdaq.com/', 'accept': 'application/json, text/plain, */*', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36' } dataAAPL = requests.get('https://api.nasdaq.com/api/company/AAPL/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', headers=headers).json()['data']['numberOfSharesTraded']['rows'][3] dataMSFT = requests.get('https://api.nasdaq.com/api/company/MSFT/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', headers=headers).json()['data']['numberOfSharesTraded']['rows'][3] with open('AAPL_insider_piechart.json', 'w') as f: json.dump(dataAAPL, f, indent=4) json.dump(dataMSFT, f, indent=4) And this is the output JSON: { "insiderTrade": "Net Activity", "months3": "(1,317,881)", "months12": "(1,986,819)" } { "insiderTrade": "Net Activity", "months3": "185,451", "months12": "31,944" } What I need, is for the JSON to look something like this: { "AAPL":[ { "insiderTrade": "Net Activity", "months3": "(1,317,881)", "months12": "(1,986,819)" } ], "MSFT":[ { "insiderTrade": "Net Activity", "months3": "185,451", "months12": "31,944" } ] } A: Just create a dictionary and place the two values in it before dumping it as JSON. Solution import requests import json headers = { 'accept-language': 'en-US,en;q=0.9', 'origin': 'https://www.nasdaq.com/', 'referer': 'https://www.nasdaq.com/', 'accept': 'application/json, text/plain, */*', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36' } dataAAPL = requests.get('https://api.nasdaq.com/api/company/AAPL/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', headers=headers).json()['data']['numberOfSharesTraded']['rows'][3] dataMSFT = requests.get('https://api.nasdaq.com/api/company/MSFT/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', headers=headers).json()['data']['numberOfSharesTraded']['rows'][3] with open('AAPL_insider_piechart.json', 'w') as f: obj = { "AAPL": [dataAAPL], "MSFT": [dataMSFT] } json.dump(obj, f, indent=4)
Changing layout and adding titles to JSON file with python
I'm trying to extract data from two different api endpoints and create a JSON file with said data. I wish to have titles for each object to distinguish the different data. My code is below: import requests import json headers = { 'accept-language': 'en-US,en;q=0.9', 'origin': 'https://www.nasdaq.com/', 'referer': 'https://www.nasdaq.com/', 'accept': 'application/json, text/plain, */*', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36' } dataAAPL = requests.get('https://api.nasdaq.com/api/company/AAPL/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', headers=headers).json()['data']['numberOfSharesTraded']['rows'][3] dataMSFT = requests.get('https://api.nasdaq.com/api/company/MSFT/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', headers=headers).json()['data']['numberOfSharesTraded']['rows'][3] with open('AAPL_insider_piechart.json', 'w') as f: json.dump(dataAAPL, f, indent=4) json.dump(dataMSFT, f, indent=4) And this is the output JSON: { "insiderTrade": "Net Activity", "months3": "(1,317,881)", "months12": "(1,986,819)" } { "insiderTrade": "Net Activity", "months3": "185,451", "months12": "31,944" } What I need, is for the JSON to look something like this: { "AAPL":[ { "insiderTrade": "Net Activity", "months3": "(1,317,881)", "months12": "(1,986,819)" } ], "MSFT":[ { "insiderTrade": "Net Activity", "months3": "185,451", "months12": "31,944" } ] }
[ "Just create a dictionary and place the two values in it before dumping it as JSON.\nSolution\nimport requests\nimport json\n\nheaders = {\n 'accept-language': 'en-US,en;q=0.9',\n 'origin': 'https://www.nasdaq.com/',\n 'referer': 'https://www.nasdaq.com/',\n 'accept': 'application/json, text/plain, */*',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'\n}\n\ndataAAPL = requests.get('https://api.nasdaq.com/api/company/AAPL/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', \nheaders=headers).json()['data']['numberOfSharesTraded']['rows'][3]\n\ndataMSFT = requests.get('https://api.nasdaq.com/api/company/MSFT/insider-trades?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC', \nheaders=headers).json()['data']['numberOfSharesTraded']['rows'][3]\n\nwith open('AAPL_insider_piechart.json', 'w') as f:\n obj = {\n \"AAPL\": [dataAAPL], \n \"MSFT\": [dataMSFT] \n }\n json.dump(obj, f, indent=4)\n\n" ]
[ 1 ]
[]
[]
[ "api", "json", "python" ]
stackoverflow_0074589989_api_json_python.txt
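A hedged variant of the accepted answer that loops over the symbols instead of duplicating the request code; it assumes the endpoint returns the same shape for every ticker and reuses the headers dict from the question.

import json
import requests

# headers: the User-Agent dict defined in the question
symbols = ["AAPL", "MSFT"]
obj = {}
for sym in symbols:
    url = (f"https://api.nasdaq.com/api/company/{sym}/insider-trades"
           "?limit=15&type=ALL&sortColumn=lastDate&sortOrder=DESC")
    row = requests.get(url, headers=headers).json()["data"]["numberOfSharesTraded"]["rows"][3]
    obj[sym] = [row]

with open("insider_piechart.json", "w") as f:
    json.dump(obj, f, indent=4)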
Q: Path hunting with Z3 solver I am modeling below problem in Z3. The aim is to find the path for Agent to reach the coin avoiding obstacles. Initial_grid =[['T' 'T' 'T' 'T' 'T' 'T' 'T'] ['T' ' ' ' ' ' ' ' ' ' ' 'T'] ['T' ' ' 'A' 'O' ' ' 'O' 'T'] ['T' 'O' ' ' ' ' ' ' ' ' 'T'] ['T' ' ' ' ' 'O' 'O' 'C' 'T'] ['T' ' ' ' ' ' ' ' ' ' ' 'T'] ['T' 'T' 'T' 'T' 'T' 'T' 'T']] x, y = Ints('x y') x = agent_loc[0] y = agent_loc[1] xc, yc = Ints('xc yc') xc = coin_loc[0] yc = coin_loc[1] s = Solver() s.add(x,y = (Or(move_right(),move_left(),move_top(),move_bottom()))) solve(And (x = xc) (y = yc)) if s.check() == unsat: print('Problem not solvable') else: m = s.model() I added constraint for movement function which returns x,y coordinates if the movement is valid (no obstacles and within boundary) and returns false otherwise. How can I model the movement constraint as the one in code gives error: add() got an unexpected keyword argument 'y'. A: One way to think about these sorts of search problems is a two pronged approach: Can I find a path with 1 move? If not, try with 2 moves, 3 moves, etc. till you hit an upper bound and you decide to stop trying. Instead of "searching," imagine a path is given to you; how would you check that it's a good path? The magic of SMT solving is that if you can write a program that verifies a given "alleged" solution is good, it can find you one that is indeed good. The following is a solution to your problem following these lines of thought. from z3 import * Grid = [ ['T', 'T', 'T', 'T', 'T', 'T', 'T'] , ['T', ' ', ' ', ' ', ' ', ' ', 'T'] , ['T', ' ', 'A', 'O', ' ', 'O', 'T'] , ['T', 'O', ' ', ' ', ' ', ' ', 'T'] , ['T', ' ', ' ', 'O', 'O', 'C', 'T'] , ['T', ' ', ' ', ' ', ' ', ' ', 'T'] , ['T', 'T', 'T', 'T', 'T', 'T', 'T'] ] Cell, (Wall, Empty, Agent, Obstacle, Coin) = EnumSort('Cell', ('Wall', 'Empty', 'Agent', 'Obstacle', 'Coin')) def mkCell(c): if c == 'T': return Wall elif c == ' ': return Empty elif c == 'A': return Agent elif c == 'O': return Obstacle else: return Coin def grid(x, y): result = Wall for i in range (len(Grid)): for j in range (len(Grid[0])): result = If(And(x == IntVal(i), y == IntVal(j)), mkCell(Grid[i][j]), result) return result def validStart(x, y): return grid(x, y) == Agent def validEnd(x, y): return grid(x, y) == Coin def canMoveTo(x, y): n = grid(x, y) return Or(n == Empty, n == Coin, n == Agent) def moveLeft(x, y): return [x, If(canMoveTo(x, y-1), y-1, y)] def moveRight(x, y): return [x, If(canMoveTo(x, y+1), y+1, y)] def moveUp(x, y): return [If(canMoveTo(x-1, y), x-1, x), y] def moveDown(x, y): return [If(canMoveTo(x+1, y), x+1, x), y] Dir, (Left, Right, Up, Down) = EnumSort('Dir', ('Left', 'Right', 'Up', 'Down')) def move(d, x, y): xL, yL = moveLeft (x, y) xR, yR = moveRight(x, y) xU, yU = moveUp (x, y) xD, yD = moveDown (x, y) xN = If(d == Left, xL, If (d == Right, xR, If (d == Up, xU, xD))) yN = If(d == Left, yL, If (d == Right, yR, If (d == Up, yU, yD))) return [xN, yN] def solves(seq, x, y): def walk(moves, curX, curY): if moves: nX, nY = move(moves[0], curX, curY) return walk(moves[1:], nX, nY) else: return [curX, curY] xL, yL = walk(seq, x, y) return And(validStart(x, y), validEnd(xL, yL)) pathLength = 0 while(pathLength != 20): print("Trying to find a path of length:", pathLength) s = Solver() seq = [Const('m' + str(i), Dir) for i in range(pathLength)] x, y = Ints('x y') s.add(solves(seq, x, y)) if s.check() == sat: print("Found solution with length:", pathLength) m = s.model() print(" Start x:", m[x]) print(" Start y:", m[y]) for 
move in seq: print(" Move", m[move]) break else: pathLength += 1 When run, this prints: Trying to find a path of length: 0 Trying to find a path of length: 1 Trying to find a path of length: 2 Trying to find a path of length: 3 Trying to find a path of length: 4 Trying to find a path of length: 5 Found solution with length: 5 Start x: 2 Start y: 2 Move Down Move Right Move Right Move Right Move Down So, it found a solution with 5 moves; you can chase it in your grid to see that it's indeed correct. (The numbering starts at 0,0 at the top-left corner; increasing as you go to right and down.) Additionally, you’re guaranteed that this is a shortest solution (not necessarily unique of course). That is, there are no solutions with less than 5 moves. I should add that there are other ways to solve this problem without iterating, by using z3 sequences. However that’s even more advanced z3 programming, and likely to be less performant as well. For all practical purposes, the iterative approach presented here is a good way to tackle such search problems in z3. A: An alternative solution: from z3 import * # 1 2 3 4 5 6 7 Grid = [ ['T', 'T', 'T', 'T', 'T', 'T', 'T'] # 1 , ['T', ' ', ' ', ' ', ' ', ' ', 'T'] # 2 , ['T', ' ', 'A', 'O', ' ', 'O', 'T'] # 3 , ['T', 'O', ' ', ' ', ' ', ' ', 'T'] # 4 , ['T', ' ', ' ', 'O', 'O', 'C', 'T'] # 5 , ['T', ' ', ' ', ' ', ' ', ' ', 'T'] # 6 , ['T', 'T', 'T', 'T', 'T', 'T', 'T'] # 7 ] rows = len(Grid) Rows = range(rows) cols = len(Grid[0]) Cols = range(cols) Infinity = rows * cols + 1 deltaRow = [-1, 0, +1, 0] deltaCol = [ 0, -1, 0, +1] delta = ['up', 'left', 'down', 'right'] deltaInv = ['down', 'right', 'up', 'left']; Deltas = range(len(delta)) s = Solver() # 2D array comprehension: # create matrix of path distances # https://stackoverflow.com/a/25345853/1911064 distances = [[Int('d'+str(r)+'_'+str(c)) for c in Cols] for r in Rows] # http://www.hakank.org/z3/z3_utils_hakank.py # v is the minimum value of x def minimum(sol, v, x): sol.add(Or([v == x[i] for i in range(len(x))])) # v is an element in x) for i in range(len(x)): sol.add(v <= x[i]) # and it's the smallest # constraints for distances for row in Rows: for col in Cols: # shorthands to reduce typing g = Grid[row][col] dist = distances[row][col] if (g == 'T') or (g == 'O'): # obstacles and walls cannot be part of the path s.add(dist == Infinity) elif g == 'A': # the path starts here s.add(dist == 0) else: # array index violations cannot happen # because the wall case is handled above minimum(s, dist, [distances[row + deltaRow[i]][col + deltaCol[i]] + 1 for i in Deltas]) # remember the coin coordinates if g == 'C': rowCoin, colCoin = row, col # detect unreachable target as UNSAT s.add(dist < Infinity) if s.check() == sat: # show the resulting path m = s.model() row, col = rowCoin, colCoin # collect the path in reverse to # avoid dead-ends which don't reach the coin path = [] dir = [] while True: path.insert(0, [row, col]) if Grid[row][col] == 'A': break neighborDistances = [m[distances[row+deltaRow[i]][col+deltaCol[i]]].as_long() for i in Deltas] best = neighborDistances.index(min(neighborDistances)) # advance to the direction of the lowest distance row += deltaRow[best] col += deltaCol[best] dir.insert(0, best) print('start ' + ' [row ' + str(path[0][0]+1) + '; col ' + str(path[0][1]+1) + ']') for i in range(1, len(path)): print(deltaInv[dir[i-1]].ljust(6) + ' [row ' + str(path[i][0]+1) + '; col ' + str(path[i][1]+1) + ']') else: print("No path found!")
Path hunting with Z3 solver
I am modeling below problem in Z3. The aim is to find the path for Agent to reach the coin avoiding obstacles. Initial_grid =[['T' 'T' 'T' 'T' 'T' 'T' 'T'] ['T' ' ' ' ' ' ' ' ' ' ' 'T'] ['T' ' ' 'A' 'O' ' ' 'O' 'T'] ['T' 'O' ' ' ' ' ' ' ' ' 'T'] ['T' ' ' ' ' 'O' 'O' 'C' 'T'] ['T' ' ' ' ' ' ' ' ' ' ' 'T'] ['T' 'T' 'T' 'T' 'T' 'T' 'T']] x, y = Ints('x y') x = agent_loc[0] y = agent_loc[1] xc, yc = Ints('xc yc') xc = coin_loc[0] yc = coin_loc[1] s = Solver() s.add(x,y = (Or(move_right(),move_left(),move_top(),move_bottom()))) solve(And (x = xc) (y = yc)) if s.check() == unsat: print('Problem not solvable') else: m = s.model() I added constraint for movement function which returns x,y coordinates if the movement is valid (no obstacles and within boundary) and returns false otherwise. How can I model the movement constraint as the one in code gives error: add() got an unexpected keyword argument 'y'.
[ "One way to think about these sorts of search problems is a two pronged approach:\n\nCan I find a path with 1 move? If not, try with 2 moves, 3 moves, etc. till you hit an upper bound and you decide to stop trying.\n\nInstead of \"searching,\" imagine a path is given to you; how would you check that it's a good path? The magic of SMT solving is that if you can write a program that verifies a given \"alleged\" solution is good, it can find you one that is indeed good.\n\n\nThe following is a solution to your problem following these lines of thought.\nfrom z3 import *\n\nGrid = [ ['T', 'T', 'T', 'T', 'T', 'T', 'T']\n , ['T', ' ', ' ', ' ', ' ', ' ', 'T']\n , ['T', ' ', 'A', 'O', ' ', 'O', 'T']\n , ['T', 'O', ' ', ' ', ' ', ' ', 'T']\n , ['T', ' ', ' ', 'O', 'O', 'C', 'T']\n , ['T', ' ', ' ', ' ', ' ', ' ', 'T']\n , ['T', 'T', 'T', 'T', 'T', 'T', 'T']\n ]\n\nCell, (Wall, Empty, Agent, Obstacle, Coin) = EnumSort('Cell', ('Wall', 'Empty', 'Agent', 'Obstacle', 'Coin'))\n\ndef mkCell(c):\n if c == 'T':\n return Wall\n elif c == ' ':\n return Empty\n elif c == 'A':\n return Agent\n elif c == 'O':\n return Obstacle\n else:\n return Coin\n\ndef grid(x, y):\n result = Wall\n for i in range (len(Grid)):\n for j in range (len(Grid[0])):\n result = If(And(x == IntVal(i), y == IntVal(j)), mkCell(Grid[i][j]), result)\n return result\n\ndef validStart(x, y):\n return grid(x, y) == Agent\n\ndef validEnd(x, y):\n return grid(x, y) == Coin\n\ndef canMoveTo(x, y):\n n = grid(x, y)\n return Or(n == Empty, n == Coin, n == Agent)\n\ndef moveLeft(x, y):\n return [x, If(canMoveTo(x, y-1), y-1, y)]\n\ndef moveRight(x, y):\n return [x, If(canMoveTo(x, y+1), y+1, y)]\n\ndef moveUp(x, y):\n return [If(canMoveTo(x-1, y), x-1, x), y]\n\ndef moveDown(x, y):\n return [If(canMoveTo(x+1, y), x+1, x), y]\n\nDir, (Left, Right, Up, Down) = EnumSort('Dir', ('Left', 'Right', 'Up', 'Down'))\n\ndef move(d, x, y):\n xL, yL = moveLeft (x, y)\n xR, yR = moveRight(x, y)\n xU, yU = moveUp (x, y)\n xD, yD = moveDown (x, y)\n xN = If(d == Left, xL, If (d == Right, xR, If (d == Up, xU, xD)))\n yN = If(d == Left, yL, If (d == Right, yR, If (d == Up, yU, yD)))\n return [xN, yN]\n\ndef solves(seq, x, y):\n def walk(moves, curX, curY):\n if moves:\n nX, nY = move(moves[0], curX, curY)\n return walk(moves[1:], nX, nY)\n else:\n return [curX, curY]\n\n xL, yL = walk(seq, x, y)\n return And(validStart(x, y), validEnd(xL, yL))\n\npathLength = 0\n\nwhile(pathLength != 20):\n print(\"Trying to find a path of length:\", pathLength)\n\n s = Solver()\n seq = [Const('m' + str(i), Dir) for i in range(pathLength)]\n x, y = Ints('x y')\n s.add(solves(seq, x, y))\n\n if s.check() == sat:\n print(\"Found solution with length:\", pathLength)\n m = s.model()\n print(\" Start x:\", m[x])\n print(\" Start y:\", m[y])\n for move in seq:\n print(\" Move\", m[move])\n break\n else:\n pathLength += 1\n\nWhen run, this prints:\nTrying to find a path of length: 0\nTrying to find a path of length: 1\nTrying to find a path of length: 2\nTrying to find a path of length: 3\nTrying to find a path of length: 4\nTrying to find a path of length: 5\nFound solution with length: 5\n Start x: 2\n Start y: 2\n Move Down\n Move Right\n Move Right\n Move Right\n Move Down\n\nSo, it found a solution with 5 moves; you can chase it in your grid to see that it's indeed correct. (The numbering starts at 0,0 at the top-left corner; increasing as you go to right and down.) Additionally, you’re guaranteed that this is a shortest solution (not necessarily unique of course). 
That is, there are no solutions with less than 5 moves.\nI should add that there are other ways to solve this problem without iterating, by using z3 sequences. However that’s even more advanced z3 programming, and likely to be less performant as well. For all practical purposes, the iterative approach presented here is a good way to tackle such search problems in z3.\n", "An alternative solution:\nfrom z3 import *\n# 1 2 3 4 5 6 7\nGrid = [ ['T', 'T', 'T', 'T', 'T', 'T', 'T'] # 1\n , ['T', ' ', ' ', ' ', ' ', ' ', 'T'] # 2\n , ['T', ' ', 'A', 'O', ' ', 'O', 'T'] # 3\n , ['T', 'O', ' ', ' ', ' ', ' ', 'T'] # 4\n , ['T', ' ', ' ', 'O', 'O', 'C', 'T'] # 5\n , ['T', ' ', ' ', ' ', ' ', ' ', 'T'] # 6\n , ['T', 'T', 'T', 'T', 'T', 'T', 'T'] # 7\n ]\n\nrows = len(Grid)\nRows = range(rows)\ncols = len(Grid[0])\nCols = range(cols)\nInfinity = rows * cols + 1\n\ndeltaRow = [-1, 0, +1, 0]\ndeltaCol = [ 0, -1, 0, +1]\ndelta = ['up', 'left', 'down', 'right']\ndeltaInv = ['down', 'right', 'up', 'left'];\nDeltas = range(len(delta))\n\ns = Solver()\n\n# 2D array comprehension: \n# create matrix of path distances\n# https://stackoverflow.com/a/25345853/1911064\ndistances = [[Int('d'+str(r)+'_'+str(c)) for c in Cols] for r in Rows]\n\n# http://www.hakank.org/z3/z3_utils_hakank.py\n# v is the minimum value of x\ndef minimum(sol, v, x):\n sol.add(Or([v == x[i] for i in range(len(x))])) # v is an element in x)\n for i in range(len(x)):\n sol.add(v <= x[i]) # and it's the smallest\n\n# constraints for distances\nfor row in Rows:\n for col in Cols:\n # shorthands to reduce typing\n g = Grid[row][col]\n dist = distances[row][col]\n if (g == 'T') or (g == 'O'):\n # obstacles and walls cannot be part of the path\n s.add(dist == Infinity)\n elif g == 'A':\n # the path starts here\n s.add(dist == 0)\n else:\n # array index violations cannot happen\n # because the wall case is handled above\n minimum(s, dist, [distances[row + deltaRow[i]][col + deltaCol[i]] + 1 for i in Deltas])\n # remember the coin coordinates\n if g == 'C':\n rowCoin, colCoin = row, col\n # detect unreachable target as UNSAT\n s.add(dist < Infinity)\n \nif s.check() == sat:\n # show the resulting path\n m = s.model()\n row, col = rowCoin, colCoin\n # collect the path in reverse to\n # avoid dead-ends which don't reach the coin\n path = []\n dir = []\n while True:\n path.insert(0, [row, col])\n if Grid[row][col] == 'A':\n break\n neighborDistances = [m[distances[row+deltaRow[i]][col+deltaCol[i]]].as_long() \n for i in Deltas]\n best = neighborDistances.index(min(neighborDistances))\n # advance to the direction of the lowest distance\n row += deltaRow[best]\n col += deltaCol[best]\n dir.insert(0, best)\n\n print('start ' + ' [row ' + str(path[0][0]+1) + '; col ' + str(path[0][1]+1) + ']')\n for i in range(1, len(path)):\n print(deltaInv[dir[i-1]].ljust(6) + ' [row ' + str(path[i][0]+1) + '; col ' + str(path[i][1]+1) + ']')\nelse:\n print(\"No path found!\") \n\n" ]
[ 1, 0 ]
[]
[]
[ "python", "z3", "z3py" ]
stackoverflow_0074582355_python_z3_z3py.txt
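On the original TypeError: Solver.add takes constraint expressions as positional arguments, so s.add(x, y = ...) is parsed as a Python keyword argument named y, not as a z3 equality. A minimal correct form, with illustrative coordinates only:

from z3 import Ints, Solver, And, Or, sat

x, y = Ints('x y')
s = Solver()
# constraints are positional expressions; equality is written with ==
s.add(Or(And(x == 2, y == 3), And(x == 3, y == 2)))
s.add(x >= 0, y >= 0)
if s.check() == sat:
    print(s.model())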
Q: How to format dictionary.items so that it returns two digits after the comma? I have an Excel sheet and I extract some values from it. But some numbers extracted from the sheet look like 3767.3999999999996 and they have to look like 3767,39, so just two decimals after the comma. I tried with this function: import openpyxl def load_excel_file(self, file_name): excelWorkbook = openpyxl.load_workbook(filename=file_name, data_only=True) return excelWorkbook def calulate_total_fruit_NorthMidSouth(self, file_name): # self.excelWorkbook sheet_factuur = self.load_excel_file(file_name)["Facturen "] fruit_sums = { "ananas": 0, "apple": 0, "waspeen": 0, } fruit_name_rows = { "ananas": [6, 7, 8], "apple": [9, 10, 11], "waspeen": [12, 13, 14], } array = [row for row in sheet_factuur.values] # type: ignore # excel does not have a row 0 for row_num, row_values in enumerate(array, 1): for fruit in ["ananas", "apple", "waspeen"]: # loop through specific fruits if row_num in fruit_name_rows[fruit]: # index 4 is column 5 in excel fruit_sums[fruit] += row_values[4] # type: ignore return "\n".join(f"{a} {b}" for a, b in "{:.2%}".format(fruit_sums.items())) I try to format the return statement. But I get this error: TypeError at /controlepunt unsupported format string passed to dict_items.__format__ Question: how do I format this correctly so that it has only two digits after the comma? A: The issue is at "{:.2%}".format(fruit_sums.items()), but even if it had worked, you would have tried to iterate key, values on a string, which is absolutely not the way. You need to apply the formatting logic to the value only: return "\n".join(f"{a} {b:.2f}" for a, b in fruit_sums.items())
How to format dictionary.items so that it returns two digits after the comma?
I have an Excel sheet and I extract some values from it. But some numbers extracted from the sheet look like 3767.3999999999996 and they have to look like 3767,39, so just two decimals after the comma. I tried with this function: import openpyxl def load_excel_file(self, file_name): excelWorkbook = openpyxl.load_workbook(filename=file_name, data_only=True) return excelWorkbook def calulate_total_fruit_NorthMidSouth(self, file_name): # self.excelWorkbook sheet_factuur = self.load_excel_file(file_name)["Facturen "] fruit_sums = { "ananas": 0, "apple": 0, "waspeen": 0, } fruit_name_rows = { "ananas": [6, 7, 8], "apple": [9, 10, 11], "waspeen": [12, 13, 14], } array = [row for row in sheet_factuur.values] # type: ignore # excel does not have a row 0 for row_num, row_values in enumerate(array, 1): for fruit in ["ananas", "apple", "waspeen"]: # loop through specific fruits if row_num in fruit_name_rows[fruit]: # index 4 is column 5 in excel fruit_sums[fruit] += row_values[4] # type: ignore return "\n".join(f"{a} {b}" for a, b in "{:.2%}".format(fruit_sums.items())) I try to format the return statement. But I get this error: TypeError at /controlepunt unsupported format string passed to dict_items.__format__ Question: how do I format this correctly so that it has only two digits after the comma?
[ "The issue is at \"{:.2%}\".format(fruit_sums.items()), but even if it had worked, you would have tried to iterate key, values on a string, that is absolutly not the way\nYou need to apply the formatting logic only the value only\nreturn \"\\n\".join(f\"{a} {b:.2f}\" for a, b in fruit_sums.items())\n\n" ]
[ 1 ]
[]
[]
[ "format", "python" ]
stackoverflow_0074590144_format_python.txt
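One detail both the question and the answer leave open: :.2f rounds (3767.399... becomes 3767.40) and prints a dot, while the asker shows the truncated, comma-separated 3767,39. A sketch covering both points; the replace call is a simple stand-in for a locale-based approach.

import math

fruit_sums = {"ananas": 3767.3999999999996}
for name, total in fruit_sums.items():
    truncated = math.floor(total * 100) / 100  # truncate instead of rounding
    print(f"{name} {truncated:.2f}".replace(".", ","))  # ananas 3767,39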
Q: Using a class-based test as a fixture I am using this class which creates my login test: import pytest from pages.loginPage import LoginPage from utils import utilis as utils @pytest.mark.usefixtures("test_setup") class TestLogin(): def test_login(self): driver=self.driver driver.get(utils.URL) login =LoginPage(driver) login.enterUsername(utils.USERNAME) login.enterPassword(utils.PASSWORD) login.clickLogin() I want to re-use this test as a fixture for other tests, like this: import pytest from pages.loginPage import LoginPage from pages.homePage import HomePage from utils import utilis as util @pytest.mark.usefixtures("test_login") class TestAddRegulation(): def test_addRegulation(self): driver = self.driver homepage = HomePage(driver) homepage.clickRegulationTile() homepage.clickAddRegulationListItem() And this is the conftest.py file with the test_setup fixture: from selenium import webdriver import pytest def pytest_addoption(parser): parser.addoption("--browser", action="store", default="chrome", help="Type in browser name e.g.chrome OR firefox") @pytest.fixture(scope="class") def test_setup(request): browser = request.config.getoption("--browser") if browser == 'chrome': driver = webdriver.Chrome(executable_path= r"C:/Users/user/PycharmProjects/RCM_AutomationFramework/drivers/chromedriver.exe") elif browser == 'firefox': driver = webdriver.Firefox(executable_path= r"C:/Users/user/PycharmProjects/RCM_AutomationFramework/drivers/geckodriver.exe") driver.implicitly_wait(5) driver.maximize_window() request.cls.driver = driver yield driver.close() driver.quit() print("Test is finished") I can't get this to work, even if the test_login case is executed before the test_addRegulation test case. I tried marking test_login as a fixture but it doesn't work. I can make it work if I dropped using classes. Can I make a class method a fixture that is re-usable for other test classes? A: Fixtures can be methods defined in a class, but then they are not available outside of the class. As the pytest documentation on fixtures states: Fixture availability is determined from the perspective of the test. A fixture is only available for tests to request if they are in the scope that fixture is defined in. If a fixture is defined inside a class, it can only be requested by tests inside that class. (Bold emphasis mine). This means that you have to use a plain function to define re-usable fixtures. You can still access the class used by each test, however, via the request.cls attribute. Make sure to have the fixture take both the request and the test_setup scopes: @pytest.fixture(scope="class") def login(request, test_setup): driver = request.cls.driver driver.get(utils.URL) login = LoginPage(driver) login.enterUsername(utils.USERNAME) login.enterPassword(utils.PASSWORD) login.clickLogin() Just put that fixture in your conftest.py file. You can use a different scope, provided it doesn't exceed the class scope of the test_setup fixture (so your choices are class and function here). You can then use that fixture with no actual test body to test the login: @pytest.mark.usefixtures("login") class TestLogin: def test_login(self): # test passes if the login fixture completes. pass This does seem a bit redundant, of course. Use the fixture for other classes the same way: @pytest.mark.usefixtures("login") class TestAddRegulation: def test_addRegulation(self): # ... etc. 
A quick demo (without selenium, just plain Python): import pytest @pytest.fixture(scope="class") def test_setup(request): request.cls.fixtures = ["test_setup"] yield print("Test is finished, fixtures used:", request.cls.fixtures) @pytest.fixture(scope="class") def login(request, test_setup): # The fixtures list was created by the test_setup fixture fixtures = request.cls.fixtures fixtures.append("login") @pytest.mark.usefixtures("login") class TestLogin: def test_login(self): assert self.fixtures == ["test_setup", "login"] @pytest.mark.usefixtures("login") class TestAddRegulation: def test_addRegulation(self): assert self.fixtures == ["test_setup", "login"] Running these tests with pytest -vs (verbose mode, disabling stdout capture) produces: ...::TestLogin::test_login PASSEDTest is finished, fixtures used: ['test_setup', 'login'] ...::TestAddRegulation::test_addRegulation PASSEDTest is finished, fixtures used: ['test_setup', 'login']
Using a class-based test as a fixture
I am using this class which creates my login test: import pytest from pages.loginPage import LoginPage from utils import utilis as utils @pytest.mark.usefixtures("test_setup") class TestLogin(): def test_login(self): driver=self.driver driver.get(utils.URL) login =LoginPage(driver) login.enterUsername(utils.USERNAME) login.enterPassword(utils.PASSWORD) login.clickLogin() I want to re-use this test as a fixture for other tests, like this: import pytest from pages.loginPage import LoginPage from pages.homePage import HomePage from utils import utilis as util @pytest.mark.usefixtures("test_login") class TestAddRegulation(): def test_addRegulation(self): driver = self.driver homepage = HomePage(driver) homepage.clickRegulationTile() homepage.clickAddRegulationListItem() And this is the conftest.py file with the test_setup fixture: from selenium import webdriver import pytest def pytest_addoption(parser): parser.addoption("--browser", action="store", default="chrome", help="Type in browser name e.g.chrome OR firefox") @pytest.fixture(scope="class") def test_setup(request): browser = request.config.getoption("--browser") if browser == 'chrome': driver = webdriver.Chrome(executable_path= r"C:/Users/user/PycharmProjects/RCM_AutomationFramework/drivers/chromedriver.exe") elif browser == 'firefox': driver = webdriver.Firefox(executable_path= r"C:/Users/user/PycharmProjects/RCM_AutomationFramework/drivers/geckodriver.exe") driver.implicitly_wait(5) driver.maximize_window() request.cls.driver = driver yield driver.close() driver.quit() print("Test is finished") I can't get this to work, even if the test_login case is executed before the test_addRegulation test case. I tried marking test_login as a fixture but it doesn't work. I can make it work if I dropped using classes. Can I make a class method a fixture that is re-usable for other test classes?
[ "Fixtures can be methods defined in a class, but then they are not available outside of the class. As the pytest documentation on fixtures states:\n\nFixture availability is determined from the perspective of the test. A fixture is only available for tests to request if they are in the scope that fixture is defined in. If a fixture is defined inside a class, it can only be requested by tests inside that class.\n\n(Bold emphasis mine).\nThis means that you have to use a plain function to define re-usable fixtures. You can still access the class used by each test, however, via the request.cls attribute. Make sure to have the fixture take both the request and the test_setup scopes:\n@pytest.fixture(scope=\"class\")\ndef login(request, test_setup):\n driver = request.cls.driver\n driver.get(utils.URL)\n\n login = LoginPage(driver)\n login.enterUsername(utils.USERNAME)\n login.enterPassword(utils.PASSWORD)\n login.clickLogin()\n\nJust put that fixture in your conftest.py file. You can use a different scope, provided it doesn't exceed the class scope of the test_setup fixture (so your choices are class and function here).\nYou can then use that fixture with no actual test body to test the login:\n@pytest.mark.usefixtures(\"login\")\nclass TestLogin:\n\n def test_login(self):\n # test passes if the login fixture completes.\n pass\n\nThis does seem a bit redundant, of course.\nUse the fixture for other classes the same way:\n@pytest.mark.usefixtures(\"login\")\nclass TestAddRegulation:\n\n def test_addRegulation(self):\n # ... etc.\n\nA quick demo (without selenium, just plain Python):\nimport pytest\n\n@pytest.fixture(scope=\"class\")\ndef test_setup(request):\n request.cls.fixtures = [\"test_setup\"]\n yield\n print(\"Test is finished, fixtures used:\", request.cls.fixtures)\n\n@pytest.fixture(scope=\"class\")\ndef login(request, test_setup):\n # The fixtures list was created by the test_setup fixture\n fixtures = request.cls.fixtures\n fixtures.append(\"login\")\n\n@pytest.mark.usefixtures(\"login\")\nclass TestLogin:\n\n def test_login(self):\n assert self.fixtures == [\"test_setup\", \"login\"]\n\n@pytest.mark.usefixtures(\"login\")\nclass TestAddRegulation:\n\n def test_addRegulation(self):\n assert self.fixtures == [\"test_setup\", \"login\"]\n\nRunning these tests with pytest -vs (verbose mode, disabling stdout capture) produces:\n...::TestLogin::test_login PASSEDTest is finished, fixtures used: ['test_setup', 'login']\n\n...::TestAddRegulation::test_addRegulation PASSEDTest is finished, fixtures used: ['test_setup', 'login']\n\n" ]
[ 1 ]
[]
[]
[ "pytest", "python", "testing" ]
stackoverflow_0074590160_pytest_python_testing.txt
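A minimal illustration of the class scope used above, independent of selenium: the fixture body runs once per class, so both tests observe the same list. Save as test_scope.py and run pytest.

import pytest

@pytest.fixture(scope="class")
def shared(request):
    request.cls.box = []  # created once for the whole class

@pytest.mark.usefixtures("shared")
class TestScope:
    def test_first(self):
        self.box.append(1)
        assert self.box == [1]

    def test_second(self):
        # same list object: the fixture did not run again
        assert self.box == [1]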
Q: Finding the outlier points from matplotlib : boxplot I am plotting a non-normal distribution using boxplot and interested in finding out about outliers using boxplot function of matplotlib. Besides the plot I am interested in finding out the value of points in my code which are shown as outliers in the boxplot. Is there any way I can extract these values for use in my downstream code from the boxplot object? A: Do you mean those points above and below the two black lines? from pylab import * spread= rand(50) * 100 center = ones(25) * 50 flier_high = rand(10) * 100 + 100 flier_low = rand(10) * -100 data =concatenate((spread, center, flier_high, flier_low), 0) r = boxplot(data) Store the return dict from boxplot, and you can get all the information from it, for example: top_points = r["fliers"][0].get_data()[1] bottom_points = r["fliers"][2].get_data()[1] plot(np.ones(len(top_points)), top_points, "+") plot(np.ones(len(bottom_points)), bottom_points, "+") A: The matplotlib pyplot.boxplot() function returns a dictionary containing various properties of the boxplot. The outlier values are stored within the fliers key of this dictionary. Assuming the call to plt.boxplot() was stored in the variable bplot, # retrieving outliers for vertical boxplot outliers = bplot["fliers"][0].get_ydata() # retrieving outliers for horizontal boxplot outliers = bplot["fliers"][0].get_xdata()
Finding the outlier points from matplotlib : boxplot
I am plotting a non-normal distribution using boxplot and interested in finding out about outliers using boxplot function of matplotlib. Besides the plot I am interested in finding out the value of points in my code which are shown as outliers in the boxplot. Is there any way I can extract these values for use in my downstream code from the boxplot object?
[ "Do you means those points above and below the two black lines?\nfrom pylab import *\nspread= rand(50) * 100\ncenter = ones(25) * 50\nflier_high = rand(10) * 100 + 100\nflier_low = rand(10) * -100\ndata =concatenate((spread, center, flier_high, flier_low), 0)\nr = boxplot(data)\n\n\nStore the return dict from boxplot, and you can get the all the information from it, for example:\ntop_points = r[\"fliers\"][0].get_data()[1]\nbottom_points = r[\"fliers\"][2].get_data()[1]\nplot(np.ones(len(top_points)), top_points, \"+\")\nplot(np.ones(len(bottom_points)), bottom_points, \"+\")\n\n\n", "The matplotlib pyplot.boxplot() function returns a dictionary containing various properties of the boxplot.\nThe outlier values are stored within the fliers key of this dictionary.\nAssuming call to plt.boxplot() was stored in variable bplot,\n# retrieving outliers for vertical boxplot\noutliers = bplot[\"fliers\"][0].get_ydata()\n\n# retreiving outliers for horizontal boxplot\noutliers = bplot[\"fliers\"][0].get_xdata()\n\n" ]
[ 21, 1 ]
[]
[]
[ "matplotlib", "outliers", "python" ]
stackoverflow_0010238357_matplotlib_outliers_python.txt
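A note on the indexing above: in recent matplotlib releases (an assumption worth checking against your version) each box contributes a single Line2D to r["fliers"] holding both the high and the low outliers, so reading [0] and [2] per box applies only to old releases. A version-agnostic sketch:

import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(50, 5, 100), [150.0, -40.0]])
r = plt.boxplot(data)
for line in r["fliers"]:
    print(line.get_ydata())  # all outliers of this box, high and low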
Q: How to make my python code less clumsy? (dealing with if/elif statements and pandas) I wrote a function that generates a table after feeding it a list. It is part of a web scraping script I'm working on. The function works (not the best but good enough for its purpose) but is there a better way to achieve better/similar/same result? For example, here's a list I would want to turn into a table: listings = ["Search Result", "Advanced Search", "Item Trader Location Price Last Seen", "Sealed Blacksmithing Writ", "Rewards 356 Vouchers", "Level 1", "@rscus2001", "Shadowfen: Stormhold", "Ghost Sea Trading Co", "71,200", "X", "1", "=", "71,200 3 Hour ago", "Sealed Blacksmithing Writ", "Rewards 328 Vouchers", "Level 1", "@Deirdre531", "Grahtwood: Elden Root", "piston", "100,000", "X", "1", "=", "100,000 6 Hour ago", "Sealed Blacksmithing Writ", "Rewards 328 Vouchers", "Level 1", "@Araxas", "Luminous Legion", "100,000", "X", "1", "=", "100,000 9 Hour ago", "Sealed Blacksmithing Writ", "Rewards 356 Vouchers", "Level 1", "@CaffeinatedMayhem", "Craglorn: Belkarth", "Masser's Merchants", "25,000", "X", "1", "=", "25,000 13 Hour ago", "Sealed Blacksmithing Writ", "Rewards 287 Vouchers", "Level 1", "@Gregori_Weissteufel", "Wrothgar: Morkul Stronghold", "The Cutthroat Mutineers", "45,000", "X", "1", "=", "45,000 13 Hour ago", "<", "1", ">"] Result: 0 1 2 3 4 5 6 7 8 9 10 0 Sealed Blacksmithing Writ Rewards 356 Vouchers Level 1 @rscus2001 Shadowfen: Stormhold Ghost Sea Trading Co 71,200 X 1 = 71,200 3 Hour ago 1 Sealed Blacksmithing Writ Rewards 328 Vouchers Level 1 @Deirdre531 Grahtwood: Elden Root piston 100,000 X 1 = 100,000 6 Hour ago 2 Sealed Blacksmithing Writ Rewards 328 Vouchers Level 1 @Araxas Luminous Legion 100,000 X 1 = 100,000 9 Hour ago None 3 Sealed Blacksmithing Writ Rewards 356 Vouchers Level 1 @CaffeinatedMayhem Craglorn: Belkarth Masser's Merchants 25,000 X 1 = 25,000 13 Hour ago 4 Sealed Blacksmithing Writ Rewards 287 Vouchers Level 1 @Gregori_Weissteufel Wrothgar: Morkul Stronghold The Cutthroat Mutineers 45,000 X 1 = 45,000 13 Hour ago Below is my code: import re import pandas as pd pd.set_option('display.max_columns', None) pd.options.display.width=None def MakeTable(listings): hour_idx = [i for i, item in enumerate(listings) if re.search(r"([0-9,]*\s[0-9]*\s(Minute|Hour)\sago|[0-9,]*\sNow)", item)] if len(hour_idx) == 1: ls = [listings[3:hour_idx[0]+1]] elif len(hour_idx) == 2: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1]] elif len(hour_idx) == 3: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1],listings[hour_idx[1]+1:hour_idx[2]+1]] elif len(hour_idx) == 4: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1],listings[hour_idx[1]+1:hour_idx[2]+1],listings[hour_idx[2]+1:hour_idx[3]+1]] else: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1],listings[hour_idx[1]+1:hour_idx[2]+1],listings[hour_idx[2]+1:hour_idx[3]+1],listings[hour_idx[3]+1:hour_idx[4]+1]] df = pd.DataFrame(ls) print(df) A: We can use list comprehensions and zip statement: def MakeTable(listings): hour_idx = [i for i, item in enumerate(listings) if re.search(r"([0-9,]*\s[0-9]*\s(Minute|Hour)\sago|[0-9,]*\sNow)", item)] ls = [listings[3:hour_idx[0]+1]] ls_2 = [x[y[i]+1:y[i+1]+1] for (x, y, i) in zip(listings, hour_idx, range(len(hour_idx)-1))] ls = ls.append(ls_2) df = pd.DataFrame(ls) print(df) A: I guess it's already answered - but I had a wee go for fun: import re import pandas as pd pd.set_option('display.max_columns', None) 
pd.options.display.width=None human_time_re = re.compile(r"([0-9,]*\s[0-9]*\s(Minute|Hour)\sago|[0-9,]*\sNow)") def make_table(listings): hour_idx = [i for i, item in enumerate(listings) if human_time_re.search(item)] hour_key = lambda key: hour_idx[key] + 1 idx = lambda key, key2=0: listings[key:hour_key(key2)] idx_more = lambda key=0, key2=1: listings[hour_key(key):hour_key(key2)] ls = (idx(3),) + tuple(idx_more(i, i+1) for i in range(len(hour_idx) - 1)) return ls res = make_table(listings) ls = pd.DataFrame(res) print(res) As far as I can see, it does exactly the same as your posted version.
How to make my python code less clumsy? (dealing with if/elif statements and pandas)
I wrote a function that generates a table after feeding it a list. It is part of a web scraping script I'm working on. The function works (not the best but good enough for its purpose) but is there a better way to achieve better/similar/same result? For example, here's a list I would want to turn into a table: listings = ["Search Result", "Advanced Search", "Item Trader Location Price Last Seen", "Sealed Blacksmithing Writ", "Rewards 356 Vouchers", "Level 1", "@rscus2001", "Shadowfen: Stormhold", "Ghost Sea Trading Co", "71,200", "X", "1", "=", "71,200 3 Hour ago", "Sealed Blacksmithing Writ", "Rewards 328 Vouchers", "Level 1", "@Deirdre531", "Grahtwood: Elden Root", "piston", "100,000", "X", "1", "=", "100,000 6 Hour ago", "Sealed Blacksmithing Writ", "Rewards 328 Vouchers", "Level 1", "@Araxas", "Luminous Legion", "100,000", "X", "1", "=", "100,000 9 Hour ago", "Sealed Blacksmithing Writ", "Rewards 356 Vouchers", "Level 1", "@CaffeinatedMayhem", "Craglorn: Belkarth", "Masser's Merchants", "25,000", "X", "1", "=", "25,000 13 Hour ago", "Sealed Blacksmithing Writ", "Rewards 287 Vouchers", "Level 1", "@Gregori_Weissteufel", "Wrothgar: Morkul Stronghold", "The Cutthroat Mutineers", "45,000", "X", "1", "=", "45,000 13 Hour ago", "<", "1", ">"] Result: 0 1 2 3 4 5 6 7 8 9 10 0 Sealed Blacksmithing Writ Rewards 356 Vouchers Level 1 @rscus2001 Shadowfen: Stormhold Ghost Sea Trading Co 71,200 X 1 = 71,200 3 Hour ago 1 Sealed Blacksmithing Writ Rewards 328 Vouchers Level 1 @Deirdre531 Grahtwood: Elden Root piston 100,000 X 1 = 100,000 6 Hour ago 2 Sealed Blacksmithing Writ Rewards 328 Vouchers Level 1 @Araxas Luminous Legion 100,000 X 1 = 100,000 9 Hour ago None 3 Sealed Blacksmithing Writ Rewards 356 Vouchers Level 1 @CaffeinatedMayhem Craglorn: Belkarth Masser's Merchants 25,000 X 1 = 25,000 13 Hour ago 4 Sealed Blacksmithing Writ Rewards 287 Vouchers Level 1 @Gregori_Weissteufel Wrothgar: Morkul Stronghold The Cutthroat Mutineers 45,000 X 1 = 45,000 13 Hour ago Below is my code: import re import pandas as pd pd.set_option('display.max_columns', None) pd.options.display.width=None def MakeTable(listings): hour_idx = [i for i, item in enumerate(listings) if re.search(r"([0-9,]*\s[0-9]*\s(Minute|Hour)\sago|[0-9,]*\sNow)", item)] if len(hour_idx) == 1: ls = [listings[3:hour_idx[0]+1]] elif len(hour_idx) == 2: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1]] elif len(hour_idx) == 3: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1],listings[hour_idx[1]+1:hour_idx[2]+1]] elif len(hour_idx) == 4: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1],listings[hour_idx[1]+1:hour_idx[2]+1],listings[hour_idx[2]+1:hour_idx[3]+1]] else: ls = [listings[3:hour_idx[0]+1],listings[hour_idx[0]+1:hour_idx[1]+1],listings[hour_idx[1]+1:hour_idx[2]+1],listings[hour_idx[2]+1:hour_idx[3]+1],listings[hour_idx[3]+1:hour_idx[4]+1]] df = pd.DataFrame(ls) print(df)
[ "We can use list comprehensions and zip statement:\ndef MakeTable(listings):\n\n hour_idx = [i for i, item in enumerate(listings) if re.search(r\"([0-9,]*\\s[0-9]*\\s(Minute|Hour)\\sago|[0-9,]*\\sNow)\", item)]\n \n ls = [listings[3:hour_idx[0]+1]]\n \n ls_2 = [x[y[i]+1:y[i+1]+1] for (x, y, i) in zip(listings, hour_idx, range(len(hour_idx)-1))]\n \n ls = ls.append(ls_2)\n\n df = pd.DataFrame(ls)\n \n print(df)\n\n", "I guess it's already answered - but I had a wee go for fun:\nimport re\nimport pandas as pd\n\npd.set_option('display.max_columns', None)\npd.options.display.width=None\n\nhuman_time_re = re.compile(r\"([0-9,]*\\s[0-9]*\\s(Minute|Hour)\\sago|[0-9,]*\\sNow)\")\n\n\ndef make_table(listings):\n hour_idx = [i for i, item in enumerate(listings) if human_time_re.search(item)]\n\n hour_key = lambda key: hour_idx[key] + 1\n idx = lambda key, key2=0: listings[key:hour_key(key2)]\n idx_more = lambda key=0, key2=1: listings[hour_key(key):hour_key(key2)]\n\n ls = (idx(3),) + tuple(idx_more(i, i+1) for i in range(len(hour_idx) - 1))\n return ls\n\n\nres = make_table(listings)\nls = pd.DataFrame(res)\nprint(res)\n\nAs far as I can see, it does exactly the same as your posted version.\n" ]
[ 4, 1 ]
[]
[]
[ "if_statement", "pandas", "python" ]
stackoverflow_0074589824_if_statement_pandas_python.txt
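An equivalent sketch of the slicing logic that avoids both the if/elif ladder and the indexing gymnastics; listings and the hour_idx computation are taken unchanged from the question.

import pandas as pd

# hour_idx, listings: exactly as computed in the question
starts = [3] + [i + 1 for i in hour_idx[:-1]]
ends = [i + 1 for i in hour_idx]
ls = [listings[a:b] for a, b in zip(starts, ends)]
df = pd.DataFrame(ls)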