query
string · lengths 9–60
language
string · 1 class
code
string · lengths 105–25.7k
url
string · lengths 91–217
export to excel
python
def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
    """Creates an Excel file containing data returned by the Analytics API

    Args:
        data: Analytics API data as a list of dicts
        output_file_name: File name for output Excel file (use .xlsx extension).
    """
    workbook = create_excel_workbook(data, result_info_key, identifier_keys)
    workbook.save(output_file_name)
    print('Saved Excel file to {}'.format(output_file_name))
https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/excel/__init__.py#L18-L28
export to excel
python
def export_envar(self, key, val):
    """Export an environment variable."""
    line = "export " + key + "=" + str(val)
    self._add(line)
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/launcher.py#L88-L91
export to excel
python
def del_export(exports='/etc/exports', path=None):
    '''
    Remove an export

    CLI Example:

    .. code-block:: bash

        salt '*' nfs.del_export /media/storage
    '''
    edict = list_exports(exports)
    del edict[path]
    _write_exports(exports, edict)

    return edict
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nfs3.py#L69-L82
export to excel
python
def env_export(prefix, exported, env):
    """
    Define the list of 'exported' variables with 'prefix' with values from 'env'
    """

    for exp in exported:
        ENV["_".join([prefix, exp])] = env[exp]
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/__init__.py#L136-L142
export to excel
python
def from_export(cls, endpoint):
    # type: (ExportEndpoint) -> EndpointDescription
    """
    Converts an ExportEndpoint bean to an EndpointDescription

    :param endpoint: An ExportEndpoint bean
    :return: An EndpointDescription bean
    """
    assert isinstance(endpoint, ExportEndpoint)

    # Service properties
    properties = endpoint.get_properties()

    # Set import keys
    properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid
    properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations
    properties[
        pelix.remote.PROP_EXPORTED_INTERFACES
    ] = endpoint.specifications

    # Remove export keys
    for key in (
        pelix.remote.PROP_EXPORTED_CONFIGS,
        pelix.remote.PROP_EXPORTED_INTERFACES,
        pelix.remote.PROP_EXPORTED_INTENTS,
        pelix.remote.PROP_EXPORTED_INTENTS_EXTRA,
    ):
        try:
            del properties[key]
        except KeyError:
            pass

    # Other information
    properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name
    properties[
        pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID
    ] = endpoint.framework

    return EndpointDescription(None, properties)
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/remote/beans.py#L655-L693
export to excel
python
def make_export(self, exports):
    """Populate library exported function data."""
    sql = 'drop table if exists export'
    logging.debug(sql)
    self.cursor.execute(sql)
    sql = 'create table if not exists export ' \
          '(func text unique, module text)'
    logging.debug(sql)
    self.cursor.execute(sql)
    for module in exports:
        logging.debug(_('insering exports from %s'), module)
        sql = 'insert into export values (?, ?)'
        for func in exports[module]:
            if func:
                try:
                    self.cursor.execute(sql, (func, module))
                except sqlite3.IntegrityError:
                    pass
    self.con.commit()
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L267-L285
export to excel
python
def database_to_excel(engine, excel_file_path):
    """Export database to excel.

    :param engine:
    :param excel_file_path:
    """
    from sqlalchemy import MetaData, select

    metadata = MetaData()
    metadata.reflect(engine)
    writer = pd.ExcelWriter(excel_file_path)
    for table in metadata.tables.values():
        sql = select([table])
        df = pd.read_sql(sql, engine)
        df.to_excel(writer, table.name, index=False)
    writer.save()
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/sql_io.py#L100-L117
export to excel
python
def add_export(exports='/etc/exports', path=None, hosts=None, options=None):
    '''
    Add an export

    CLI Example:

    .. code-block:: bash

        salt '*' nfs3.add_export path='/srv/test' hosts='127.0.0.1' options=['rw']
    '''
    if options is None:
        options = []
    if not isinstance(hosts, six.string_types):
        # Lists, etc would silently mangle /etc/exports
        raise TypeError('hosts argument must be a string')

    edict = list_exports(exports)
    if path not in edict:
        edict[path] = []
    new = {'hosts': hosts, 'options': options}
    edict[path].append(new)
    _write_exports(exports, edict)

    return new
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nfs3.py#L85-L107
export to excel
python
def simple_export2xlsx(filename, titles, qs, func_data):
    """ export as excel
    filename: file name
    titles: title for this table
    qs: queryset to export
    func_data: a function to format object to list. ex: `lambda o: [o.pk, o.name]`
    """
    output = BytesIO()
    wb = xlwt.Workbook(output)
    ws = wb.add_worksheet(filename)

    header_fmt = wb.add_format()
    header_fmt.set_bg_color('#C4D89E')

    row_idx = 0
    row_idx = xlsw_write_row(ws, row_idx, titles, header_fmt)
    for o in qs:
        row_idx = xlsw_write_row(ws, row_idx, func_data(o))

    fn = '%s-%s.xlsx' % (filename, datetime.now())
    return export_xlsx(wb, output, fn)
https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/xlsxutils.py#L48-L66
export to excel
python
def usufyToTextExport(d, fPath=None):
    """
    Workaround to export to a .txt file or to show the information.

    Args:
    -----
        d: Data to export.
        fPath: File path for the output file. If None was provided, it will
            assume that it has to print it.

    Returns:
    --------
        unicode: It sometimes returns a unicode representation of the Sheet
            received.
    """
    # Manual check...
    if d == []:
        return "+------------------+\n| No data found... |\n+------------------+"

    import pyexcel as pe
    import pyexcel.ext.text as text

    if fPath == None:
        isTerminal = True
    else:
        isTerminal = False

    try:
        oldData = get_data(fPath)
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}

    # Generating the new tabular data
    tabularData = _generateTabularData(d, {"OSRFramework": [[]]}, True, canUnicode=False)

    # The tabular data contains a dict representing the whole book and we need only the sheet!!
    sheet = pe.Sheet(tabularData["OSRFramework"])
    sheet.name = "Profiles recovered (" + getCurrentStrDatetime() + ")."

    # Defining the headers
    sheet.name_columns_by_row(0)
    text.TABLEFMT = "grid"
    try:
        with open(fPath, "w") as oF:
            oF.write(str(sheet))
    except Exception as e:
        # If a fPath was not provided... We will only print the info:
        return unicode(sheet)
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L294-L342
export to excel
python
def inkscape_export(input_file, output_file, export_flag="-A", dpi=90, inkscape_binpath=None):
    """ Call Inkscape to export the input_file to output_file using the
    specific export argument flag for the output file type.

    Parameters
    ----------
    input_file: str
        Path to the input file

    output_file: str
        Path to the output file

    export_flag: str
        Inkscape CLI flag to indicate the type of the output file

    Returns
    -------
    return_value
        Command call return value
    """
    if not os.path.exists(input_file):
        log.error('File {} not found.'.format(input_file))
        raise IOError((0, 'File not found.', input_file))

    if '=' not in export_flag:
        export_flag += ' '

    arg_strings = []
    arg_strings += ['--without-gui']
    arg_strings += ['--export-text-to-path']
    arg_strings += ['{}"{}"'.format(export_flag, output_file)]
    arg_strings += ['--export-dpi={}'.format(dpi)]
    arg_strings += ['"{}"'.format(input_file)]

    return call_inkscape(arg_strings, inkscape_binpath=inkscape_binpath)
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/inkscape.py#L48-L84
export to excel
python
def _write_export(export, file_obj=None):
    """
    Write a string to a file.
    If file_obj isn't specified, return the string

    Parameters
    ---------
    export: a string of the export data
    file_obj: a file-like object or a filename
    """

    if file_obj is None:
        return export

    if hasattr(file_obj, 'write'):
        out_file = file_obj
    else:
        out_file = open(file_obj, 'wb')

    try:
        out_file.write(export)
    except TypeError:
        out_file.write(export.encode('utf-8'))

    out_file.close()
    return export
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/exchange/export.py#L50-L75
export to excel
python
def csv_export(self, csv_dest, fieldnames=None, encoding="UTF-8"):
    """Exports the contents of the table to a CSV-formatted file.
       @param csv_dest: CSV file - if a string is given, the file with that name will be
           opened, written, and closed; if a file object is given, then that object
           will be written as-is, and left for the caller to be closed.
       @type csv_dest: string or file
       @param fieldnames: attribute names to be exported; can be given as a single
           string with space-delimited names, or as a list of attribute names
       @type fieldnames: list of strings
       @param encoding: string (default="UTF-8"); if csv_dest is provided as a string
           representing an output filename, an encoding argument can be provided
           (Python 3 only)
       @type encoding: string
    """
    close_on_exit = False
    if isinstance(csv_dest, basestring):
        if PY_3:
            csv_dest = open(csv_dest, 'w', newline='', encoding=encoding)
        else:
            csv_dest = open(csv_dest, 'wb')
        close_on_exit = True
    try:
        if fieldnames is None:
            fieldnames = list(_object_attrnames(self.obs[0]))
        if isinstance(fieldnames, basestring):
            fieldnames = fieldnames.split()

        csv_dest.write(','.join(fieldnames) + NL)
        csvout = csv.DictWriter(csv_dest, fieldnames, extrasaction='ignore',
                                lineterminator=NL)
        if hasattr(self.obs[0], "__dict__"):
            csvout.writerows(o.__dict__ for o in self.obs)
        else:
            do_all(csvout.writerow(ODict(starmap(lambda obj, fld: (fld, getattr(obj, fld)),
                                                 zip(repeat(o), fieldnames)))) for o in self.obs)
    finally:
        if close_on_exit:
            csv_dest.close()
https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1187-L1222
export to excel
python
def export_xlsx(wb, output, fn):
    """ export as excel
    wb:
    output:
    fn: file name
    """
    wb.close()
    output.seek(0)
    response = HttpResponse(output.read(),
                            content_type="application/vnd.ms-excel")
    cd = codecs.encode('attachment;filename=%s' % fn, 'utf-8')
    response['Content-Disposition'] = cd
    return response
https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/xlsxutils.py#L20-L32
export to excel
python
def write_exports(self, exports):
    """
    Write a dictionary of exports to a file in .ini format.
    :param exports: A dictionary of exports, mapping an export category to
                    a list of :class:`ExportEntry` instances describing the
                    individual export entries.
    """
    rf = self.get_distinfo_file(EXPORTS_FILENAME)
    with open(rf, 'w') as f:
        write_exports(exports, f)
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L632-L641
export to excel
python
def export(self, file_to_export, xformat='csv'):
    """Export epochwise annotations to csv file.

    Parameters
    ----------
    file_to_export : path to file
        file to write to
    """
    if 'csv' == xformat:
        with open(file_to_export, 'w', newline='') as f:
            csv_file = writer(f)
            csv_file.writerow(['Wonambi v{}'.format(__version__)])
            csv_file.writerow(('clock start time', 'start', 'end',
                               'stage'))

            for epoch in self.epochs:
                epoch_time = (self.start_time +
                              timedelta(seconds=epoch['start']))
                csv_file.writerow((epoch_time.strftime('%H:%M:%S'),
                                   epoch['start'],
                                   epoch['end'],
                                   epoch['stage']))

    if 'remlogic' in xformat:
        columns = 'Time [hh:mm:ss]\tEvent\tDuration[s]\n'
        if 'remlogic_fr' == xformat:
            columns = 'Heure [hh:mm:ss]\tEvénement\tDurée[s]\n'

        patient_id = splitext(basename(self.dataset))[0]
        rec_date = self.start_time.strftime('%d/%m/%Y')

        stkey = {v: k for k, v in REMLOGIC_STAGE_KEY.items()}
        stkey['Artefact'] = 'SLEEP-UNSCORED'
        stkey['Unknown'] = 'SLEEP-UNSCORED'
        stkey['Movement'] = 'SLEEP-UNSCORED'

        with open(file_to_export, 'w') as f:
            f.write('RemLogic Event Export\n')
            f.write('Patient:\t' + patient_id + '\n')
            f.write('Patient ID:\t' + patient_id + '\n')
            f.write('Recording Date:\t' + rec_date + '\n')
            f.write('\n')
            f.write('Events Included:\n')
            for i in sorted(set([stkey[x['stage']] for x in self.epochs])):
                f.write(i + '\n')
            f.write('\n')
            f.write(columns)

            for epoch in self.epochs:
                epoch_time = (self.start_time +
                              timedelta(seconds=epoch['start']))
                f.write((epoch_time.strftime('%Y-%m-%dT%H:%M:%S.000000') +
                         '\t' + stkey[epoch['stage']] +
                         '\t' + str(self.epoch_length) + '\n'))
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/annotations.py#L1440-L1499
export to excel
python
def db_export(schema, uuid, object_filter, export_format, filename, pretty,
              all_schemata, omit):
    """Export stored objects

    Warning! This functionality is work in progress and you may destroy live data by using it!
    Be very careful when using the export/import functionality!"""

    internal_backup(schema, uuid, object_filter, export_format, filename,
                    pretty, all_schemata, omit)
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/tool/backup.py#L45-L51
export to excel
python
def _export_figure(self, filepath, data, format):
    """Export of single cell that contains a matplotlib figure

    Parameters
    ----------
    filepath: String
    \tPath of export file
    data: Matplotlib Figure
    \tMatplotlib figure that is eported
    format: String in ["png", "pdf", "ps", "eps", "svg"]

    """
    formats = ["svg", "eps", "ps", "pdf", "png"]
    assert format in formats

    data = fig2x(data, format)

    try:
        outfile = open(filepath, "wb")
        outfile.write(data)

    except IOError, err:
        msg = _("The file {filepath} could not be fully written\n \n"
                "Error message:\n{msg}")
        msg = msg.format(filepath=filepath, msg=err)
        short_msg = _('Error writing SVG file')
        self.main_window.interfaces.display_warning(msg, short_msg)

    finally:
        outfile.close()
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L187-L217
export to excel
python
def to_excel(self, *args):
    """
    Dump all the data to excel, fname and path can be passed as args
    """
    path = os.getcwd()
    fname = self.fname.replace(".ppl", "_ppl") + ".xlsx"
    if len(args) > 0 and args[0] != "":
        path = args[0]
        if os.path.exists(path) == False:
            os.mkdir(path)
    xl_file = pd.ExcelWriter(path + os.sep + fname)
    for idx in self.filter_data(""):
        self.extract(idx)
    labels = list(self.filter_data("").values())
    for prof in self.data:
        data_df = pd.DataFrame()
        data_df["X"] = self.data[prof][0]
        for timestep, data in zip(self.time, self.data[prof][1]):
            data_df[timestep] = data
        myvar = labels[prof-1].split(" ")[0]
        br_label = labels[prof-1].split("\'")[5]
        unit = labels[prof-1].split("\'")[7].replace("/", "-")
        mylabel = "{} - {} - {}".format(myvar, br_label, unit)
        data_df.to_excel(xl_file, sheet_name=mylabel)
    xl_file.save()
https://github.com/gpagliuca/pyfas/blob/5daa1199bd124d315d02bef0ad3888a8f58355b2/build/lib/pyfas/ppl.py#L127-L151
export to excel
python
def export_to_xls(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
    """Export the rows.Table to XLS file and return the saved file."""

    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet(sheet_name)

    prepared_table = prepare_to_export(table, *args, **kwargs)

    field_names = next(prepared_table)
    for column_index, field_name in enumerate(field_names):
        sheet.write(0, column_index, field_name)

    _convert_row = _python_to_xls([table.fields.get(field) for field in field_names])
    for row_index, row in enumerate(prepared_table, start=1):
        for column_index, (value, data) in enumerate(_convert_row(row)):
            sheet.write(row_index, column_index, value, **data)

    return_result = False
    if filename_or_fobj is None:
        filename_or_fobj = BytesIO()
        return_result = True

    source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xls")
    workbook.save(source.fobj)
    source.fobj.flush()

    if return_result:
        source.fobj.seek(0)
        result = source.fobj.read()
    else:
        result = source.fobj

    if source.should_close:
        source.fobj.close()

    return result
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L215-L250
export to excel
python
def export_to_xlsx(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
    """Export the rows.Table to XLSX file and return the saved file."""

    workbook = Workbook()
    sheet = workbook.active
    sheet.title = sheet_name

    prepared_table = prepare_to_export(table, *args, **kwargs)

    # Write header
    field_names = next(prepared_table)
    for col_index, field_name in enumerate(field_names):
        cell = sheet.cell(row=1, column=col_index + 1)
        cell.value = field_name

    # Write sheet rows
    _convert_row = _python_to_cell(list(map(table.fields.get, field_names)))
    for row_index, row in enumerate(prepared_table, start=1):
        for col_index, (value, number_format) in enumerate(_convert_row(row)):
            cell = sheet.cell(row=row_index + 1, column=col_index + 1)
            cell.value = value
            if number_format is not None:
                cell.number_format = number_format

    return_result = False
    if filename_or_fobj is None:
        filename_or_fobj = BytesIO()
        return_result = True

    source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xlsx")
    workbook.save(source.fobj)
    source.fobj.flush()

    if return_result:
        source.fobj.seek(0)
        result = source.fobj.read()
    else:
        result = source.fobj

    if source.should_close:
        source.fobj.close()

    return result
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L152-L193
export to excel
python
def export_file(self, record, field, event=None, return_format='json'):
    """
    Export the contents of a file stored for a particular record

    Notes
    -----
    Unlike other export methods, this works on a single record.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name containing the file to be exported.
    event: str
        for longitudinal projects, specify the unique event here
    return_format: ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    content : bytes
        content of the file
    content_map : dict
        content-type dictionary
    """
    self._check_file_field(field)
    # load up payload
    pl = self.__basepl(content='file', format=return_format)
    # there's no format field in this call
    del pl['format']
    pl['returnFormat'] = return_format
    pl['action'] = 'export'
    pl['field'] = field
    pl['record'] = record
    if event:
        pl['event'] = event
    content, headers = self._call_api(pl, 'exp_file')
    # REDCap adds some useful things in content-type
    if 'content-type' in headers:
        splat = [kv.strip() for kv in headers['content-type'].split(';')]
        kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', ''))
              for kv in splat if '=' in kv]
        content_map = dict(kv)
    else:
        content_map = {}
    return content, content_map
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L503-L549
export to excel
python
def cmd_export_all(*args):
    """
    Arguments: <output folder> [-- [--quality <0-100>] [--page_format <page_format>]]

    Export all documents as PDF files.

    Default quality is 50.
    Default page format is A4.

    Possible JSON replies:
        --
        { "status": "error", "exception": "yyy",
          "reason": "xxxx", "args": "(xxxx, )" }
        --
        {
            "status": "ok",
            "docids": [
                ["xxx", "file:///tmp/xxx.pdf"],
                ["yyy", "file:///tmp/yyy.pdf"],
                ["zzz", "file:///tmp/zzz.pdf"]
            ],
            "output_dir": "file:///tmp",
        }
    """
    (output_dir, quality, page_format) = _get_export_params(args)

    dsearch = get_docsearch()

    try:
        os.mkdir(output_dir)
    except FileExistsError:  # NOQA (Python 3.x only)
        pass

    out = []

    docs = [d for d in dsearch.docs]
    docs.sort(key=lambda doc: doc.docid)
    output_dir = FS.safe(output_dir)
    for (doc_idx, doc) in enumerate(docs):
        output_pdf = FS.join(output_dir, doc.docid + ".pdf")

        exporter = doc.build_exporter(file_format="pdf")
        if exporter.can_change_quality:
            exporter.set_quality(quality)
        if exporter.can_select_format:
            exporter.set_page_format(page_format)
        verbose(
            "[{}/{}] Exporting {} --> {} ...".format(
                doc_idx + 1, len(docs), doc.docid, output_pdf
            )
        )
        exporter.save(output_pdf)
        out.append((doc.docid, output_pdf))

        doc = None
        gc.collect()

    verbose("Done")
    reply({
        "docids": out,
        "output_dir": output_dir,
    })
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/shell.py#L210-L273
export to excel
python
def export(app, local):
    """Export the data."""
    print_header()

    log("Preparing to export the data...")

    id = str(app)

    subdata_path = os.path.join("data", id, "data")

    # Create the data package
    os.makedirs(subdata_path)

    # Copy the experiment code into a code/ subdirectory
    try:
        shutil.copyfile(
            os.path.join("snapshots", id + "-code.zip"),
            os.path.join("data", id, id + "-code.zip")
        )
    except:
        pass

    # Copy in the DATA readme.
    # open(os.path.join(id, "README.txt"), "a").close()

    # Save the experiment id.
    with open(os.path.join("data", id, "experiment_id.md"), "a+") as file:
        file.write(id)

    if not local:
        # Export the logs
        subprocess.call(
            "heroku logs " +
            "-n 10000 > " + os.path.join("data", id, "server_logs.md") +
            " --app " + id,
            shell=True)

        dump_path = dump_database(id)

        subprocess.call(
            "pg_restore --verbose --clean -d wallace " +
            os.path.join("data", id) + "/data.dump",
            shell=True)

    all_tables = [
        "node",
        "network",
        "vector",
        "info",
        "transformation",
        "transmission",
        "participant",
        "notification",
        "question"
    ]

    for table in all_tables:
        subprocess.call(
            "psql -d wallace --command=\"\\copy " + table + " to \'" +
            os.path.join(subdata_path, table) + ".csv\' csv header\"",
            shell=True)

    if not local:
        os.remove(dump_path)

    log("Zipping up the package...")
    shutil.make_archive(
        os.path.join("data", id + "-data"),
        "zip",
        os.path.join("data", id)
    )

    shutil.rmtree(os.path.join("data", id))

    log("Done. Data available in " + str(id) + ".zip")
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L724-L799
export to excel
python
def export(self, file_obj=None, file_type=None, **kwargs):
    """
    Export the path to a file object or return data.

    Parameters
    ---------------
    file_obj : None, str, or file object
        File object or string to export to
    file_type : None or str
        Type of file: dxf, dict, svg

    Returns
    ---------------
    exported : bytes or str
        Exported as specified type
    """
    return export_path(self,
                       file_type=file_type,
                       file_obj=file_obj,
                       **kwargs)
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L665-L687
export to excel
python
def export_file(self, filepath, __filter, data, preview_data=None):
    """Export data for other applications

    Parameters
    ----------
    filepath: String
    \tPath of export file
    __filter: String
    \tImport filter
    data: Object
    \tCode array result object slice, i. e. one object or iterable of
    \tsuch objects

    """
    if __filter.startswith("cell_"):
        self._export_figure(filepath, data, __filter[5:])
    elif __filter == "csv":
        self._export_csv(filepath, data, preview_data=preview_data)
    elif __filter in ["pdf", "svg"]:
        self.export_cairo(filepath, __filter)
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L219-L241
export to excel
python
def export_env_string(name, value):
    # type: (AEnvName, AEnvValue) -> ADefine
    """Exports an environment variable with the given value"""
    os.environ[name] = value
    return Define(name, value)
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/builtin/defines.py#L97-L101
export to excel
python
def to_excel(self, filepath: str, title: str):
    """Write the main dataframe to an Excell file

    :param filepath: path of the Excel file to write
    :type filepath: str
    :param title: Title of the stylesheet
    :type title: str

    :example: ``ds.to_excel_("./myfile.xlsx", "My data")``
    """
    try:
        self.start("Saving data to Excell file: " + filepath + " ...")
        writer = pytablewriter.ExcelXlsxTableWriter()
        writer.from_dataframe(self.df)
        writer.open(filepath)
        writer.make_worksheet(title)
        writer.write_table()
        writer.close()
        self.end("File exported to", filepath)
    except Exception as e:
        self.err(e, "Can not convert data to Excel")
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/export.py#L158-L178
export to excel
python
def submit_export(cls, file, volume, location, properties=None,
                  overwrite=False, copy_only=False, api=None):
    """
    Submit new export job.
    :param file: File to be exported.
    :param volume: Volume identifier.
    :param location: Volume location.
    :param properties: Properties dictionary.
    :param overwrite: If true it will overwrite file if exists
    :param copy_only: If true files are kept on SevenBridges bucket.
    :param api: Api Instance.
    :return: Export object.
    """
    data = {}
    params = {}

    volume = Transform.to_volume(volume)
    file = Transform.to_file(file)
    destination = {
        'volume': volume,
        'location': location
    }
    source = {
        'file': file
    }

    if properties:
        data['properties'] = properties

    data['source'] = source
    data['destination'] = destination
    data['overwrite'] = overwrite

    extra = {
        'resource': cls.__name__,
        'query': data
    }
    logger.info('Submitting export', extra=extra)

    api = api if api else cls._API
    if copy_only:
        params['copy_only'] = True
        _export = api.post(
            cls._URL['query'], data=data, params=params).json()
    else:
        _export = api.post(
            cls._URL['query'], data=data).json()

    return Export(api=api, **_export)
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/storage_export.py#L74-L122
export to excel
python
def cmd_export_doc(*args):
    """
    Arguments: <document id> <output PDF file path> [-- [--quality <0-100>] [--page_format <page_format>]]

    Export one document as a PDF file.

    Default quality is 50.
    Default page format is A4.

    Possible JSON replies:
        --
        { "status": "error", "exception": "yyy",
          "reason": "xxxx", "args": "(xxxx, )" }
        --
        {
            "status": "ok",
            "docid": "xxxx",
            "output_file": "file:///tmp/xxxx.pdf",
            "quality": 50,
            "page_format": "A4",
        }
    """
    (docid, output_pdf, quality, page_format) = _get_export_params(args)

    dsearch = get_docsearch()
    doc = dsearch.get(docid)

    exporter = doc.build_exporter(file_format="pdf")
    if exporter.can_change_quality:
        exporter.set_quality(quality)
    if exporter.can_select_format:
        exporter.set_page_format(page_format)
    verbose("Exporting {} --> {} ...".format(docid, output_pdf))
    output_pdf = FS.safe(output_pdf)
    exporter.save(output_pdf)
    verbose("Done")

    r = {
        "docid": doc.docid,
        "output_file": output_pdf,
    }
    if exporter.can_change_quality:
        r['quality'] = quality
    if exporter.can_select_format:
        r['page_format'] = page_format
    reply(r)
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/shell.py#L276-L324
export to excel
python
def export(self, export_type, what):
    """
    :param export_type: 'csv', 'json', ...
    :param what: string describing what to export
    :returns: list of exported file names
    """
    aids, arefs, spec, key = self.parse(what)
    if key.startswith('rlz'):
        curves = self.export_curves_rlzs(aids, key)
    else:  # statistical exports
        curves = self.export_curves_stats(aids, key)
    return getattr(self, 'export_' + export_type)(spec, arefs, curves)
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/loss_curves.py#L134-L145
export to excel
python
def save_excel(self, fd):
    """ Saves the case as an Excel spreadsheet.
    """
    from pylon.io.excel import ExcelWriter
    ExcelWriter(self).write(fd)
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/case.py#L981-L985
export to excel
python
def exports(self):
    """
    Return the information exported by this distribution.
    :return: A dictionary of exports, mapping an export category to a dict
             of :class:`ExportEntry` instances describing the individual
             export entries, and keyed by name.
    """
    result = {}
    r = self.get_distinfo_resource(EXPORTS_FILENAME)
    if r:
        result = self.read_exports()
    return result
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L604-L615
export to excel
python
def export_as_file(self, file_path, cv_source):
    """Export the ensemble as a single Python file and saves it to `file_path`.

    This is EXPERIMENTAL as putting different modules together would probably wreak havoc
    especially on modules that make heavy use of global variables.

    Args:
        file_path (str, unicode): Absolute/local path of place to save file in

        cv_source (str, unicode): String containing actual code for base learner
            cross-validation used to generate secondary meta-features.
    """
    if os.path.exists(file_path):
        raise exceptions.UserError('{} already exists'.format(file_path))

    with open(file_path, 'wb') as f:
        f.write(self.export_as_code(cv_source).encode('utf8'))
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L488-L504
export to excel
python
def output(self):
    """
    Create full path for excel file to save parsed translations strings.

    Returns:
        unicode: full path for excel file to save parsed translations strings.
    """
    path, src = os.path.split(self.src)
    src, ext = os.path.splitext(src)

    return os.path.join(path, "{src}.xls".format(**{"src": src, }))
https://github.com/vint21h/django-po2xls/blob/e76d26cfae6d9e5ca95ff053d05a00f875875019/po2xls/utils.py#L66-L76
export to excel
python
def export_output(dskey, calc_id, datadir, target_dir, export_types):
    """
    Simple UI wrapper around
    :func:`openquake.engine.export.core.export_from_db` yielding a summary
    of files exported, if any.
    """
    outkey = get_outkey(dskey, export_types.split(','))
    if export_types and not outkey:
        yield 'There is no exporter for %s, %s' % (dskey, export_types)
        return
    yield from export_from_db(outkey, calc_id, datadir, target_dir)
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/engine/export/core.py#L115-L125
export to excel
python
def to_excel(self, workbook=None, worksheet=None, xl_app=None, clear=True, rename=True,
             resize_columns=True):
    """
    Writes worksheet to an Excel Worksheet COM object.
    Requires :py:module:`pywin32` to be installed.

    :param workbook: xltable.Workbook this sheet belongs to.
    :param worksheet: Excel COM Worksheet instance to write to.
    :param xl_app: Excel COM Excel Application to write to.
    :param bool clear: If a worksheet is provided, clear worksheet before writing.
    :param bool rename: If a worksheet is provided, rename self to match the worksheet.
    :param bool resize_columns: Resize sheet columns after writing.
    """
    from win32com.client import Dispatch, constants, gencache

    if xl_app is None:
        if worksheet is not None:
            xl_app = worksheet.Parent.Application
        elif workbook is not None and hasattr(workbook.workbook_obj, "Application"):
            xl_app = workbook.workbook_obj.Application
        else:
            xl_app = Dispatch("Excel.Application")

    xl = xl_app = gencache.EnsureDispatch(xl_app)

    # Create a workbook if there isn't one already
    if not workbook:
        from .workbook import Workbook
        workbook = Workbook(worksheets=[self])

    if worksheet is None:
        # If there's no worksheet then call Workbook.to_excel which will create one
        return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns)

    if rename:
        self.__name = worksheet.Name

    # set manual calculation and turn off screen updating while we update the cells
    calculation = xl.Calculation
    screen_updating = xl.ScreenUpdating
    xl.Calculation = constants.xlCalculationManual
    xl.ScreenUpdating = False

    try:
        # clear the worksheet and reset the styles
        if clear:
            worksheet.Cells.ClearContents()
            worksheet.Cells.Font.Bold = False
            worksheet.Cells.Font.Size = 11
            worksheet.Cells.Font.Color = 0x000000
            worksheet.Cells.Interior.ColorIndex = 0
            worksheet.Cells.NumberFormat = "General"

        # get any array formula tables
        array_formula_tables = []
        for table, (row, col) in self.__tables.values():
            if isinstance(table, ArrayFormula):
                array_formula_tables.append((row, col, row + table.height, col + table.width))

        def _is_in_array_formula_table(row, col):
            """returns True if this formula cell is part of an array formula table"""
            for top, left, bottom, right in array_formula_tables:
                if bottom >= row >= top and left <= col <= right:
                    return True
            return False

        origin = worksheet.Range("A1")
        xl_cell = origin
        for r, row in enumerate(self.iterrows(workbook)):
            row = _to_pywintypes(row)

            # set the value and formulae to the excel range (it's much quicker to
            # write a row at a time and update the formula than it is it do it
            # cell by cell)
            if clear:
                xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row)))
                xl_row.Value = row
            else:
                for c, value in enumerate(row):
                    if value is not None:
                        xl_cell.Offset(1, 1 + c).Value = value

            for c, value in enumerate(row):
                if isinstance(value, str):
                    if value.startswith("="):
                        formula_value = self.__formula_values.get((r, c), 0)
                        xl_cell.Offset(1, 1 + c).Value = formula_value
                        xl_cell.Offset(1, 1 + c).Formula = value
                    elif value.startswith("{=") \
                            and not _is_in_array_formula_table(r, c):
                        formula_value = self.__formula_values.get((r, c), 0)
                        xl_cell.Offset(1, 1 + c).Value = formula_value
                        xl_cell.Offset(1, 1 + c).FormulaArray = value

            # move to the next row
            xl_cell = xl_cell.Offset(2, 1)

        # set any array formulas
        for table, (row, col) in self.__tables.values():
            if isinstance(table, ArrayFormula):
                data = table.get_data(workbook, row, col)
                height, width = data.shape
                upper_left = origin.Offset(row + 1, col + 1)
                lower_right = origin.Offset(row + height, col + width)
                xl_range = worksheet.Range(upper_left, lower_right)
                xl_range.FormulaArray = table.formula.get_formula(workbook, row, col)

        # set any formatting
        for (row, col), style in self._get_all_styles().items():
            r = origin.Offset(1 + row, 1 + col)
            if style.bold:
                r.Font.Bold = True
            if style.excel_number_format is not None:
                r.NumberFormat = style.excel_number_format
            if style.size is not None:
                r.Font.Size = style.size
            if style.text_color is not None:
                r.Font.Color = _to_bgr(style.text_color)
            if style.bg_color is not None:
                r.Interior.Color = _to_bgr(style.bg_color)
            if style.text_wrap or style.border:
                raise Exception("text wrap and border not implemented")

        # add any charts
        for chart, (row, col) in self.__charts:
            top_left = origin.Offset(1 + row, 1 + col)
            xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart
            xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype)
            if chart.title:
                xl_chart.ChartTitle = chart.title

            for series in chart.iter_series(self, row, col):
                xl_series = xl_chart.SeriesCollection().NewSeries()
                xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("="))
                if "categories" in series:
                    xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("="))
                if "name" in series:
                    xl_series.Name = series["name"]
    finally:
        xl.ScreenUpdating = screen_updating
        xl.Calculation = calculation

    if resize_columns:
        try:
            worksheet.Cells.EntireColumn.AutoFit()
        except:
            pass
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L280-L431
export to excel
python
def write_export_node_to_file(file_object, export_elements):
    """
    Exporting process to CSV file

    :param file_object: object of File class,
    :param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
        will be used in exported CSV document.
    """
    for export_element in export_elements:
        # Order,Activity,Condition,Who,Subprocess,Terminated
        file_object.write(
            export_element["Order"] + "," + export_element["Activity"] + "," + export_element["Condition"] + "," +
            export_element["Who"] + "," + export_element["Subprocess"] + "," + export_element["Terminated"] + "\n")
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_process_csv_export.py#L287-L299
export to excel
python
def get_export(request):
    """Retrieve an export file."""
    settings = get_current_registry().settings
    exports_dirs = settings['exports-directories'].split()
    args = request.matchdict
    ident_hash, type = args['ident_hash'], args['type']
    id, version = split_ident_hash(ident_hash)

    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            try:
                results = get_export_files(cursor, id, version, [type],
                                           exports_dirs, read_file=True)
                if not results:
                    raise httpexceptions.HTTPNotFound()
                filename, mimetype, size, modtime, state, file_content \
                    = results[0]
            except ExportError as e:
                logger.debug(str(e))
                raise httpexceptions.HTTPNotFound()

    if state == 'missing':
        raise httpexceptions.HTTPNotFound()

    encoded_filename = urllib.quote(filename.encode('utf-8'))
    resp = request.response
    resp.status = "200 OK"
    resp.content_type = mimetype
    # Need both filename and filename* below for various browsers
    # See: https://fastmail.blog/2011/06/24/download-non-english-filenames/
    resp.content_disposition = "attachment; filename={fname};" \
                               " filename*=UTF-8''{fname}".format(
                                   fname=encoded_filename)
    resp.body = file_content
    # Remove version and extension from filename, to recover title slug
    slug_title = '-'.join(encoded_filename.split('-')[:-1])
    resp.headerlist.append(
        ('Link', '<https://{}/contents/{}/{}> ;rel="Canonical"'.format(
            request.host, id, slug_title)))
    return resp
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/exports.py#L50-L89
export to excel
python
def get_export(
    self,
    export_type,
    generate=False,
    wait=False,
    wait_timeout=None,
):
    """
    Downloads a data export over HTTP. Returns a `Requests Response
    <http://docs.python-requests.org/en/master/api/#requests.Response>`_
    object containing the content of the export.

    - **export_type** is a string specifying which type of export should be
      downloaded.
    - **generate** is a boolean specifying whether to generate a new export
      and wait for it to be ready, or to just download the latest export.
    - **wait** is a boolean specifying whether to wait for an in-progress
      export to finish, if there is one. Has no effect if ``generate`` is
      ``True``.
    - **wait_timeout** is the number of seconds to wait if ``wait`` is
      ``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
      is ``True``.

    The returned :py:class:`.Response` object has two additional attributes
    as a convenience for working with the CSV content; **csv_reader** and
    **csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
    and :py:class:`csv.DictReader` respectively. These wrappers take care
    of correctly decoding the export content for the CSV parser.

    Example::

        classification_export = Project(1234).get_export('classifications')
        for row in classification_export.csv_reader():
            print(row)

        classification_export = Project(1234).get_export('classifications')
        for row in classification_export.csv_dictreader():
            print(row)
    """
    if generate:
        self.generate_export(export_type)

    if generate or wait:
        export = self.wait_export(export_type, wait_timeout)
    else:
        export = self.describe_export(export_type)

    if export_type in TALK_EXPORT_TYPES:
        media_url = export['data_requests'][0]['url']
    else:
        media_url = export['media'][0]['src']

    response = requests.get(media_url, stream=True)
    response.csv_reader = functools.partial(
        csv.reader,
        response.iter_lines(decode_unicode=True),
    )
    response.csv_dictreader = functools.partial(
        csv.DictReader,
        response.iter_lines(decode_unicode=True),
    )
    return response
https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/exportable.py#L30-L92
export to excel
python
def exportEPublication(epub):
    """
    Send `epub` :class:`.EPublication` object to Aleph, where it will be
    processed by librarians.

    Args:
        epub (EPublication): structure for export

    Warning:
        The export function is expecting some of the EPublication properties
        to be filled with non-blank data.

        Specifically:

        - :attr:`.EPublication.ISBN`
        - :attr:`.EPublication.nazev`
        - :attr:`.EPublication.mistoVydani`
        - :attr:`.EPublication.datumVydani`
        - :attr:`.EPublication.poradiVydani`
        - :attr:`.EPublication.zpracovatelZaznamu`
        - :attr:`.EPublication.vazba`
        - :attr:`.EPublication.format`
        - :attr:`.EPublication.format`
        - :attr:`.EPublication.nakladatelVydavatel`
    """
    epub = _removeSpecialCharacters(epub)
    post_dict = PostData(epub).get_POST_data()

    return _sendPostDict(post_dict)
https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L378-L405
export to excel
python
def export_olx(self, tarball, root_path):
    """if sequestered, only export the assets"""
    def append_asset_to_soup_and_export(asset_):
        if isinstance(asset_, Item):
            try:
                unique_url = asset_.export_olx(tarball, root_path)
            except AttributeError:
                pass
            else:
                unique_name = get_file_name_without_extension(unique_url)
                asset_type = asset_.genus_type.identifier
                asset_tag = my_soup.new_tag(asset_type)
                asset_tag['url_name'] = unique_name
                getattr(my_soup, my_tag).append(asset_tag)
        else:
            try:
                unique_urls = asset_.export_olx(tarball, root_path)
            except AttributeError:
                pass
            else:
                for index, ac in enumerate(asset_.get_asset_contents()):
                    asset_type = ac.genus_type.identifier
                    unique_url = unique_urls[index]
                    unique_name = get_file_name_without_extension(unique_url)
                    asset_tag = my_soup.new_tag(asset_type)
                    asset_tag['url_name'] = unique_name
                    getattr(my_soup, my_tag).append(asset_tag)

    def get_file_name_without_extension(filepath):
        return filepath.split('/')[-1].replace('.xml', '')

    my_path = None
    if self.my_osid_object.is_sequestered():
        # just export assets
        for asset in self.assets:
            try:
                asset.export_olx(tarball, root_path)
            except AttributeError:
                pass
    else:
        # also add to the /<tag>/ folder
        my_tag = self.my_osid_object.genus_type.identifier
        expected_name = self.get_unique_name(tarball, self.url, my_tag, root_path)
        my_path = '{0}{1}/{2}.xml'.format(root_path,
                                          my_tag,
                                          expected_name)
        my_soup = BeautifulSoup('<' + my_tag + '/>', 'xml')
        getattr(my_soup, my_tag)['display_name'] = self.my_osid_object.display_name.text
        if my_tag == 'split_test':
            getattr(my_soup, my_tag)['group_id_to_child'] = self.my_osid_object.group_id_to_child
            getattr(my_soup, my_tag)['user_partition_id'] = self.my_osid_object.user_partition_id.text

        rm = self.my_osid_object._get_provider_manager('REPOSITORY')
        if self.my_osid_object._proxy is None:
            cls = rm.get_composition_lookup_session()
        else:
            cls = rm.get_composition_lookup_session(proxy=self.my_osid_object._proxy)
        cls.use_federated_repository_view()
        cls.use_unsequestered_composition_view()
        for child_id in self.my_osid_object.get_child_ids():
            child = cls.get_composition(child_id)
            if child.is_sequestered():
                # append its assets here
                for asset in child.assets:
                    append_asset_to_soup_and_export(asset)
            else:
                child_type = child.genus_type.identifier
                child_tag = my_soup.new_tag(child_type)
                child_path = child.export_olx(tarball, root_path)
                if child_path is not None:
                    child_tag['url_name'] = get_file_name_without_extension(child_path)
                getattr(my_soup, my_tag).append(child_tag)

        for asset in self.assets:
            append_asset_to_soup_and_export(asset)

        self.write_to_tarfile(tarball, my_path, my_soup)

    return my_path
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/repository/edx/compositions.py#L522-L604
export to excel
python
def usufyToXlsExport(d, fPath):
    """
    Workaround to export to a .xls file.

    Args:
    -----
        d: Data to export.
        fPath: File path for the output file.
    """
    from pyexcel_xls import get_data
    try:
        #oldData = get_data(fPath)
        # A change in the API now returns only an array of arrays if there is only one sheet.
        oldData = {"OSRFramework": get_data(fPath)}
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}

    # Generating the new tabular data
    tabularData = _generateTabularData(d, oldData)

    from pyexcel_xls import save_data
    # Storing the file
    save_data(fPath, tabularData)
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L397-L419
export to excel
python
def __build_export(self, stats):
    """Build the export lists."""
    export_names = []
    export_values = []

    if isinstance(stats, dict):
        # Stats is a dict
        # Is there a key ?
        if 'key' in iterkeys(stats) and stats['key'] in iterkeys(stats):
            pre_key = '{}.'.format(stats[stats['key']])
        else:
            pre_key = ''
        # Walk through the dict
        for key, value in iteritems(stats):
            if isinstance(value, bool):
                value = json.dumps(value)

            if isinstance(value, list):
                try:
                    value = value[0]
                except IndexError:
                    value = ''
            if isinstance(value, dict):
                item_names, item_values = self.__build_export(value)
                item_names = [pre_key + key.lower() + str(i) for i in item_names]
                export_names += item_names
                export_values += item_values
            else:
                export_names.append(pre_key + key.lower())
                export_values.append(value)
    elif isinstance(stats, list):
        # Stats is a list (of dict)
        # Recursive loop through the list
        for item in stats:
            item_names, item_values = self.__build_export(item)
            export_names += item_names
            export_values += item_values
    return export_names, export_values
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/exports/glances_export.py#L187-L223
export to excel
python
def initial_export(agent_id, force):
    """Performs the initial data export for an agent"""
    agent = LiveSyncAgent.find_first(id=agent_id)
    if agent is None:
        print 'No such agent'
        return
    if agent.backend is None:
        print cformat('Cannot run agent %{red!}{}%{reset} (backend not found)').format(agent.name)
        return
    print cformat('Selected agent: %{white!}{}%{reset} ({})').format(agent.name, agent.backend.title)
    if agent.initial_data_exported and not force:
        print 'The initial export has already been performed for this agent.'
        print cformat('To re-run it, use %{yellow!}--force%{reset}')
        return
    agent.create_backend().run_initial_export(Event.find(is_deleted=False))
    agent.initial_data_exported = True
    db.session.commit()
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/cli.py#L72-L89
export to excel
python
def main(argv=None):
    """ben-elastic entry point"""
    arguments = cli_common(__doc__, argv=argv)
    es_export = ESExporter(arguments['CAMPAIGN-DIR'], arguments['--es'])
    es_export.export()
    if argv is not None:
        return es_export
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/benelastic.py#L20-L26
export to excel
python
def to_file(self, path):
    """Export the contents to a file as comma separated values.

    Parameters
    ----------
    path : string
        File path where the data should be saved to

    Example
    -------
    Export the last ten elements of AME2012 to a new file:

    >>> Table('AME2012').tail(10).to_file('last_ten.txt')
    """
    with open(path, 'w') as f:
        f.write('Z N M\n')

    self.df.to_csv(path, sep='\t', mode='a')
https://github.com/elyase/masstable/blob/3eb72b22cd3337bc5c6bb95bb7bb73fdbe6ae9e2/masstable/masstable.py#L96-L112
export to excel
python
def usufyToXlsxExport(d, fPath):
    """
    Workaround to export to a .xlsx file.

    Args:
    -----
        d: Data to export.
        fPath: File path for the output file.
    """
    from pyexcel_xlsx import get_data
    try:
        #oldData = get_data(fPath)
        # A change in the API now returns only an array of arrays if there is only one sheet.
        oldData = {"OSRFramework": get_data(fPath)}
    except:
        # No information has been recovered
        oldData = {"OSRFramework": []}

    # Generating the new tabular data
    tabularData = _generateTabularData(d, oldData)

    from pyexcel_xlsx import save_data
    # Storing the file
    save_data(fPath, tabularData)
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L422-L445
export to excel
python
def exportData(self, datfile):
    """
    Create a .dat file with the data that has been loaded.

    Args:
        datfile: Path to the file (Relative to the current working
        directory or absolute).
    """
    def ampl_set(name, values):
        def format_entry(e):
            return repr(e).replace(' ', '')
        return 'set {0} := {1};'.format(
            name, ','.join(format_entry(e) for e in values)
        )

    def ampl_param(name, values):
        def format_entry(k, v):
            k = repr(k).strip('()').replace(' ', '')
            if v == inf:
                v = "Infinity"
            elif v == -inf:
                v = "-Infinity"
            else:
                v = repr(v).strip('()').replace(' ', '')
            return '[{0}]{1}'.format(k, v)
        return 'param {0} := {1};'.format(
            name, ''.join(format_entry(k, v) for k, v in values.items())
        )

    with open(datfile, 'w') as f:
        for name, entity in self.getSets():
            values = entity.getValues().toList()
            print(ampl_set(name, values), file=f)
        for name, entity in self.getParameters():
            if entity.isScalar():
                print(
                    'param {} := {};'.format(name, entity.value()),
                    file=f
                )
            else:
                values = entity.getValues().toDict()
                print(ampl_param(name, values), file=f)
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L959-L1003
export to excel
python
def export(self, output=Mimetypes.PLAINTEXT, exclude=None, **kwargs):
    """ Export the collection item in the Mimetype required.

    ..note:: If current implementation does not have special mimetypes, reuses default_export method

    :param output: Mimetype to export to (Uses Mimetypes)
    :type output: str
    :param exclude: Informations to exclude. Specific to implementations
    :type exclude: [str]
    :return: Object using a different representation
    """
    return self.getTextualNode().export(output, exclude)
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/remote/cts.py#L368-L379
export to excel
python
def ExportTextFile(self, fname, title='unknown plot'):
    "save plot data to external file"

    buff = ["# Plot Data for %s" % title,
            "#---------------------------------"]

    out = []
    labels = []
    itrace = 0
    for ax in self.panel.fig.get_axes():
        for line in ax.lines:
            itrace += 1
            x = line.get_xdata()
            y = line.get_ydata()
            ylab = line.get_label()
            if len(ylab) < 1:
                ylab = 'Y%i' % itrace
            for c in ' .:";|/\\(){}[]\'&^%*$+=-?!@#':
                ylab = ylab.replace(c, '_')
            xlab = (' X%d' % itrace + ' '*3)[:4]
            ylab = ' '*(18-len(ylab)) + ylab + ' '
            out.extend([x, y])
            labels.extend([xlab, ylab])

    if itrace == 0:
        return

    buff.append('# %s' % (' '.join(labels)))

    npts = [len(a) for a in out]
    for i in range(max(npts)):
        oline = []
        for a in out:
            d = np.nan
            if i < len(a):
                d = a[i]
            oline.append(gformat(d, 12))
        buff.append(' '.join(oline))
    buff.append('')

    with open(fname, 'w') as fout:
        fout.write("\n".join(buff))
    fout.close()
    self.write_message("Exported data to '%s'" % fname, panel=0)
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/plotframe.py#L64-L108
export to excel
python
def _export_service(self, svc, ed):
    # type: (Any, EndpointDescription) -> None
    """
    Registers a service export

    :param svc: Service instance
    :param ed: Endpoint description
    """
    self._add_export(ed.get_id(), (svc, ed))
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/providers/distribution/__init__.py#L586-L594
export to excel
python
def export_csv(ekey, dstore):
    """
    Default csv exporter for arrays stored in the output.hdf5 file

    :param ekey: export key
    :param dstore: datastore object
    :returns: a list with the path of the exported file
    """
    name = ekey[0] + '.csv'
    try:
        array = dstore[ekey[0]].value
    except AttributeError:
        # this happens if the key correspond to a HDF5 group
        return []  # write a custom exporter in this case
    if len(array.shape) == 1:  # vector
        array = array.reshape((len(array), 1))
    return [write_csv(dstore.export_path(name), array)]
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/__init__.py#L28-L44
export to excel
python
def export(app, local, no_scrub):
    """Export the data."""
    log(header, chevrons=False)
    data.export(str(app), local=local, scrub_pii=(not no_scrub))
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/command_line.py#L706-L709
export to excel
python
def export(self):
    """
    export dictionary with values
    """
    data = {}
    for key in self._specification:
        data[key] = self[key]
    return data
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/util.py#L363-L370
export to excel
python
def to_export(export):
    """Serializes export to id string
    :param export: object to serialize
    :return: string id
    """
    from sevenbridges.models.storage_export import Export
    if not export:
        raise SbgError('Export is required!')
    elif isinstance(export, Export):
        return export.id
    elif isinstance(export, six.string_types):
        return export
    else:
        raise SbgError('Invalid export parameter!')
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/meta/transformer.py#L211-L224
export to excel
python
def export_to_file(self, filepath, level_prefix = '  '):
    """
    Exports this model to a file.

    @param filepath: File to be exported to.
    @type filepath: str
    """
    xmldom = self.export_to_dom()
    xmlstr = xmldom.toprettyxml(level_prefix, '\n',)
    f = open(filepath, 'w')
    f.write(xmlstr)
    f.close()
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/model.py#L313-L326
export to excel
python
def mark_entities_to_export(self, export_config):
    """
    Apply the specified :class:`meteorpi_model.ExportConfiguration` to the database, running its
    contained query and creating rows in t_observationExport or t_fileExport for matching entities.

    :param ExportConfiguration export_config:
        An instance of :class:`meteorpi_model.ExportConfiguration` to apply.
    :returns:
        The integer number of rows added to the export tables
    """
    # Retrieve the internal ID of the export configuration, failing if it hasn't been stored
    self.con.execute('SELECT uid FROM archive_exportConfig WHERE exportConfigID = %s;',
                     (export_config.config_id,))
    export_config_id = self.con.fetchall()
    if len(export_config_id) < 1:
        raise ValueError("Attempt to run export on ExportConfiguration not in database")
    export_config_id = export_config_id[0]['uid']

    # If the export is inactive then do nothing
    if not export_config.enabled:
        return 0

    # Track the number of rows created, return it later
    rows_created = 0

    # Handle ObservationSearch
    if isinstance(export_config.search, mp.ObservationSearch):
        # Create a deep copy of the search and set the properties required when creating exports
        search = mp.ObservationSearch.from_dict(export_config.search.as_dict())
        search.exclude_export_to = export_config.config_id
        b = search_observations_sql_builder(search)
        self.con.execute(b.get_select_sql(columns='o.uid, o.obsTime'), b.sql_args)
        for result in self.con.fetchall():
            self.con.execute('INSERT INTO archive_observationExport '
                             '(observationId, obsTime, exportConfig, exportState) '
                             'VALUES (%s,%s,%s,%s)',
                             (result['uid'], result['obsTime'], export_config_id, 1))
            rows_created += 1

    # Handle FileSearch
    elif isinstance(export_config.search, mp.FileRecordSearch):
        # Create a deep copy of the search and set the properties required when creating exports
        search = mp.FileRecordSearch.from_dict(export_config.search.as_dict())
        search.exclude_export_to = export_config.config_id
        b = search_files_sql_builder(search)
        self.con.execute(b.get_select_sql(columns='f.uid, f.fileTime'), b.sql_args)
        for result in self.con.fetchall():
            self.con.execute('INSERT INTO archive_fileExport '
                             '(fileId, fileTime, exportConfig, exportState) '
                             'VALUES (%s,%s,%s,%s)',
                             (result['uid'], result['fileTime'], export_config_id, 1))
            rows_created += 1

    # Handle ObservatoryMetadataSearch
    elif isinstance(export_config.search, mp.ObservatoryMetadataSearch):
        # Create a deep copy of the search and set the properties required when creating exports
        search = mp.ObservatoryMetadataSearch.from_dict(export_config.search.as_dict())
        search.exclude_export_to = export_config.config_id
        b = search_metadata_sql_builder(search)
        self.con.execute(b.get_select_sql(columns='m.uid, m.setAtTime'), b.sql_args)
        for result in self.con.fetchall():
            self.con.execute('INSERT INTO archive_metadataExport '
                             '(metadataId, setAtTime, exportConfig, exportState) '
                             'VALUES (%s,%s,%s,%s)',
                             (result['uid'], result['setAtTime'], export_config_id, 1))
            rows_created += 1

    # Complain if it's anything other than these two (nothing should be at the moment but we might introduce
    # more search types in the future
    else:
        raise ValueError("Unknown search type %s" % str(type(export_config.search)))
    return rows_created
https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_db/meteorpi_db/__init__.py#L1102-L1173
export to excel
python
def exportTreeItem(self, sheet, cols, item):
    """
    Exports the inputed item to the given Excel worksheet for the
    given visible columns.

    :param      sheet | <xlwt.WorkSheet>
                cols  | [<int>, ..]
                item  | <QTreeWidgetItem>
    """
    # export item information
    for c, col in enumerate(cols):
        data = unwrapVariant(item.data(Qt.EditRole, col))
        if data:
            sheet.write(self._currrow, c, nativestring(data))
        else:
            sheet.write(self._currrow, c, nativestring(item.text(col)))

    self._currrow += 1

    # export children as rows
    for c in range(item.childCount()):
        self.exportTreeItem(sheet, cols, item.child(c))
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/exporters/xexcelexporter.py#L76-L97
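Note: the exporter above assumes an xlwt worksheet and a running row counter; a minimal standalone sketch of the same sheet.write pattern (workbook, sheet, and file names are illustrative):

import xlwt

workbook = xlwt.Workbook()
sheet = workbook.add_sheet('Export')  # illustrative sheet name

rows = [('name', 'value'), ('alpha', 1), ('beta', 2)]
for r, row in enumerate(rows):
    for c, cell in enumerate(row):
        sheet.write(r, c, cell)  # write(row_index, col_index, value)

workbook.save('export.xls')  # xlwt produces the legacy .xls format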
export to excel
python
def generate_exports():
    """Return export commands for setting environment variables.
    """
    env = []
    for name in list_installed_genomes():
        try:
            g = Genome(name)
            env_name = re.sub(r'[^\w]+', "_", name).upper()
            env.append("export {}={}".format(env_name, g.filename))
        except Exception:
            # skip genomes that cannot be loaded
            pass
    return env
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L238-L249
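Note: for reference, the normalization above maps an installed genome name to an environment-variable-safe key; a tiny standalone check (the genome name and path are illustrative):

import re

name = 'GRCh38.p13'
env_name = re.sub(r'[^\w]+', "_", name).upper()
print(env_name)                                          # GRCH38_P13
print("export {}={}".format(env_name, "/data/GRCh38.p13.fa"))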
export to excel
python
def export(self, output: str=None, exclude: List[str]=None, **kwargs):
    """ Export the collection item in the Mimetype required.

    .. note:: If current implementation does not have special mimetypes, reuses default_export method

    :param output: Mimetype to export to (Uses MyCapytain.common.constants.Mimetypes)
    :type output: str
    :param exclude: Information to exclude. Specific to implementations
    :type exclude: [str]
    :return: Object using a different representation
    """
    return Exportable.export(self, output, exclude=exclude, **kwargs)
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/text.py#L158-L169
export to excel
python
def export_to_file(self, filename): """Export this instrument's settings to a file. :param filename: the name of the file """ instr_json = self.export_struct() with open(filename, 'w') as fp: json.dump(instr_json, fp, indent=2)
https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instrument.py#L88-L96
export to excel
python
def export(self): """ See DiskExportManager.export """ with LogTask('Exporting disk {} to {}'.format(self.name, self.dst)): with utils.RollbackContext() as rollback: rollback.prependDefer( shutil.rmtree, self.dst, ignore_errors=True ) self.copy() if not self.disk['format'] == 'iso': self.sparse() self.calc_sha('sha1') self.update_lago_metadata() self.write_lago_metadata() self.compress() rollback.clear()
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/export.py#L246-L262
export to excel
python
def export(self, output_folder): """Export matrices as ``*.npy`` files to an output folder.""" if not os.path.exists(output_folder): os.makedirs(output_folder) self._interact_with_folder(output_folder, 'w')
https://github.com/erget/StereoVision/blob/1adff45e291362f52188e0fd0211265845a4461a/stereovision/calibration.py#L124-L128
export to excel
python
def _write_exports(exports, edict): ''' Write an exports file to disk If multiple shares were initially configured per line, like: /media/storage /media/data *(ro,sync,no_subtree_check) ...then they will be saved to disk with only one share per line: /media/storage *(ro,sync,no_subtree_check) /media/data *(ro,sync,no_subtree_check) ''' with salt.utils.files.fopen(exports, 'w') as efh: for export in edict: line = salt.utils.stringutils.to_str(export) for perms in edict[export]: hosts = perms['hosts'] options = ','.join(perms['options']) line += ' {0}({1})'.format(hosts, options) efh.write('{0}\n'.format(line))
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nfs3.py#L110-L130
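Note: a hedged sketch of the edict structure the writer above expects (one list of hosts/options dicts per share), using plain file I/O in place of the salt utilities:

# Hypothetical data shaped like the output of list_exports().
edict = {
    '/media/storage': [{'hosts': '*', 'options': ['ro', 'sync', 'no_subtree_check']}],
    '/media/data': [{'hosts': '*', 'options': ['ro', 'sync', 'no_subtree_check']}],
}

with open('exports.test', 'w') as efh:
    for export, perms_list in edict.items():
        line = export
        for perms in perms_list:
            line += ' {0}({1})'.format(perms['hosts'], ','.join(perms['options']))
        efh.write('{0}\n'.format(line))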
export to excel
python
def panel_export(panel_id): """Export panel to PDF file""" panel_obj = store.panel(panel_id) data = controllers.panel_export(store, panel_obj) data['report_created_at'] = datetime.datetime.now().strftime("%Y-%m-%d") html_report = render_template('panels/panel_pdf_simple.html', **data) return render_pdf(HTML(string=html_report), download_filename=data['panel']['panel_name']+'_'+str(data['panel']['version'])+'_'+datetime.datetime.now().strftime("%Y-%m-%d")+'_scout.pdf')
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/panels/views.py#L136-L142
export to excel
python
def export_file(file_path): """Prepend the given parameter with ``export``""" if not os.path.isfile(file_path): return error("Referenced file does not exist: '{}'.".format(file_path)) return "export {}".format(file_path)
https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L305-L311
export to excel
python
def export(self, name, columns, points):
    """Export the stats to the OpenTSDB server."""
    for i in range(len(columns)):
        if not isinstance(points[i], Number):
            continue
        stat_name = '{}.{}.{}'.format(self.prefix, name, columns[i])
        stat_value = points[i]
        tags = self.parse_tags(self.tags)
        try:
            self.client.send(stat_name, stat_value, **tags)
        except Exception as e:
            logger.error("Can not export stats %s to OpenTSDB (%s)" % (name, e))
        logger.debug("Export {} stats to OpenTSDB".format(name))
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/exports/glances_opentsdb.py#L76-L88
export to excel
python
def export_csv(self, table, output=None, columns="*", **kwargs): """ Export a table to a CSV file. If an output path is provided, write to file. Else, return a string. Wrapper around pandas.sql.to_csv(). See: http://pandas.pydata.org/pandas-docs/stable/io.html#io-store-in-csv Arguments: table (str): Name of the table to export. output (str, optional): Path of the file to write. columns (str, optional): A comma separated list of columns to export. **kwargs: Additional args passed to pandas.sql.to_csv() Returns: str: CSV string, or None if writing to file. Raises: IOError: In case of error writing to file. SchemaError: If the named table is not found. """ import pandas.io.sql as panda # Determine if we're writing to a file or returning a string. isfile = output is not None output = output or StringIO() if table not in self.tables: raise SchemaError("Cannot find table '{table}'" .format(table=table)) # Don't print row indexes by default. if "index" not in kwargs: kwargs["index"] = False table = panda.read_sql("SELECT {columns} FROM {table}" .format(columns=columns, table=table), self.connection) table.to_csv(output, **kwargs) return None if isfile else output.getvalue()
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/db.py#L442-L487
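Note: the read_sql/to_csv round-trip above works standalone against any DB-API connection; given the query this snippet was retrieved for, swapping to_csv for to_excel (which needs openpyxl installed) writes a spreadsheet instead. A minimal sketch with an in-memory SQLite table:

import sqlite3

import pandas as pd

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (name TEXT, value INTEGER)')
conn.executemany('INSERT INTO t VALUES (?, ?)', [('alpha', 1), ('beta', 2)])

df = pd.read_sql('SELECT name, value FROM t', conn)
df.to_csv('t.csv', index=False)       # CSV export
# df.to_excel('t.xlsx', index=False)  # Excel export; requires openpyxl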
export to excel
python
def export(self): """ Make the actual request to the Import API (exporting is part of the Import API) to export a map visualization as a .carto file :return: A URL pointing to the .carto file :rtype: str :raise: CartoException .. warning:: Non-public API. It may change with no previous notice .. note:: The export is asynchronous, but this method waits for the export to complete. See `MAX_NUMBER_OF_RETRIES` and `INTERVAL_BETWEEN_RETRIES_S` """ export_job = ExportJob(self.client, self.get_id()) export_job.run() export_job.refresh() count = 0 while export_job.state in ("exporting", "enqueued", "pending"): if count >= MAX_NUMBER_OF_RETRIES: raise CartoException(_("Maximum number of retries exceeded \ when polling the import API for \ visualization export")) time.sleep(INTERVAL_BETWEEN_RETRIES_S) export_job.refresh() count += 1 if export_job.state == "failure": raise CartoException(_("Visualization export failed")) if (export_job.state != "complete" and export_job.state != "created"): raise CartoException(_("Unexpected problem on visualization export \ (state: {state})"). format(state=export_job.state)) return export_job.url
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/visualizations.py#L80-L117
export to excel
python
def exportUsufy(data, ext, fileH):
    """
    Method that exports the different structures onto different formats.

    Args:
    -----
        data: Data to export.
        ext: One of the following: csv, gml, json, ods, png, txt, xls, xlsx.
        fileH: Fileheader for the output files.

    Returns:
    --------
        Performs the export as requested by parameter.
    """
    if ext == "csv":
        usufyToCsvExport(data, fileH+"."+ext)
    elif ext == "gml":
        usufyToGmlExport(data, fileH+"."+ext)
    elif ext == "json":
        usufyToJsonExport(data, fileH+"."+ext)
    elif ext == "ods":
        usufyToOdsExport(data, fileH+"."+ext)
    elif ext == "png":
        usufyToPngExport(data, fileH+"."+ext)
    elif ext == "txt":
        usufyToTextExport(data, fileH+"."+ext)
    elif ext == "xls":
        usufyToXlsExport(data, fileH+"."+ext)
    elif ext == "xlsx":
        usufyToXlsxExport(data, fileH+"."+ext)
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L40-L69
export to excel
python
def exportItem(self, title,
               itemId,
               exportFormat,
               tags="export",
               snippet=None,
               exportParameters=None,
               wait=True):
    """
    Exports a service item (POST only) to the specified output format.
    Available only to users with an organizational subscription.
    Invokable only by the service item owner or an administrator.

    Inputs:
       title - name of export item
       itemId - id of the item to export
       exportFormat - output format. Values: Shapefile, CSV or File
        Geodatabase, feature collection, GeoJson, or Scene Package
       tags - comma separated list of quick descriptors, the default is
        export.
       snippet - short explanation of the exported item
       exportParameters - A JSON object describing the layers to be
        exported and the export parameters for each layer.
       wait - optional - default True. Exporting an item is an
        asynchronous operation. If True is specified, the process
        will wait till the operation is completed.
    Output:
       If wait is True:
          UserItem class
       else:
          job id for the export item.
    """
    url = "%s/export" % self.location
    params = {
        "f" : "json",
        "title" : title,
        "tags" : tags,
        "itemId" : itemId,
        "exportFormat" : exportFormat,
    }
    if snippet is not None:
        params['snippet'] = snippet
    if exportParameters is not None:
        params["exportParameters"] = json.dumps(exportParameters)
    res = self._post(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
    itemURL = "%s/items/%s" % (self.location, res['exportItemId'])
    if self.currentFolder is not None and self.currentFolder['title'] != "root":
        self.moveItems(items=res['exportItemId'], folder=self.currentFolder['id'])
    ui = UserItem(url=itemURL,
                  securityHandler=self._securityHandler,
                  proxy_url=self._proxy_url,
                  proxy_port=self._proxy_port)
    if wait:
        status = "partial"
        while status != "completed":
            status = ui.status(jobId=res['jobId'], jobType="export")
            if status['status'] == 'failed':
                raise Exception("Could not export item: %s" % itemId)
            elif status['status'].lower() == "completed":
                break
            time.sleep(2)
    else:
        return res['jobId'], ui
    return ui
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_content.py#L2444-L2513
export to excel
python
def export(self, path, session): """See `Module.export`.""" def variables_saver(variables_path): if self._saver: self._saver.save( session, variables_path, write_meta_graph=False, write_state=False) self._spec._export(path, variables_saver)
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/native_module.py#L569-L578
export to excel
python
def export(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue): """ Export TSV/CSV tables """ if format == "score_plots": export_score_plots(infile) else: if outfile is None: if outcsv: outfile = infile.split(".osw")[0] + ".csv" else: outfile = infile.split(".osw")[0] + ".tsv" else: outfile = outfile export_tsv(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue)
https://github.com/PyProphet/pyprophet/blob/f546ad171750cd7685afbde6785fe71f82cadb35/pyprophet/main.py#L277-L292
export to excel
python
def export(id, local=False, scrub_pii=False): """Export data from an experiment.""" print("Preparing to export the data...") if local: db_uri = db.db_url else: db_uri = HerokuApp(id).db_uri # Create the data package if it doesn't already exist. subdata_path = os.path.join("data", id, "data") try: os.makedirs(subdata_path) except OSError as e: if e.errno != errno.EEXIST or not os.path.isdir(subdata_path): raise # Copy in the data. copy_db_to_csv(db_uri, subdata_path, scrub_pii=scrub_pii) # Copy the experiment code into a code/ subdirectory. try: shutil.copyfile( os.path.join("snapshots", id + "-code.zip"), os.path.join("data", id, id + "-code.zip"), ) except Exception: pass # Copy in the DATA readme. # open(os.path.join(id, "README.txt"), "a").close() # Save the experiment id. with open(os.path.join("data", id, "experiment_id.md"), "a+") as file: file.write(id) # Zip data src = os.path.join("data", id) dst = os.path.join("data", id + "-data.zip") archive_data(id, src, dst) cwd = os.getcwd() data_filename = "{}-data.zip".format(id) path_to_data = os.path.join(cwd, "data", data_filename) # Backup data on S3 unless run locally if not local: bucket = user_s3_bucket() bucket.upload_file(path_to_data, data_filename) url = _generate_s3_url(bucket, data_filename) # Register experiment UUID with dallinger register(id, url) return path_to_data
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/data.py#L206-L263
export to excel
python
def get_export_table(self): """Get the export table.""" symbols = self.binary.DIRECTORY_ENTRY_EXPORT.symbols names = AttrsGetter(symbols, join=False).name return names
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/executables/pe.py#L54-L58
export to excel
python
def export_csv(self, path, idx=None, header=None, formatted=False, sort_idx=True, fmt='%.18e'): """ Export to a csv file Parameters ---------- path : str path of the csv file to save idx : None or array-like, optional the indices of the variables to export. Export all by default header : None or array-like, optional customized header if not `None`. Use the names from the lst file by default formatted : bool, optional Use LaTeX-formatted header. Does not apply when using customized header sort_idx : bool, optional Sort by idx or not, # TODO: implement sort fmt : str cell formatter """ if not idx: idx = self._idx if not header: header = self.get_header(idx, formatted=formatted) assert len(idx) == len(header), \ "Idx length does not match header length" body = self.get_values(idx) with open(path, 'w') as fd: fd.write(','.join(header) + '\n') np.savetxt(fd, body, fmt=fmt, delimiter=',')
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/plot.py#L141-L175
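Note: the header-then-savetxt layout used above works standalone on any 2-D array; a minimal sketch with made-up values:

import numpy as np

header = ['t', 'x', 'y']
body = np.array([[0.0, 1.0, 2.0],
                 [0.1, 1.1, 2.1]])

with open('out.csv', 'w') as fd:
    fd.write(','.join(header) + '\n')                 # header row first
    np.savetxt(fd, body, fmt='%.18e', delimiter=',')  # then the numeric body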
export to excel
python
def export_csv_file(self, directory, filename):
    """
    Exports diagram inner graph to a CSV file.

    :param directory: string representing output directory,
    :param filename: string representing output file name.
    """
    bpmn_csv_export.BpmnDiagramGraphCsvExport.export_process_to_csv(self, directory, filename)
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_rep.py#L95-L102
export to excel
python
def export_envars(self, env): """Export the environment variables contained in the dict env.""" for k, v in env.items(): self.export_envar(k, v)
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/launcher.py#L93-L96
export to excel
python
def values_export(self, **params): """ Method for `Export Values from all Data Streams of a Device <https://m2x.att.com/developer/documentation/v2/device#Export-Values-from-all-Data-Streams-of-a-Device>`_ endpoint. :param params: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters. :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request """ self.api.get(self.subpath('/values/export.csv'), params=params) return self.api.last_response
https://github.com/attm2x/m2x-python/blob/df83f590114692b1f96577148b7ba260065905bb/m2x/v2/devices.py#L181-L192
export to excel
python
def list_to_file(orig_list, file_name, file_location): """ Function to export a list to a text file Args: orig_list: The list you want exported file_name: The name of the exported file file_location: The location of the file, derive from the os module Returns: returns the filename info """ file = __os.path.join(file_location, file_name) def add_line_break(list_line): """ Create a line break at the end of a string Args: list_line: string Returns: A string with a line break """ list_line = ('%s\n' % (list_line,)) return list_line write_file = open(file, "a") for orig_list_line in orig_list: write_file.write(add_line_break(str(orig_list_line))) write_file.close() return file_name
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L134-L162
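Note: stripped of the module plumbing, the pattern above reduces to appending each item with a trailing line break; a standalone equivalent:

lines = ['line one', 'line two']
with open('out.txt', 'a') as fh:  # 'a' matches the append mode used above
    for item in lines:
        fh.write('%s\n' % (item,))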
export to excel
python
def csv(self, output): """Output data as excel-compatible CSV""" import csv csvwriter = csv.writer(self.outfile) csvwriter.writerows(output)
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/cli/__init__.py#L75-L79
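Note: the same stdlib writerows call produces Excel-friendly CSV on its own; opening the file with newline='' (Python 3) avoids the blank rows Excel otherwise shows on Windows:

import csv

rows = [['name', 'value'], ['alpha', 1], ['beta', 2]]
with open('out.csv', 'w', newline='') as fh:
    csv.writer(fh).writerows(rows)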
export to excel
python
def usufyToCsvExport(d, fPath):
    """
    Workaround to export to a CSV file.

    Args:
    -----
        d: Data to export.
        fPath: File path for the output file.
    """
    from pyexcel_io import get_data
    try:
        oldData = {"OSRFramework": get_data(fPath) }
    except Exception:
        # No information has been recovered
        oldData = {"OSRFramework":[]}

    # Generating the new tabular data.
    tabularData = _generateTabularData(d, oldData)

    from pyexcel_io import save_data
    # Storing the file
    # NOTE: when working with CSV files it is no longer a dict because it is a one-sheet-format
    save_data(fPath, tabularData["OSRFramework"])
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/general.py#L345-L368
export to excel
python
def export(self, input_stats=None):
    """Export all the stats.

    Each export module is run in a dedicated thread.
    """
    input_stats = input_stats or {}

    for e in self._exports:
        logger.debug("Export stats using the %s module" % e)
        thread = threading.Thread(target=self._exports[e].update,
                                  args=(input_stats,))
        thread.start()
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats.py#L228-L241
export to excel
python
def export(self, exports=None): """ Export all the outputs in the datastore in the given export formats. Individual outputs are not exported if there are multiple realizations. """ self.exported = getattr(self.precalc, 'exported', {}) if isinstance(exports, tuple): fmts = exports elif exports: # is a string fmts = exports.split(',') elif isinstance(self.oqparam.exports, tuple): fmts = self.oqparam.exports else: # is a string fmts = self.oqparam.exports.split(',') keys = set(self.datastore) has_hcurves = ('hcurves-stats' in self.datastore or 'hcurves-rlzs' in self.datastore) if has_hcurves: keys.add('hcurves') for fmt in fmts: if not fmt: continue for key in sorted(keys): # top level keys if 'rlzs' in key and self.R > 1: continue # skip individual curves self._export((key, fmt)) if has_hcurves and self.oqparam.hazard_maps: self._export(('hmaps', fmt)) if has_hcurves and self.oqparam.uniform_hazard_spectra: self._export(('uhs', fmt))
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L260-L289
export to excel
python
def export_process_to_csv(bpmn_diagram, directory, filename): """ Root method of CSV export functionality. :param bpmn_diagram: an instance of BpmnDiagramGraph class, :param directory: a string object, which is a path of output directory, :param filename: a string object, which is a name of output file. """ nodes = copy.deepcopy(bpmn_diagram.get_nodes()) start_nodes = [] export_elements = [] for node in nodes: incoming_list = node[1].get(consts.Consts.incoming_flow) if len(incoming_list) == 0: start_nodes.append(node) if len(start_nodes) != 1: raise bpmn_exception.BpmnPythonError("Exporting to CSV format accepts only one start event") nodes_classification = utils.BpmnImportUtils.generate_nodes_clasification(bpmn_diagram) start_node = start_nodes.pop() BpmnDiagramGraphCsvExport.export_node(bpmn_diagram, export_elements, start_node, nodes_classification) try: os.makedirs(directory) except OSError as exception: if exception.errno != errno.EEXIST: raise file_object = open(directory + filename, "w") file_object.write("Order,Activity,Condition,Who,Subprocess,Terminated\n") BpmnDiagramGraphCsvExport.write_export_node_to_file(file_object, export_elements) file_object.close()
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_process_csv_export.py#L44-L75
export to excel
python
def write_to_file(data, path):
    """Export extracted fields to xml

    Appends .xml to the path if missing and writes the xml file to the
    specified directory (or the current working directory if none is given).

    Parameters
    ----------
    data : list of dict
        Extracted fields, one dict per invoice
    path : str
        directory to save generated xml file

    Notes
    ----
    Give a file name in the ``path`` parameter.
    Only `date`, `desc`, `amount` and `currency` are exported

    Examples
    --------
    >>> from invoice2data.output import to_xml
    >>> to_xml.write_to_file(data, "/exported_xml/invoice.xml")
    >>> to_xml.write_to_file(data, "invoice.xml")

    """

    if path.endswith('.xml'):
        filename = path
    else:
        filename = path + '.xml'

    tag_data = ET.Element('data')
    xml_file = open(filename, "w")
    i = 0
    for line in data:
        i += 1
        tag_item = ET.SubElement(tag_data, 'item')
        tag_date = ET.SubElement(tag_item, 'date')
        tag_desc = ET.SubElement(tag_item, 'desc')
        tag_currency = ET.SubElement(tag_item, 'currency')
        tag_amount = ET.SubElement(tag_item, 'amount')
        tag_item.set('id', str(i))
        tag_date.text = line['date'].strftime('%d/%m/%Y')
        tag_desc.text = line['desc']
        tag_currency.text = line['currency']
        tag_amount.text = str(line['amount'])
    xml_file.write(prettify(tag_data))
    xml_file.close()
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/output/to_xml.py#L12-L59
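Note: the ElementTree calls above compose like this in a self-contained sketch (field names and values are illustrative):

import xml.etree.ElementTree as ET

data = ET.Element('data')
item = ET.SubElement(data, 'item')
item.set('id', '1')
ET.SubElement(item, 'desc').text = 'Invoice 42'
ET.SubElement(item, 'amount').text = '99.95'

print(ET.tostring(data, encoding='unicode'))
# <data><item id="1"><desc>Invoice 42</desc><amount>99.95</amount></item></data>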
export to excel
python
def export_formats(self, pid_type): """List of export formats.""" if pid_type not in self._export_formats: fmts = self.app.config.get('RECORDS_UI_EXPORT_FORMATS', {}).get( pid_type, {}) self._export_formats[pid_type] = sorted( [(k, v) for k, v in fmts.items() if v], key=lambda x: x[1]['order'], ) return self._export_formats[pid_type]
https://github.com/inveniosoftware/invenio-records-ui/blob/ae92367978f2e1e96634685bd296f0fd92b4da54/invenio_records_ui/ext.py#L29-L38
export to excel
python
def export(self): """Export all attributes of the user to a dict. :return: attributes of the user. :rtype: dict. """ data = {} data["name"] = self.name data["contributions"] = self.contributions data["avatar"] = self.avatar data["followers"] = self.followers data["join"] = self.join data["organizations"] = self.organizations data["repositories"] = self.numberOfRepos data["bio"] = self.bio data["private"] = self.private data["public"] = self.public data["location"] = self.location return data
https://github.com/iblancasa/GitHubCity/blob/c5299c6859dbefbd869e2ac6ff2faff2a39cf32f/src/githubcity/ghuser.py#L79-L97
export to excel
python
def export_csv(self, spec, asset_refs, curves_dict):
    """
    :param asset_refs: names of the assets
    :param curves_dict: a dictionary tag -> loss curves
    """
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    ebr = hasattr(self, 'builder')
    for key in sorted(curves_dict):
        recs = curves_dict[key]
        data = [['asset', 'loss_type', 'loss', 'period' if ebr else 'poe']]
        for li, loss_type in enumerate(self.loss_types):
            if ebr:  # event_based_risk
                array = recs[:, :, li]  # shape (A, P, LI)
                periods = self.builder.return_periods
                for aref, losses in zip(asset_refs, array):
                    for period, loss in zip(periods, losses):
                        data.append((aref, loss_type, loss, period))
            else:  # classical_risk
                array = recs[loss_type]  # shape (A,) loss_curve_dt
                for aref, losses, poes in zip(
                        asset_refs, array['losses'], array['poes']):
                    for loss, poe in zip(losses, poes):
                        data.append((aref, loss_type, loss, poe))
        dest = self.dstore.build_fname(
            'loss_curves', '%s-%s' % (spec, key) if spec else key, 'csv')
        writer.save(data, dest)
    return writer.getsaved()
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/loss_curves.py#L106-L132
export to excel
python
def _export_csv(self, filepath, data, preview_data): """CSV export of code_array results Parameters ---------- filepath: String \tPath of export file data: Object \tCode array result object slice, i. e. one object or iterable of \tsuch objects """ # Get csv info csv_info = \ self.main_window.interfaces.get_csv_export_info(preview_data) if csv_info is None: return try: dialect, digest_types, has_header = csv_info except TypeError: return # Export CSV file csv_interface = CsvInterface(self.main_window, filepath, dialect, digest_types, has_header) try: csv_interface.write(data) except IOError, err: msg = _("The file {filepath} could not be fully written\n \n" "Error message:\n{msg}") msg = msg.format(filepath=filepath, msg=err) short_msg = _('Error writing CSV file') self.main_window.interfaces.display_warning(msg, short_msg)
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L146-L185
export to excel
python
def export_results(job, fsid, file_name, univ_options, subfolder=None):
    """
    Write out a file to a given location. The location can be either a directory on the local
    machine, or a folder with a bucket on AWS.

    :param str fsid: The file store id for the file to be exported
    :param str file_name: The name of the file that needs to be exported (path to file is also
           acceptable)
    :param dict univ_options: Dict of universal options used by almost all tools
    :param str subfolder: A sub folder within the main folder where this data should go
    :return: None
    """
    job.fileStore.logToMaster('Exporting %s to output location' % fsid)
    file_name = os.path.basename(file_name)
    try:
        assert univ_options['output_folder'], 'Need a path to a folder to write out files'
        assert univ_options['storage_location'], 'Need to know where the files need to go. ' + \
                                                 'Local or AWS/Azure, etc.'
    except AssertionError as err:
        # This isn't a game killer. Continue the pipeline without erroring out but do inform the
        # user about it.
        print('ERROR:', err.message, file=sys.stderr)
        return
    if univ_options['output_folder'] == 'NA':
        output_folder = ''
    else:
        output_folder = univ_options['output_folder']
    output_folder = os.path.join(output_folder, univ_options['patient'])
    output_folder = os.path.join(output_folder, subfolder) if subfolder else output_folder
    if univ_options['storage_location'] == 'local':
        # Handle Local
        try:
            # Create the directory if required
            os.makedirs(output_folder, 0755)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        output_url = 'file://' + os.path.join(output_folder, file_name)
    elif univ_options['storage_location'].startswith('aws'):
        # Handle AWS
        bucket_name = univ_options['storage_location'].split(':')[-1]
        output_url = os.path.join('S3://', bucket_name, output_folder.strip('/'), file_name)
    # Can't do Azure or google yet.
    else:
        # TODO: Azure support
        print("Currently doesn't support anything but Local and aws.")
        return
    job.fileStore.exportFile(fsid, output_url)
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L401-L448
export to excel
python
def export(self, xformat='csv'): """action: export annotations to CSV.""" if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return if xformat == 'csv': filename = splitext(self.annot.xml_file)[0] + '.csv' filename, _ = QFileDialog.getSaveFileName(self, 'Export stages', filename, 'Sleep stages (*.csv)') if 'remlogic' in xformat: filename = splitext(self.annot.xml_file)[0] + '.txt' filename, _ = QFileDialog.getSaveFileName(self, 'Export stages', filename, 'Sleep stages (*.txt)') if filename == '': return self.annot.export(filename, xformat=xformat)
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1577-L1596
export to excel
python
def to_csv(objects, filename, digits=5, warnings=True):
    """
    Export the flattened indicators of one or several users to CSV.

    Parameters
    ----------
    objects : list
        List of objects to be exported.
    filename : string
        File to export to.
    digits : int
        Precision of floats.

    Examples
    --------
    This function can be used to export the results of
    :meth:`bandicoot.utils.all`.
    >>> U_1 = bc.User()
    >>> U_2 = bc.User()
    >>> bc.to_csv([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.csv')

    If you only have one object, you can simply pass it as argument:
    >>> bc.to_csv(bc.utils.all(U_1), 'results_1.csv')
    """
    if not isinstance(objects, list):
        objects = [objects]

    data = [flatten(obj) for obj in objects]
    all_keys = [d for datum in data for d in datum.keys()]
    field_names = sorted(set(all_keys), key=lambda x: all_keys.index(x))

    with open(filename, 'w') as f:
        w = csv.writer(f)
        w.writerow(field_names)

        def make_repr(item):
            if item is None:
                return None
            elif isinstance(item, float):
                return repr(round(item, digits))
            else:
                return str(item)

        for row in data:
            row = dict((k, make_repr(v)) for k, v in row.items())
            w.writerow([make_repr(row.get(k, None)) for k in field_names])

    if warnings:
        print("Successfully exported {} object(s) to {}".format(len(objects), filename))
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L46-L96
export to excel
python
def export(self, top=True): """Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non list objects should be exported with value top=True, all list objects, that are embedded in as fields inlist objects should be exported with `top`=False Returns: str: The objects string representation """ out = [] if top: out.append(self._internal_name) out.append(self._to_str(self.leapyear_observed)) out.append(self._to_str(self.daylight_saving_start_day)) out.append(self._to_str(self.daylight_saving_end_day)) out.append(str(len(self.holidays))) for obj in self.holidays: out.append(obj.export(top=False)) return ",".join(out)
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L4868-L4890
export to excel
python
def to_csv(self, encoding=export.ENCODING, dialect=export.DIALECT,
           make_filename=export.MAKE_FILENAME):
    """Dump all worksheets of the spreadsheet to individual CSV files.

    Args:
        encoding (str): result string encoding
        dialect (str): :mod:`csv` dialect name or object to use
        make_filename: template or one-argument callable returning the filename

    If ``make_filename`` is a string, it is string-interpolated with an
    infos-dictionary with the fields ``id`` (spreadsheet id), ``title``
    (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id),
    ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the
    filename: ``filename = make_filename % infos``.

    If ``make_filename`` is a callable, it will be called with the
    infos-dictionary to generate the filename:
    ``filename = make_filename(infos)``.
    """
    for s in self._sheets:
        s.to_csv(None, encoding, dialect, make_filename)
https://github.com/xflr6/gsheets/blob/ca4f1273044704e529c1138e3f942836fc496e1b/gsheets/models.py#L170-L190
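Note: a hedged usage sketch, assuming spreadsheet is a gsheets spreadsheet object already fetched through an authenticated client; both make_filename forms use the infos fields documented above:

# String template form: %-interpolated with the infos dict.
spreadsheet.to_csv(make_filename='%(title)s - %(sheet)s.csv')

# Callable form: receives the infos dict and returns the filename.
spreadsheet.to_csv(make_filename=lambda infos: '{title}-{gid}.csv'.format(**infos))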
export to excel
python
def export(name, target=None, rev=None, user=None, username=None, password=None, force=False, overwrite=False, externals=True, trust=False, trust_failures=None): ''' Export a file or directory from an SVN repository name Address and path to the file or directory to be exported. target Name of the target directory where the checkout will put the working directory rev : None The name revision number to checkout. Enable "force" if the directory already exists. user : None Name of the user performing repository management operations username : None The user to access the name repository with. The svn default is the current user password Connect to the Subversion server with this password .. versionadded:: 0.17.0 force : False Continue if conflicts are encountered overwrite : False Overwrite existing target externals : True Change to False to not checkout or update externals trust : False Automatically trust the remote server. SVN's --trust-server-cert trust_failures : None Comma-separated list of certificate trust failures, that shall be ignored. This can be used if trust=True is not sufficient. The specified string is passed to SVN's --trust-server-cert-failures option as-is. .. versionadded:: 2019.2.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not target: return _fail(ret, 'Target option is required') svn_cmd = 'svn.export' cwd, basename = os.path.split(target) opts = tuple() if not overwrite and os.path.exists(target) and not os.path.isdir(target): return _fail(ret, 'The path "{0}" exists and is not ' 'a directory.'.format(target) ) if __opts__['test']: if not os.path.exists(target): return _neutral_test( ret, ('{0} doesn\'t exist and is set to be checked out.').format(target)) svn_cmd = 'svn.list' rev = 'HEAD' out = __salt__[svn_cmd](cwd, target, user, username, password, *opts) return _neutral_test( ret, ('{0}').format(out)) if not rev: rev = 'HEAD' if force: opts += ('--force',) if externals is False: opts += ('--ignore-externals',) if trust: opts += ('--trust-server-cert',) if trust_failures: opts += ('--trust-server-cert-failures', trust_failures) out = __salt__[svn_cmd](cwd, name, basename, user, username, password, rev, *opts) ret['changes']['new'] = name ret['changes']['comment'] = name + ' was Exported to ' + target return ret
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/svn.py#L190-L291
export to excel
python
def export_cmd(cmd, src_path, dest_dir=None, item_id=None, export_format=None, scale=None): ''' Executes a `sketchtool export` command and returns formatted output :src_path: File to export. :type <str> :dest_dir: Items are exported at /dest_dir/name@scale.export_format e.g. `~/Desktop/Page 1@2x.png` :param export_format: 'png', 'pdf' etc. :type <ExportFormat> :param scale: Specify as 1.0, 2.0 etc. :type <float> :param item_id: id or name of an Exportable :type <str> :returns: list of exported item paths ''' cmd.extend([src_path]) if not dest_dir: dest_dir = mkdtemp(prefix='pyglass') cmd.extend(['--output=%s' % dest_dir]) if export_format: cmd.extend(['--formats=%s' % export_format]) if scale: cmd.extend(['--scales=%s' % scale]) if item_id: cmd.extend(['--items=%s' % item_id]) logger.debug(u'Executing cmd: %s' % cmd) exported_str = execute(cmd) logger.debug(u'Raw result: %s' % exported_str) # Raw result is in the form: 'Exported <item-name-1>\nExported <item-name-2>\n' exported_items = [os.path.join(dirpath, f) for dirpath, dirnames, files in os.walk(dest_dir) for f in files if f.endswith('.%s' % export_format)] return exported_items
https://github.com/Pixelapse/pyglass/blob/83cd0ff2b0b7cdaf4ec6f54559a626e67455cd33/pyglass/sketch/export.py#L19-L54
export to excel
python
def OnExport(self, event): """File export event handler Currently, only CSV export is supported """ code_array = self.main_window.grid.code_array tab = self.main_window.grid.current_table selection = self.main_window.grid.selection # Check if no selection is present selection_bbox = selection.get_bbox() f2w = get_filetypes2wildcards(["csv", "pdf", "svg"]) filters = f2w.keys() wildcards = f2w.values() wildcard = "|".join(wildcards) if selection_bbox is None: # No selection --> Use smallest filled area for bottom right edge maxrow, maxcol, __ = code_array.get_last_filled_cell(tab) (top, left), (bottom, right) = (0, 0), (maxrow, maxcol) else: (top, left), (bottom, right) = selection_bbox # Generator of row and column keys in correct order __top = 0 if top is None else top __bottom = code_array.shape[0] if bottom is None else bottom + 1 __left = 0 if left is None else left __right = code_array.shape[1] if right is None else right + 1 def data_gen(top, bottom, left, right): for row in xrange(top, bottom): yield (code_array[row, col, tab] for col in xrange(left, right)) data = data_gen(__top, __bottom, __left, __right) preview_data = data_gen(__top, __bottom, __left, __right) # Get target filepath from user # No selection --> Provide svg export of current cell # if current cell is a matplotlib figure if selection_bbox is None: cursor = self.main_window.grid.actions.cursor figure = code_array[cursor] if Figure is not None and isinstance(figure, Figure): wildcard += \ "|" + _("SVG of current cell") + " (*.svg)|*.svg" + \ "|" + _("EPS of current cell") + " (*.eps)|*.eps" + \ "|" + _("PS of current cell") + " (*.ps)|*.ps" + \ "|" + _("PDF of current cell") + " (*.pdf)|*.pdf" + \ "|" + _("PNG of current cell") + " (*.png)|*.png" filters.append("cell_svg") filters.append("cell_eps") filters.append("cell_ps") filters.append("cell_pdf") filters.append("cell_png") message = _("Choose filename for export.") style = wx.SAVE path, filterindex = \ self.interfaces.get_filepath_findex_from_user(wildcard, message, style) if path is None: return # If an single cell is exported then the selection bbox # has to be changed to the current cell if filters[filterindex].startswith("cell_"): data = figure # Export file # ----------- self.main_window.actions.export_file(path, filters[filterindex], data, preview_data)
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_main_window.py#L1059-L1143
export to excel
python
def _export(dataset_input, dataset_output, random_index_column, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """
    :param DatasetLocal dataset_input: dataset to export
    :param DatasetLocal dataset_output: dataset to write to
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue
    :return:
    """
    if selection:
        if selection == True:  # easier to work with the name
            selection = "default"

    N = len(dataset_input) if not selection else dataset_input.selected_length(selection)
    if N == 0:
        raise ValueError("Cannot export empty table")

    if shuffle and sort:
        raise ValueError("Cannot shuffle and sort at the same time")

    if shuffle:
        shuffle_array = dataset_output.columns[random_index_column]

    partial_shuffle = shuffle and len(dataset_input) != N

    order_array = None
    order_array_inverse = None

    # for strings we also need the inverse order_array, keep track of that
    has_strings = any([dataset_input.dtype(k) == str_type for k in column_names])

    if partial_shuffle:
        # if we only export a portion, we need to create the full length random_index array, and
        shuffle_array_full = np.random.choice(len(dataset_input), len(dataset_input), replace=False)
        # then take a section of it
        shuffle_array[:] = shuffle_array_full[shuffle_array_full < N]
        del shuffle_array_full
        order_array = shuffle_array
    elif shuffle:
        # better to do this in memory
        shuffle_array_memory = np.random.choice(N, N, replace=False)
        shuffle_array[:] = shuffle_array_memory
        order_array = shuffle_array
    if order_array is not None:
        indices_r = np.zeros_like(order_array)
        indices_r[order_array] = np.arange(len(order_array))
        order_array_inverse = indices_r
        del indices_r

    if sort:
        if selection:
            raise ValueError("sorting selections not yet supported")
        # these indices sort the input array, but we evaluate the input in sequential order and write it out in sorted order
        # e.g., not b[:] = a[indices]
        # but b[indices_r] = a
        logger.info("sorting...")
        indices = np.argsort(dataset_input.evaluate(sort))
        indices_r = np.zeros_like(indices)
        indices_r[indices] = np.arange(len(indices))
        if has_strings:
            # in this case we already have the inverse ready
            order_array_inverse = indices if ascending else indices[::-1]
        else:
            del indices
        order_array = indices_r if ascending else indices_r[::-1]
        logger.info("sorting done")

    if progress == True:
        progress = vaex.utils.progressbar_callable(title="exporting")
    progress = progress or (lambda value: True)
    progress_total = len(column_names) * len(dataset_input)
    progress_status = ProgressStatus()
    progress_status.cancelled = False
    progress_status.value = 0
    if selection:
        full_mask = dataset_input.evaluate_selection_mask(selection)
    else:
        full_mask = None

    sparse_groups = collections.defaultdict(list)
    sparse_matrices = {}  # alternative to a set of matrices, since they are not hashable
    string_columns = []
    futures = []

    thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    if True:
        for column_name in column_names:
            sparse_matrix = dataset_output._sparse_matrix(column_name)
            if sparse_matrix is not None:
                # sparse columns are written differently
                sparse_groups[id(sparse_matrix)].append(column_name)
                sparse_matrices[id(sparse_matrix)] = sparse_matrix
                continue
            logger.debug("  exporting column: %s " % column_name)
            future = thread_pool.submit(_export_column, dataset_input, dataset_output, column_name, full_mask,
                                        shuffle, sort, selection, N, order_array, order_array_inverse, progress_status)
            futures.append(future)

    done = False
    while not done:
        done = True
        for future in futures:
            try:
                future.result(0.1/4)
            except concurrent.futures.TimeoutError:
                done = False
                break
        if not done:
            if not progress(progress_status.value / float(progress_total)):
                progress_status.cancelled = True

    for sparse_matrix_id, column_names in sparse_groups.items():
        sparse_matrix = sparse_matrices[sparse_matrix_id]
        for column_name in column_names:
            assert not shuffle
            assert selection in [None, False]
            column = dataset_output.columns[column_name]
            column.matrix.data[:] = dataset_input.columns[column_name].matrix.data
            column.matrix.indptr[:] = dataset_input.columns[column_name].matrix.indptr
            column.matrix.indices[:] = dataset_input.columns[column_name].matrix.indices
    return column_names
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/export.py#L34-L157