file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
made.py
|
.qgisUserDbFilePath()).path() + "/python/plugins/dalacalc"
# initialize locale
localePath = ""
locale = QSettings().value("locale/userLocale").toString()[0:2]
if QFileInfo(self.plugin_dir).exists():
localePath = self.plugin_dir + "/i18n/dalacalc_" + locale + ".qm"
if QFileInfo(localePath).exists():
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
# self.dlg = DalaCalcDialog(self.iface.mainWindow(), flags)
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(QIcon(":/plugins/dalacalc/icon.png"),"Hitung Dala", self.iface.mainWindow())
self.action.setWhatsThis("Plugin untuk hitungan Kerusakan dan Kerugian")
self.action.setStatusTip("Damages And Losses Plugin")
# connect the action to the run method
QObject.connect(self.action, SIGNAL("triggered()"), self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&Hitungan Kerusakan Kerugian", self.action)
def unload(self):
# Remove the plugin menu item and icon
|
# run method that performs all the real work
def run(self):
# create and show the dialog
flags = Qt.WindowTitleHint | Qt.WindowSystemMenuHint | Qt.WindowMaximizeButtonHint
self.dlg = DalaCalcDialog(self.iface.mainWindow(), flags)
# show the dialog
self.dlg.show()
# connect signals
QObject.connect(self.dlg.ui.KeterdampakanComboBox,SIGNAL('currentIndexChanged(int)'), self.bacaKeterdampakan)
QObject.connect(self.dlg.ui.BahayaComboBox,SIGNAL('currentIndexChanged(int)'), self.bacaBahaya)
#QObject.connect(self.dlg.ui.KerugianLineEdit,SIGNAL('currentIndexChanged(int)'), self.bacaKerugian)
QObject.connect(self.dlg.ui.helpPushButton,SIGNAL('clicked()'), self.bantuan)
QObject.connect(self.dlg.ui.hitungPushButton,SIGNAL('clicked()'), self.hitungDala)
quitbutton = self.dlg.ui.closePushButton
QObject.connect(quitbutton, SIGNAL('clicked()'), self.dlg, SLOT('close()'))
# build a list of the layers currently loaded in QGIS
self.layermap=QgsMapLayerRegistry.instance().mapLayers()
for (name,layer) in self.layermap.iteritems():
if type(layer).__name__ == "QgsVectorLayer":
tempname = str(name).rstrip('01234567890')
self.layerids.append(name)
self.dlg.ui.KeterdampakanComboBox.addItem(tempname)
self.dlg.ui.BahayaComboBox.addItem(tempname)
def bacaKeterdampakan(self):
# read the layer to be used as the impact (exposure) layer
try:
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
except: #Crashes without valid shapefiles
return
def bacaBahaya(self):
# read the layer to be used as the hazard layer
try:
comboindex = self.dlg.ui.BahayaComboBox.currentIndex()
layerBahaya = self.layermap[self.layerids[comboindex]]
except: #Crashes without valid shapefiles
return
def bantuan(self):
# show the help message
QMessageBox.information(self.iface.mainWindow(),"Bantuan Dala","Hitungan kerugian disesuaikan dengan peraturan daerah yang berlaku, dan diasumsikan kerusakan sebesar 90 %", QMessageBox.Close)
def hitungDala(self):
# read the loss value and verify that the input really is a number
try:
nilaiKerugian = self.dlg.ui.KerugianLineEdit.text()
nilaiKerugian = float(nilaiKerugian)
except ValueError:
QMessageBox.warning(self.iface.mainWindow(),"Error","Nilai kerugian tidak boleh kosong dan harus berupa angka!", QMessageBox.Close)
return
# read the exposure (impact) layer
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
# read the hazard layer
comboindex = self.dlg.ui.BahayaComboBox.currentIndex()
layerBahaya = self.layermap[self.layerids[comboindex]]
# check that both layers were read correctly
#QMessageBox.information(self.iface.mainWindow(),"Error","terdampak = "+str(layerKeterdampakan)+"\nBahaya = "+str(layerBahaya), QMessageBox.Close)
# build spatial indexes to speed up processing
dampakIndex = QgsSpatialIndex() # empty index used to hold a layer with many features
bahayaIndex = QgsSpatialIndex()
fbahaya = QgsFeature() # variable to hold a feature from the hazard layer
fdampak = QgsFeature() # variable to hold a feature from the impact layer
# impact - store the features using a spatial index
allAttrsDampak = layerKeterdampakan.pendingAllAttributesList()
layerKeterdampakan.select(allAttrsDampak)
allFeaturesDampak = {fdampak.id(): fdampak for fdampak in layerKeterdampakan}
# hazard - store the features using a spatial index
allAttrsBahaya = layerBahaya.pendingAllAttributesList()
layerBahaya.select(allAttrsBahaya)
allFeaturesBahaya = {fbahaya.id(): fbahaya for fbahaya in layerBahaya}
# populate the impact spatial index with the impact features
for fd in allFeaturesDampak.values():
dampakIndex.insertFeature(fd)
# populate the hazard spatial index with the hazard features
for fb in allFeaturesBahaya.values():
bahayaIndex.insertFeature(fb)
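# note: QgsSpatialIndex.intersects() only returns the ids of features whose
# bounding boxes overlap the query rectangle; the exact geometry().intersects()
# test in the selection loop below refines those candidates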
# --- MAIN ITERATION ---
ids_D = {}
ids_B = {}
luasAkhirTerdampak = 0
# loop over the impact features and collect candidate ids from the spatial index
for fdampak in allFeaturesDampak.values():
varA = fdampak.id()
ids_D[varA] = dampakIndex.intersects(fdampak.geometry().boundingBox())
#QMessageBox.information(self.iface.mainWindow(),"test", str(varA),QMessageBox.Close)
# loop over the hazard features and collect candidate ids from the spatial index
for fbahaya in allFeaturesBahaya.values():
varB = fbahaya.id()
ids_B[varB] = bahayaIndex.intersects(fbahaya.geometry().boundingBox())
#QMessageBox.information(self.iface.mainWindow(),"test", str(varB),QMessageBox.Close)
selection=[]
# select the features that actually intersect
for id_D in ids_D:
f_D = allFeaturesDampak[id_D]
for id_B in ids_B:
f_B = allFeaturesBahaya[id_B]
intersct = f_D.geometry().intersects(f_B.geometry())
#QMessageBox.information(self.iface.mainWindow(),"test1", "intersect pa gak?"+str(intersct),QMessageBox.Close)
if intersct:
luasTerdampak = f_D.geometry().area()
luasAkhirTerdampak += luasTerdampak
selection.append(id_D) # record the id of the selected feature
else:
pass
layerKeterdampakan.setSelectedFeatures(selection)
if varA == 1:
self.zoomFeature()
else:
mc=self.iface.mapCanvas()
mc.zoomToSelected(layerKeterdampakan)
# multiply the loss value by the total affected area
persentase = 90.0*(0.01)
hasilKali = luasAkhirTerdampak * nilaiKerugian * persentase
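# worked example with illustrative numbers: luasAkhirTerdampak = 1000 (m2),
# nilaiKerugian = 50000 and persentase = 0.9 give
# hasilKali = 1000 * 50000 * 0.9 = 45,000,000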
# display the result
stringHasil = ("Hasil analisis kerugian dan kerusakan: \n"
"\n- Total jumlah fasilitas terdampak = "+str(len(selection))+
"\n- Total luas semua fasilitas terdampak "
"\n = "+str(luasAkhirTerdampak)+ " m2"
"\n- Dengan nilai kerugian per unit sebesar "
"\n Rp. "+locale.format("%d",nilaiKerugian,grouping=True)+",- "
"\n dan dengan asumsi bahwa bangunan yang rusak "
"\n mengalami "+str(persentase*100)+"% kerus
|
self.iface.removePluginMenu(u"&Hitungan Kerusakan Kerugian", self.action)
self.iface.removeToolBarIcon(self.action)
|
identifier_body
|
__init__.py
|
PUT|DELETE|TRACE|PATCH) ')
_accept_html = re.compile(rb'^Accept:[^\r]*text/html', re.IGNORECASE)
_keep_alive = re.compile(rb'^Connection:[^\r]*keep-alive$', re.IGNORECASE)
_error_page = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>wstan error</title>
<style type="text/css">
body {{
font-family: sans-serif;
font-size: 12pt;
height: 100%;
}}
h1 {{
font-size: 18pt;
color: #333;
}}
#frame {{
margin: 0 auto;
margin-top: 80px;
width: 80%;
color: #444;
}}
hr {{ color: #BBB }}
</style>
</head>
<body>
<div id="frame">
<h1>wstan error: {title}</h1>
<hr />
<p>{detail}</p>
</div>
</body>
</html>
'''
async def my_sock_connect(host=None, port=None, *, family=0, proto=0, flags=0):
"""Modified version of BaseEventLoop.create_connection: this function returns sock object.
And it resolves names itself, for compatibility with Python 3.4 and below."""
assert (host and port)
infos = await loop.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
exceptions = []
sock = None
for family, type_, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
await loop.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except Exception:
if sock is not None:
sock.close()
raise
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions): # If they all have the same str(), raise one.
raise exceptions[0]
raise OSError('Multiple exceptions: {}'.format(', '.join(map(str, exceptions))))
return sock
def make_socks_addr(host, port):
return b'\x00\x03' + bytes([len(host)]) + host + struct.pack('>H', port)
def parse_socks_addr(dat, allow_remain=False):
"""Extract address and port from SOCKS request header (only 4 parts:
RSV(0x00) | ATYP | DST.ADDR | DST.PORT). The header will be reused in tunnel server."""
if not dat or dat[0] != 0x00:
raise ValueError
try:
atyp = dat[1]
if atyp == 0x01: # IPv4
port_idx = 6
target_addr = socket.inet_ntoa(dat[2:port_idx])
elif atyp == 0x03: # domain name
port_idx = 3 + dat[2]
target_addr = dat[3:port_idx].decode('ascii')
elif atyp == 0x04: # IPv6
port_idx = 18
target_addr = socket.inet_ntop(socket.AF_INET6, dat[2:port_idx])
else:
raise ValueError("unknown address type")
target_port = struct.unpack('>H', dat[port_idx:port_idx+2])[0]
if allow_remain:
return target_addr, target_port, port_idx + 2
else:
if dat[port_idx+2:]:
raise ValueError
return target_addr, target_port
except (IndexError, struct.error):
raise ValueError
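# round-trip example (illustrative values): make_socks_addr(b'example.com', 8080)
# yields b'\x00\x03\x0bexample.com\x1f\x90', and parse_socks_addr() on that buffer
# returns ('example.com', 8080)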
def die(reason):
print(reason, file=sys.stderr)
sys.exit(1)
def load_ini(ini_path):
"""Read config from ini file."""
ini = ConfigParser()
try:
# utf-8 with BOM will kill ConfigParser
with open(ini_path, encoding='utf-8-sig') as f:
ini.read_string('[DEFAULT]\n' + f.read())
except (ParsingError, FileNotFoundError) as e:
die('error reading config file: %s' % e)
ini = ini['DEFAULT']
ret = {}
ret.update(ini)
# fix types
for i in ('port', 'tun-port'):
if i in ini:
ret[i] = ini.getint(i)
for i in ('client', 'server', 'debug', 'compatible'):
if i in ini:
ret[i] = ini.getboolean(i)
for i in ret:
if '-' in i:
ret[i.replace('-', '_')] = ret.pop(i)
return ret.items()
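# example of a config file accepted by load_ini() (all values illustrative):
#   uri = wss://example.com:8000/tun
#   key = AAAAAAAAAAAAAAAAAAAAAA==
#   client = true
#   port = 1080
# keys mirror the command-line options of load_config(); '-' becomes '_' above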
def load_config():
import argparse
from wstan.autobahn.websocket.protocol import parseWsUrl
parser = argparse.ArgumentParser(
description='Ver %s | Tunneling TCP in WebSocket' % __version__)
# common config
parser.add_argument('-g', '--gen-key', help='generate a key and exit', action='store_true')
parser.add_argument('uri', help='URI of server', nargs='?')
parser.add_argument('key', help='base64 encoded 16-byte key', nargs='?')
g = parser.add_mutually_exclusive_group()
g.add_argument('-c', '--client', help='run as client (default, also act as SOCKS5/HTTP(S) server)',
default=True, action='store_true')
g.add_argument('-s', '--server', help='run as server', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-z', '--compatible', help='useful when server is behind WS proxy', action='store_true')
parser.add_argument('-i', '--ini', help='load config file')
# client config
parser.add_argument('-y', '--proxy', help='let client use a HTTPS proxy (host:port)')
parser.add_argument('-p', '--port', help='listen port of SOCKS5/HTTP(S) server at localhost (defaults 1080)',
type=int, default=1080)
# server config
parser.add_argument('-t', '--tun-addr', help='listen address of server, overrides URI')
parser.add_argument('-r', '--tun-port', help='listen port of server, overrides URI', type=int)
parser.add_argument('--x-forward', help='Use X-Forwarded-For as client IP address when behind proxy',
default=False, action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.gen_key: # option -g can be used without URI and key, just like -h
return args
if args.ini:
for k, v in load_ini(args.ini):
setattr(args, k, v) # file config will override args
for i in ['uri', 'key']:
if not getattr(args, i):
die('%s not specified' % i)
if '?' in args.uri:
die('URI should not contain query')
try:
args.key = base64.b64decode(args.key)
assert len(args.key) == 16
except (Base64Error, AssertionError):
die('invalid key')
args.tun_ssl, args.uri_addr, args.uri_port = parseWsUrl(args.uri)[:3]
if args.proxy and args.client:
try:
args.proxy_host, port = args.proxy.split(':')
args.proxy_port = int(port)
except ValueError:
die('invalid proxy format')
if args.compatible:
d = get_sha1(args.key)[-1]
args.cookie_key = '_' + chr((d % 26) + 65) # an upper case character
return args
def http_die_soon(req):
"""Disable keep-alive to make HTTP proxy act like SOCKS. By doing this
wstan server can remain unchanged, but it will increase latency."""
dropped = [i for i in req.split(b'\r\n') if not _keep_alive.match(i)]
end = dropped.index(b'')
return b'\r\n'.join(dropped[:end] + [b'Connection: close'] + dropped[end:])
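# example: http_die_soon(b'GET / HTTP/1.1\r\nHost: a\r\nConnection: keep-alive\r\n\r\n')
# returns b'GET / HTTP/1.1\r\nHost: a\r\nConnection: close\r\n\r\n'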
def is_http_req(dat):
return bool(_http_req.match(dat))
def can_return_error_page(dat):
return dat and bool(_http_req.match(dat) and any(map(_accept_html.match, dat.split(b'\r\n'))))
def gen_error_page(title, detail):
body = _error_page.format(title=title, detail=detail).encode()
header = '\r\n'.join(
['HTTP/1.1 599 WSTAN ERROR', 'Content-Type: text/html; charset=UTF-8',
'Content-Length: %d' % len(body), '', '']).encode()
return header + body
def get_sha1(dat):
|
sha1 = hashlib.sha1()
sha1.update(dat)
return sha1.digest()
|
random_line_split
|
|
__init__.py
|
, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import socket
import struct
import hashlib
import asyncio
import base64
import sys
import os
import re
from binascii import Error as Base64Error
from configparser import ConfigParser, ParsingError
from collections import deque
__version__ = '0.4.1'
# Don't use "super().__init__()" in constructor of classes of this package (all libraries
# used are using old style)
# global variables shared between modules
config = loop = None
_http_req = re.compile(rb'^(GET|POST|HEAD|CONNECT|OPTIONS|PUT|DELETE|TRACE|PATCH) ')
_accept_html = re.compile(rb'^Accept:[^\r]*text/html', re.IGNORECASE)
_keep_alive = re.compile(rb'^Connection:[^\r]*keep-alive$', re.IGNORECASE)
_error_page = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>wstan error</title>
<style type="text/css">
body {{
font-family: sans-serif;
font-size: 12pt;
height: 100%;
}}
h1 {{
font-size: 18pt;
color: #333;
}}
#frame {{
margin: 0 auto;
margin-top: 80px;
width: 80%;
color: #444;
}}
hr {{ color: #BBB }}
</style>
</head>
<body>
<div id="frame">
<h1>wstan error: {title}</h1>
<hr />
<p>{detail}</p>
</div>
</body>
</html>
'''
async def my_sock_connect(host=None, port=None, *, family=0, proto=0, flags=0):
"""Modified version of BaseEventLoop.create_connection: this function returns sock object.
And it resolves names itself, for compatibility with Python 3.4 and below."""
assert (host and port)
infos = await loop.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
exceptions = []
sock = None
for family, type_, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
await loop.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except Exception:
if sock is not None:
sock.close()
raise
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions): # If they all have the same str(), raise one.
raise exceptions[0]
raise OSError('Multiple exceptions: {}'.format(', '.join(map(str, exceptions))))
return sock
def make_socks_addr(host, port):
return b'\x00\x03' + bytes([len(host)]) + host + struct.pack('>H', port)
def parse_socks_addr(dat, allow_remain=False):
"""Extract address and port from SOCKS request header (only 4 parts:
RSV(0x00) | ATYP | DST.ADDR | DST.PORT). The header will be reused in tunnel server."""
if not dat or dat[0] != 0x00:
raise ValueError
try:
atyp = dat[1]
if atyp == 0x01: # IPv4
port_idx = 6
target_addr = socket.inet_ntoa(dat[2:port_idx])
elif atyp == 0x03: # domain name
port_idx = 3 + dat[2]
target_addr = dat[3:port_idx].decode('ascii')
elif atyp == 0x04: # IPv6
port_idx = 18
target_addr = socket.inet_ntop(socket.AF_INET6, dat[2:port_idx])
else:
raise ValueError("unknown address type")
target_port = struct.unpack('>H', dat[port_idx:port_idx+2])[0]
if allow_remain:
return target_addr, target_port, port_idx + 2
else:
if dat[port_idx+2:]:
raise ValueError
return target_addr, target_port
except (IndexError, struct.error):
raise ValueError
def die(reason):
print(reason, file=sys.stderr)
sys.exit(1)
def load_ini(ini_path):
"""Read config from ini file."""
ini = ConfigParser()
try:
# utf-8 with BOM will kill ConfigParser
with open(ini_path, encoding='utf-8-sig') as f:
ini.read_string('[DEFAULT]\n' + f.read())
except (ParsingError, FileNotFoundError) as e:
die('error reading config file: %s' % e)
ini = ini['DEFAULT']
ret = {}
ret.update(ini)
# fix types
for i in ('port', 'tun-port'):
if i in ini:
ret[i] = ini.getint(i)
for i in ('client', 'server', 'debug', 'compatible'):
if i in ini:
ret[i] = ini.getboolean(i)
for i in ret:
if '-' in i:
ret[i.replace('-', '_')] = ret.pop(i)
return ret.items()
def
|
():
import argparse
from wstan.autobahn.websocket.protocol import parseWsUrl
parser = argparse.ArgumentParser(
description='Ver %s | Tunneling TCP in WebSocket' % __version__)
# common config
parser.add_argument('-g', '--gen-key', help='generate a key and exit', action='store_true')
parser.add_argument('uri', help='URI of server', nargs='?')
parser.add_argument('key', help='base64 encoded 16-byte key', nargs='?')
g = parser.add_mutually_exclusive_group()
g.add_argument('-c', '--client', help='run as client (default, also act as SOCKS5/HTTP(S) server)',
default=True, action='store_true')
g.add_argument('-s', '--server', help='run as server', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-z', '--compatible', help='useful when server is behind WS proxy', action='store_true')
parser.add_argument('-i', '--ini', help='load config file')
# client config
parser.add_argument('-y', '--proxy', help='let client use a HTTPS proxy (host:port)')
parser.add_argument('-p', '--port', help='listen port of SOCKS5/HTTP(S) server at localhost (defaults 1080)',
type=int, default=1080)
# server config
parser.add_argument('-t', '--tun-addr', help='listen address of server, overrides URI')
parser.add_argument('-r', '--tun-port', help='listen port of server, overrides URI', type=int)
parser.add_argument('--x-forward', help='Use X-Forwarded-For as client IP address when behind proxy',
default=False, action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.gen_key: # option -g can be used without URI and key, just like -h
return args
if args.ini:
for k, v in load_ini(args.ini):
setattr(args, k, v) # file config will override args
for i in ['uri', 'key']:
if not getattr(args, i):
die('%s not specified' % i)
if '?' in args.uri:
die('URI should not contain query')
try:
args.key = base64.b64decode(args.key)
assert len(args.key) == 16
except (Base64Error, AssertionError):
die('invalid key')
args.tun_ssl, args.uri_addr, args.uri_port = parseWsUrl(args.uri)[:3]
if args.proxy and args.client:
try:
args.proxy_host, port = args.proxy.split(':')
args.proxy_port = int(port)
except ValueError:
die('invalid proxy format')
if args.compatible:
d = get_sha1(args.key)[-1]
args.cookie_key = '_' + chr((d % 26) + 65) # an upper case character
return args
def http_die_soon(req):
"""Disable keep-alive to make HTTP proxy act like SOCKS. By doing this
wstan server can remain unchanged, but it will increase latency."""
dropped = [i for i in req.split(b'\r\n') if not _keep_alive.match(i)]
end = dropped.index(b'')
return b'\r\n'.join(dropped[:end] + [b'Connection: close'] + dropped[end
|
load_config
|
identifier_name
|
__init__.py
|
PUT|DELETE|TRACE|PATCH) ')
_accept_html = re.compile(rb'^Accept:[^\r]*text/html', re.IGNORECASE)
_keep_alive = re.compile(rb'^Connection:[^\r]*keep-alive$', re.IGNORECASE)
_error_page = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>wstan error</title>
<style type="text/css">
body {{
font-family: sans-serif;
font-size: 12pt;
height: 100%;
}}
h1 {{
font-size: 18pt;
color: #333;
}}
#frame {{
margin: 0 auto;
margin-top: 80px;
width: 80%;
color: #444;
}}
hr {{ color: #BBB }}
</style>
</head>
<body>
<div id="frame">
<h1>wstan error: {title}</h1>
<hr />
<p>{detail}</p>
</div>
</body>
</html>
'''
async def my_sock_connect(host=None, port=None, *, family=0, proto=0, flags=0):
"""Modified version of BaseEventLoop.create_connection: this function returns sock object.
And it resolves names itself, for compatibility with Python 3.4 and below."""
assert (host and port)
infos = await loop.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
exceptions = []
sock = None
for family, type_, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
await loop.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except Exception:
if sock is not None:
sock.close()
raise
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions): # If they all have the same str(), raise one.
raise exceptions[0]
raise OSError('Multiple exceptions: {}'.format(', '.join(map(str, exceptions))))
return sock
def make_socks_addr(host, port):
return b'\x00\x03' + bytes([len(host)]) + host + struct.pack('>H', port)
def parse_socks_addr(dat, allow_remain=False):
"""Extract address and port from SOCKS request header (only 4 parts:
RSV(0x00) | ATYP | DST.ADDR | DST.PORT). The header will be reused in tunnel server."""
if not dat or dat[0] != 0x00:
raise ValueError
try:
atyp = dat[1]
if atyp == 0x01: # IPv4
port_idx = 6
target_addr = socket.inet_ntoa(dat[2:port_idx])
elif atyp == 0x03: # domain name
port_idx = 3 + dat[2]
target_addr = dat[3:port_idx].decode('ascii')
elif atyp == 0x04: # IPv6
port_idx = 18
target_addr = socket.inet_ntop(socket.AF_INET6, dat[2:port_idx])
else:
raise ValueError("unknown address type")
target_port = struct.unpack('>H', dat[port_idx:port_idx+2])[0]
if allow_remain:
return target_addr, target_port, port_idx + 2
else:
if dat[port_idx+2:]:
raise ValueError
return target_addr, target_port
except (IndexError, struct.error):
raise ValueError
def die(reason):
print(reason, file=sys.stderr)
sys.exit(1)
def load_ini(ini_path):
"""Read config from ini file."""
ini = ConfigParser()
try:
# utf-8 with BOM will kill ConfigParser
with open(ini_path, encoding='utf-8-sig') as f:
ini.read_string('[DEFAULT]\n' + f.read())
except (ParsingError, FileNotFoundError) as e:
die('error reading config file: %s' % e)
ini = ini['DEFAULT']
ret = {}
ret.update(ini)
# fix types
for i in ('port', 'tun-port'):
if i in ini:
ret[i] = ini.getint(i)
for i in ('client', 'server', 'debug', 'compatible'):
if i in ini:
ret[i] = ini.getboolean(i)
for i in ret:
if '-' in i:
ret[i.replace('-', '_')] = ret.pop(i)
return ret.items()
def load_config():
import argparse
from wstan.autobahn.websocket.protocol import parseWsUrl
parser = argparse.ArgumentParser(
description='Ver %s | Tunneling TCP in WebSocket' % __version__)
# common config
parser.add_argument('-g', '--gen-key', help='generate a key and exit', action='store_true')
parser.add_argument('uri', help='URI of server', nargs='?')
parser.add_argument('key', help='base64 encoded 16-byte key', nargs='?')
g = parser.add_mutually_exclusive_group()
g.add_argument('-c', '--client', help='run as client (default, also act as SOCKS5/HTTP(S) server)',
default=True, action='store_true')
g.add_argument('-s', '--server', help='run as server', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-z', '--compatible', help='useful when server is behind WS proxy', action='store_true')
parser.add_argument('-i', '--ini', help='load config file')
# client config
parser.add_argument('-y', '--proxy', help='let client use a HTTPS proxy (host:port)')
parser.add_argument('-p', '--port', help='listen port of SOCKS5/HTTP(S) server at localhost (defaults 1080)',
type=int, default=1080)
# server config
parser.add_argument('-t', '--tun-addr', help='listen address of server, overrides URI')
parser.add_argument('-r', '--tun-port', help='listen port of server, overrides URI', type=int)
parser.add_argument('--x-forward', help='Use X-Forwarded-For as client IP address when behind proxy',
default=False, action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.gen_key: # option -g can be used without URI and key, just like -h
return args
if args.ini:
for k, v in load_ini(args.ini):
setattr(args, k, v) # file config will override args
for i in ['uri', 'key']:
if not getattr(args, i):
die('%s not specified' % i)
if '?' in args.uri:
die('URI should not contain query')
try:
args.key = base64.b64decode(args.key)
assert len(args.key) == 16
except (Base64Error, AssertionError):
die('invalid key')
args.tun_ssl, args.uri_addr, args.uri_port = parseWsUrl(args.uri)[:3]
if args.proxy and args.client:
try:
args.proxy_host, port = args.proxy.split(':')
args.proxy_port = int(port)
except ValueError:
die('invalid proxy format')
if args.compatible:
d = get_sha1(args.key)[-1]
args.cookie_key = '_' + chr((d % 26) + 65) # an upper case character
return args
def http_die_soon(req):
"""Disable keep-alive to make HTTP proxy act like SOCKS. By doing this
wstan server can remain unchanged, but it will increase latency."""
dropped = [i for i in req.split(b'\r\n') if not _keep_alive.match(i)]
end = dropped.index(b'')
return b'\r\n'.join(dropped[:end] + [b'Connection: close'] + dropped[end:])
def is_http_req(dat):
return bool(_http_req.match(dat))
def can_return_error_page(dat):
return dat and bool(_http_req.match(dat) and any(map(_accept_html.match, dat.split(b'\r\n'))))
def gen_error_page(title, detail):
body = _error_page.format(title=title, detail=detail).encode()
header = '\r\n'.join(
['HTTP/1.1 599 WSTAN ERROR', 'Content-Type: text/html; charset=UTF-8',
'Content-Length: %d' % len(body), '', '']).encode()
return header + body
def get_sha1(dat):
|
sha1 = hashlib.sha1()
sha1.update(dat)
return sha1.digest()
|
identifier_body
|
|
__init__.py
|
, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import socket
import struct
import hashlib
import asyncio
import base64
import sys
import os
import re
from binascii import Error as Base64Error
from configparser import ConfigParser, ParsingError
from collections import deque
__version__ = '0.4.1'
# Don't use "super().__init__()" in constructor of classes of this package (all libraries
# used are using old style)
# global variables shared between modules
config = loop = None
_http_req = re.compile(rb'^(GET|POST|HEAD|CONNECT|OPTIONS|PUT|DELETE|TRACE|PATCH) ')
_accept_html = re.compile(rb'^Accept:[^\r]*text/html', re.IGNORECASE)
_keep_alive = re.compile(rb'^Connection:[^\r]*keep-alive$', re.IGNORECASE)
_error_page = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>wstan error</title>
<style type="text/css">
body {{
font-family: sans-serif;
font-size: 12pt;
height: 100%;
}}
h1 {{
font-size: 18pt;
color: #333;
}}
#frame {{
margin: 0 auto;
margin-top: 80px;
width: 80%;
color: #444;
}}
hr {{ color: #BBB }}
</style>
</head>
<body>
<div id="frame">
<h1>wstan error: {title}</h1>
<hr />
<p>{detail}</p>
</div>
</body>
</html>
'''
async def my_sock_connect(host=None, port=None, *, family=0, proto=0, flags=0):
"""Modified version of BaseEventLoop.create_connection: this function returns sock object.
And it resolves names itself, for compatibility with Python 3.4 and below."""
assert (host and port)
infos = await loop.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
if not infos:
|
exceptions = []
sock = None
for family, type_, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
await loop.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except Exception:
if sock is not None:
sock.close()
raise
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions): # If they all have the same str(), raise one.
raise exceptions[0]
raise OSError('Multiple exceptions: {}'.format(', '.join(map(str, exceptions))))
return sock
def make_socks_addr(host, port):
return b'\x00\x03' + bytes([len(host)]) + host + struct.pack('>H', port)
def parse_socks_addr(dat, allow_remain=False):
"""Extract address and port from SOCKS request header (only 4 parts:
RSV(0x00) | ATYP | DST.ADDR | DST.PORT). The header will be reused in tunnel server."""
if not dat or dat[0] != 0x00:
raise ValueError
try:
atyp = dat[1]
if atyp == 0x01: # IPv4
port_idx = 6
target_addr = socket.inet_ntoa(dat[2:port_idx])
elif atyp == 0x03: # domain name
port_idx = 3 + dat[2]
target_addr = dat[3:port_idx].decode('ascii')
elif atyp == 0x04: # IPv6
port_idx = 18
target_addr = socket.inet_ntop(socket.AF_INET6, dat[2:port_idx])
else:
raise ValueError("unknown address type")
target_port = struct.unpack('>H', dat[port_idx:port_idx+2])[0]
if allow_remain:
return target_addr, target_port, port_idx + 2
else:
if dat[port_idx+2:]:
raise ValueError
return target_addr, target_port
except (IndexError, struct.error):
raise ValueError
def die(reason):
print(reason, file=sys.stderr)
sys.exit(1)
def load_ini(ini_path):
"""Read config from ini file."""
ini = ConfigParser()
try:
# utf-8 with BOM will kill ConfigParser
with open(ini_path, encoding='utf-8-sig') as f:
ini.read_string('[DEFAULT]\n' + f.read())
except (ParsingError, FileNotFoundError) as e:
die('error reading config file: %s' % e)
ini = ini['DEFAULT']
ret = {}
ret.update(ini)
# fix types
for i in ('port', 'tun-port'):
if i in ini:
ret[i] = ini.getint(i)
for i in ('client', 'server', 'debug', 'compatible'):
if i in ini:
ret[i] = ini.getboolean(i)
for i in ret:
if '-' in i:
ret[i.replace('-', '_')] = ret.pop(i)
return ret.items()
def load_config():
import argparse
from wstan.autobahn.websocket.protocol import parseWsUrl
parser = argparse.ArgumentParser(
description='Ver %s | Tunneling TCP in WebSocket' % __version__)
# common config
parser.add_argument('-g', '--gen-key', help='generate a key and exit', action='store_true')
parser.add_argument('uri', help='URI of server', nargs='?')
parser.add_argument('key', help='base64 encoded 16-byte key', nargs='?')
g = parser.add_mutually_exclusive_group()
g.add_argument('-c', '--client', help='run as client (default, also act as SOCKS5/HTTP(S) server)',
default=True, action='store_true')
g.add_argument('-s', '--server', help='run as server', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-z', '--compatible', help='useful when server is behind WS proxy', action='store_true')
parser.add_argument('-i', '--ini', help='load config file')
# client config
parser.add_argument('-y', '--proxy', help='let client use a HTTPS proxy (host:port)')
parser.add_argument('-p', '--port', help='listen port of SOCKS5/HTTP(S) server at localhost (defaults 1080)',
type=int, default=1080)
# server config
parser.add_argument('-t', '--tun-addr', help='listen address of server, overrides URI')
parser.add_argument('-r', '--tun-port', help='listen port of server, overrides URI', type=int)
parser.add_argument('--x-forward', help='Use X-Forwarded-For as client IP address when behind proxy',
default=False, action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.gen_key: # option -g can be used without URI and key, just like -h
return args
if args.ini:
for k, v in load_ini(args.ini):
setattr(args, k, v) # file config will override args
for i in ['uri', 'key']:
if not getattr(args, i):
die('%s not specified' % i)
if '?' in args.uri:
die('URI should not contain query')
try:
args.key = base64.b64decode(args.key)
assert len(args.key) == 16
except (Base64Error, AssertionError):
die('invalid key')
args.tun_ssl, args.uri_addr, args.uri_port = parseWsUrl(args.uri)[:3]
if args.proxy and args.client:
try:
args.proxy_host, port = args.proxy.split(':')
args.proxy_port = int(port)
except ValueError:
die('invalid proxy format')
if args.compatible:
d = get_sha1(args.key)[-1]
args.cookie_key = '_' + chr((d % 26) + 65) # an upper case character
return args
def http_die_soon(req):
"""Disable keep-alive to make HTTP proxy act like SOCKS. By doing this
wstan server can remain unchanged, but it will increase latency."""
dropped = [i for i in req.split(b'\r\n') if not _keep_alive.match(i)]
end = dropped.index(b'')
return b'\r\n'.join(dropped[:end] + [b'Connection: close'] + dropped[end
|
raise OSError('getaddrinfo() returned empty list')
|
conditional_block
|
fs.rs
|
VirtioFeatures};
use crate::virtio;
use crate::virtio::copy_config;
use crate::virtio::fs::passthrough::PassthroughFs;
use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN};
use crate::virtio::vhost::user::device::handler::{
CallEvent, DeviceRequestHandler, VhostUserBackend,
};
static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new();
async fn handle_fs_queue(
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: EventAsync,
server: Arc<fuse::Server<PassthroughFs>>,
tube: Arc<Mutex<Tube>>,
) {
// Slot is always going to be 0 because we do not support DAX
let slot: u32 = 0;
loop {
if let Err(e) = kick_evt.next_val().await {
error!("Failed to read kick event for fs queue: {}", e);
break;
}
if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) {
error!("Process FS queue failed: {}", e);
break;
}
}
}
fn default_uidmap() -> String {
let euid = unsafe { libc::geteuid() };
format!("{} {} 1", euid, euid)
}
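// The map string uses the "inside outside count" format of /proc/<pid>/uid_map;
// for euid 1000 this is "1000 1000 1", mapping that single id to itself.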
fn default_gidmap() -> String
|
fn jail_and_fork(
mut keep_rds: Vec<RawDescriptor>,
dir_path: PathBuf,
uid_map: Option<String>,
gid_map: Option<String>,
) -> anyhow::Result<i32> {
// Create new minijail sandbox
let mut j = Minijail::new()?;
j.namespace_pids();
j.namespace_user();
j.namespace_user_disable_setgroups();
j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?;
j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?;
j.run_as_init();
j.namespace_vfs();
j.namespace_net();
j.no_new_privs();
// Only pivot_root if we are not re-using the current root directory.
if dir_path != Path::new("/") {
// It's safe to call `namespace_vfs` multiple times.
j.namespace_vfs();
j.enter_pivot_root(&dir_path)?;
}
j.set_remount_mode(libc::MS_SLAVE);
let limit = get_max_open_files().context("failed to get max open files")?;
j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?;
// Make sure there are no duplicates in keep_rds
keep_rds.dedup();
// fork on the jail here
let pid = unsafe { j.fork(Some(&keep_rds))? };
if pid > 0 {
unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) };
}
if pid < 0 {
bail!("Fork error! {}", std::io::Error::last_os_error());
}
Ok(pid)
}
struct FsBackend {
server: Arc<fuse::Server<PassthroughFs>>,
tag: [u8; FS_MAX_TAG_LEN],
avail_features: u64,
acked_features: u64,
acked_protocol_features: VhostUserProtocolFeatures,
workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM],
keep_rds: Vec<RawDescriptor>,
}
impl FsBackend {
pub fn new(tag: &str) -> anyhow::Result<Self> {
if tag.len() > FS_MAX_TAG_LEN {
bail!(
"fs tag is too long: {} (max supported: {})",
tag.len(),
FS_MAX_TAG_LEN
);
}
let mut fs_tag = [0u8; FS_MAX_TAG_LEN];
fs_tag[..tag.len()].copy_from_slice(tag.as_bytes());
let avail_features = virtio::base_features(ProtectionType::Unprotected)
| VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
// Use default passthroughfs config
let fs = PassthroughFs::new(Default::default())?;
let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec();
keep_rds.append(&mut fs.keep_rds());
let server = Arc::new(Server::new(fs));
Ok(FsBackend {
server,
tag: fs_tag,
avail_features,
acked_features: 0,
acked_protocol_features: VhostUserProtocolFeatures::empty(),
workers: Default::default(),
keep_rds,
})
}
}
impl VhostUserBackend for FsBackend {
const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */
const MAX_VRING_LEN: u16 = 1024;
type Doorbell = CallEvent;
type Error = anyhow::Error;
fn features(&self) -> u64 {
self.avail_features
}
fn ack_features(&mut self, value: u64) -> anyhow::Result<()> {
let unrequested_features = value & !self.avail_features;
if unrequested_features != 0 {
bail!("invalid features are given: {:#x}", unrequested_features);
}
self.acked_features |= value;
Ok(())
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
let features = VhostUserProtocolFeatures::from_bits(features)
.ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?;
let supported = self.protocol_features();
self.acked_protocol_features = features & supported;
Ok(())
}
fn acked_protocol_features(&self) -> u64 {
self.acked_protocol_features.bits()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
let config = virtio_fs_config {
tag: self.tag,
num_request_queues: Le32::from(1),
};
copy_config(data, 0, config.as_slice(), offset);
}
fn reset(&mut self) {
for handle in self.workers.iter_mut().filter_map(Option::take) {
handle.abort();
}
}
fn start_queue(
&mut self,
idx: usize,
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: Event,
) -> anyhow::Result<()> {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
warn!("Starting new queue handler without stopping old handler");
handle.abort();
}
// Safe because the executor is initialized in main() below.
let ex = FS_EXECUTOR.get().expect("Executor not initialized");
// Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX).
queue.ack_features(self.acked_features);
let kick_evt =
EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?;
let (handle, registration) = AbortHandle::new_pair();
let (_, fs_device_tube) = Tube::pair()?;
ex.spawn_local(Abortable::new(
handle_fs_queue(
queue,
mem,
call_evt,
kick_evt,
self.server.clone(),
Arc::new(Mutex::new(fs_device_tube)),
),
registration,
))
.detach();
self.workers[idx] = Some(handle);
Ok(())
}
fn stop_queue(&mut self, idx: usize) {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
handle.abort();
}
}
}
#[derive(FromArgs)]
#[argh(description = "")]
struct Options {
#[argh(option, description = "path to a socket", arg_name = "PATH")]
socket: String,
#[argh(option, description = "the virtio-fs tag", arg_name = "TAG")]
tag: String,
#[argh(option, description = "path to a directory to share", arg_name = "DIR")]
shared_dir: PathBuf,
#[argh(option, description = "uid map to use", arg_name = "UIDMAP")]
uid_map: Option<String>,
#[argh(option, description = "gid map to use", arg_name = "GIDMAP")]
gid_map: Option<String>,
}
/// Starts a vhost-user fs device.
/// Returns an error if the given `args` is invalid or the device fails to run.
pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> {
let opts = match Options::from_args(&[program_name], args) {
Ok(opts) => opts,
Err(e) => {
if e.status.is_err() {
bail
|
{
let egid = unsafe { libc::getegid() };
format!("{} {} 1", egid, egid)
}
|
identifier_body
|
fs.rs
|
VirtioFeatures};
use crate::virtio;
use crate::virtio::copy_config;
use crate::virtio::fs::passthrough::PassthroughFs;
use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN};
use crate::virtio::vhost::user::device::handler::{
CallEvent, DeviceRequestHandler, VhostUserBackend,
};
static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new();
async fn handle_fs_queue(
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: EventAsync,
server: Arc<fuse::Server<PassthroughFs>>,
tube: Arc<Mutex<Tube>>,
) {
// Slot is always going to be 0 because we do not support DAX
let slot: u32 = 0;
loop {
if let Err(e) = kick_evt.next_val().await {
error!("Failed to read kick event for fs queue: {}", e);
break;
}
if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) {
error!("Process FS queue failed: {}", e);
break;
}
}
}
fn default_uidmap() -> String {
let euid = unsafe { libc::geteuid() };
format!("{} {} 1", euid, euid)
}
fn default_gidmap() -> String {
let egid = unsafe { libc::getegid() };
format!("{} {} 1", egid, egid)
}
fn jail_and_fork(
mut keep_rds: Vec<RawDescriptor>,
dir_path: PathBuf,
uid_map: Option<String>,
gid_map: Option<String>,
) -> anyhow::Result<i32> {
// Create new minijail sandbox
let mut j = Minijail::new()?;
j.namespace_pids();
j.namespace_user();
j.namespace_user_disable_setgroups();
j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?;
j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?;
j.run_as_init();
j.namespace_vfs();
j.namespace_net();
j.no_new_privs();
// Only pivot_root if we are not re-using the current root directory.
if dir_path != Path::new("/") {
// It's safe to call `namespace_vfs` multiple times.
j.namespace_vfs();
j.enter_pivot_root(&dir_path)?;
}
j.set_remount_mode(libc::MS_SLAVE);
let limit = get_max_open_files().context("failed to get max open files")?;
j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?;
// Make sure there are no duplicates in keep_rds
keep_rds.dedup();
// fork on the jail here
let pid = unsafe { j.fork(Some(&keep_rds))? };
if pid > 0 {
unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) };
}
if pid < 0 {
bail!("Fork error! {}", std::io::Error::last_os_error());
}
Ok(pid)
}
struct FsBackend {
server: Arc<fuse::Server<PassthroughFs>>,
tag: [u8; FS_MAX_TAG_LEN],
avail_features: u64,
acked_features: u64,
acked_protocol_features: VhostUserProtocolFeatures,
workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM],
keep_rds: Vec<RawDescriptor>,
}
impl FsBackend {
pub fn new(tag: &str) -> anyhow::Result<Self> {
if tag.len() > FS_MAX_TAG_LEN {
bail!(
"fs tag is too long: {} (max supported: {})",
tag.len(),
FS_MAX_TAG_LEN
);
}
let mut fs_tag = [0u8; FS_MAX_TAG_LEN];
fs_tag[..tag.len()].copy_from_slice(tag.as_bytes());
let avail_features = virtio::base_features(ProtectionType::Unprotected)
| VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
// Use default passthroughfs config
let fs = PassthroughFs::new(Default::default())?;
let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec();
keep_rds.append(&mut fs.keep_rds());
let server = Arc::new(Server::new(fs));
Ok(FsBackend {
server,
tag: fs_tag,
avail_features,
acked_features: 0,
acked_protocol_features: VhostUserProtocolFeatures::empty(),
workers: Default::default(),
keep_rds,
})
}
}
impl VhostUserBackend for FsBackend {
const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */
const MAX_VRING_LEN: u16 = 1024;
type Doorbell = CallEvent;
type Error = anyhow::Error;
fn features(&self) -> u64 {
self.avail_features
}
fn ack_features(&mut self, value: u64) -> anyhow::Result<()> {
let unrequested_features = value & !self.avail_features;
if unrequested_features != 0 {
bail!("invalid features are given: {:#x}", unrequested_features);
}
self.acked_features |= value;
Ok(())
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
let features = VhostUserProtocolFeatures::from_bits(features)
.ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?;
let supported = self.protocol_features();
self.acked_protocol_features = features & supported;
Ok(())
}
fn acked_protocol_features(&self) -> u64 {
self.acked_protocol_features.bits()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
let config = virtio_fs_config {
tag: self.tag,
num_request_queues: Le32::from(1),
};
copy_config(data, 0, config.as_slice(), offset);
}
fn reset(&mut self) {
for handle in self.workers.iter_mut().filter_map(Option::take) {
handle.abort();
}
}
fn start_queue(
&mut self,
idx: usize,
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: Event,
) -> anyhow::Result<()> {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
warn!("Starting new queue handler without stopping old handler");
handle.abort();
}
// Safe because the executor is initialized in main() below.
let ex = FS_EXECUTOR.get().expect("Executor not initialized");
// Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX).
queue.ack_features(self.acked_features);
let kick_evt =
EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?;
let (handle, registration) = AbortHandle::new_pair();
let (_, fs_device_tube) = Tube::pair()?;
ex.spawn_local(Abortable::new(
handle_fs_queue(
queue,
mem,
call_evt,
kick_evt,
self.server.clone(),
Arc::new(Mutex::new(fs_device_tube)),
),
registration,
))
.detach();
self.workers[idx] = Some(handle);
Ok(())
}
fn stop_queue(&mut self, idx: usize) {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
handle.abort();
}
}
}
#[derive(FromArgs)]
#[argh(description = "")]
struct
|
{
#[argh(option, description = "path to a socket", arg_name = "PATH")]
socket: String,
#[argh(option, description = "the virtio-fs tag", arg_name = "TAG")]
tag: String,
#[argh(option, description = "path to a directory to share", arg_name = "DIR")]
shared_dir: PathBuf,
#[argh(option, description = "uid map to use", arg_name = "UIDMAP")]
uid_map: Option<String>,
#[argh(option, description = "gid map to use", arg_name = "GIDMAP")]
gid_map: Option<String>,
}
/// Starts a vhost-user fs device.
/// Returns an error if the given `args` is invalid or the device fails to run.
pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> {
let opts = match Options::from_args(&[program_name], args) {
Ok(opts) => opts,
Err(e) => {
if e.status.is_err() {
bail!(
|
Options
|
identifier_name
|
fs.rs
|
VirtioFeatures};
use crate::virtio;
use crate::virtio::copy_config;
use crate::virtio::fs::passthrough::PassthroughFs;
use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN};
use crate::virtio::vhost::user::device::handler::{
CallEvent, DeviceRequestHandler, VhostUserBackend,
};
static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new();
async fn handle_fs_queue(
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: EventAsync,
server: Arc<fuse::Server<PassthroughFs>>,
tube: Arc<Mutex<Tube>>,
) {
// Slot is always going to be 0 because we do not support DAX
let slot: u32 = 0;
loop {
if let Err(e) = kick_evt.next_val().await {
error!("Failed to read kick event for fs queue: {}", e);
break;
}
if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) {
error!("Process FS queue failed: {}", e);
break;
}
}
}
fn default_uidmap() -> String {
let euid = unsafe { libc::geteuid() };
format!("{} {} 1", euid, euid)
}
fn default_gidmap() -> String {
let egid = unsafe { libc::getegid() };
format!("{} {} 1", egid, egid)
}
fn jail_and_fork(
mut keep_rds: Vec<RawDescriptor>,
dir_path: PathBuf,
uid_map: Option<String>,
gid_map: Option<String>,
) -> anyhow::Result<i32> {
// Create new minijail sandbox
let mut j = Minijail::new()?;
j.namespace_pids();
j.namespace_user();
j.namespace_user_disable_setgroups();
j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?;
j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?;
j.run_as_init();
j.namespace_vfs();
j.namespace_net();
j.no_new_privs();
// Only pivot_root if we are not re-using the current root directory.
if dir_path != Path::new("/") {
// It's safe to call `namespace_vfs` multiple times.
j.namespace_vfs();
j.enter_pivot_root(&dir_path)?;
}
j.set_remount_mode(libc::MS_SLAVE);
let limit = get_max_open_files().context("failed to get max open files")?;
j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?;
// Make sure there are no duplicates in keep_rds
keep_rds.dedup();
// fork on the jail here
let pid = unsafe { j.fork(Some(&keep_rds))? };
if pid > 0
|
if pid < 0 {
bail!("Fork error! {}", std::io::Error::last_os_error());
}
Ok(pid)
}
struct FsBackend {
server: Arc<fuse::Server<PassthroughFs>>,
tag: [u8; FS_MAX_TAG_LEN],
avail_features: u64,
acked_features: u64,
acked_protocol_features: VhostUserProtocolFeatures,
workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM],
keep_rds: Vec<RawDescriptor>,
}
impl FsBackend {
pub fn new(tag: &str) -> anyhow::Result<Self> {
if tag.len() > FS_MAX_TAG_LEN {
bail!(
"fs tag is too long: {} (max supported: {})",
tag.len(),
FS_MAX_TAG_LEN
);
}
let mut fs_tag = [0u8; FS_MAX_TAG_LEN];
fs_tag[..tag.len()].copy_from_slice(tag.as_bytes());
let avail_features = virtio::base_features(ProtectionType::Unprotected)
| VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
// Use default passthroughfs config
let fs = PassthroughFs::new(Default::default())?;
let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec();
keep_rds.append(&mut fs.keep_rds());
let server = Arc::new(Server::new(fs));
Ok(FsBackend {
server,
tag: fs_tag,
avail_features,
acked_features: 0,
acked_protocol_features: VhostUserProtocolFeatures::empty(),
workers: Default::default(),
keep_rds,
})
}
}
impl VhostUserBackend for FsBackend {
const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */
const MAX_VRING_LEN: u16 = 1024;
type Doorbell = CallEvent;
type Error = anyhow::Error;
fn features(&self) -> u64 {
self.avail_features
}
fn ack_features(&mut self, value: u64) -> anyhow::Result<()> {
let unrequested_features = value & !self.avail_features;
if unrequested_features != 0 {
bail!("invalid features are given: {:#x}", unrequested_features);
}
self.acked_features |= value;
Ok(())
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
let features = VhostUserProtocolFeatures::from_bits(features)
.ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?;
let supported = self.protocol_features();
self.acked_protocol_features = features & supported;
Ok(())
}
fn acked_protocol_features(&self) -> u64 {
self.acked_protocol_features.bits()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
let config = virtio_fs_config {
tag: self.tag,
num_request_queues: Le32::from(1),
};
copy_config(data, 0, config.as_slice(), offset);
}
fn reset(&mut self) {
for handle in self.workers.iter_mut().filter_map(Option::take) {
handle.abort();
}
}
fn start_queue(
&mut self,
idx: usize,
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: Event,
) -> anyhow::Result<()> {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
warn!("Starting new queue handler without stopping old handler");
handle.abort();
}
// Safe because the executor is initialized in main() below.
let ex = FS_EXECUTOR.get().expect("Executor not initialized");
// Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX).
queue.ack_features(self.acked_features);
let kick_evt =
EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?;
let (handle, registration) = AbortHandle::new_pair();
let (_, fs_device_tube) = Tube::pair()?;
ex.spawn_local(Abortable::new(
handle_fs_queue(
queue,
mem,
call_evt,
kick_evt,
self.server.clone(),
Arc::new(Mutex::new(fs_device_tube)),
),
registration,
))
.detach();
self.workers[idx] = Some(handle);
Ok(())
}
fn stop_queue(&mut self, idx: usize) {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
handle.abort();
}
}
}
#[derive(FromArgs)]
#[argh(description = "")]
struct Options {
#[argh(option, description = "path to a socket", arg_name = "PATH")]
socket: String,
#[argh(option, description = "the virtio-fs tag", arg_name = "TAG")]
tag: String,
#[argh(option, description = "path to a directory to share", arg_name = "DIR")]
shared_dir: PathBuf,
#[argh(option, description = "uid map to use", arg_name = "UIDMAP")]
uid_map: Option<String>,
#[argh(option, description = "gid map to use", arg_name = "GIDMAP")]
gid_map: Option<String>,
}
/// Starts a vhost-user fs device.
/// Returns an error if the given `args` is invalid or the device fails to run.
pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> {
let opts = match Options::from_args(&[program_name], args) {
Ok(opts) => opts,
Err(e) => {
if e.status.is_err() {
bail
|
{
unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) };
}
|
conditional_block
|
fs.rs
|
VirtioFeatures};
use crate::virtio;
use crate::virtio::copy_config;
use crate::virtio::fs::passthrough::PassthroughFs;
use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN};
use crate::virtio::vhost::user::device::handler::{
CallEvent, DeviceRequestHandler, VhostUserBackend,
};
static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new();
async fn handle_fs_queue(
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: EventAsync,
server: Arc<fuse::Server<PassthroughFs>>,
tube: Arc<Mutex<Tube>>,
) {
// Slot is always going to be 0 because we do not support DAX
let slot: u32 = 0;
loop {
if let Err(e) = kick_evt.next_val().await {
error!("Failed to read kick event for fs queue: {}", e);
break;
}
if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) {
error!("Process FS queue failed: {}", e);
break;
}
}
}
fn default_uidmap() -> String {
let euid = unsafe { libc::geteuid() };
format!("{} {} 1", euid, euid)
}
fn default_gidmap() -> String {
let egid = unsafe { libc::getegid() };
format!("{} {} 1", egid, egid)
}
fn jail_and_fork(
mut keep_rds: Vec<RawDescriptor>,
dir_path: PathBuf,
uid_map: Option<String>,
gid_map: Option<String>,
) -> anyhow::Result<i32> {
// Create new minijail sandbox
let mut j = Minijail::new()?;
j.namespace_pids();
j.namespace_user();
j.namespace_user_disable_setgroups();
j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?;
j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?;
j.run_as_init();
j.namespace_vfs();
j.namespace_net();
j.no_new_privs();
// Only pivot_root if we are not re-using the current root directory.
if dir_path != Path::new("/") {
// It's safe to call `namespace_vfs` multiple times.
j.namespace_vfs();
j.enter_pivot_root(&dir_path)?;
}
j.set_remount_mode(libc::MS_SLAVE);
let limit = get_max_open_files().context("failed to get max open files")?;
j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?;
// Make sure there are no duplicates in keep_rds
keep_rds.dedup();
// fork on the jail here
let pid = unsafe { j.fork(Some(&keep_rds))? };
if pid > 0 {
unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) };
}
if pid < 0 {
bail!("Fork error! {}", std::io::Error::last_os_error());
}
Ok(pid)
}
struct FsBackend {
server: Arc<fuse::Server<PassthroughFs>>,
tag: [u8; FS_MAX_TAG_LEN],
avail_features: u64,
acked_features: u64,
acked_protocol_features: VhostUserProtocolFeatures,
workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM],
keep_rds: Vec<RawDescriptor>,
}
impl FsBackend {
pub fn new(tag: &str) -> anyhow::Result<Self> {
if tag.len() > FS_MAX_TAG_LEN {
bail!(
"fs tag is too long: {} (max supported: {})",
tag.len(),
FS_MAX_TAG_LEN
);
}
let mut fs_tag = [0u8; FS_MAX_TAG_LEN];
fs_tag[..tag.len()].copy_from_slice(tag.as_bytes());
let avail_features = virtio::base_features(ProtectionType::Unprotected)
| VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
// Use default passthroughfs config
let fs = PassthroughFs::new(Default::default())?;
let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec();
keep_rds.append(&mut fs.keep_rds());
let server = Arc::new(Server::new(fs));
Ok(FsBackend {
server,
tag: fs_tag,
avail_features,
acked_features: 0,
acked_protocol_features: VhostUserProtocolFeatures::empty(),
workers: Default::default(),
keep_rds,
})
}
}
impl VhostUserBackend for FsBackend {
const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */
const MAX_VRING_LEN: u16 = 1024;
type Doorbell = CallEvent;
type Error = anyhow::Error;
fn features(&self) -> u64 {
self.avail_features
}
fn ack_features(&mut self, value: u64) -> anyhow::Result<()> {
let unrequested_features = value & !self.avail_features;
if unrequested_features != 0 {
bail!("invalid features are given: {:#x}", unrequested_features);
}
self.acked_features |= value;
Ok(())
}
fn acked_features(&self) -> u64 {
self.acked_features
}
fn protocol_features(&self) -> VhostUserProtocolFeatures {
VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ
}
fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> {
let features = VhostUserProtocolFeatures::from_bits(features)
.ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?;
let supported = self.protocol_features();
self.acked_protocol_features = features & supported;
Ok(())
}
fn acked_protocol_features(&self) -> u64 {
self.acked_protocol_features.bits()
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
let config = virtio_fs_config {
tag: self.tag,
num_request_queues: Le32::from(1),
};
copy_config(data, 0, config.as_slice(), offset);
}
fn reset(&mut self) {
for handle in self.workers.iter_mut().filter_map(Option::take) {
handle.abort();
}
}
fn start_queue(
&mut self,
idx: usize,
mut queue: virtio::Queue,
mem: GuestMemory,
call_evt: Arc<Mutex<CallEvent>>,
kick_evt: Event,
) -> anyhow::Result<()> {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
warn!("Starting new queue handler without stopping old handler");
handle.abort();
}
// Safe because the executor is initialized in main() below.
let ex = FS_EXECUTOR.get().expect("Executor not initialized");
// Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX).
queue.ack_features(self.acked_features);
let kick_evt =
EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?;
let (handle, registration) = AbortHandle::new_pair();
let (_, fs_device_tube) = Tube::pair()?;
ex.spawn_local(Abortable::new(
handle_fs_queue(
queue,
mem,
call_evt,
kick_evt,
self.server.clone(),
Arc::new(Mutex::new(fs_device_tube)),
),
registration,
))
.detach();
self.workers[idx] = Some(handle);
Ok(())
}
fn stop_queue(&mut self, idx: usize) {
if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) {
handle.abort();
}
}
}
#[derive(FromArgs)]
#[argh(description = "")]
struct Options {
#[argh(option, description = "path to a socket", arg_name = "PATH")]
socket: String,
#[argh(option, description = "the virtio-fs tag", arg_name = "TAG")]
tag: String,
#[argh(option, description = "path to a directory to share", arg_name = "DIR")]
shared_dir: PathBuf,
|
/// Starts a vhost-user fs device.
/// Returns an error if the given `args` is invalid or the device fails to run.
pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> {
let opts = match Options::from_args(&[program_name], args) {
Ok(opts) => opts,
Err(e) => {
if e.status.is_err() {
bail!(
|
#[argh(option, description = "uid map to use", arg_name = "UIDMAP")]
uid_map: Option<String>,
#[argh(option, description = "gid map to use", arg_name = "GIDMAP")]
gid_map: Option<String>,
}
|
random_line_split
|
analysis.py
|
# put the files to analyze in a folder of your choosing in the same
# directory as this python file. This folder will also
# need to contain a "metadata.txt" file. The metadata file
# needs to be a .tsv with the filename, genre, author, title, era columns
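# Purely hypothetical illustration of one metadata.txt row (tab-separated,
# placeholder values only):
#   somefile.txt<TAB>genre<TAB>author<TAB>title<TAB>era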
directories = ["corpus"]
# Size of resultant PDF
figsize = (10,10)
# Colors and labels for HCA/PCA
# 1 is title, 2 is genre, 3 is era, 4 is author, 5 is dir, 6 is secname
label_name = 2
label_color = 2
# Size of the dots in the PCA
dot_size=4
# Set mean to zero?
scalemeanzero = False
# Plot Loadings?
plotloadings = False
##################
# PICKLE THINGS? #
##################
# Save a pickle of the PCA loadings?
pickle_loadings = True
''' Unimplemented features
#Pickle or Analyze?
topickle = False
if topickle == True:
skipanalysis = True
elif topickle == False:
skipanalysis = False
skipimport = False
# First number 1 used or 0 unused, second min features, third max features
featurerange = (0, 0, 400)
# Set Vocabulary 1 used 0 unused, list of vocab
setvocab = (0, [])
'''
################
# MAIN PROGRAM #
################
if __name__ == "__main__":
# Get yeshi titles
y1 = open("ymetadata.txt","r").read()
yeshititles = []
for line in y1.split("\n"):
cells = line.split("\t")
yeshititles.append(cells[2])
print("Acquiring Data")
# Send the directories and genres to include to the appropriate function
# from paulutility.
if fschoise == "full":
infolist = paulutility.fulltextcontent(directories, items_to_include, item_type, e=eras)
elif fschoise == "split":
infolist = paulutility.splittextcontent(directories, items_to_include, item_type,e=eras)
elif fschoise == "num":
infolist = paulutility.fullsplitbynum(directories, divnum, items_to_include, item_type, e=eras)
else:
print("Not a valid c
|
umerate(infolist[1]):
if title in yeshititles:
infolist[2][i] = "野史"
'''
if "外史" in title or "逸史" in title or "密史" in title or "野史" in title:
priorgenre = infolist[2][i]
if priorgenre == "小说":
infolist[2][i] = "ny"
elif priorgenre == "演义":
infolist[2][i] = "yy"
elif priorgenre == "志存记录":
infolist[2][i] = "hy"
'''
if textbalance:
if item_type == 0:
dt = infolist[2]
elif item_type == 1:
dt = infolist[4]
gs = Series(dt)
vcgs = gs.value_counts()
ungs = list(set(dt))
genstart = []
for ug in ungs:
genstart.append(dt.index(ug))
rangesets = []
genstart= sorted(genstart)
for i,it in enumerate(genstart):
if i != (len(genstart)) -1:
#print(i, it, genstart[i +1])
randrange = [x for x in range(it,genstart[i+1])]
#print(len(randrange))
rangesets.append(randrange)
else:
#print(i,it)
randrange = [x for x in range(it,len(dt))]
#print(len(randrange))
rangesets.append(randrange)
reduced = []
for rang in rangesets:
red = random.sample(rang,vcgs[-1])
reduced.extend(red)
altinfo = []
for i in range(0,len(infolist)):
nl = []
for it in reduced:
nl.append(infolist[i][it])
altinfo.append(nl)
infolist = altinfo
print("Making vectorizer")
# create a vectorizer object to vectorize the documents into matrices. These
# vectorizers return sparse matrices.
# Calculate using plain term frequency
if cnchoice == "tf":
vectorizer = TfidfVectorizer(use_idf=False, analyzer='word', token_pattern='\S+', ngram_range=ngramrange, max_features=features, vocabulary=voc,norm='l2')
# Calculate using TFIDF
elif cnchoice == "tfidf":
vectorizer = TfidfVectorizer(use_idf=True, analyzer='word', token_pattern='\S+', ngram_range=ngramrange, max_features=features,vocabulary=voc)
# Calculate using raw term counts
elif cnchoice == "raw":
vectorizer = CountVectorizer(analyzer='word', token_pattern = '\S+', ngram_range=ngramrange, max_features=features,vocabulary=voc)
# Calculate using a chi measure (based on Ted Underwood's tech note)
# This returns a DataFrame and a list of vocabulary
elif cnchoice == "chi":
df, vocab = paulutility.chinormal(infolist[0], ngramrange, features, infolist[2])
densematrix = df#.toarray()
print("Fitting vectorizer")
# create the Matrix if using a sklearn vectorizer object
# this will finish with a matrix in the same form as the one returned
# using the chi metric
if cnchoice != "chi":
matrix = vectorizer.fit_transform(infolist[0])
vocab = vectorizer.get_feature_names()
# A dense matrix is necessary for some purposes, so I convert the sparse
# matrix to a dense one
densematrix = matrix.toarray()
if scalemeanzero:
densematrix = scale(densematrix) #sklearn scale to mean 0, var 1
df = DataFrame(densematrix, columns=vocab, index=infolist[2])
################
# PCA ANALYSIS #
################
#df = df[df[2] != "志存记录"]
#print(df)
if analysischoise == "pca":
# run pca
# by default I am only looking at the first two PCs
pca = PCA(n_components=2)
pca2 = PCA(n_components=2)
pca2.fit(df)
plt.figure(figsize=figsize)
plt.plot(pca2.explained_variance_ratio_,marker='o')
plt.xticks(np.arange(0,10,1))
plt.xlabel('Principal Component')
plt.ylabel('Explained Variance')
plt.title('Scree Plot')
plt.savefig(screeplotname)
plt.clf()
if item_type == 0:
dt = infolist[2]
elif item_type == 1:
dt = infolist[4]
seriesgenre = Series(dt)
genrecount = seriesgenre.value_counts()
print(genrecount)
titleseries = Series(infolist[1])
wf = open("usedtitles.txt","w")
for title in set(infolist[1]):
wf.write(title + "\n")
wf.close()
titlecount = titleseries.value_counts()
print(titlecount)
my_pca = pca.fit(df).transform(df) # same as PCA(n_components=2).fit_transform(df)
# in sklearn, the loadings are held in pca.components_
loadings = pca.components_
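# Note: in scikit-learn, pca.components_ has shape (n_components, n_features),
# so loadings[pcs[0]] / loadings[pcs[1]] below are the per-feature weights of
# the two selected principal components.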
# Pickle the loadings (useful for extra analysis), so
# I don't have to reload data every time
if pickle_loadings:
pickle.dump([vocab,loadings], open('loadings.p','wb'))
if plotloadings == True:
# I first plot the loadings
plt.figure(figsize=figsize)
# Scatter plot using the loadings, needs work
#plt.scatter(*loadings, alpha=0.0)
plt.scatter(loadings[pcs[0]], loadings[pcs[1]], alpha=0.0)
#plt.scatter([0,0],[0,0],alpha=0.0)
# Label with explained variance
pclabel1 = "PC"+str(pcs[0] + 1) + " "
pclabel2 = "PC"+str(pcs[1] + 1) + " "
plt.xlabel(pclabel1+str(pca.explained_variance_ratio_[pcs[0]]))
plt.ylabel(pclabel2+str(pca.explained_variance_ratio_[pcs[1]]))
# Set a Chinese Font. Mac compatible. Will need something else
# on windows
chinese = FontProperties(fname='/Library/Fonts/Songti.ttc')
matplotlib.rc('font', family='STHeiti')
# Iterate through the vocab and plot where it falls on loadings graph
# numpy array the loadings
|
hoice")
# Kill the program
exit()
for i,title in en
|
conditional_block
|
analysis.py
|
# put the files to analyze in a folder of your choosing in the same
# directory as this python file. This folder will also
# need to contain a "metadata.txt" file. The metadata file
# needs to be a .tsv with the filename, genre, author, title, era columns
directories = ["corpus"]
# Size of resultant PDF
figsize = (10,10)
# Colors and labels for HCA/PCA
# 1 is title, 2 is genre, 3 is era, 4 is author, 5 is dir, 6 is secname
label_name = 2
label_color = 2
# Size of the dots in the PCA
dot_size=4
# Set mean to zero?
scalemeanzero = False
# Plot Loadings?
plotloadings = False
##################
# PICKLE THINGS? #
##################
# Save a pickle of the PCA loadings?
pickle_loadings = True
''' Unimplemented features
#Pickle or Analyze?
topickle = False
if topickle == True:
skipanalysis = True
elif topickle == False:
skipanalysis = False
skipimport = False
# First number 1 used or 0 unused, second min features, third max features
featurerange = (0, 0, 400)
# Set Vocabulary 1 used 0 unused, list of vocab
setvocab = (0, [])
'''
################
# MAIN PROGRAM #
################
if __name__ == "__main__":
# Get yeshi titles
y1 = open("ymetadata.txt","r").read()
yeshititles = []
for line in y1.split("\n"):
cells = line.split("\t")
yeshititles.append(cells[2])
print("Acquiring Data")
# Send the directories and genres to include to the appropriate function
# from paulutility.
if fschoise == "full":
infolist = paulutility.fulltextcontent(directories, items_to_include, item_type, e=eras)
elif fschoise == "split":
infolist = paulutility.splittextcontent(directories, items_to_include, item_type,e=eras)
elif fschoise == "num":
infolist = paulutility.fullsplitbynum(directories, divnum, items_to_include, item_type, e=eras)
else:
print("Not a valid choice")
# Kill the program
exit()
for i,title in enumerate(infolist[1]):
if title in yeshititles:
infolist[2][i] = "野史"
'''
if "外史" in title or "逸史" in title or "密史" in title or "野史" in title:
priorgenre = infolist[2][i]
if priorgenre == "小说":
infolist[2][i] = "ny"
elif priorgenre == "演义":
infolist[2][i] = "yy"
elif priorgenre == "志存记录":
infolist[2][i] = "hy"
'''
if textbalance:
if item_type == 0:
dt = infolist[2]
elif item_type == 1:
dt = infolist[4]
gs = Series(dt)
vcgs = gs.value_counts()
ungs = list(set(dt))
genstart = []
for ug in ungs:
genstart.append(dt.index(ug))
rangesets = []
genstart= sorted(genstart)
for i,it in enumerate(genstart):
if i != (len(genstart)) -1:
#print(i, it, genstart[i +1])
randrange = [x for x in range(it,genstart[i+1])]
#print(len(randrange))
rangesets.append(randrange)
else:
#print(i,it)
randrange = [x for x in range(it,len(dt))]
#print(len(randrange))
rangesets.append(randrange)
reduced = []
for rang in rangesets:
red = random.sample(rang,vcgs[-1])
reduced.extend(red)
altinfo = []
for i in range(0,len(infolist)):
nl = []
for it in reduced:
|
infolist = altinfo
print("Making vectorizer")
# create a vectorizer object to vectorize the documents into matrices. These
# vectorizers return sparse matrices.
# Calculate using plain term frequency
if cnchoice == "tf":
vectorizer = TfidfVectorizer(use_idf=False, analyzer='word', token_pattern='\S+', ngram_range=ngramrange, max_features=features, vocabulary=voc,norm='l2')
# Calculate using TFIDF
elif cnchoice == "tfidf":
vectorizer = TfidfVectorizer(use_idf=True, analyzer='word', token_pattern='\S+', ngram_range=ngramrange, max_features=features,vocabulary=voc)
# Calculate using raw term counts
elif cnchoice == "raw":
vectorizer = CountVectorizer(analyzer='word', token_pattern = '\S+', ngram_range=ngramrange, max_features=features,vocabulary=voc)
# Calculate using a chi measure (based on Ted Underwood's tech note)
# This returns a DataFrame and a list of vocabulary
elif cnchoice == "chi":
df, vocab = paulutility.chinormal(infolist[0], ngramrange, features, infolist[2])
densematrix = df#.toarray()
print("Fitting vectorizer")
# create the Matrix if using a sklearn vectorizer object
# this will finish with a matrix in the same form as the one returned
# using the chi metric
if cnchoice != "chi":
matrix = vectorizer.fit_transform(infolist[0])
vocab = vectorizer.get_feature_names()
# A dense matrix is necessary for some purposes, so I convert the sparse
# matrix to a dense one
densematrix = matrix.toarray()
if scalemeanzero:
densematrix = scale(densematrix) #sklearn scale to mean 0, var 1
df = DataFrame(densematrix, columns=vocab, index=infolist[2])
################
# PCA ANALYSIS #
################
#df = df[df[2] != "志存记录"]
#print(df)
if analysischoise == "pca":
# run pca
# by default I am only looking at the first two PCs
pca = PCA(n_components=2)
pca2 = PCA(n_components=2)
pca2.fit(df)
plt.figure(figsize=figsize)
plt.plot(pca2.explained_variance_ratio_,marker='o')
plt.xticks(np.arange(0,10,1))
plt.xlabel('Principal Component')
plt.ylabel('Explained Variance')
plt.title('Scree Plot')
plt.savefig(screeplotname)
plt.clf()
if item_type == 0:
dt = infolist[2]
elif item_type == 1:
dt = infolist[4]
seriesgenre = Series(dt)
genrecount = seriesgenre.value_counts()
print(genrecount)
titleseries = Series(infolist[1])
wf = open("usedtitles.txt","w")
for title in set(infolist[1]):
wf.write(title + "\n")
wf.close()
titlecount = titleseries.value_counts()
print(titlecount)
my_pca = pca.fit(df).transform(df) # same as PCA(n_components=2).fit_transform(df)
# in sklearn, the loadings are held in pca.components_
loadings = pca.components_
# Pickle the loadings (useful for extra analysis), so
# I don't have to reload data every time
if pickle_loadings:
pickle.dump([vocab,loadings], open('loadings.p','wb'))
if plotloadings == True:
# I first plot the loadings
plt.figure(figsize=figsize)
# Scatter plot using the loadings, needs work
#plt.scatter(*loadings, alpha=0.0)
plt.scatter(loadings[pcs[0]], loadings[pcs[1]], alpha=0.0)
#plt.scatter([0,0],[0,0],alpha=0.0)
# Label with explained variance
pclabel1 = "PC"+str(pcs[0] + 1) + " "
pclabel2 = "PC"+str(pcs[1] + 1) + " "
plt.xlabel(pclabel1+str(pca.explained_variance_ratio_[pcs[0]]))
plt.ylabel(pclabel2+str(pca.explained_variance_ratio_[pcs[1]]))
# Set a Chinese Font. Mac compatible. Will need something else
# on windows
chinese = FontProperties(fname='/Library/Fonts/Songti.ttc')
matplotlib.rc('font', family='STHeiti')
# Iterate through the vocab and plot where it falls on loadings graph
# numpy array the loadings info is
|
nl.append(infolist[i][it])
altinfo.append(nl)
|
random_line_split
|
tools.py
|
data = yaml.safe_load(descriptor)
except Exception as ex:
raise CekitError('Cannot load descriptor', ex)
if isinstance(data, basestring):
LOGGER.debug("Reading descriptor from '{}' file...".format(descriptor))
if os.path.exists(descriptor):
with open(descriptor, 'r') as fh:
return yaml.safe_load(fh)
raise CekitError(
"Descriptor could not be found on the '{}' path, please check your arguments!".format(descriptor))
LOGGER.debug("Reading descriptor directly...")
return data
def decision(question):
"""Asks user for a question returning True/False answed"""
return click.confirm(question, show_default=True)
def get_brew_url(md5):
try:
LOGGER.debug("Getting brew details for an artifact with '{}' md5 sum".format(md5))
list_archives_cmd = ['/usr/bin/brew', 'call', '--json-output', 'listArchives',
"checksum={}".format(md5), 'type=maven']
LOGGER.debug("Executing '{}'.".format(" ".join(list_archives_cmd)))
try:
json_archives = subprocess.check_output(list_archives_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
if ex.output is not None and 'AuthError' in ex.output:
LOGGER.warning(
"Brew authentication failed, please make sure you have a valid Kerberos ticket")
raise CekitError("Could not fetch archives for checksum {}".format(md5), ex)
archives = yaml.safe_load(json_archives)
if not archives:
raise CekitError("Artifact with md5 checksum {} could not be found in Brew".format(md5))
archive = archives[0]
build_id = archive['build_id']
filename = archive['filename']
group_id = archive['group_id']
artifact_id = archive['artifact_id']
version = archive['version']
get_build_cmd = ['brew', 'call', '--json-output',
'getBuild', "buildInfo={}".format(build_id)]
LOGGER.debug("Executing '{}'".format(" ".join(get_build_cmd)))
try:
json_build = subprocess.check_output(get_build_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
raise CekitError("Could not fetch build {} from Brew".format(build_id), ex)
build = yaml.safe_load(json_build)
build_states = ['BUILDING', 'COMPLETE', 'DELETED', 'FAILED', 'CANCELED']
# State 1 means: COMPLETE which is the only success state. Other states are:
#
# 'BUILDING': 0
# 'COMPLETE': 1
# 'DELETED': 2
# 'FAILED': 3
# 'CANCELED': 4
if build['state'] != 1:
raise CekitError(
"Artifact with checksum {} was found in Koji metadata but the build is in incorrect state ({}) making "
"the artifact not available for downloading anymore".format(md5, build_states[build['state']]))
package = build['package_name']
release = build['release']
url = 'http://download.devel.redhat.com/brewroot/packages/' + package + '/' + \
version.replace('-', '_') + '/' + release + '/maven/' + \
group_id.replace('.', '/') + '/' + \
artifact_id + '/' + version + '/' + filename
except subprocess.CalledProcessError as ex:
LOGGER.error("Can't fetch artifacts details from brew: '{}'.".format(
ex.output))
raise ex
return url
def copy_recursively(source_directory, destination_directory):
"""
Copies contents of a directory to selected target location (also a directory).
If the source directory contains a file, it will copy the specific source file to destination.
If the source directory contains a directory, it will copy all the content recursively.
Symlinks are preserved (not followed).
The destination directory tree will be created if it does not exist.
"""
# If the source directory does not exist, return
if not os.path.isdir(source_directory):
return
# Iterate over content in the source directory
for name in os.listdir(source_directory):
src = os.path.join(source_directory, name)
dst = os.path.join(destination_directory, name)
LOGGER.debug("Copying '{}' to '{}'...".format(src, dst))
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if os.path.islink(src):
os.symlink(os.readlink(src), dst)
elif os.path.isdir(src):
shutil.copytree(src, dst, symlinks=True)
else:
shutil.copy2(src, dst)
class Chdir(object):
""" Context manager for changing the current working directory """
def __init__(self, new_path):
self.newPath = os.path.expanduser(new_path)
self.savedPath = None
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DependencyHandler(object):
|
self.os_release = {}
self.platform = None
os_release_path = "/etc/os-release"
if os.path.exists(os_release_path):
# Read the file containing operating system information
with open(os_release_path, 'r') as f:
content = f.readlines()
self.os_release = dict([l.strip().split('=')
for l in content if not l.isspace() and not l.strip().startswith('#')])
# Remove the quote character, if it's there
for key in self.os_release.keys():
self.os_release[key] = self.os_release[key].strip('"')
if not self.os_release or 'ID' not in self.os_release or 'NAME' not in self.os_release or 'VERSION' not in self.os_release:
LOGGER.warning(
"You are running CEKit on an unknown platform. External dependencies suggestions may not work!")
return
self.platform = self.os_release['ID']
if self.os_release['ID'] not in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
LOGGER.warning(
"You are running CEKit on an untested platform: {} {}. External dependencies "
"suggestions will not work!".format(self.os_release['NAME'], self.os_release['VERSION']))
return
LOGGER.info("You are running on known platform: {} {}".format(
self.os_release['NAME'], self.os_release['VERSION']))
def _handle_dependencies(self, dependencies):
"""
The dependencies argument is expected to be a dict in the following format:
{
PACKAGE_ID: { 'package': PACKAGE_NAME, 'command': COMMAND_TO_TEST_FOR_PACKAGE_EXISTENCE },
}
Additionally every package can contain platform specific information, for example:
{
'git': {
'package': 'git',
'executable': 'git',
'fedora': {
'package': 'git-latest'
}
}
}
If platform-specific information is available for the platform on which CEKit
is currently running, it takes precedence over the defaults.
"""
if not dependencies:
LOGGER.debug("No dependencies found, skipping...")
return
for dependency in dependencies.keys():
current_dependency = dependencies[dependency]
package = current_dependency.get('package')
library = current_dependency.get('library')
executable = current_dependency.get('executable')
if self.platform in current_dependency:
package = current_dependency[self.platform].get('package', package)
library = current_dependency[self.platform].get('library', library)
executable = current_dependency[self.platform].get('executable', executable)
LOGGER.debug("Checking if '{}' dependency is provided...".format(dependency))
if library:
if self._check_for_library(library):
LOGGER.debug("Required CEKit library '{}' was found as a '{}' module!".format(
dependency, library))
continue
else:
msg = "Required CEKit library '{}' was not found; required module '{}' could not be found.".format(
dependency, library)
# Library was not found, check if we have a hint
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
msg += " Try to install the '{}' package.".format(package)
raise CekitError(msg)
if executable:
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
self._check_for_executable(dependency, executable, package)
else:
self._check_for_executable(dependency, executable)
LOGGER.debug("All dependencies provided!")
# pylint: disable=R0201
def _check_for_library(self, library):
library_found = False
if sys.version_info[0] < 3:
import imp
try:
imp.find_module(library)
library_found = True
except ImportError:
pass
else:
import importlib
|
"""
External dependency manager. Understands what platform we are currently
running on and what dependencies need to be installed to satisfy the
requirements.
"""
# List of operating system families on which CEKit is known to work.
# It may work on other operating systems too, but it was not tested.
KNOWN_OPERATING_SYSTEMS = ['fedora', 'centos', 'rhel']
# Set of core CEKit external dependencies.
# Format is defined below, in the handle_dependencies() method
EXTERNAL_CORE_DEPENDENCIES = {
'git': {
'package': 'git',
'executable': 'git'
}
}
def __init__(self):
|
identifier_body
|
tools.py
|
as ex:
if ex.output is not None and 'AuthError' in ex.output:
LOGGER.warning(
"Brew authentication failed, please make sure you have a valid Kerberos ticket")
raise CekitError("Could not fetch archives for checksum {}".format(md5), ex)
archives = yaml.safe_load(json_archives)
if not archives:
raise CekitError("Artifact with md5 checksum {} could not be found in Brew".format(md5))
archive = archives[0]
build_id = archive['build_id']
filename = archive['filename']
group_id = archive['group_id']
artifact_id = archive['artifact_id']
version = archive['version']
get_build_cmd = ['brew', 'call', '--json-output',
'getBuild', "buildInfo={}".format(build_id)]
LOGGER.debug("Executing '{}'".format(" ".join(get_build_cmd)))
try:
json_build = subprocess.check_output(get_build_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
raise CekitError("Could not fetch build {} from Brew".format(build_id), ex)
build = yaml.safe_load(json_build)
build_states = ['BUILDING', 'COMPLETE', 'DELETED', 'FAILED', 'CANCELED']
# State 1 means: COMPLETE which is the only success state. Other states are:
#
# 'BUILDING': 0
# 'COMPLETE': 1
# 'DELETED': 2
# 'FAILED': 3
# 'CANCELED': 4
if build['state'] != 1:
raise CekitError(
"Artifact with checksum {} was found in Koji metadata but the build is in incorrect state ({}) making "
"the artifact not available for downloading anymore".format(md5, build_states[build['state']]))
package = build['package_name']
release = build['release']
url = 'http://download.devel.redhat.com/brewroot/packages/' + package + '/' + \
version.replace('-', '_') + '/' + release + '/maven/' + \
group_id.replace('.', '/') + '/' + \
artifact_id + '/' + version + '/' + filename
except subprocess.CalledProcessError as ex:
LOGGER.error("Can't fetch artifacts details from brew: '{}'.".format(
ex.output))
raise ex
return url
def copy_recursively(source_directory, destination_directory):
"""
Copies contents of a directory to selected target location (also a directory).
If the source directory contains a file, it will copy the specific source file to destination.
If the source directory contains a directory, it will copy all the content recursively.
Symlinks are preserved (not followed).
The destination directory tree will be created if it does not exist.
"""
# If the source directory does not exist, return
if not os.path.isdir(source_directory):
return
# Iterate over content in the source directory
for name in os.listdir(source_directory):
src = os.path.join(source_directory, name)
dst = os.path.join(destination_directory, name)
LOGGER.debug("Copying '{}' to '{}'...".format(src, dst))
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if os.path.islink(src):
os.symlink(os.readlink(src), dst)
elif os.path.isdir(src):
shutil.copytree(src, dst, symlinks=True)
else:
shutil.copy2(src, dst)
class Chdir(object):
""" Context manager for changing the current working directory """
def __init__(self, new_path):
self.newPath = os.path.expanduser(new_path)
self.savedPath = None
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DependencyHandler(object):
"""
External dependency manager. Understands what platform we are currently
running on and what dependencies need to be installed to satisfy the
requirements.
"""
# List of operating system families on which CEKit is known to work.
# It may work on other operating systems too, but it was not tested.
KNOWN_OPERATING_SYSTEMS = ['fedora', 'centos', 'rhel']
# Set of core CEKit external dependencies.
# Format is defined below, in the handle_dependencies() method
EXTERNAL_CORE_DEPENDENCIES = {
'git': {
'package': 'git',
'executable': 'git'
}
}
def __init__(self):
self.os_release = {}
self.platform = None
os_release_path = "/etc/os-release"
if os.path.exists(os_release_path):
# Read the file containing operating system information
with open(os_release_path, 'r') as f:
content = f.readlines()
self.os_release = dict([l.strip().split('=')
for l in content if not l.isspace() and not l.strip().startswith('#')])
# Remove the quote character, if it's there
for key in self.os_release.keys():
self.os_release[key] = self.os_release[key].strip('"')
if not self.os_release or 'ID' not in self.os_release or 'NAME' not in self.os_release or 'VERSION' not in self.os_release:
LOGGER.warning(
"You are running CEKit on an unknown platform. External dependencies suggestions may not work!")
return
self.platform = self.os_release['ID']
if self.os_release['ID'] not in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
LOGGER.warning(
"You are running CEKit on an untested platform: {} {}. External dependencies "
"suggestions will not work!".format(self.os_release['NAME'], self.os_release['VERSION']))
return
LOGGER.info("You are running on known platform: {} {}".format(
self.os_release['NAME'], self.os_release['VERSION']))
def _handle_dependencies(self, dependencies):
"""
The dependencies argument is expected to be a dict in the following format:
{
PACKAGE_ID: { 'package': PACKAGE_NAME, 'command': COMMAND_TO_TEST_FOR_PACKAGE_EXISTENCE },
}
Additionally every package can contain platform specific information, for example:
{
'git': {
'package': 'git',
'executable': 'git',
'fedora': {
'package': 'git-latest'
}
}
}
If platform-specific information is available for the platform on which CEKit
is currently running, it takes precedence over the defaults.
"""
if not dependencies:
LOGGER.debug("No dependencies found, skipping...")
return
for dependency in dependencies.keys():
current_dependency = dependencies[dependency]
package = current_dependency.get('package')
library = current_dependency.get('library')
executable = current_dependency.get('executable')
if self.platform in current_dependency:
package = current_dependency[self.platform].get('package', package)
library = current_dependency[self.platform].get('library', library)
executable = current_dependency[self.platform].get('executable', executable)
LOGGER.debug("Checking if '{}' dependency is provided...".format(dependency))
if library:
if self._check_for_library(library):
LOGGER.debug("Required CEKit library '{}' was found as a '{}' module!".format(
dependency, library))
continue
else:
msg = "Required CEKit library '{}' was not found; required module '{}' could not be found.".format(
dependency, library)
# Library was not found, check if we have a hint
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
msg += " Try to install the '{}' package.".format(package)
raise CekitError(msg)
if executable:
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
self._check_for_executable(dependency, executable, package)
else:
self._check_for_executable(dependency, executable)
LOGGER.debug("All dependencies provided!")
# pylint: disable=R0201
def _check_for_library(self, library):
library_found = False
if sys.version_info[0] < 3:
import imp
try:
imp.find_module(library)
library_found = True
except ImportError:
pass
else:
import importlib
if importlib.util.find_spec(library):
library_found = True
return library_found
# pylint: disable=no-self-use
def _check_for_executable(self, dependency, executable, package=None):
if os.path.isabs(executable):
if self._is_program(executable):
return True
else:
return False
path = os.environ.get("PATH", os.defpath)
path = path.split(os.pathsep)
for directory in path:
file_path = os.path.join(os.path.normcase(directory), executable)
if self._is_program(file_path):
LOGGER.debug("CEKit dependency '{}' provided via the '{}' executable.".format(
dependency, file_path))
return
msg = "CEKit dependency: '{}' was not found, please provide the '{}' executable.".format(
dependency, executable)
if package:
msg += " To satisfy this requirement you can install the '{}' package.".format(package)
raise CekitError(msg)
def _is_program(self, path):
if os.path.exists(path) and os.access(path, os.F_OK | os.X_OK) and not os.path.isdir(path):
return True
return False
def
|
handle_core_dependencies
|
identifier_name
|
|
tools.py
|
data = yaml.safe_load(descriptor)
except Exception as ex:
raise CekitError('Cannot load descriptor', ex)
if isinstance(data, basestring):
LOGGER.debug("Reading descriptor from '{}' file...".format(descriptor))
if os.path.exists(descriptor):
with open(descriptor, 'r') as fh:
return yaml.safe_load(fh)
raise CekitError(
"Descriptor could not be found on the '{}' path, please check your arguments!".format(descriptor))
LOGGER.debug("Reading descriptor directly...")
return data
def decision(question):
"""Asks user for a question returning True/False answed"""
return click.confirm(question, show_default=True)
def get_brew_url(md5):
try:
LOGGER.debug("Getting brew details for an artifact with '{}' md5 sum".format(md5))
list_archives_cmd = ['/usr/bin/brew', 'call', '--json-output', 'listArchives',
"checksum={}".format(md5), 'type=maven']
LOGGER.debug("Executing '{}'.".format(" ".join(list_archives_cmd)))
try:
json_archives = subprocess.check_output(list_archives_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
if ex.output is not None and 'AuthError' in ex.output:
LOGGER.warning(
"Brew authentication failed, please make sure you have a valid Kerberos ticket")
raise CekitError("Could not fetch archives for checksum {}".format(md5), ex)
archives = yaml.safe_load(json_archives)
if not archives:
raise CekitError("Artifact with md5 checksum {} could not be found in Brew".format(md5))
archive = archives[0]
build_id = archive['build_id']
filename = archive['filename']
group_id = archive['group_id']
artifact_id = archive['artifact_id']
version = archive['version']
get_build_cmd = ['brew', 'call', '--json-output',
'getBuild', "buildInfo={}".format(build_id)]
LOGGER.debug("Executing '{}'".format(" ".join(get_build_cmd)))
try:
json_build = subprocess.check_output(get_build_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
raise CekitError("Could not fetch build {} from Brew".format(build_id), ex)
build = yaml.safe_load(json_build)
build_states = ['BUILDING', 'COMPLETE', 'DELETED', 'FAILED', 'CANCELED']
# State 1 means: COMPLETE which is the only success state. Other states are:
#
# 'BUILDING': 0
# 'COMPLETE': 1
# 'DELETED': 2
# 'FAILED': 3
# 'CANCELED': 4
if build['state'] != 1:
raise CekitError(
"Artifact with checksum {} was found in Koji metadata but the build is in incorrect state ({}) making "
"the artifact not available for downloading anymore".format(md5, build_states[build['state']]))
package = build['package_name']
release = build['release']
url = 'http://download.devel.redhat.com/brewroot/packages/' + package + '/' + \
version.replace('-', '_') + '/' + release + '/maven/' + \
group_id.replace('.', '/') + '/' + \
artifact_id + '/' + version + '/' + filename
except subprocess.CalledProcessError as ex:
LOGGER.error("Can't fetch artifacts details from brew: '{}'.".format(
ex.output))
raise ex
return url
def copy_recursively(source_directory, destination_directory):
"""
Copies contents of a directory to selected target location (also a directory).
If the source directory contains a file, it will copy the specific source file to destination.
If the source directory contains a directory, it will copy all the content recursively.
Symlinks are preserved (not followed).
The destination directory tree will be created if it does not exist.
"""
# If the source directory does not exist, return
if not os.path.isdir(source_directory):
return
# Iterate over content in the source directory
for name in os.listdir(source_directory):
src = os.path.join(source_directory, name)
dst = os.path.join(destination_directory, name)
LOGGER.debug("Copying '{}' to '{}'...".format(src, dst))
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if os.path.islink(src):
os.symlink(os.readlink(src), dst)
elif os.path.isdir(src):
shutil.copytree(src, dst, symlinks=True)
else:
shutil.copy2(src, dst)
class Chdir(object):
""" Context manager for changing the current working directory """
def __init__(self, new_path):
self.newPath = os.path.expanduser(new_path)
self.savedPath = None
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DependencyHandler(object):
"""
External dependency manager. Understands what platform we are currently
running on and what dependencies need to be installed to satisfy the
requirements.
"""
# List of operating system families on which CEKit is known to work.
# It may work on other operating systems too, but it was not tested.
KNOWN_OPERATING_SYSTEMS = ['fedora', 'centos', 'rhel']
# Set of core CEKit external dependencies.
# Format is defined below, in the handle_dependencies() method
EXTERNAL_CORE_DEPENDENCIES = {
'git': {
'package': 'git',
'executable': 'git'
}
}
def __init__(self):
self.os_release = {}
self.platform = None
os_release_path = "/etc/os-release"
if os.path.exists(os_release_path):
# Read the file containing operating system information
with open(os_release_path, 'r') as f:
content = f.readlines()
self.os_release = dict([l.strip().split('=')
for l in content if not l.isspace() and not l.strip().startswith('#')])
# Remove the quote character, if it's there
for key in self.os_release.keys():
self.os_release[key] = self.os_release[key].strip('"')
if not self.os_release or 'ID' not in self.os_release or 'NAME' not in self.os_release or 'VERSION' not in self.os_release:
LOGGER.warning(
"You are running CEKit on an unknown platform. External dependencies suggestions may not work!")
return
self.platform = self.os_release['ID']
if self.os_release['ID'] not in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
LOGGER.warning(
"You are running CEKit on an untested platform: {} {}. External dependencies "
"suggestions will not work!".format(self.os_release['NAME'], self.os_release['VERSION']))
return
LOGGER.info("You are running on known platform: {} {}".format(
self.os_release['NAME'], self.os_release['VERSION']))
def _handle_dependencies(self, dependencies):
"""
The dependencies argument is expected to be a dict in the following format:
{
PACKAGE_ID: { 'package': PACKAGE_NAME, 'command': COMMAND_TO_TEST_FOR_PACKAGE_EXISTENCE },
}
Additionally every package can contain platform specific information, for example:
{
'git': {
'package': 'git',
'executable': 'git',
'fedora': {
'package': 'git-latest'
}
}
}
If platform-specific information is available for the platform on which CEKit
is currently running, it takes precedence over the defaults.
"""
if not dependencies:
LOGGER.debug("No dependencies found, skipping...")
return
for dependency in dependencies.keys():
current_dependency = dependencies[dependency]
package = current_dependency.get('package')
library = current_dependency.get('library')
executable = current_dependency.get('executable')
if self.platform in current_dependency:
package = current_dependency[self.platform].get('package', package)
library = current_dependency[self.platform].get('library', library)
executable = current_dependency[self.platform].get('executable', executable)
LOGGER.debug("Checking if '{}' dependency is provided...".format(dependency))
if library:
if self._check_for_library(library):
LOGGER.debug("Required CEKit library '{}' was found as a '{}' module!".format(
dependency, library))
continue
else:
msg = "Required CEKit library '{}' was not found; required module '{}' could not be found.".format(
dependency, library)
# Library was not found, check if we have a hint
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
msg += " Try to install the '{}' package.".format(package)
raise CekitError(msg)
if executable:
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
self._check_for_executable(dependency, executable, package)
else:
|
LOGGER.debug("All dependencies provided!")
# pylint: disable=R0201
def _check_for_library(self, library):
library_found = False
if sys.version_info[0] < 3:
import imp
try:
imp.find_module(library)
library_found = True
except ImportError:
pass
else:
import import
|
self._check_for_executable(dependency, executable)
|
conditional_block
|
tools.py
|
data = yaml.safe_load(descriptor)
except Exception as ex:
raise CekitError('Cannot load descriptor', ex)
if isinstance(data, basestring):
LOGGER.debug("Reading descriptor from '{}' file...".format(descriptor))
if os.path.exists(descriptor):
with open(descriptor, 'r') as fh:
return yaml.safe_load(fh)
raise CekitError(
"Descriptor could not be found on the '{}' path, please check your arguments!".format(descriptor))
LOGGER.debug("Reading descriptor directly...")
return data
def decision(question):
"""Asks user for a question returning True/False answed"""
return click.confirm(question, show_default=True)
def get_brew_url(md5):
try:
LOGGER.debug("Getting brew details for an artifact with '{}' md5 sum".format(md5))
list_archives_cmd = ['/usr/bin/brew', 'call', '--json-output', 'listArchives',
"checksum={}".format(md5), 'type=maven']
LOGGER.debug("Executing '{}'.".format(" ".join(list_archives_cmd)))
try:
json_archives = subprocess.check_output(list_archives_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
if ex.output is not None and 'AuthError' in ex.output:
LOGGER.warning(
"Brew authentication failed, please make sure you have a valid Kerberos ticket")
raise CekitError("Could not fetch archives for checksum {}".format(md5), ex)
archives = yaml.safe_load(json_archives)
if not archives:
raise CekitError("Artifact with md5 checksum {} could not be found in Brew".format(md5))
archive = archives[0]
build_id = archive['build_id']
filename = archive['filename']
group_id = archive['group_id']
artifact_id = archive['artifact_id']
version = archive['version']
get_build_cmd = ['brew', 'call', '--json-output',
'getBuild', "buildInfo={}".format(build_id)]
LOGGER.debug("Executing '{}'".format(" ".join(get_build_cmd)))
try:
json_build = subprocess.check_output(get_build_cmd).strip().decode("utf8")
except subprocess.CalledProcessError as ex:
raise CekitError("Could not fetch build {} from Brew".format(build_id), ex)
build = yaml.safe_load(json_build)
build_states = ['BUILDING', 'COMPLETE', 'DELETED', 'FAILED', 'CANCELED']
# State 1 means: COMPLETE which is the only success state. Other states are:
|
# 'BUILDING': 0
# 'COMPLETE': 1
# 'DELETED': 2
# 'FAILED': 3
# 'CANCELED': 4
if build['state'] != 1:
raise CekitError(
"Artifact with checksum {} was found in Koji metadata but the build is in incorrect state ({}) making "
"the artifact not available for downloading anymore".format(md5, build_states[build['state']]))
package = build['package_name']
release = build['release']
url = 'http://download.devel.redhat.com/brewroot/packages/' + package + '/' + \
version.replace('-', '_') + '/' + release + '/maven/' + \
group_id.replace('.', '/') + '/' + \
artifact_id + '/' + version + '/' + filename
except subprocess.CalledProcessError as ex:
LOGGER.error("Can't fetch artifacts details from brew: '{}'.".format(
ex.output))
raise ex
return url
def copy_recursively(source_directory, destination_directory):
"""
Copies contents of a directory to selected target location (also a directory).
If the source directory contains a file, it will copy the specific source file to destination.
If the source directory contains a directory, it will copy all the content recursively.
Symlinks are preserved (not followed).
The destination directory tree will be created if it does not exist.
"""
# If the source directory does not exist, return
if not os.path.isdir(source_directory):
return
# Iterate over content in the source directory
for name in os.listdir(source_directory):
src = os.path.join(source_directory, name)
dst = os.path.join(destination_directory, name)
LOGGER.debug("Copying '{}' to '{}'...".format(src, dst))
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if os.path.islink(src):
os.symlink(os.readlink(src), dst)
elif os.path.isdir(src):
shutil.copytree(src, dst, symlinks=True)
else:
shutil.copy2(src, dst)
class Chdir(object):
""" Context manager for changing the current working directory """
def __init__(self, new_path):
self.newPath = os.path.expanduser(new_path)
self.savedPath = None
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DependencyHandler(object):
"""
External dependency manager. Understands what platform we are currently
running on and what dependencies need to be installed to satisfy the
requirements.
"""
# List of operating system families on which CEKit is known to work.
# It may work on other operating systems too, but it was not tested.
KNOWN_OPERATING_SYSTEMS = ['fedora', 'centos', 'rhel']
# Set of core CEKit external dependencies.
# Format is defined below, in the handle_dependencies() method
EXTERNAL_CORE_DEPENDENCIES = {
'git': {
'package': 'git',
'executable': 'git'
}
}
def __init__(self):
self.os_release = {}
self.platform = None
os_release_path = "/etc/os-release"
if os.path.exists(os_release_path):
# Read the file containing operating system information
with open(os_release_path, 'r') as f:
content = f.readlines()
self.os_release = dict([l.strip().split('=')
for l in content if not l.isspace() and not l.strip().startswith('#')])
# Remove the quote character, if it's there
for key in self.os_release.keys():
self.os_release[key] = self.os_release[key].strip('"')
if not self.os_release or 'ID' not in self.os_release or 'NAME' not in self.os_release or 'VERSION' not in self.os_release:
LOGGER.warning(
"You are running CEKit on an unknown platform. External dependencies suggestions may not work!")
return
self.platform = self.os_release['ID']
if self.os_release['ID'] not in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
LOGGER.warning(
"You are running CEKit on an untested platform: {} {}. External dependencies "
"suggestions will not work!".format(self.os_release['NAME'], self.os_release['VERSION']))
return
LOGGER.info("You are running on known platform: {} {}".format(
self.os_release['NAME'], self.os_release['VERSION']))
def _handle_dependencies(self, dependencies):
"""
The dependencies argument is expected to be a dict in the following format:
{
PACKAGE_ID: { 'package': PACKAGE_NAME, 'command': COMMAND_TO_TEST_FOR_PACKAGE_EXISTENCE },
}
Additionally every package can contain platform specific information, for example:
{
'git': {
'package': 'git',
'executable': 'git',
'fedora': {
'package': 'git-latest'
}
}
}
If platform-specific information is available for the platform on which CEKit
is currently running, it takes precedence over the defaults.
"""
if not dependencies:
LOGGER.debug("No dependencies found, skipping...")
return
for dependency in dependencies.keys():
current_dependency = dependencies[dependency]
package = current_dependency.get('package')
library = current_dependency.get('library')
executable = current_dependency.get('executable')
if self.platform in current_dependency:
package = current_dependency[self.platform].get('package', package)
library = current_dependency[self.platform].get('library', library)
executable = current_dependency[self.platform].get('executable', executable)
LOGGER.debug("Checking if '{}' dependency is provided...".format(dependency))
if library:
if self._check_for_library(library):
LOGGER.debug("Required CEKit library '{}' was found as a '{}' module!".format(
dependency, library))
continue
else:
msg = "Required CEKit library '{}' was not found; required module '{}' could not be found.".format(
dependency, library)
# Library was not found, check if we have a hint
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
msg += " Try to install the '{}' package.".format(package)
raise CekitError(msg)
if executable:
if package and self.platform in DependencyHandler.KNOWN_OPERATING_SYSTEMS:
self._check_for_executable(dependency, executable, package)
else:
self._check_for_executable(dependency, executable)
LOGGER.debug("All dependencies provided!")
# pylint: disable=R0201
def _check_for_library(self, library):
library_found = False
if sys.version_info[0] < 3:
import imp
try:
imp.find_module(library)
library_found = True
except ImportError:
pass
else:
import importlib
|
#
|
random_line_split
|
deploy.go
|
[]string) {
defer log.Flush()
stx.EnsureVaultSession(config)
if flags.DeployDeps {
flags.DeploySave = true
}
availableStacks := make(map[string]deployArgs)
workingGraph := graph.NewGraph()
buildInstances := stx.GetBuildInstances(args, config.PackageName)
stx.Process(buildInstances, flags, log, func(buildInstance *build.Instance, cueInstance *cue.Instance) {
stacksIterator, stacksIteratorErr := stx.NewStacksIterator(cueInstance, flags, log)
if stacksIteratorErr != nil {
log.Fatal(stacksIteratorErr)
}
// since the Process handler generally only sees one stack per instance,
// we need to gather ALL the stacks first primarily to support dependencies
for stacksIterator.Next() {
stackValue := stacksIterator.Value()
var stack stx.Stack
decodeErr := stackValue.Decode(&stack)
if decodeErr != nil {
if flags.DeployDeps {
log.Fatal(decodeErr)
} else {
log.Error(decodeErr)
continue
}
}
availableStacks[stack.Name] = deployArgs{stack: stack, buildInstance: buildInstance, stackValue: stackValue}
if flags.DeployDeps {
workingGraph.AddNode(stack.Name, stack.DependsOn...)
}
}
})
if flags.DeployDeps {
resolved, err := workingGraph.Resolve()
if err != nil {
log.Fatalf("Failed to resolve dependency graph: %s\n", err)
}
for _, stackName := range resolved {
dplArgs := availableStacks[stackName]
deployStack(dplArgs.stack, dplArgs.buildInstance, dplArgs.stackValue)
}
} else {
for _, dplArgs := range availableStacks {
deployStack(dplArgs.stack, dplArgs.buildInstance, dplArgs.stackValue)
}
}
},
}
func deployStack(stack stx.Stack, buildInstance *build.Instance, stackValue cue.Value)
|
changeSetName := "stx-dpl-" + usr.Username + "-" + fmt.Sprintf("%x", sha1.Sum(templateFileBytes))
// validate template
validateTemplateInput := cloudformation.ValidateTemplateInput{
TemplateBody: &templateBody,
}
validateTemplateOutput, validateTemplateErr := cfn.ValidateTemplate(&validateTemplateInput)
// template failed to validate
if validateTemplateErr != nil {
log.Infof(" %s\n", au.Red("✕"))
log.Fatalf("%+v\n", validateTemplateErr)
}
// template must have validated
log.Infof("%s\n", au.BrightGreen("✓"))
//log.Infof("%+v\n", validateTemplateOutput.String())
// look to see if stack exists
log.Debug("Describing", stack.Name)
describeStacksInput := cloudformation.DescribeStacksInput{StackName: aws.String(stack.Name)}
_, describeStacksErr := cfn.DescribeStacks(&describeStacksInput)
createChangeSetInput := cloudformation.CreateChangeSetInput{
Capabilities: validateTemplateOutput.Capabilities,
ChangeSetName: aws.String(changeSetName), // I think AWS overuses pointers
StackName: aws.String(stack.Name),
TemplateBody: aws.String(templateBody),
}
changeSetType := "UPDATE" // default
// if stack does not exist set action to CREATE
if describeStacksErr != nil {
changeSetType = "CREATE" // if stack does not already exist
}
createChangeSetInput.ChangeSetType = &changeSetType
stackParametersValue := stackValue.Lookup("Template", "Parameters")
if stackParametersValue.Exists() {
// TODO parameters need to support the type as declared in Parameter.Type (at the least string and number).
// this should be map[string]interface{} with type casting done when adding parameters to the changeset
parametersMap := make(map[string]string)
var parameters []*cloudformation.Parameter
if flags.DeployPrevious {
// deploy using previous values
stackParameters, stackParametersErr := stackParametersValue.Fields()
if stackParametersErr != nil {
log.Fatal(stackParametersErr)
return
}
log.Infof("%s", au.Gray(11, " Using previous parameters..."))
for stackParameters.Next() {
stackParam := stackParameters.Value()
key, _ := stackParam.Label()
parametersMap[key] = ""
}
log.Check()
} else {
// load overrides
// TODO #48 stx should prompt for each Parameter input if overrides are undefined
if len(stack.Overrides) == 0 {
log.Fatal("Template has Parameters but no Overrides are defined.")
return
}
for k, v := range stack.Overrides {
path := strings.Replace(k, "${STX::CuePath}", strings.Replace(buildInstance.Dir, buildInstance.Root+"/", "", 1), 1)
behavior := v
log.Infof("%s", au.Gray(11, " Applying overrides: "+path+" "))
var yamlBytes []byte
var yamlBytesErr error
if behavior.SopsProfile != "" {
// decrypt the file contents
yamlBytes, yamlBytesErr = stx.DecryptSecrets(filepath.Clean(buildInstance.Root+"/"+path), behavior.SopsProfile)
} else {
// just pull the file contents directly
yamlBytes, yamlBytesErr = ioutil.ReadFile(filepath.Clean(buildInstance.Root + "/" + path))
}
if yamlBytesErr != nil {
log.Fatal(yamlBytesErr)
return
}
// TODO #47 parameters need to support the type as declared in Parameter.Type (in the least string and number).
// this should be map[string]interface{} with type casting done when adding parameters to the changeset
var override map[string]string
yamlUnmarshalErr := yaml.Unmarshal(yamlBytes, &override)
if yamlUnmarshalErr != nil {
log.Fatal(yamlUnmarshalErr)
return
}
// TODO #50 stx should error when a parameter key is duplicated among two or more overrides files
if len(behavior.Map) > 0 {
// map the yaml key:value to parameter key:value
for k, v := range behavior.Map {
fromKey := k
toKey := v
parametersMap[toKey] = override[fromKey]
}
} else {
// just do a straight copy, keys should align 1:1
for k, v := range override {
overrideKey := k
overrideVal := v
parametersMap[overrideKey] = overrideVal
}
}
log.Check()
}
}
// apply parameters to changeset
for k, v := range parametersMap {
paramKey := k
paramVal := v
parameter := cloudformation.Parameter{ParameterKey: aws.String(paramKey)}
if flags.DeployPrevious {
parameter.SetUsePreviousValue(true)
} else {
parameter.ParameterValue = aws.String(paramVal)
}
parameters = append(parameters, ¶meter)
}
createChangeSetInput.SetParameters(parameters)
} // end stackParametersValue.Exists()
// handle Stack.Tags
if len(stack.Tags) > 0 && stack.TagsEnabled {
var tags []*cloudformation.Tag
for k, v := range stack.Tags {
tagK := k // reassign here to avoid issues with for-scope var
var tagV string
switch v {
default:
tagV = v
case "${STX::CuePath}":
tagV = strings.Replace(buildInstance.Dir, buildInstance.Root, "", 1)
case "${STX::CueFiles}":
tagV = strings.Join(buildInstance.CUEFiles, ", ")
}
tags = append(tags, &cloudformation.Tag{Key: &tagK, Value: &tagV})
}
createChangeSetInput.SetTags(tags)
}
if config.Cmd.Deploy.Notify.TopicArn != "" { // && stx notify command is running! perhaps use unix domain sockets to test
log.Infof("%s", au.Gray(11, " Reticulating splines..."))
snsClient := sns.New(session, awsCfg)
subscribeInput := sns.SubscribeInput{Endpoint: aws.String(config.Cmd.Deploy.Notify.Endpoint), TopicArn
|
{
fileName, saveErr := saveStackAsYml(stack, buildInstance, stackValue)
if saveErr != nil {
log.Error(saveErr)
}
log.Infof("%s %s %s %s:%s\n", au.White("Deploying"), au.Magenta(stack.Name), au.White("⤏"), au.Green(stack.Profile), au.Cyan(stack.Region))
log.Infof("%s", au.Gray(11, " Validating template..."))
// get a session and cloudformation service client
log.Debugf("\nGetting session for profile %s\n", stack.Profile)
session := stx.GetSession(stack.Profile)
awsCfg := aws.NewConfig().WithRegion(stack.Region)
cfn := cloudformation.New(session, awsCfg)
// read template from disk
log.Debug("Reading template from", fileName)
templateFileBytes, _ := ioutil.ReadFile(fileName)
templateBody := string(templateFileBytes)
usr, _ := user.Current()
|
identifier_body
|
deploy.go
|
() {
rootCmd.AddCommand(deployCmd)
deployCmd.Flags().BoolVarP(&flags.DeployWait, "wait", "w", false, "Wait for stack updates to complete before continuing.")
deployCmd.Flags().BoolVarP(&flags.DeploySave, "save", "s", false, "Save stack outputs upon successful completion. Implies --wait.")
deployCmd.Flags().BoolVarP(&flags.DeployDeps, "dependencies", "d", false, "Deploy stack dependencies in order. Implies --save.")
deployCmd.Flags().BoolVarP(&flags.DeployPrevious, "previous-values", "v", false, "Deploy stack using previous parameter values.")
}
type deployArgs struct {
stack stx.Stack
stackValue cue.Value
buildInstance *build.Instance
}
// deployCmd represents the deploy command
var deployCmd = &cobra.Command{
Use: "deploy",
Short: "Deploys a stack by creating a changeset, previews expected changes, and optionally executes.",
Long: `Deploy will act upon every stack it finds among the evaluated cue files.
For each stack, a changeset is first created, and the proposed changes are
displayed. At this point you have the option to execute the changeset
before moving on to the next stack.
The following config.stx.cue options are available:
Cmd: {
Deploy: {
Notify: {
Endpoint: string | *""
TopicArn: string | *""
}
}
}
Use Cmd:Deploy:Notify: properties to enable the notify command to receive stack
event notifications from SNS. The endpoint will be the http address provided by
the notify command. If this is run behind a router, you will need to enable
port forwarding. If port forwarding is not possible, such as in a corporate
office setting, stx notify could be run on a remote machine such as an EC2
instance, or virtual workspace.
The TopicArn is an SNS topic that is provided as a NotificationArn when
creating changesets. In a team setting, it may be better for each member to
have their own topic; keep in mind that the last person to deploy will be
the one to receive notifications when the stack is deleted. To receive events
during a delete operation, be sure to update the stack with your own TopicArn
first.
`,
Run: func(cmd *cobra.Command, args []string) {
defer log.Flush()
stx.EnsureVaultSession(config)
if flags.DeployDeps {
flags.DeploySave = true
}
availableStacks := make(map[string]deployArgs)
workingGraph := graph.NewGraph()
buildInstances := stx.GetBuildInstances(args, config.PackageName)
stx.Process(buildInstances, flags, log, func(buildInstance *build.Instance, cueInstance *cue.Instance) {
stacksIterator, stacksIteratorErr := stx.NewStacksIterator(cueInstance, flags, log)
if stacksIteratorErr != nil {
log.Fatal(stacksIteratorErr)
}
// since the Process handler generally only sees one stack per instance,
// we need to gather ALL the stacks first primarily to support dependencies
for stacksIterator.Next() {
stackValue := stacksIterator.Value()
var stack stx.Stack
decodeErr := stackValue.Decode(&stack)
if decodeErr != nil {
if flags.DeployDeps {
log.Fatal(decodeErr)
} else {
log.Error(decodeErr)
continue
}
}
availableStacks[stack.Name] = deployArgs{stack: stack, buildInstance: buildInstance, stackValue: stackValue}
if flags.DeployDeps {
workingGraph.AddNode(stack.Name, stack.DependsOn...)
}
}
})
if flags.DeployDeps {
resolved, err := workingGraph.Resolve()
if err != nil {
log.Fatalf("Failed to resolve dependency graph: %s\n", err)
}
for _, stackName := range resolved {
dplArgs := availableStacks[stackName]
deployStack(dplArgs.stack, dplArgs.buildInstance, dplArgs.stackValue)
}
} else {
for _, dplArgs := range availableStacks {
deployStack(dplArgs.stack, dplArgs.buildInstance, dplArgs.stackValue)
}
}
},
}
func deployStack(stack stx.Stack, buildInstance *build.Instance, stackValue cue.Value) {
fileName, saveErr := saveStackAsYml(stack, buildInstance, stackValue)
if saveErr != nil {
log.Error(saveErr)
}
log.Infof("%s %s %s %s:%s\n", au.White("Deploying"), au.Magenta(stack.Name), au.White("⤏"), au.Green(stack.Profile), au.Cyan(stack.Region))
log.Infof("%s", au.Gray(11, " Validating template..."))
// get a session and cloudformation service client
log.Debugf("\nGetting session for profile %s\n", stack.Profile)
session := stx.GetSession(stack.Profile)
awsCfg := aws.NewConfig().WithRegion(stack.Region)
cfn := cloudformation.New(session, awsCfg)
// read template from disk
log.Debug("Reading template from", fileName)
templateFileBytes, _ := ioutil.ReadFile(fileName)
templateBody := string(templateFileBytes)
usr, _ := user.Current()
changeSetName := "stx-dpl-" + usr.Username + "-" + fmt.Sprintf("%x", sha1.Sum(templateFileBytes))
// validate template
validateTemplateInput := cloudformation.ValidateTemplateInput{
TemplateBody: &templateBody,
}
validateTemplateOutput, validateTemplateErr := cfn.ValidateTemplate(&validateTemplateInput)
// template failed to validate
if validateTemplateErr != nil {
log.Infof(" %s\n", au.Red("✕"))
log.Fatalf("%+v\n", validateTemplateErr)
}
// template must have validated
log.Infof("%s\n", au.BrightGreen("✓"))
//log.Infof("%+v\n", validateTemplateOutput.String())
// look to see if stack exists
log.Debug("Describing", stack.Name)
describeStacksInput := cloudformation.DescribeStacksInput{StackName: aws.String(stack.Name)}
_, describeStacksErr := cfn.DescribeStacks(&describeStacksInput)
createChangeSetInput := cloudformation.CreateChangeSetInput{
Capabilities: validateTemplateOutput.Capabilities,
ChangeSetName: aws.String(changeSetName), // I think AWS overuses pointers
StackName: aws.String(stack.Name),
TemplateBody: aws.String(templateBody),
}
changeSetType := "UPDATE" // default
// if stack does not exist set action to CREATE
if describeStacksErr != nil {
changeSetType = "CREATE" // if stack does not already exist
}
createChangeSetInput.ChangeSetType = &changeSetType
stackParametersValue := stackValue.Lookup("Template", "Parameters")
if stackParametersValue.Exists() {
		// TODO parameters need to support the type as declared in Parameter.Type (at least string and number).
// this should be map[string]interface{} with type casting done when adding parameters to the changeset
parametersMap := make(map[string]string)
var parameters []*cloudformation.Parameter
if flags.DeployPrevious {
// deploy using previous values
stackParameters, stackParametersErr := stackParametersValue.Fields()
if stackParametersErr != nil {
log.Fatal(stackParametersErr)
return
}
log.Infof("%s", au.Gray(11, " Using previous parameters..."))
for stackParameters.Next() {
stackParam := stackParameters.Value()
key, _ := stackParam.Label()
parametersMap[key] = ""
}
log.Check()
} else {
// load overrides
// TODO #48 stx should prompt for each Parameter input if overrides are undefined
			if len(stack.Overrides) == 0 {
log.Fatal("Template has Parameters but no Overrides are defined.")
return
}
for k, v := range stack.Overrides {
path := strings.Replace(k, "${STX::CuePath}", strings.Replace(buildInstance.Dir, buildInstance.Root+"/", "", 1), 1)
behavior := v
log.Infof("%s", au.Gray(11, " Applying overrides: "+path+" "))
var yamlBytes []byte
var yamlBytesErr error
if behavior.SopsProfile != "" {
// decrypt the file contents
yamlBytes, yamlBytesErr = stx.DecryptSecrets(filepath.Clean(buildInstance.Root+"/"+path), behavior.SopsProfile)
} else {
// just pull the file contents directly
yamlBytes, yamlBytesErr = ioutil.ReadFile(filepath.Clean(buildInstance.Root + "/" + path))
}
if yamlBytesErr != nil {
log.Fatal(yamlBytesErr)
return
}
				// TODO #47 parameters need to support the type as declared in Parameter.Type (at least string and number).
// this should be map[string]interface{} with type casting done when adding parameters to the changeset
var override map[string]string
yamlUnmarshalErr := yaml.Unmarshal(yamlBytes, &override)
if yamlUnmarshalErr != nil {
log.Fatal(yamlUnmarshalErr)
return
}
// TODO #50 stx should error when a parameter key is duplicated among
|
init
|
identifier_name
|
|
deploy.go
|
}
				// TODO #47 parameters need to support the type as declared in Parameter.Type (at least string and number).
// this should be map[string]interface{} with type casting done when adding parameters to the changeset
var override map[string]string
yamlUnmarshalErr := yaml.Unmarshal(yamlBytes, &override)
if yamlUnmarshalErr != nil {
log.Fatal(yamlUnmarshalErr)
return
}
// TODO #50 stx should error when a parameter key is duplicated among two or more overrides files
if len(behavior.Map) > 0 {
// map the yaml key:value to parameter key:value
for k, v := range behavior.Map {
fromKey := k
toKey := v
parametersMap[toKey] = override[fromKey]
}
} else {
// just do a straight copy, keys should align 1:1
for k, v := range override {
overrideKey := k
overrideVal := v
parametersMap[overrideKey] = overrideVal
}
}
log.Check()
}
}
// apply parameters to changeset
for k, v := range parametersMap {
paramKey := k
paramVal := v
parameter := cloudformation.Parameter{ParameterKey: aws.String(paramKey)}
if flags.DeployPrevious {
parameter.SetUsePreviousValue(true)
} else {
parameter.ParameterValue = aws.String(paramVal)
}
			parameters = append(parameters, &parameter)
}
createChangeSetInput.SetParameters(parameters)
} // end stackParametersValue.Exists()
// handle Stack.Tags
if len(stack.Tags) > 0 && stack.TagsEnabled {
var tags []*cloudformation.Tag
for k, v := range stack.Tags {
tagK := k // reassign here to avoid issues with for-scope var
var tagV string
switch v {
default:
tagV = v
case "${STX::CuePath}":
tagV = strings.Replace(buildInstance.Dir, buildInstance.Root, "", 1)
case "${STX::CueFiles}":
tagV = strings.Join(buildInstance.CUEFiles, ", ")
}
tags = append(tags, &cloudformation.Tag{Key: &tagK, Value: &tagV})
}
createChangeSetInput.SetTags(tags)
}
if config.Cmd.Deploy.Notify.TopicArn != "" { // && stx notify command is running! perhaps use unix domain sockets to test
log.Infof("%s", au.Gray(11, " Reticulating splines..."))
snsClient := sns.New(session, awsCfg)
subscribeInput := sns.SubscribeInput{Endpoint: aws.String(config.Cmd.Deploy.Notify.Endpoint), TopicArn: aws.String(config.Cmd.Deploy.Notify.TopicArn), Protocol: aws.String("http")}
_, subscribeErr := snsClient.Subscribe(&subscribeInput)
if subscribeErr != nil {
log.Errorf("%s\n", subscribeErr)
} else {
var notificationArns []*string
notificationArns = append(notificationArns, aws.String(config.Cmd.Deploy.Notify.TopicArn))
createChangeSetInput.SetNotificationARNs(notificationArns)
log.Check()
}
}
log.Infof("%s", au.Gray(11, " Creating changeset..."))
_, createChangeSetErr := cfn.CreateChangeSet(&createChangeSetInput)
if createChangeSetErr != nil {
if awsErr, ok := createChangeSetErr.(awserr.Error); ok {
log.Infof(" %s\n", au.Red(awsErr))
if awsErr.Code() == "AlreadyExistsException" {
var deleteChangesetInput cloudformation.DeleteChangeSetInput
deleteChangesetInput.ChangeSetName = createChangeSetInput.ChangeSetName
deleteChangesetInput.StackName = createChangeSetInput.StackName
log.Infof("%s %s\n", au.White("Deleting"), au.BrightBlue(changeSetName))
_, deleteChangeSetErr := cfn.DeleteChangeSet(&deleteChangesetInput)
if deleteChangeSetErr != nil {
log.Error(deleteChangeSetErr)
}
return
}
}
log.Fatal(createChangeSetErr)
}
describeChangesetInput := cloudformation.DescribeChangeSetInput{
ChangeSetName: aws.String(changeSetName),
StackName: aws.String(stack.Name),
}
waitOption := request.WithWaiterDelay(request.ConstantWaiterDelay(5 * time.Second))
cfn.WaitUntilChangeSetCreateCompleteWithContext(context.Background(), &describeChangesetInput, waitOption)
log.Check()
log.Infof("%s %s %s %s:%s\n", au.White("Describing"), au.BrightBlue(changeSetName), au.White("⤎"), au.Magenta(stack.Name), au.Cyan(stack.Region))
describeChangesetOuput, describeChangesetErr := cfn.DescribeChangeSet(&describeChangesetInput)
if describeChangesetErr != nil {
log.Fatalf("%+v", au.Red(describeChangesetErr))
}
if aws.StringValue(describeChangesetOuput.ExecutionStatus) != "AVAILABLE" || aws.StringValue(describeChangesetOuput.Status) != "CREATE_COMPLETE" {
//TODO put describeChangesetOuput into table view
log.Infof("%+v\n", describeChangesetOuput)
log.Info(au.Yellow("No changes to deploy."))
var deleteChangesetInput cloudformation.DeleteChangeSetInput
deleteChangesetInput.ChangeSetName = createChangeSetInput.ChangeSetName
deleteChangesetInput.StackName = createChangeSetInput.StackName
log.Infof("%s %s\n", au.White("Deleting"), au.BrightBlue(changeSetName))
_, deleteChangeSetErr := cfn.DeleteChangeSet(&deleteChangesetInput)
if deleteChangeSetErr != nil {
log.Error(deleteChangeSetErr)
}
return
}
if len(describeChangesetOuput.Changes) > 0 {
// log.Infof("%+v\n", describeChangesetOuput.Changes)
table := tablewriter.NewWriter(os.Stdout)
table.SetAutoWrapText(false)
table.SetAutoMergeCells(true)
table.SetRowLine(true)
table.SetHeader([]string{"Resource", "Action", "Attribute", "Property", "Recreation"})
table.SetHeaderColor(tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor})
for _, change := range describeChangesetOuput.Changes {
row := []string{
aws.StringValue(change.ResourceChange.LogicalResourceId),
aws.StringValue(change.ResourceChange.Action),
"",
"",
"",
}
if aws.StringValue(change.ResourceChange.Action) == "Modify" {
for _, detail := range change.ResourceChange.Details {
row[2] = aws.StringValue(detail.Target.Attribute)
row[3] = aws.StringValue(detail.Target.Name)
recreation := aws.StringValue(detail.Target.RequiresRecreation)
if recreation == "ALWAYS" || recreation == "CONDITIONAL" {
row[4] = au.Red(recreation).String()
} else {
row[4] = recreation
}
table.Append(row)
}
} else {
table.Append(row)
}
}
table.Render()
}
diff(cfn, stack.Name, templateBody)
log.Infof("%s %s %s %s %s:%s:%s %s\n", au.Index(255-88, "Execute change set"), au.BrightBlue(changeSetName), au.Index(255-88, "on"), au.White("⤏"), au.Magenta(stack.Name), au.Green(stack.Profile), au.Cyan(stack.Region), au.Index(255-88, "?"))
log.Infof("%s\n%s", au.Gray(11, "Y to execute. Anything else to cancel."), au.Gray(11, "▶︎"))
var input string
fmt.Scanln(&input)
input = strings.ToLower(input)
matched, _ := regexp.MatchString("^(y){1}(es)?$", input)
if !matched {
// delete changeset and continue
var deleteChangesetInput cloudformation.DeleteChangeSetInput
deleteChangesetInput.ChangeSetName = createChangeSetInput.ChangeSetName
deleteChangesetInput.StackName = createChangeSetInput.StackName
log.Infof("%s %s\n", au.White("Deleting"), au.BrightBlue(changeSetName))
_, deleteChangeSetErr := cfn.DeleteChangeSet(&deleteChangesetInput)
if deleteChangeSetErr != nil {
log.Error(deleteChangeSetErr)
}
return
}
executeChangeSetInput := cloudformation.ExecuteChangeSetInput{
ChangeSetName: aws.String(changeSetName),
StackName: aws.String(stack.Name),
}
log.Infof("%s %s %s %s:%s\n", au.White("Executing"), au.BrightBlue(changeSetName), au.White("⤏"), au.Magenta(stack.Name), au.Cyan(stack.Region))
_, executeChangeSetErr := cfn.ExecuteChangeSet(&executeChangeSetInput)
if executeChangeSetErr != nil {
log.Fatal(ex
|
ecuteChangeSetErr)
}
if flags.Depl
|
conditional_block
|
|
deploy.go
|
behavior.Map) > 0 {
// map the yaml key:value to parameter key:value
for k, v := range behavior.Map {
fromKey := k
toKey := v
parametersMap[toKey] = override[fromKey]
}
} else {
// just do a straight copy, keys should align 1:1
for k, v := range override {
overrideKey := k
overrideVal := v
parametersMap[overrideKey] = overrideVal
}
}
log.Check()
}
}
// apply parameters to changeset
for k, v := range parametersMap {
paramKey := k
paramVal := v
parameter := cloudformation.Parameter{ParameterKey: aws.String(paramKey)}
if flags.DeployPrevious {
parameter.SetUsePreviousValue(true)
} else {
parameter.ParameterValue = aws.String(paramVal)
}
			parameters = append(parameters, &parameter)
}
createChangeSetInput.SetParameters(parameters)
} // end stackParametersValue.Exists()
// handle Stack.Tags
if len(stack.Tags) > 0 && stack.TagsEnabled {
var tags []*cloudformation.Tag
for k, v := range stack.Tags {
tagK := k // reassign here to avoid issues with for-scope var
var tagV string
switch v {
default:
tagV = v
case "${STX::CuePath}":
tagV = strings.Replace(buildInstance.Dir, buildInstance.Root, "", 1)
case "${STX::CueFiles}":
tagV = strings.Join(buildInstance.CUEFiles, ", ")
}
tags = append(tags, &cloudformation.Tag{Key: &tagK, Value: &tagV})
}
createChangeSetInput.SetTags(tags)
}
if config.Cmd.Deploy.Notify.TopicArn != "" { // && stx notify command is running! perhaps use unix domain sockets to test
log.Infof("%s", au.Gray(11, " Reticulating splines..."))
snsClient := sns.New(session, awsCfg)
subscribeInput := sns.SubscribeInput{Endpoint: aws.String(config.Cmd.Deploy.Notify.Endpoint), TopicArn: aws.String(config.Cmd.Deploy.Notify.TopicArn), Protocol: aws.String("http")}
_, subscribeErr := snsClient.Subscribe(&subscribeInput)
if subscribeErr != nil {
log.Errorf("%s\n", subscribeErr)
} else {
var notificationArns []*string
notificationArns = append(notificationArns, aws.String(config.Cmd.Deploy.Notify.TopicArn))
createChangeSetInput.SetNotificationARNs(notificationArns)
log.Check()
}
}
log.Infof("%s", au.Gray(11, " Creating changeset..."))
_, createChangeSetErr := cfn.CreateChangeSet(&createChangeSetInput)
if createChangeSetErr != nil {
if awsErr, ok := createChangeSetErr.(awserr.Error); ok {
log.Infof(" %s\n", au.Red(awsErr))
if awsErr.Code() == "AlreadyExistsException" {
var deleteChangesetInput cloudformation.DeleteChangeSetInput
deleteChangesetInput.ChangeSetName = createChangeSetInput.ChangeSetName
deleteChangesetInput.StackName = createChangeSetInput.StackName
log.Infof("%s %s\n", au.White("Deleting"), au.BrightBlue(changeSetName))
_, deleteChangeSetErr := cfn.DeleteChangeSet(&deleteChangesetInput)
if deleteChangeSetErr != nil {
log.Error(deleteChangeSetErr)
}
return
}
}
log.Fatal(createChangeSetErr)
}
describeChangesetInput := cloudformation.DescribeChangeSetInput{
ChangeSetName: aws.String(changeSetName),
StackName: aws.String(stack.Name),
}
waitOption := request.WithWaiterDelay(request.ConstantWaiterDelay(5 * time.Second))
cfn.WaitUntilChangeSetCreateCompleteWithContext(context.Background(), &describeChangesetInput, waitOption)
log.Check()
log.Infof("%s %s %s %s:%s\n", au.White("Describing"), au.BrightBlue(changeSetName), au.White("⤎"), au.Magenta(stack.Name), au.Cyan(stack.Region))
describeChangesetOuput, describeChangesetErr := cfn.DescribeChangeSet(&describeChangesetInput)
if describeChangesetErr != nil {
log.Fatalf("%+v", au.Red(describeChangesetErr))
}
if aws.StringValue(describeChangesetOuput.ExecutionStatus) != "AVAILABLE" || aws.StringValue(describeChangesetOuput.Status) != "CREATE_COMPLETE" {
//TODO put describeChangesetOuput into table view
log.Infof("%+v\n", describeChangesetOuput)
log.Info(au.Yellow("No changes to deploy."))
var deleteChangesetInput cloudformation.DeleteChangeSetInput
deleteChangesetInput.ChangeSetName = createChangeSetInput.ChangeSetName
deleteChangesetInput.StackName = createChangeSetInput.StackName
log.Infof("%s %s\n", au.White("Deleting"), au.BrightBlue(changeSetName))
_, deleteChangeSetErr := cfn.DeleteChangeSet(&deleteChangesetInput)
if deleteChangeSetErr != nil {
log.Error(deleteChangeSetErr)
}
return
}
if len(describeChangesetOuput.Changes) > 0 {
// log.Infof("%+v\n", describeChangesetOuput.Changes)
table := tablewriter.NewWriter(os.Stdout)
table.SetAutoWrapText(false)
table.SetAutoMergeCells(true)
table.SetRowLine(true)
table.SetHeader([]string{"Resource", "Action", "Attribute", "Property", "Recreation"})
table.SetHeaderColor(tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor}, tablewriter.Colors{tablewriter.FgWhiteColor})
for _, change := range describeChangesetOuput.Changes {
row := []string{
aws.StringValue(change.ResourceChange.LogicalResourceId),
aws.StringValue(change.ResourceChange.Action),
"",
"",
"",
}
if aws.StringValue(change.ResourceChange.Action) == "Modify" {
for _, detail := range change.ResourceChange.Details {
row[2] = aws.StringValue(detail.Target.Attribute)
row[3] = aws.StringValue(detail.Target.Name)
recreation := aws.StringValue(detail.Target.RequiresRecreation)
if recreation == "ALWAYS" || recreation == "CONDITIONAL" {
row[4] = au.Red(recreation).String()
} else {
row[4] = recreation
}
table.Append(row)
}
} else {
table.Append(row)
}
}
table.Render()
}
diff(cfn, stack.Name, templateBody)
log.Infof("%s %s %s %s %s:%s:%s %s\n", au.Index(255-88, "Execute change set"), au.BrightBlue(changeSetName), au.Index(255-88, "on"), au.White("⤏"), au.Magenta(stack.Name), au.Green(stack.Profile), au.Cyan(stack.Region), au.Index(255-88, "?"))
log.Infof("%s\n%s", au.Gray(11, "Y to execute. Anything else to cancel."), au.Gray(11, "▶︎"))
var input string
fmt.Scanln(&input)
input = strings.ToLower(input)
matched, _ := regexp.MatchString("^(y){1}(es)?$", input)
if !matched {
// delete changeset and continue
var deleteChangesetInput cloudformation.DeleteChangeSetInput
deleteChangesetInput.ChangeSetName = createChangeSetInput.ChangeSetName
deleteChangesetInput.StackName = createChangeSetInput.StackName
log.Infof("%s %s\n", au.White("Deleting"), au.BrightBlue(changeSetName))
_, deleteChangeSetErr := cfn.DeleteChangeSet(&deleteChangesetInput)
if deleteChangeSetErr != nil {
log.Error(deleteChangeSetErr)
}
return
}
executeChangeSetInput := cloudformation.ExecuteChangeSetInput{
ChangeSetName: aws.String(changeSetName),
StackName: aws.String(stack.Name),
}
log.Infof("%s %s %s %s:%s\n", au.White("Executing"), au.BrightBlue(changeSetName), au.White("⤏"), au.Magenta(stack.Name), au.Cyan(stack.Region))
_, executeChangeSetErr := cfn.ExecuteChangeSet(&executeChangeSetInput)
if executeChangeSetErr != nil {
log.Fatal(executeChangeSetErr)
}
if flags.DeploySave || flags.DeployWait {
log.Infof("%s", au.Gray(11, " Waiting for stack..."))
switch changeSetType {
case "UPDATE":
cfn.WaitUntilStackUpdateCompleteWithContext(context.Background(), &describeStacksInput, waitOption)
case "CREATE":
cfn.WaitUntilStackCreateCompleteWithContext(context.Background(), &describeStacksInput, waitOption)
}
log.Check()
|
if flags.DeploySave {
saveErr := saveStackOutputs(buildInstance, stack)
if saveErr != nil {
log.Fatal(saveErr)
|
random_line_split
|
|
http.go
|
.InterfacesTotal[i].In, prevtotals[i].In),
DeltaOut: bps(s.InterfacesTotal[i].Out, prevtotals[i].Out),
}
}
sort.Sort(interfaceOrder(ifs))
return ifs
}
func(s state) cpudelta() sigar.CpuList {
prev := s.PREVCPU
if len(prev.List) == 0 {
return s.RAWCPU
}
// cls := s.RAWCPU
cls := sigar.CpuList{List: make([]sigar.Cpu, len(s.RAWCPU.List)) }
copy(cls.List, s.RAWCPU.List)
for i := range cls.List {
cls.List[i].User -= prev.List[i].User
cls.List[i].Nice -= prev.List[i].Nice
cls.List[i].Sys -= prev.List[i].Sys
cls.List[i].Idle -= prev.List[i].Idle
}
sort.Sort(cpuOrder(cls.List))
return cls
}
func(s state) CPU() types.CPU {
sum := sigar.Cpu{}
cls := s.cpudelta()
c := types.CPU{List: make([]types.CPU, len(cls.List))}
for i, cp := range cls.List {
total := cp.User + cp.Nice + cp.Sys + cp.Idle
user := percent(cp.User, total)
sys := percent(cp.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.List[i].N = i
c.List[i].User, c.List[i].AttrUser = user, textAttr_colorPercent(user)
c.List[i].Sys, c.List[i].AttrSys = sys, textAttr_colorPercent(sys)
c.List[i].Idle, c.List[i].AttrIdle = idle, textAttr_colorPercent(100 - idle)
sum.User += cp.User + cp.Nice
sum.Sys += cp.Sys
sum.Idle += cp.Idle
}
total := sum.User + sum.Sys + sum.Idle // + sum.Nice
user := percent(sum.User, total)
sys := percent(sum.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.N = len(cls.List)
c.User, c.AttrUser = user, textAttr_colorPercent(user)
c.Sys, c.AttrSys = sys, textAttr_colorPercent(sys)
c.Idle, c.AttrIdle = idle, textAttr_colorPercent(100 - idle)
return c
}
func textAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"text-" + colorPercent(p) + "\"")
}
func labelAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"label label-" + colorPercent(p) + "\"")
}
func colorPercent(p uint) string {
if p > 90 {
return "danger"
}
if p > 80 {
return "warning"
}
if p > 20 {
return "info"
}
return "success"
}
type memory struct {
Total string
Used string
Free string
UsePercent string
AttrUsePercent template.HTMLAttr `json:"-"`
}
type diskInfo struct {
DevName string
Total uint64
Used uint64
Avail uint64
UsePercent float64
Inodes uint64
Iused uint64
Ifree uint64
IusePercent float64
DirName string
}
func valuesSet(req *http.Request, base url.Values, pname string, bimap types.Biseqmap) types.SEQ {
if params, ok := req.Form[pname]; ok && len(params) > 0 {
if seq, ok := bimap.STRING2SEQ[params[0]]; ok {
base.Set(pname, params[0])
return seq
}
}
return bimap.Default_seq
}
func
|
(disks []diskInfo, seq types.SEQ) []types.DiskData {
sort.Stable(diskOrder{
disks: disks,
seq: seq,
reverse: _DFBIMAP.SEQ2REVERSE[seq],
})
var dd []types.DiskData
for _, disk := range disks {
total, approxtotal := humanBandback(disk.Total)
used, approxused := humanBandback(disk.Used)
itotal, approxitotal := humanBandback(disk.Inodes)
iused, approxiused := humanBandback(disk.Iused)
short := ""
if len(disk.DevName) > 10 {
short = disk.DevName[:10]
}
dd = append(dd, types.DiskData{
DiskName: disk.DevName,
ShortDiskName: short,
Total: total,
Used: used,
Avail: humanB(disk.Avail),
UsePercent: formatPercent(approxused, approxtotal),
Inodes: itotal,
Iused: iused,
Ifree: humanB(disk.Ifree),
IusePercent: formatPercent(approxiused, approxitotal),
DirName: disk.DirName,
AttrUsePercent: labelAttr_colorPercent(percent(approxused, approxtotal)),
AttrIusePercent: labelAttr_colorPercent(percent(approxiused, approxitotal)),
})
}
return dd
}
var _DFBIMAP = types.Seq2bimap(DFFS, // the default seq for ordering
types.Seq2string{
DFFS: "fs",
DFSIZE: "size",
DFUSED: "used",
DFAVAIL: "avail",
DFMP: "mp",
}, []types.SEQ{
DFFS, DFMP,
})
var _PSBIMAP = types.Seq2bimap(PSPID, // the default seq for ordering
types.Seq2string{
PSPID: "pid",
PSPRI: "pri",
PSNICE: "nice",
PSSIZE: "size",
PSRES: "res",
PSTIME: "time",
PSNAME: "name",
PSUID: "user",
}, []types.SEQ{
PSNAME, PSUID,
})
func username(uids map[uint]string, uid uint) string {
if s, ok := uids[uid]; ok {
return s
}
s := fmt.Sprintf("%d", uid)
if usr, err := user.LookupId(s); err == nil {
s = usr.Username
}
uids[uid] = s
return s
}
func orderProc(procs []types.ProcInfo, seq types.SEQ) []types.ProcData {
sort.Sort(procOrder{ // not sort.Stable
procs: procs,
seq: seq,
reverse: _PSBIMAP.SEQ2REVERSE[seq],
})
if len(procs) > 20 {
procs = procs[:20]
}
uids := map[uint]string{}
var list []types.ProcData
for _, proc := range procs {
list = append(list, types.ProcData{
PID: proc.PID,
Priority: proc.Priority,
Nice: proc.Nice,
Time: formatTime(proc.Time),
Name: proc.Name,
User: username(uids, proc.Uid),
Size: humanB(proc.Size),
Resident: humanB(proc.Resident),
})
}
return list
}
type state struct {
About about
System system
RAWCPU sigar.CpuList
PREVCPU sigar.CpuList
RAM memory
Swap memory
DiskList []diskInfo
ProcList []types.ProcInfo
InterfacesTotal []InterfaceTotal
PrevInterfacesTotal []InterfaceTotal
}
type Page struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces types.Interfaces
DISTRIB string
HTTP_HOST string
}
type pageUpdate struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces []types.DeltaInterface
}
var stateLock sync.Mutex
var lastState state
func reset_prev() {
stateLock.Lock()
defer stateLock.Unlock()
lastState.PrevInterfacesTotal = []InterfaceTotal{}
lastState.PREVCPU.List = []sigar.Cpu{}
}
func collect() { // state
stateLock.Lock()
defer stateLock.Unlock()
prev_ifstotal := lastState.InterfacesTotal
prev_cpu := lastState.RAWCPU
ifs, ip := NewInterfaces()
about := getAbout()
about.IP = ip
lastState = state{
|
orderDisk
|
identifier_name
|
http.go
|
(s.InterfacesTotal[i].In, prevtotals[i].In),
DeltaOut: bps(s.InterfacesTotal[i].Out, prevtotals[i].Out),
}
}
sort.Sort(interfaceOrder(ifs))
return ifs
}
func(s state) cpudelta() sigar.CpuList {
prev := s.PREVCPU
if len(prev.List) == 0 {
return s.RAWCPU
}
// cls := s.RAWCPU
cls := sigar.CpuList{List: make([]sigar.Cpu, len(s.RAWCPU.List)) }
copy(cls.List, s.RAWCPU.List)
for i := range cls.List {
cls.List[i].User -= prev.List[i].User
cls.List[i].Nice -= prev.List[i].Nice
cls.List[i].Sys -= prev.List[i].Sys
cls.List[i].Idle -= prev.List[i].Idle
}
sort.Sort(cpuOrder(cls.List))
return cls
}
func(s state) CPU() types.CPU {
sum := sigar.Cpu{}
cls := s.cpudelta()
c := types.CPU{List: make([]types.CPU, len(cls.List))}
for i, cp := range cls.List {
total := cp.User + cp.Nice + cp.Sys + cp.Idle
user := percent(cp.User, total)
sys := percent(cp.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.List[i].N = i
c.List[i].User, c.List[i].AttrUser = user, textAttr_colorPercent(user)
c.List[i].Sys, c.List[i].AttrSys = sys, textAttr_colorPercent(sys)
c.List[i].Idle, c.List[i].AttrIdle = idle, textAttr_colorPercent(100 - idle)
sum.User += cp.User + cp.Nice
sum.Sys += cp.Sys
sum.Idle += cp.Idle
}
total := sum.User + sum.Sys + sum.Idle // + sum.Nice
user := percent(sum.User, total)
sys := percent(sum.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.N = len(cls.List)
c.User, c.AttrUser = user, textAttr_colorPercent(user)
c.Sys, c.AttrSys = sys, textAttr_colorPercent(sys)
c.Idle, c.AttrIdle = idle, textAttr_colorPercent(100 - idle)
return c
}
func textAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"text-" + colorPercent(p) + "\"")
}
func labelAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"label label-" + colorPercent(p) + "\"")
}
func colorPercent(p uint) string {
if p > 90 {
return "danger"
}
if p > 80 {
return "warning"
}
if p > 20 {
return "info"
}
return "success"
}
type memory struct {
Total string
Used string
Free string
UsePercent string
AttrUsePercent template.HTMLAttr `json:"-"`
}
type diskInfo struct {
DevName string
Total uint64
Used uint64
Avail uint64
UsePercent float64
Inodes uint64
Iused uint64
Ifree uint64
IusePercent float64
DirName string
}
func valuesSet(req *http.Request, base url.Values, pname string, bimap types.Biseqmap) types.SEQ {
if params, ok := req.Form[pname]; ok && len(params) > 0 {
if seq, ok := bimap.STRING2SEQ[params[0]]; ok {
base.Set(pname, params[0])
return seq
}
}
return bimap.Default_seq
}
func orderDisk(disks []diskInfo, seq types.SEQ) []types.DiskData {
sort.Stable(diskOrder{
disks: disks,
seq: seq,
reverse: _DFBIMAP.SEQ2REVERSE[seq],
})
var dd []types.DiskData
for _, disk := range disks {
total, approxtotal := humanBandback(disk.Total)
used, approxused := humanBandback(disk.Used)
itotal, approxitotal := humanBandback(disk.Inodes)
iused, approxiused := humanBandback(disk.Iused)
short := ""
if len(disk.DevName) > 10 {
short = disk.DevName[:10]
}
dd = append(dd, types.DiskData{
DiskName: disk.DevName,
ShortDiskName: short,
Total: total,
Used: used,
Avail: humanB(disk.Avail),
UsePercent: formatPercent(approxused, approxtotal),
Inodes: itotal,
Iused: iused,
Ifree: humanB(disk.Ifree),
IusePercent: formatPercent(approxiused, approxitotal),
DirName: disk.DirName,
AttrUsePercent: labelAttr_colorPercent(percent(approxused, approxtotal)),
AttrIusePercent: labelAttr_colorPercent(percent(approxiused, approxitotal)),
})
}
return dd
}
var _DFBIMAP = types.Seq2bimap(DFFS, // the default seq for ordering
types.Seq2string{
DFFS: "fs",
DFSIZE: "size",
DFUSED: "used",
DFAVAIL: "avail",
DFMP: "mp",
}, []types.SEQ{
DFFS, DFMP,
})
var _PSBIMAP = types.Seq2bimap(PSPID, // the default seq for ordering
types.Seq2string{
PSPID: "pid",
PSPRI: "pri",
PSNICE: "nice",
PSSIZE: "size",
PSRES: "res",
PSTIME: "time",
PSNAME: "name",
PSUID: "user",
}, []types.SEQ{
PSNAME, PSUID,
})
func username(uids map[uint]string, uid uint) string {
if s, ok := uids[uid]; ok {
return s
}
s := fmt.Sprintf("%d", uid)
if usr, err := user.LookupId(s); err == nil {
s = usr.Username
}
uids[uid] = s
return s
}
func orderProc(procs []types.ProcInfo, seq types.SEQ) []types.ProcData {
sort.Sort(procOrder{ // not sort.Stable
procs: procs,
seq: seq,
reverse: _PSBIMAP.SEQ2REVERSE[seq],
})
if len(procs) > 20 {
procs = procs[:20]
}
uids := map[uint]string{}
var list []types.ProcData
for _, proc := range procs {
list = append(list, types.ProcData{
PID: proc.PID,
Priority: proc.Priority,
Nice: proc.Nice,
Time: formatTime(proc.Time),
Name: proc.Name,
User: username(uids, proc.Uid),
Size: humanB(proc.Size),
Resident: humanB(proc.Resident),
})
}
return list
}
type state struct {
About about
System system
RAWCPU sigar.CpuList
PREVCPU sigar.CpuList
RAM memory
Swap memory
DiskList []diskInfo
ProcList []types.ProcInfo
InterfacesTotal []InterfaceTotal
PrevInterfacesTotal []InterfaceTotal
}
type Page struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces types.Interfaces
DISTRIB string
HTTP_HOST string
}
type pageUpdate struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces []types.DeltaInterface
}
|
stateLock.Lock()
defer stateLock.Unlock()
lastState.PrevInterfacesTotal = []InterfaceTotal{}
lastState.PREVCPU.List = []sigar.Cpu{}
}
func collect() { // state
stateLock.Lock()
defer stateLock.Unlock()
prev_ifstotal := lastState.InterfacesTotal
prev_cpu := lastState.RAWCPU
ifs, ip := NewInterfaces()
about := getAbout()
about.IP = ip
lastState = state{
|
var stateLock sync.Mutex
var lastState state
func reset_prev() {
|
random_line_split
|
http.go
|
.InterfacesTotal[i].In, prevtotals[i].In),
DeltaOut: bps(s.InterfacesTotal[i].Out, prevtotals[i].Out),
}
}
sort.Sort(interfaceOrder(ifs))
return ifs
}
func(s state) cpudelta() sigar.CpuList {
prev := s.PREVCPU
if len(prev.List) == 0 {
return s.RAWCPU
}
// cls := s.RAWCPU
cls := sigar.CpuList{List: make([]sigar.Cpu, len(s.RAWCPU.List)) }
copy(cls.List, s.RAWCPU.List)
for i := range cls.List {
cls.List[i].User -= prev.List[i].User
cls.List[i].Nice -= prev.List[i].Nice
cls.List[i].Sys -= prev.List[i].Sys
cls.List[i].Idle -= prev.List[i].Idle
}
sort.Sort(cpuOrder(cls.List))
return cls
}
func(s state) CPU() types.CPU
|
sum.User += cp.User + cp.Nice
sum.Sys += cp.Sys
sum.Idle += cp.Idle
}
total := sum.User + sum.Sys + sum.Idle // + sum.Nice
user := percent(sum.User, total)
sys := percent(sum.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.N = len(cls.List)
c.User, c.AttrUser = user, textAttr_colorPercent(user)
c.Sys, c.AttrSys = sys, textAttr_colorPercent(sys)
c.Idle, c.AttrIdle = idle, textAttr_colorPercent(100 - idle)
return c
}
func textAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"text-" + colorPercent(p) + "\"")
}
func labelAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"label label-" + colorPercent(p) + "\"")
}
func colorPercent(p uint) string {
if p > 90 {
return "danger"
}
if p > 80 {
return "warning"
}
if p > 20 {
return "info"
}
return "success"
}
type memory struct {
Total string
Used string
Free string
UsePercent string
AttrUsePercent template.HTMLAttr `json:"-"`
}
type diskInfo struct {
DevName string
Total uint64
Used uint64
Avail uint64
UsePercent float64
Inodes uint64
Iused uint64
Ifree uint64
IusePercent float64
DirName string
}
func valuesSet(req *http.Request, base url.Values, pname string, bimap types.Biseqmap) types.SEQ {
if params, ok := req.Form[pname]; ok && len(params) > 0 {
if seq, ok := bimap.STRING2SEQ[params[0]]; ok {
base.Set(pname, params[0])
return seq
}
}
return bimap.Default_seq
}
func orderDisk(disks []diskInfo, seq types.SEQ) []types.DiskData {
sort.Stable(diskOrder{
disks: disks,
seq: seq,
reverse: _DFBIMAP.SEQ2REVERSE[seq],
})
var dd []types.DiskData
for _, disk := range disks {
total, approxtotal := humanBandback(disk.Total)
used, approxused := humanBandback(disk.Used)
itotal, approxitotal := humanBandback(disk.Inodes)
iused, approxiused := humanBandback(disk.Iused)
short := ""
if len(disk.DevName) > 10 {
short = disk.DevName[:10]
}
dd = append(dd, types.DiskData{
DiskName: disk.DevName,
ShortDiskName: short,
Total: total,
Used: used,
Avail: humanB(disk.Avail),
UsePercent: formatPercent(approxused, approxtotal),
Inodes: itotal,
Iused: iused,
Ifree: humanB(disk.Ifree),
IusePercent: formatPercent(approxiused, approxitotal),
DirName: disk.DirName,
AttrUsePercent: labelAttr_colorPercent(percent(approxused, approxtotal)),
AttrIusePercent: labelAttr_colorPercent(percent(approxiused, approxitotal)),
})
}
return dd
}
var _DFBIMAP = types.Seq2bimap(DFFS, // the default seq for ordering
types.Seq2string{
DFFS: "fs",
DFSIZE: "size",
DFUSED: "used",
DFAVAIL: "avail",
DFMP: "mp",
}, []types.SEQ{
DFFS, DFMP,
})
var _PSBIMAP = types.Seq2bimap(PSPID, // the default seq for ordering
types.Seq2string{
PSPID: "pid",
PSPRI: "pri",
PSNICE: "nice",
PSSIZE: "size",
PSRES: "res",
PSTIME: "time",
PSNAME: "name",
PSUID: "user",
}, []types.SEQ{
PSNAME, PSUID,
})
func username(uids map[uint]string, uid uint) string {
if s, ok := uids[uid]; ok {
return s
}
s := fmt.Sprintf("%d", uid)
if usr, err := user.LookupId(s); err == nil {
s = usr.Username
}
uids[uid] = s
return s
}
func orderProc(procs []types.ProcInfo, seq types.SEQ) []types.ProcData {
sort.Sort(procOrder{ // not sort.Stable
procs: procs,
seq: seq,
reverse: _PSBIMAP.SEQ2REVERSE[seq],
})
if len(procs) > 20 {
procs = procs[:20]
}
uids := map[uint]string{}
var list []types.ProcData
for _, proc := range procs {
list = append(list, types.ProcData{
PID: proc.PID,
Priority: proc.Priority,
Nice: proc.Nice,
Time: formatTime(proc.Time),
Name: proc.Name,
User: username(uids, proc.Uid),
Size: humanB(proc.Size),
Resident: humanB(proc.Resident),
})
}
return list
}
type state struct {
About about
System system
RAWCPU sigar.CpuList
PREVCPU sigar.CpuList
RAM memory
Swap memory
DiskList []diskInfo
ProcList []types.ProcInfo
InterfacesTotal []InterfaceTotal
PrevInterfacesTotal []InterfaceTotal
}
type Page struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces types.Interfaces
DISTRIB string
HTTP_HOST string
}
type pageUpdate struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces []types.DeltaInterface
}
var stateLock sync.Mutex
var lastState state
func reset_prev() {
stateLock.Lock()
defer stateLock.Unlock()
lastState.PrevInterfacesTotal = []InterfaceTotal{}
lastState.PREVCPU.List = []sigar.Cpu{}
}
func collect() { // state
stateLock.Lock()
defer stateLock.Unlock()
prev_ifstotal := lastState.InterfacesTotal
prev_cpu := lastState.RAWCPU
ifs, ip := NewInterfaces()
about := getAbout()
about.IP = ip
lastState = state
|
{
sum := sigar.Cpu{}
cls := s.cpudelta()
c := types.CPU{List: make([]types.CPU, len(cls.List))}
for i, cp := range cls.List {
total := cp.User + cp.Nice + cp.Sys + cp.Idle
user := percent(cp.User, total)
sys := percent(cp.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.List[i].N = i
c.List[i].User, c.List[i].AttrUser = user, textAttr_colorPercent(user)
c.List[i].Sys, c.List[i].AttrSys = sys, textAttr_colorPercent(sys)
c.List[i].Idle, c.List[i].AttrIdle = idle, textAttr_colorPercent(100 - idle)
|
identifier_body
|
http.go
|
.InterfacesTotal[i].In, prevtotals[i].In),
DeltaOut: bps(s.InterfacesTotal[i].Out, prevtotals[i].Out),
}
}
sort.Sort(interfaceOrder(ifs))
return ifs
}
func(s state) cpudelta() sigar.CpuList {
prev := s.PREVCPU
if len(prev.List) == 0 {
return s.RAWCPU
}
// cls := s.RAWCPU
cls := sigar.CpuList{List: make([]sigar.Cpu, len(s.RAWCPU.List)) }
copy(cls.List, s.RAWCPU.List)
for i := range cls.List {
cls.List[i].User -= prev.List[i].User
cls.List[i].Nice -= prev.List[i].Nice
cls.List[i].Sys -= prev.List[i].Sys
cls.List[i].Idle -= prev.List[i].Idle
}
sort.Sort(cpuOrder(cls.List))
return cls
}
func(s state) CPU() types.CPU {
sum := sigar.Cpu{}
cls := s.cpudelta()
c := types.CPU{List: make([]types.CPU, len(cls.List))}
for i, cp := range cls.List {
total := cp.User + cp.Nice + cp.Sys + cp.Idle
user := percent(cp.User, total)
sys := percent(cp.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.List[i].N = i
c.List[i].User, c.List[i].AttrUser = user, textAttr_colorPercent(user)
c.List[i].Sys, c.List[i].AttrSys = sys, textAttr_colorPercent(sys)
c.List[i].Idle, c.List[i].AttrIdle = idle, textAttr_colorPercent(100 - idle)
sum.User += cp.User + cp.Nice
sum.Sys += cp.Sys
sum.Idle += cp.Idle
}
total := sum.User + sum.Sys + sum.Idle // + sum.Nice
user := percent(sum.User, total)
sys := percent(sum.Sys, total)
idle := uint(0)
if user + sys < 100 {
idle = 100 - user - sys
}
c.N = len(cls.List)
c.User, c.AttrUser = user, textAttr_colorPercent(user)
c.Sys, c.AttrSys = sys, textAttr_colorPercent(sys)
c.Idle, c.AttrIdle = idle, textAttr_colorPercent(100 - idle)
return c
}
func textAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"text-" + colorPercent(p) + "\"")
}
func labelAttr_colorPercent(p uint) template.HTMLAttr {
return template.HTMLAttr(" class=\"label label-" + colorPercent(p) + "\"")
}
func colorPercent(p uint) string {
if p > 90 {
return "danger"
}
if p > 80 {
return "warning"
}
if p > 20 {
return "info"
}
return "success"
}
type memory struct {
Total string
Used string
Free string
UsePercent string
AttrUsePercent template.HTMLAttr `json:"-"`
}
type diskInfo struct {
DevName string
Total uint64
Used uint64
Avail uint64
UsePercent float64
Inodes uint64
Iused uint64
Ifree uint64
IusePercent float64
DirName string
}
func valuesSet(req *http.Request, base url.Values, pname string, bimap types.Biseqmap) types.SEQ {
if params, ok := req.Form[pname]; ok && len(params) > 0 {
if seq, ok := bimap.STRING2SEQ[params[0]]; ok {
base.Set(pname, params[0])
return seq
}
}
return bimap.Default_seq
}
func orderDisk(disks []diskInfo, seq types.SEQ) []types.DiskData {
sort.Stable(diskOrder{
disks: disks,
seq: seq,
reverse: _DFBIMAP.SEQ2REVERSE[seq],
})
var dd []types.DiskData
for _, disk := range disks
|
Iused: iused,
Ifree: humanB(disk.Ifree),
IusePercent: formatPercent(approxiused, approxitotal),
DirName: disk.DirName,
AttrUsePercent: labelAttr_colorPercent(percent(approxused, approxtotal)),
AttrIusePercent: labelAttr_colorPercent(percent(approxiused, approxitotal)),
})
}
return dd
}
var _DFBIMAP = types.Seq2bimap(DFFS, // the default seq for ordering
types.Seq2string{
DFFS: "fs",
DFSIZE: "size",
DFUSED: "used",
DFAVAIL: "avail",
DFMP: "mp",
}, []types.SEQ{
DFFS, DFMP,
})
var _PSBIMAP = types.Seq2bimap(PSPID, // the default seq for ordering
types.Seq2string{
PSPID: "pid",
PSPRI: "pri",
PSNICE: "nice",
PSSIZE: "size",
PSRES: "res",
PSTIME: "time",
PSNAME: "name",
PSUID: "user",
}, []types.SEQ{
PSNAME, PSUID,
})
func username(uids map[uint]string, uid uint) string {
if s, ok := uids[uid]; ok {
return s
}
s := fmt.Sprintf("%d", uid)
if usr, err := user.LookupId(s); err == nil {
s = usr.Username
}
uids[uid] = s
return s
}
func orderProc(procs []types.ProcInfo, seq types.SEQ) []types.ProcData {
sort.Sort(procOrder{ // not sort.Stable
procs: procs,
seq: seq,
reverse: _PSBIMAP.SEQ2REVERSE[seq],
})
if len(procs) > 20 {
procs = procs[:20]
}
uids := map[uint]string{}
var list []types.ProcData
for _, proc := range procs {
list = append(list, types.ProcData{
PID: proc.PID,
Priority: proc.Priority,
Nice: proc.Nice,
Time: formatTime(proc.Time),
Name: proc.Name,
User: username(uids, proc.Uid),
Size: humanB(proc.Size),
Resident: humanB(proc.Resident),
})
}
return list
}
type state struct {
About about
System system
RAWCPU sigar.CpuList
PREVCPU sigar.CpuList
RAM memory
Swap memory
DiskList []diskInfo
ProcList []types.ProcInfo
InterfacesTotal []InterfaceTotal
PrevInterfacesTotal []InterfaceTotal
}
type Page struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces types.Interfaces
DISTRIB string
HTTP_HOST string
}
type pageUpdate struct {
About about
System system
CPU types.CPU
RAM memory
Swap memory
DiskTable DiskTable
ProcTable ProcTable
Interfaces []types.DeltaInterface
}
var stateLock sync.Mutex
var lastState state
func reset_prev() {
stateLock.Lock()
defer stateLock.Unlock()
lastState.PrevInterfacesTotal = []InterfaceTotal{}
lastState.PREVCPU.List = []sigar.Cpu{}
}
func collect() { // state
stateLock.Lock()
defer stateLock.Unlock()
prev_ifstotal := lastState.InterfacesTotal
prev_cpu := lastState.RAWCPU
ifs, ip := NewInterfaces()
about := getAbout()
about.IP = ip
lastState = state{
|
{
total, approxtotal := humanBandback(disk.Total)
used, approxused := humanBandback(disk.Used)
itotal, approxitotal := humanBandback(disk.Inodes)
iused, approxiused := humanBandback(disk.Iused)
short := ""
if len(disk.DevName) > 10 {
short = disk.DevName[:10]
}
dd = append(dd, types.DiskData{
DiskName: disk.DevName,
ShortDiskName: short,
Total: total,
Used: used,
Avail: humanB(disk.Avail),
UsePercent: formatPercent(approxused, approxtotal),
Inodes: itotal,
|
conditional_block
|
prohack-github.py
|
df[3865:]
test=test.drop("y", axis = 1)
test_res= test.copy()
# %% [markdown]
# ### Checking how many galaxies are there and how many of them are distinct.
#
# - There are **181** distinct galaxies on the training set and **172** on the test set.
#
# - On average, each galaxy has **20** samples on the training set and **5** on the test set.
#
# - **Some galaxies on the training set do not exist on the test set.**
#
# - **Galaxy 126** has only one sample. I discard it in the training phase.
#
# As far as I know, the World Bank has **182** members (countries) in the 2000s (IBRD). Each distinct galaxy may represent a country in real life. Every sample for a galaxy may represent the properties of the country at a point in time (galactic year).
# %%
train_gal=set(train["galaxy"])
s=0
for x in train_gal:
s=s+len(train.loc[train['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(train_gal)))
print("Average samples per galaxy: {}".format(s/len(train_gal)))
# %%
test_gal=set(test["galaxy"])
s=0
for x in test_gal:
s=s+len(test.loc[test['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(test_gal)))
print("Average samples per galaxy: {}".format(s/len(test_gal)))
# %% [markdown]
# #### Number of samples and features
# Train set: 3865
#
# Test set: 890
#
# Features: 79
# %%
print("Train vector: " + str(train.shape))
print("Test vector: " + str(test.shape))
# %% [markdown]
# ## Methods for Cross-validating Training Data
#
# - I trained **a model for every distinct galaxy** in the training set (180), except the one for the 126th galaxy, as it has only one sample.
#
# - I used **features with the top x correlation** with respect to y (the target variable), selected per galaxy. (x is found by trying different values [20,25,30,40,50,60,70])
#
# - Missing values are filled with the galaxy-specific 'mean' of the data. (Median can be used alternatively.)
#
# - **Train and test sets are not mixed for both imputation and standardization.**
#
# - Standard Scaler is used to standardize data.
#
# - Gradient Boosted Regression is used as a model.
# %%
def cross_validation_loop(data,cor):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(cor).index
data=data[columns]
# imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(data)
# data=imp.transform(data)
scaler = StandardScaler().fit(data)
data = scaler.transform(data)
xgb1 = XGBRegressor(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
objective='reg:squarederror',
colsample_bytree=0.8,
nthread=4,
scale_pos_weight=1,
seed=42)
estimator = XGBRegressor(n_estimators=300)
#estimator = GradientBoostingRegressor(n_estimators=300)
cv_results = cross_validate(estimator, data, labels, cv=5, scoring='neg_root_mean_squared_error')
error=np.mean(cv_results['test_score'])
return error
# %% [markdown]
# #### Code for cross-validating a model for every galaxy
#
# I return the mean of the cross-validation scores, disregarding the differences in their sample sizes.
# %%
train_gal=set(train["galaxy"])
train_gal.remove(126)
def loop_train(cor):
errors=[]
for gal in tqdm(train_gal):
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
errors.append(cross_validation_loop(data,cor))
return np.mean(errors)
# %% [markdown]
# #### Checking which correlation threshold gives better value
#
# The model performs best when the threshold is 20 with RMSE of 0.0063
# %%
cor=[20,25,30,40,50,60,70,80]
errors=[]
for x in cor:
print
|
%%
print(errors)
# [-0.005510409192904806, -0.005474700678841418, -0.005478204236398942, -0.005493891458843025, -0.005485265856592613, -0.005493237060981963, -0.005493713846323645, -0.0055068515842603225]
# %% [markdown]
# ## Making predictions on the test data
#
# - Similar methodology is used to fill the missing value and standardization.
# - The best correlation threshold from the cross-validation, 20, is used.
# %%
def test_loop(data, test_data):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(20).index
train_labels= labels
train_data=data[columns]
test_data= test_data[columns]
imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(train_data)
train_data=imp.transform(train_data)
test_data=imp.transform(test_data)
scaler = StandardScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
model = GradientBoostingRegressor(n_estimators=300)
model.fit(train_data, train_labels)
predictions = model.predict(test_data)
return predictions
# %% [markdown]
# #### Sorting samples with respect to their unique galaxy type.
# %%
test=test_res
test=test.sort_values(by=['galaxy'])
test_pred = pd.DataFrame(0, index=np.arange(len(test)), columns=["predicted_y"])
# %% [markdown]
# #### Looping over all galaxy types in the test set and making predictions.
# %%
i=0
for gal in test_gal:
count=len(test.loc[test['galaxy'] == gal])
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
pred=test_loop(data,test.loc[test['galaxy']==gal])
test_pred.loc[i:i+count-1,'predicted_y'] = pred
i=i+count
# %% [markdown]
# #### Sorting samples with respect to the index.
# %%
test["predicted_y"]=test_pred.to_numpy()
test.sort_index(inplace=True)
predictions = test["predicted_y"]
# %% [markdown]
# ## Discussion 1
#
# - With this approach, we are **not using 8 galaxies in the training set as they are not in the test set.** (Almost 160 samples)
#
# - A better approach should use them as well.
#
# - According to our theory, every galaxy represents a country and its samples are the country's properties at a point in time (maybe the galactic year represents time).
#
# - Some countries may have missing values as they may have joined IBRD late. This may be the organizers' decision as well. Filling missing values with regression can improve performance.
#
# - World Bank categorizes countries by both region and income: https://datahelpdesk.worldbank.org/knowledgebase/articles/906519-world-bank-country-and-lending-groups
#
# 7 regions: East Asia and Pacific, Europe and Central Asia, Latin America & the Caribbean, Middle East and North Africa, North America, South Asia, Sub-Saharan Africa
#
# 4 income groups: Low-income economies, Lower-middle-income economies, Upper-middle-income economies, High-income economies
#
# - Clustering galaxies may improve the performance of the model. I would try clustering galaxies into either 4 or 7 clusters, then try imputation/training per cluster.
#
# This code is a summary of what we have done. We also analyzed the per-galaxy cross-validation RMSE.
#
# Galaxies: {128, 2, 4, 5, 133, 11, 140, 147, 153, 154, 3
|
("cor: ",x)
errors.append(loop_train(x))
#
|
conditional_block
|
prohack-github.py
|
[train['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(train_gal)))
print("Average samples per galaxy: {}".format(s/len(train_gal)))
# %%
test_gal=set(test["galaxy"])
s=0
for x in test_gal:
s=s+len(test.loc[test['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(test_gal)))
print("Average samples per galaxy: {}".format(s/len(test_gal)))
# %% [markdown]
# #### Number of samples and features
# Train set: 3865
#
# Test set: 890
#
# Features: 79
# %%
print("Train vector: " + str(train.shape))
print("Test vector: " + str(test.shape))
# %% [markdown]
# ## Methods for Cross-validating Training Data
#
# - I trained **a model for every distinct galaxy** in the training set (180), except the one for the 126th galaxy, as it has only one sample.
#
# - I used **features with the top x correlation** with respect to y (the target variable), selected per galaxy. (x is found by trying different values [20,25,30,40,50,60,70])
#
# - Missing values are filled with the galaxy-specific 'mean' of the data. (Median can be used alternatively.)
#
# - **Train and test sets are not mixed for both imputation and standardization.**
#
# - Standard Scaler is used to standardize data.
#
# - Gradient Boosted Regression is used as a model.
# %%
def cross_validation_loop(data,cor):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(cor).index
data=data[columns]
# imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(data)
# data=imp.transform(data)
scaler = StandardScaler().fit(data)
data = scaler.transform(data)
xgb1 = XGBRegressor(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
objective='reg:squarederror',
colsample_bytree=0.8,
nthread=4,
scale_pos_weight=1,
seed=42)
estimator = XGBRegressor(n_estimators=300)
#estimator = GradientBoostingRegressor(n_estimators=300)
cv_results = cross_validate(estimator, data, labels, cv=5, scoring='neg_root_mean_squared_error')
error=np.mean(cv_results['test_score'])
return error
# %% [markdown]
# #### Code for cross-validating a model for every galaxy
#
# I return the mean of the cross-validation scores, disregarding the differences in their sample sizes.
# %%
train_gal=set(train["galaxy"])
train_gal.remove(126)
def loop_train(cor):
errors=[]
for gal in tqdm(train_gal):
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
errors.append(cross_validation_loop(data,cor))
return np.mean(errors)
# %% [markdown]
# #### Checking which correlation threshold gives better value
#
# The model performs best when the threshold is 20 with RMSE of 0.0063
# %%
cor=[20,25,30,40,50,60,70,80]
errors=[]
for x in cor:
print("cor: ",x)
errors.append(loop_train(x))
# %%
print(errors)
# [-0.005510409192904806, -0.005474700678841418, -0.005478204236398942, -0.005493891458843025, -0.005485265856592613, -0.005493237060981963, -0.005493713846323645, -0.0055068515842603225]
# %% [markdown]
# ## Making predictions on the test data
#
# - Similar methodology is used to fill the missing value and standardization.
# - The best correlation threshold from the cross-validation, 20, is used.
# %%
def test_loop(data, test_data):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(20).index
train_labels= labels
train_data=data[columns]
test_data= test_data[columns]
imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(train_data)
train_data=imp.transform(train_data)
test_data=imp.transform(test_data)
scaler = StandardScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
model = GradientBoostingRegressor(n_estimators=300)
model.fit(train_data, train_labels)
predictions = model.predict(test_data)
return predictions
# %% [markdown]
# #### Sorting samples with respect to their unique galaxy type.
# %%
test=test_res
test=test.sort_values(by=['galaxy'])
test_pred = pd.DataFrame(0, index=np.arange(len(test)), columns=["predicted_y"])
# %% [markdown]
# #### Looping over all galaxy types in the test set and making predictions.
# %%
i=0
for gal in test_gal:
count=len(test.loc[test['galaxy'] == gal])
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
pred=test_loop(data,test.loc[test['galaxy']==gal])
test_pred.loc[i:i+count-1,'predicted_y'] = pred
i=i+count
# %% [markdown]
# #### Sorting samples with respect to the index.
# %%
test["predicted_y"]=test_pred.to_numpy()
test.sort_index(inplace=True)
predictions = test["predicted_y"]
# %% [markdown]
# ## Discussion 1
#
# - With this approach, we are **not using 8 galaxies in the training set as they are not in the test set.** (Almost 160 samples)
#
# - A better approach should use them as well.
#
# - According to our theory, every galaxy represents a country and its samples are the country's properties at a point in time (maybe the galactic year represents time).
#
# - Some countries may have missing values as they may have joined IBRD late. This may be the organizers' decision as well. Filling missing values with regression can improve performance.
#
# - World Bank categorizes countries by both region and income: https://datahelpdesk.worldbank.org/knowledgebase/articles/906519-world-bank-country-and-lending-groups
#
# 7 regions: East Asia and Pacific, Europe and Central Asia, Latin America & the Caribbean, Middle East and North Africa, North America, South Asia, Sub-Saharan Africa
#
# 4 income groups: Low-income economies, Lower-middle-income economies, Upper-middle-income economies, High-income economies
#
# - Clustering galaxies may improve the performance of the model. I would try clustering galaxies into either 4 or 7 clusters, then try imputation/training per cluster (a minimal sketch follows after this cell).
#
# This code is a summary of what we have done. We also analyzed the per-galaxy cross-validation RMSE.
#
# Galaxies: {128, 2, 4, 5, 133, 11, 140, 147, 153, 154, 34, 35, 40, 43, 55, 64, 76, 78, 83, 100, 101, 102, 107, 108, 119} have RMSE over 0.008.
#
# The list gives them in order, 128th having 0.008559 and 119th having 0.034926.
#
# - Fine tuning these problematic galaxies with low cross-validation scores can excel the performance of the model
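# %% [markdown]
# #### Sketch: clustering galaxies before per-cluster imputation/training
#
# A minimal, untested sketch of the clustering idea above, not part of the submitted pipeline. It assumes `train` is the training frame used earlier (with a `galaxy` column and target `y`); `N_CLUSTERS`, `galaxy_profiles`, `galaxy_cluster` and `train_clusters` are illustrative names.
# %%
from sklearn.cluster import KMeans

N_CLUSTERS = 4  # or 7, mirroring the World Bank income/region split
# one row per galaxy: the mean of every feature for that galaxy
galaxy_profiles = (train.drop('y', axis=1)
                        .groupby('galaxy')
                        .mean()
                        .fillna(0))
profiles_scaled = StandardScaler().fit_transform(galaxy_profiles)
kmeans = KMeans(n_clusters=N_CLUSTERS, random_state=42).fit(profiles_scaled)
# map each galaxy id to its cluster label; imputation/training could then be done per cluster
galaxy_cluster = dict(zip(galaxy_profiles.index, kmeans.labels_))
train_clusters = train['galaxy'].map(galaxy_cluster)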
# %% [markdown]
# ## Optimization part
#
# - Ideally, assigning 100 to the top 500 samples with the highest p^2 values should maximize the likely increase (a sketch of this allocation follows).
# - However, as the predictions can be faulty, this approach would result in a lower leaderboard score.
|
#
# E.g., if the original p^2 value is higher than the predicted p^2, it will increase the error because we are directly assigning it 0.
#
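# %% [markdown]
# #### Sketch: allocating the extra energy to the top-500 predictions
#
# A minimal sketch of the allocation heuristic described above, not the submitted solution. It assumes a Series `p_squared`, indexed like `predictions`, that already holds every test sample's p^2 value computed from the predicted y with the competition's formula; `p_squared` and `allocation` are illustrative names, and the 500/100 numbers mirror the idea in the text.
# %%
allocation = pd.Series(0.0, index=p_squared.index, name="pred_opt")
top_idx = p_squared.nlargest(500).index   # the 500 samples with the highest p^2
allocation.loc[top_idx] = 100.0           # give each of them the full 100 units of energy
# every other sample keeps 0, which is exactly the risk the discussion points out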
|
random_line_split
|
|
prohack-github.py
|
[3865:]
test=test.drop("y", axis = 1)
test_res= test.copy()
# %% [markdown]
# ### Checking how many galaxies are there and how many of them are distinct.
#
# - There are **181** distinct galaxies in the training set and **172** in the test set.
#
# - Overall, each galaxy has **20** samples in the training set and **5** in the test set.
#
# - **Some galaxies in the training set do not exist in the test set.**
#
# - **Galaxy 126** has only one sample. I discard it in the training phase.
#
# As far as I know, the World Bank (IBRD) has **182** member countries in the 2000s. Each distinct galaxy may represent a country in real life. Every sample for a galaxy may represent the properties of that country at a point in time (the galactic year).
# %%
train_gal=set(train["galaxy"])
s=0
for x in train_gal:
s=s+len(train.loc[train['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(train_gal)))
print("Average samples per galaxy: {}".format(s/len(train_gal)))
# %%
test_gal=set(test["galaxy"])
s=0
for x in test_gal:
s=s+len(test.loc[test['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(test_gal)))
print("Average samples per galaxy: {}".format(s/len(test_gal)))
# %% [markdown]
# #### Number of samples and features
# Train set: 3865
#
# Test set: 890
#
# Features: 79
# %%
print("Train vector: " + str(train.shape))
print("Test vector: " + str(test.shape))
# %% [markdown]
# ## Methods for Cross-validating Training Data
#
# - I trained **a model for every distinct galaxy** in the training set (180), except the one for galaxy 126 as it has only one sample.
#
# - I used **features with the top x correlation** with respect to y (the target variable), galaxy-specific. (x is found by trying different values [20,25,30,40,50,60,70])
#
# - Missing values are filled with the galaxy-specific 'mean' of the data. (The median can be used alternatively.)
#
# - **Train and test sets are not mixed for both imputation and standardization.**
#
# - Standard Scaler is used to standardize data.
#
# - Gradient Boosted Regression is used as a model.
# %%
def cross_validation_loop(data,cor):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(cor).index
data=data[columns]
# imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(data)
# data=imp.transform(data)
scaler = StandardScaler().fit(data)
data = scaler.transform(data)
xgb1 = XGBRegressor(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
objective='reg:squarederror',
colsample_bytree=0.8,
nthread=4,
scale_pos_weight=1,
seed=42)
estimator = XGBRegressor(n_estimators=300)
#estimator = GradientBoostingRegressor(n_estimators=300)
cv_results = cross_validate(estimator, data, labels, cv=5, scoring='neg_root_mean_squared_error')
error=np.mean(cv_results['test_score'])
return error
# %% [markdown]
# #### Code for cross-validating a model for every galaxy
#
# I return the mean of the cross-validation scores, disregarding differences in their sample sizes.
# %%
train_gal=set(train["galaxy"])
train_gal.remove(126)
def loop_
|
:
errors=[]
for gal in tqdm(train_gal):
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
errors.append(cross_validation_loop(data,cor))
return np.mean(errors)
# %% [markdown]
# #### Checking which correlation threshold gives the best value
#
# The model performs best when the threshold is 20, with an RMSE of 0.0063
# %%
cor=[20,25,30,40,50,60,70,80]
errors=[]
for x in cor:
print("cor: ",x)
errors.append(loop_train(x))
# %%
print(errors)
# [-0.005510409192904806, -0.005474700678841418, -0.005478204236398942, -0.005493891458843025, -0.005485265856592613, -0.005493237060981963, -0.005493713846323645, -0.0055068515842603225]
# %% [markdown]
# ## Making predictions on the test data
#
# - A similar methodology is used for filling missing values and for standardization.
# - The best correlation threshold from the cross-validation, 20, is used.
# %%
def test_loop(data, test_data):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(20).index
train_labels= labels
train_data=data[columns]
test_data= test_data[columns]
imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(train_data)
train_data=imp.transform(train_data)
test_data=imp.transform(test_data)
scaler = StandardScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
model = GradientBoostingRegressor(n_estimators=300)
model.fit(train_data, train_labels)
predictions = model.predict(test_data)
return predictions
# %% [markdown]
# #### Sorting samples with respect to their unique galaxy type.
# %%
test=test_res
test=test.sort_values(by=['galaxy'])
test_pred = pd.DataFrame(0, index=np.arange(len(test)), columns=["predicted_y"])
# %% [markdown]
# #### Looping over all galaxy types in the test set and making predictions.
# %%
i=0
for gal in test_gal:
count=len(test.loc[test['galaxy'] == gal])
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
pred=test_loop(data,test.loc[test['galaxy']==gal])
test_pred.loc[i:i+count-1,'predicted_y'] = pred
i=i+count
# %% [markdown]
# #### Sorting samples with respect to the index.
# %%
test["predicted_y"]=test_pred.to_numpy()
test.sort_index(inplace=True)
predictions = test["predicted_y"]
# %% [markdown]
# ## Discussion 1
#
# - With this approach, we are **not using 8 galaxies in the training set as they are not in the test set.** (Almost 160 samples)
#
# - A better approach should use them as well.
#
# - According to our theory, every galaxy represents a country and its samples are the country's properties at a point in time (the galactic year may represent time).
#
# - Some countries may have missing values as they may have joined the IBRD late. This may also be the organizers' decision. Filling missing values with regression can improve performance.
#
# - World Bank categorizes countries by both region and income: https://datahelpdesk.worldbank.org/knowledgebase/articles/906519-world-bank-country-and-lending-groups
#
# 7 regions: East Asia and Pacific, Europe and Central Asia, Latin America & the Caribbean, Middle East and North Africa, North America, South Asia, Sub-Saharan Africa
#
# 4 income groups: Low-income economies, Lower-middle-income economies, Upper-middle-income economies, High-income economies
#
# - Clustering galaxies may improve the performance of the model. I would try clustering the galaxies into either 4 or 7 clusters, then perform imputation/training per cluster.
#
# This code is a summary of what we have done. We also analyzed the per-galaxy cross-validation RMSE.
#
# Galaxies: {128, 2, 4, 5, 133, 11, 140, 147, 153, 154,
|
train(cor)
|
identifier_name
|
prohack-github.py
|
[3865:]
test=test.drop("y", axis = 1)
test_res= test.copy()
# %% [markdown]
# ### Checking how many galaxies are there and how many of them are distinct.
#
# - There are **181** distinct galaxies in the training set and **172** in the test set.
#
# - Overall, each galaxy has **20** samples in the training set and **5** in the test set.
#
# - **Some galaxies in the training set do not exist in the test set.**
#
# - **Galaxy 126** has only one sample. I discard it in the training phase.
#
# As far as I know, the World Bank (IBRD) has **182** member countries in the 2000s. Each distinct galaxy may represent a country in real life. Every sample for a galaxy may represent the properties of that country at a point in time (the galactic year).
# %%
train_gal=set(train["galaxy"])
s=0
for x in train_gal:
s=s+len(train.loc[train['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(train_gal)))
print("Average samples per galaxy: {}".format(s/len(train_gal)))
# %%
test_gal=set(test["galaxy"])
s=0
for x in test_gal:
s=s+len(test.loc[test['galaxy'] == x])
print("Total distinct galaxies: {}".format(len(test_gal)))
print("Average samples per galaxy: {}".format(s/len(test_gal)))
# %% [markdown]
# #### Number of samples and features
# Train set: 3865
#
# Test set: 890
#
# Features: 79
# %%
print("Train vector: " + str(train.shape))
print("Test vector: " + str(test.shape))
# %% [markdown]
# ## Methods for Cross-validating Training Data
#
# - I trained **a model for every distinct galaxy** in the training set (180), except the one for galaxy 126 as it has only one sample.
#
# - I used **features with the top x correlation** with respect to y (the target variable), galaxy-specific. (x is found by trying different values [20,25,30,40,50,60,70])
#
# - Missing values are filled with the galaxy-specific 'mean' of the data. (The median can be used alternatively.)
#
# - **Train and test sets are not mixed for both imputation and standardization.**
#
# - Standard Scaler is used to standardize data.
#
# - Gradient Boosted Regression is used as a model.
# %%
def cross_validation_loop(data,cor):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(cor).index
data=data[columns]
# imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(data)
# data=imp.transform(data)
scaler = StandardScaler().fit(data)
data = scaler.transform(data)
xgb1 = XGBRegressor(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
objective='reg:squarederror',
colsample_bytree=0.8,
nthread=4,
scale_pos_weight=1,
seed=42)
estimator = XGBRegressor(n_estimators=300)
#estimator = GradientBoostingRegressor(n_estimators=300)
cv_results = cross_validate(estimator, data, labels, cv=5, scoring='neg_root_mean_squared_error')
error=np.mean(cv_results['test_score'])
return error
# %% [markdown]
# #### Code for cross-validating a model for every galaxy
#
# I return the mean of the cross-validation scores, disregarding differences in their sample sizes.
# %%
train_gal=set(train["galaxy"])
train_gal.remove(126)
def loop_train(cor):
error
|
% [markdown]
# #### Checking which correlation threshold gives the best value
#
# The model performs best when the threshold is 20, with an RMSE of 0.0063
# %%
cor=[20,25,30,40,50,60,70,80]
errors=[]
for x in cor:
print("cor: ",x)
errors.append(loop_train(x))
# %%
print(errors)
# [-0.005510409192904806, -0.005474700678841418, -0.005478204236398942, -0.005493891458843025, -0.005485265856592613, -0.005493237060981963, -0.005493713846323645, -0.0055068515842603225]
# %% [markdown]
# ## Making predictions on the test data
#
# - A similar methodology is used for filling missing values and for standardization.
# - The best correlation threshold from the cross-validation, 20, is used.
# %%
def test_loop(data, test_data):
labels= data['y']
data=data.drop('galaxy', axis=1)
data=data.drop('y', axis=1)
correlation=abs(data.corrwith(labels))
columns=correlation.nlargest(20).index
train_labels= labels
train_data=data[columns]
test_data= test_data[columns]
imp = SimpleImputer(missing_values=np.nan, strategy='mean').fit(train_data)
train_data=imp.transform(train_data)
test_data=imp.transform(test_data)
scaler = StandardScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
model = GradientBoostingRegressor(n_estimators=300)
model.fit(train_data, train_labels)
predictions = model.predict(test_data)
return predictions
# %% [markdown]
# #### Sorting samples with respect to their unique galaxy type.
# %%
test=test_res
test=test.sort_values(by=['galaxy'])
test_pred = pd.DataFrame(0, index=np.arange(len(test)), columns=["predicted_y"])
# %% [markdown]
# #### Looping over all galaxy types in the test set and making predictions.
# %%
i=0
for gal in test_gal:
count=len(test.loc[test['galaxy'] == gal])
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
pred=test_loop(data,test.loc[test['galaxy']==gal])
test_pred.loc[i:i+count-1,'predicted_y'] = pred
i=i+count
# %% [markdown]
# #### Sorting samples with respect to the index.
# %%
test["predicted_y"]=test_pred.to_numpy()
test.sort_index(inplace=True)
predictions = test["predicted_y"]
# %% [markdown]
# ## Discussion 1
#
# - With this approach, we are **not using 8 galaxies in the training set as they are not in the test set.** (Almost 160 samples)
#
# - A better approach should use them as well.
#
# - According to our theory, every galaxy represents a country and its samples are the country's properties at a point in time (the galactic year may represent time).
#
# - Some countries may have missing values as they may have joined the IBRD late. This may also be the organizers' decision. Filling missing values with regression can improve performance.
#
# - World Bank categorizes countries by both region and income: https://datahelpdesk.worldbank.org/knowledgebase/articles/906519-world-bank-country-and-lending-groups
#
# 7 regions: East Asia and Pacific, Europe and Central Asia, Latin America & the Caribbean, Middle East and North Africa, North America, South Asia, Sub-Saharan Africa
#
# 4 income groups: Low-income economies, Lower-middle-income economies, Upper-middle-income economies, High-income economies
#
# - Clustering galaxies may improve the performance of the model. I would try clustering the galaxies into either 4 or 7 clusters, then perform imputation/training per cluster.
#
# This code is a summary of what we have done. We also analyzed the per-galaxy cross-validation RMSE.
#
# Galaxies: {128, 2, 4, 5, 133, 11, 140, 147, 153, 154,
|
s=[]
for gal in tqdm(train_gal):
index = train.index[train['galaxy'] == gal]
data = train.loc[index]
errors.append(cross_validation_loop(data,cor))
return np.mean(errors)
# %
|
identifier_body
|
myRansac.py
|
] ) is None: continue
new_d = time.strptime( lstr[0], '%Y/%m/%d' )
if new_d < BS_DATE: continue
b = float( lstr[idx] )
data.append( [lstr[0], b] )
return data, csv_name
def GetCleanData( data_x, data_y ):
i = 0
j = 0
data_out = []
while i < len( data_x ) and j < len( data_y ):
item_x = data_x[i]
item_y = data_y[j]
tx = time.strptime( item_x[0], '%Y/%m/%d' )
ty = time.strptime( item_y[0], '%Y/%m/%d' )
if tx < ty:
i += 1
continue
if tx > ty:
j += 1
continue
data_out.append([item_x[1], item_y[1] ])
i += 1
j += 1
#end of while loop
return data_out
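# Illustrative only: a tiny, hypothetical usage of GetCleanData showing the
# date-aligned inner join it performs; the sample rows below are made up, while in
# the real flow data_x/data_y come from GetDataFromCSV as [date_string, value] pairs.
def _example_get_clean_data():
    sample_x = [['2020/01/02', 1.0], ['2020/01/03', 1.1], ['2020/01/06', 1.2]]
    sample_y = [['2020/01/03', 9.5], ['2020/01/06', 9.7], ['2020/01/07', 9.9]]
    # only dates present in both series survive: 2020/01/03 and 2020/01/06
    return GetCleanData(sample_x, sample_y)  # -> [[1.1, 9.5], [1.2, 9.7]]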
def log_msg( str = '' ):
if str == '': return
time_string = time.strftime( "%Y-%m-%d %X", time.localtime())
with open( LOG_FILE,'a' ) as log_file:
log_file.write( time_string + ': ' + str + '\r\n' )
return
#fit model parameters to data using the RANSAC algorithm
#This implementation written from pseudocode found at
#http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182
#Given:
# data - a set of observed data points
# model - a model that can be fitted to data points
# n - the minimum number of data values required to fit the model
# k - the maximum number of iterations allowed in the algorithm
# t - a threshold value for determining when a data point fits a model
# d - the number of close data values required to assert that a model fits well to data
#Return:
# bestfit - model parameters which best fit the data (or nil if no good model is found)
def ransac(data,model,n,k,t,d):
iterations = 0
bestfit = None
besterr = numpy.inf
best_inlier_idxs = None
best_d = d
while iterations < k:
maybe_idxs, test_idxs = random_partition(n,data.shape[0])
maybeinliers = data[maybe_idxs,:]
test_points = data[test_idxs]
maybemodel = model.fit(maybeinliers)
test_err = model.get_error( test_points, maybemodel)
also_idxs = test_idxs[test_err < t] # select indices of rows with accepted points
alsoinliers = data[also_idxs,:]
if len(alsoinliers) > d:
betterdata = numpy.concatenate( (maybeinliers, alsoinliers) )
bettermodel = model.fit(betterdata)
better_errs = model.get_error( betterdata, bettermodel)
thiserr = numpy.mean( better_errs )
this_d = len(alsoinliers)
if this_d > best_d:
best_d = this_d
bestfit = bettermodel
besterr = thiserr
best_inlier_idxs = numpy.concatenate( (maybe_idxs, also_idxs) )
iterations+=1
if bestfit is None:
log_msg("Did not meet fit acceptance criteria")
return bestfit, {'inliers':best_inlier_idxs, 'lenth': best_d}
#return n random rows of data (and also the other len(data)-n rows)
def random_partition(n,n_data):
all_idxs = numpy.arange( n_data )
numpy.random.shuffle(all_idxs)
idxs1 = all_idxs[:n]
idxs2 = all_idxs[n:]
return idxs1, idxs2
#linear system solved using linear least squares
#This class serves as an example that fulfills the model interface
#needed by the ransac() function.
class LinearLeastSquaresModel:
def __init__(self,input_columns,output_columns,debug=False):
self.input_columns = input_columns
self.output_columns = output_columns
self.debug = debug
def fit(self, data):
A0 = numpy.vstack([data[:,i] for i in self.input_columns])[0]
A = numpy.vstack([A0, numpy.ones(len(A0))]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
x,resids,rank,s = numpy.linalg.lstsq(A,B)
return x
def get_error( self, data, model):
A = numpy.vstack([data[:,i] for i in self.input_columns]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
#B_fit = numpy.dot(A,model)
B_fit = A * model[0][0] + model[1][0]
err_per_point = numpy.sum((B-B_fit)**2,axis=1) # sum squared error per row
return err_per_point
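# Illustrative only: a self-contained sketch of how ransac() and
# LinearLeastSquaresModel fit together, on synthetic data instead of the CSV
# pipeline below; the parameter values (n=20, k=100, t=0.05, d=50) are made up
# here and come from ransac.conf in the real run.
def _example_ransac_fit():
    xs = numpy.linspace(-1.0, 1.0, 200)
    ys = 2.0 * xs + 0.5 + numpy.random.normal(0, 0.01, xs.size)
    ys[::10] += 1.0                       # inject a few obvious outliers
    demo_data = numpy.vstack((xs, ys)).T  # same (x, y) column layout as all_data
    demo_model = LinearLeastSquaresModel([0], [1], debug=False)
    fit, info = ransac(demo_data, demo_model, 20, 100, 0.05, 50)
    return fit, info                      # fit[0,0] ~ slope 2.0, fit[1,0] ~ intercept 0.5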
class qqExmail:
def __init__(self):
self.user = 'zsb@cuteguide.cn'
self.passwd = 'zhou111Qt'
|
str = 'FYI: This mail is sent from a Ransac dev\r\n'
str += 'Which IP addr is %s'%my_ip[0]
txt = MIMEText(str)
message.attach(txt)
if self.tag is not None:
message['Subject'] = Header(self.tag,'utf-8')
if self.user is not None:
message['From'] = Header('RansacDev<%s>'%self.user, 'utf-8')
if len(self.to_list) > 0:
message['To'] = Header(';'.join(self.to_list), 'utf-8')
if len(self.cc_list) > 0:
message['Cc'] = Header(';'.join(self.cc_list), 'utf-8')
if self.doc:
fn = os.path.basename( self.doc )
with open(self.doc,'rb') as f:
doc = MIMEText(f.read(), 'base64', 'utf-8')
doc["Content-Type"] = 'application/octet-stream'
doc["Content-Disposition"] = 'attachment; filename="%s"'%fn
message.attach(doc)
return message.as_string()
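# Illustrative only: one hypothetical way to mail the result file with qqExmail;
# credentials and recipients stay at the class defaults, and the attachment path
# mirrors the A_result.txt file written in __main__ below.
def _example_mail_result(result_path):
    mailer = qqExmail()
    mailer.tag = 'Ransac result'   # mail subject
    mailer.doc = result_path       # e.g. LOCAL_PATH + 'A_result.txt'
    return mailer.send()           # True on success, False otherwise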
if __name__=='__main__':
CONFIG_FILE = 'ransac.conf'
try: #Get configurations from the config file
config = ConfigParser()
config.read( CONFIG_FILE )
rs_n = config.getint( 'RANSAC', 'MIN_NUM' )
rs_k = config.getint( 'RANSAC', 'MAX_ITR' )
t_str = config.get( 'RANSAC', 'THRES' )
rs_t = float( t_str )
rs_d = config.getint( 'RANSAC', 'N_CLOSE' )
I_STR = config.get( 'RANSAC', 'I_CONST' )
I_CONST = float( I_STR )
LOCAL_PATH = config.get( 'RANSAC', 'DATA_PATH' )
BASE_FILE = config.get( 'RANSAC', 'BASE_FILE' )
BASE_DATE = config.get( 'RANSAC', 'BASE_DATE' )
BS_DATE = time.strptime( BASE_DATE, '%Y/%m/%d' )
except Exception as e:
exit(1)
LOG_FILE = LOCAL_PATH + 'log' + time.strftime( '%y%m%d.log', time.localtime())
n_inputs = 1
n_outputs = 1
fx = LOCAL_PATH + BASE_FILE
dataX, nameX = GetDataFromCSV( fx )
file_list = os.popen( 'ls %s*.txt'%LOCAL_PATH ).readlines()
lstResult = []
for fn in file_list:
File_Y = fn.rstrip('\n')
if File_Y == fx:
continue
dataY, nameY = GetDataFromCSV( File_Y )
dataXY = GetCleanData( dataX, dataY )
if len(dataXY) < 2*rs_d:
continue
all_data = numpy.array( dataXY )
dx = all_data[:,0]
mx = dx.mean()
if mx
|
self.to_list = ['sunber.chou@qq.com']
self.cc_list = ['zhousongbo@hanmingtech.com']
self.tag = 'Finally, Ransac get result!'
self.doc = None
return
def send(self):
ret = True
try:
mail_host = smtplib.SMTP_SSL('smtp.exmail.qq.com', port=465)
mail_host.login(self.user,self.passwd)
receiver = self.to_list + self.cc_list
mail_host.sendmail(self.user, receiver, self.get_attach())
mail_host.close()
except Exception as e:
ret = False
return ret
def get_attach(self):
message = MIMEMultipart()
my_ip = os.popen('hostname -I').readlines()
|
identifier_body
|
myRansac.py
|
fits well to data
#Return:
# bestfit - model parameters which best fit the data (or nil if no good model is found)
def ransac(data,model,n,k,t,d):
iterations = 0
bestfit = None
besterr = numpy.inf
best_inlier_idxs = None
best_d = d
while iterations < k:
maybe_idxs, test_idxs = random_partition(n,data.shape[0])
maybeinliers = data[maybe_idxs,:]
test_points = data[test_idxs]
maybemodel = model.fit(maybeinliers)
test_err = model.get_error( test_points, maybemodel)
also_idxs = test_idxs[test_err < t] # select indices of rows with accepted points
alsoinliers = data[also_idxs,:]
if len(alsoinliers) > d:
betterdata = numpy.concatenate( (maybeinliers, alsoinliers) )
bettermodel = model.fit(betterdata)
better_errs = model.get_error( betterdata, bettermodel)
thiserr = numpy.mean( better_errs )
this_d = len(alsoinliers)
if this_d > best_d:
best_d = this_d
bestfit = bettermodel
besterr = thiserr
best_inlier_idxs = numpy.concatenate( (maybe_idxs, also_idxs) )
iterations+=1
if bestfit is None:
log_msg("Did not meet fit acceptance criteria")
return bestfit, {'inliers':best_inlier_idxs, 'lenth': best_d}
#return n random rows of data (and also the other len(data)-n rows)
def random_partition(n,n_data):
all_idxs = numpy.arange( n_data )
numpy.random.shuffle(all_idxs)
idxs1 = all_idxs[:n]
idxs2 = all_idxs[n:]
return idxs1, idxs2
#linear system solved using linear least squares
#This class serves as an example that fulfills the model interface
#needed by the ransac() function.
class LinearLeastSquaresModel:
def __init__(self,input_columns,output_columns,debug=False):
self.input_columns = input_columns
self.output_columns = output_columns
self.debug = debug
def fit(self, data):
A0 = numpy.vstack([data[:,i] for i in self.input_columns])[0]
A = numpy.vstack([A0, numpy.ones(len(A0))]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
x,resids,rank,s = numpy.linalg.lstsq(A,B)
return x
def get_error( self, data, model):
A = numpy.vstack([data[:,i] for i in self.input_columns]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
#B_fit = numpy.dot(A,model)
B_fit = A * model[0][0] + model[1][0]
err_per_point = numpy.sum((B-B_fit)**2,axis=1) # sum squared error per row
return err_per_point
class qqExmail:
def __init__(self):
self.user = 'zsb@cuteguide.cn'
self.passwd = 'zhou111Qt'
self.to_list = ['sunber.chou@qq.com']
self.cc_list = ['zhousongbo@hanmingtech.com']
self.tag = 'Finally, Ransac get result!'
self.doc = None
return
def send(self):
ret = True
try:
mail_host = smtplib.SMTP_SSL('smtp.exmail.qq.com', port=465)
mail_host.login(self.user,self.passwd)
receiver = self.to_list + self.cc_list
mail_host.sendmail(self.user, receiver, self.get_attach())
mail_host.close()
except Exception as e:
ret = False
return ret
def get_attach(self):
message = MIMEMultipart()
my_ip = os.popen('hostname -I').readlines()
str = 'FYI: This mail is sent from a Ransac dev\r\n'
str += 'Which IP addr is %s'%my_ip[0]
txt = MIMEText(str)
message.attach(txt)
if self.tag is not None:
message['Subject'] = Header(self.tag,'utf-8')
if self.user is not None:
message['From'] = Header('RansacDev<%s>'%self.user, 'utf-8')
if len(self.to_list) > 0:
message['To'] = Header(';'.join(self.to_list), 'utf-8')
if len(self.cc_list) > 0:
message['Cc'] = Header(';'.join(self.cc_list), 'utf-8')
if self.doc:
fn = os.path.basename( self.doc )
with open(self.doc,'rb') as f:
doc = MIMEText(f.read(), 'base64', 'utf-8')
doc["Content-Type"] = 'application/octet-stream'
doc["Content-Disposition"] = 'attachment; filename="%s"'%fn
message.attach(doc)
return message.as_string()
if __name__=='__main__':
CONFIG_FILE = 'ransac.conf'
try: #Get configurations from the config file
config = ConfigParser()
config.read( CONFIG_FILE )
rs_n = config.getint( 'RANSAC', 'MIN_NUM' )
rs_k = config.getint( 'RANSAC', 'MAX_ITR' )
t_str = config.get( 'RANSAC', 'THRES' )
rs_t = float( t_str )
rs_d = config.getint( 'RANSAC', 'N_CLOSE' )
I_STR = config.get( 'RANSAC', 'I_CONST' )
I_CONST = float( I_STR )
LOCAL_PATH = config.get( 'RANSAC', 'DATA_PATH' )
BASE_FILE = config.get( 'RANSAC', 'BASE_FILE' )
BASE_DATE = config.get( 'RANSAC', 'BASE_DATE' )
BS_DATE = time.strptime( BASE_DATE, '%Y/%m/%d' )
except Exception as e:
exit(1)
LOG_FILE = LOCAL_PATH + 'log' + time.strftime( '%y%m%d.log', time.localtime())
n_inputs = 1
n_outputs = 1
fx = LOCAL_PATH + BASE_FILE
dataX, nameX = GetDataFromCSV( fx )
file_list = os.popen( 'ls %s*.txt'%LOCAL_PATH ).readlines()
lstResult = []
for fn in file_list:
File_Y = fn.rstrip('\n')
if File_Y == fx:
continue
dataY, nameY = GetDataFromCSV( File_Y )
dataXY = GetCleanData( dataX, dataY )
if len(dataXY) < 2*rs_d:
continue
all_data = numpy.array( dataXY )
dx = all_data[:,0]
mx = dx.mean()
if mx == 0:
log_msg( 'mean x is zero' )
break
dx = (dx - mx )/mx
dy = all_data[:,1]
my = dy.mean()
if my == 0:
log_msg( 'mean y is zero' )
continue
dy = (dy - my)/my
all_data = numpy.vstack(( dx, dy )).T
input_columns = range(n_inputs) # the first columns of the array
output_columns = [n_inputs+i for i in range(n_outputs)] # the last columns of the array
model = LinearLeastSquaresModel(input_columns,output_columns,debug=False)
log_msg( 'Deal with %s.'%File_Y )
# run RANSAC algorithm
ransac_fit, ransac_data = ransac(
all_data, model, rs_n, rs_k, rs_t, rs_d ) # misc. parameters
if ransac_fit is None: continue
ransac_value = ransac_fit[0,0]
ransac_rest = ransac_fit[1,0]
r_idx = os.path.basename( File_Y )[ :-4]
fnResult = LOCAL_PATH + 'o' + r_idx + '.csv'
item = [r_idx, dx.size, nameY, ransac_value, ransac_rest, ransac_data['lenth']]
r_dta = float( 0 )
with open( fnResult, 'w' ) as fpResult:
for i in range( dx.size ):
tmp = dy[i]-dx[i] * ransac_value-ransac_rest
r_dta = r_dta * ( 1-I_CONST ) + tmp * I_CONST
fpResult.write( '%.6f, %.6f, %.6f, %.6f\r\n'%(
dx[i], dy[i], tmp, r_dta ))
item.append( tmp )
item.append( r_dta )
lstResult.append( item )
#End to 'for' loop
lstResult.sort(key=lambda x:x[7], reverse = True )
|
fnList = LOCAL_PATH + 'A_result.txt'
with open( fnList, 'w', encoding='utf-8') as fw_p:
|
random_line_split
|
|
myRansac.py
|
[idx] ) is None: continue
new_d = time.strptime( lstr[0], '%Y/%m/%d' )
if new_d < BS_DATE: continue
b = float( lstr[idx] )
data.append( [lstr[0], b] )
return data, csv_name
def GetC
|
ta_x, data_y ):
i = 0
j = 0
data_out = []
while i < len( data_x ) and j < len( data_y ):
item_x = data_x[i]
item_y = data_y[j]
tx = time.strptime( item_x[0], '%Y/%m/%d' )
ty = time.strptime( item_y[0], '%Y/%m/%d' )
if tx < ty:
i += 1
continue
if tx > ty:
j += 1
continue
data_out.append([item_x[1], item_y[1] ])
i += 1
j += 1
#end of while loop
return data_out
def log_msg( str = '' ):
if str == '': return
time_string = time.strftime( "%Y-%m-%d %X", time.localtime())
with open( LOG_FILE,'a' ) as log_file:
log_file.write( time_string + ': ' + str + '\r\n' )
return
#fit model parameters to data using the RANSAC algorithm
#This implementation written from pseudocode found at
#http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182
#Given:
# data - a set of observed data points
# model - a model that can be fitted to data points
# n - the minimum number of data values required to fit the model
# k - the maximum number of iterations allowed in the algorithm
# t - a threshold value for determining when a data point fits a model
# d - the number of close data values required to assert that a model fits well to data
#Return:
# bestfit - model parameters which best fit the data (or nil if no good model is found)
def ransac(data,model,n,k,t,d):
iterations = 0
bestfit = None
besterr = numpy.inf
best_inlier_idxs = None
best_d = d
while iterations < k:
maybe_idxs, test_idxs = random_partition(n,data.shape[0])
maybeinliers = data[maybe_idxs,:]
test_points = data[test_idxs]
maybemodel = model.fit(maybeinliers)
test_err = model.get_error( test_points, maybemodel)
also_idxs = test_idxs[test_err < t] # select indices of rows with accepted points
alsoinliers = data[also_idxs,:]
if len(alsoinliers) > d:
betterdata = numpy.concatenate( (maybeinliers, alsoinliers) )
bettermodel = model.fit(betterdata)
better_errs = model.get_error( betterdata, bettermodel)
thiserr = numpy.mean( better_errs )
this_d = len(alsoinliers)
if this_d > best_d:
best_d = this_d
bestfit = bettermodel
besterr = thiserr
best_inlier_idxs = numpy.concatenate( (maybe_idxs, also_idxs) )
iterations+=1
if bestfit is None:
log_msg("Did not meet fit acceptance criteria")
return bestfit, {'inliers':best_inlier_idxs, 'lenth': best_d}
#return n random rows of data (and also the other len(data)-n rows)
def random_partition(n,n_data):
all_idxs = numpy.arange( n_data )
numpy.random.shuffle(all_idxs)
idxs1 = all_idxs[:n]
idxs2 = all_idxs[n:]
return idxs1, idxs2
#linear system solved using linear least squares
#This class serves as an example that fulfills the model interface
#needed by the ransac() function.
class LinearLeastSquaresModel:
def __init__(self,input_columns,output_columns,debug=False):
self.input_columns = input_columns
self.output_columns = output_columns
self.debug = debug
def fit(self, data):
A0 = numpy.vstack([data[:,i] for i in self.input_columns])[0]
A = numpy.vstack([A0, numpy.ones(len(A0))]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
x,resids,rank,s = numpy.linalg.lstsq(A,B)
return x
def get_error( self, data, model):
A = numpy.vstack([data[:,i] for i in self.input_columns]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
#B_fit = numpy.dot(A,model)
B_fit = A * model[0][0] + model[1][0]
err_per_point = numpy.sum((B-B_fit)**2,axis=1) # sum squared error per row
return err_per_point
class qqExmail:
def __init__(self):
self.user = 'zsb@cuteguide.cn'
self.passwd = 'zhou111Qt'
self.to_list = ['sunber.chou@qq.com']
self.cc_list = ['zhousongbo@hanmingtech.com']
self.tag = 'Finally, Ransac get result!'
self.doc = None
return
def send(self):
ret = True
try:
mail_host = smtplib.SMTP_SSL('smtp.exmail.qq.com', port=465)
mail_host.login(self.user,self.passwd)
receiver = self.to_list + self.cc_list
mail_host.sendmail(self.user, receiver, self.get_attach())
mail_host.close()
except Exception as e:
ret = False
return ret
def get_attach(self):
message = MIMEMultipart()
my_ip = os.popen('hostname -I').readlines()
str = 'FYI: This mail is sent from a Ransac dev\r\n'
str += 'Which IP addr is %s'%my_ip[0]
txt = MIMEText(str)
message.attach(txt)
if self.tag is not None:
message['Subject'] = Header(self.tag,'utf-8')
if self.user is not None:
message['From'] = Header('RansacDev<%s>'%self.user, 'utf-8')
if len(self.to_list) > 0:
message['To'] = Header(';'.join(self.to_list), 'utf-8')
if len(self.cc_list) > 0:
message['Cc'] = Header(';'.join(self.cc_list), 'utf-8')
if self.doc:
fn = os.path.basename( self.doc )
with open(self.doc,'rb') as f:
doc = MIMEText(f.read(), 'base64', 'utf-8')
doc["Content-Type"] = 'application/octet-stream'
doc["Content-Disposition"] = 'attachment; filename="%s"'%fn
message.attach(doc)
return message.as_string()
if __name__=='__main__':
CONFIG_FILE = 'ransac.conf'
try: #Get configurations from the config file
config = ConfigParser()
config.read( CONFIG_FILE )
rs_n = config.getint( 'RANSAC', 'MIN_NUM' )
rs_k = config.getint( 'RANSAC', 'MAX_ITR' )
t_str = config.get( 'RANSAC', 'THRES' )
rs_t = float( t_str )
rs_d = config.getint( 'RANSAC', 'N_CLOSE' )
I_STR = config.get( 'RANSAC', 'I_CONST' )
I_CONST = float( I_STR )
LOCAL_PATH = config.get( 'RANSAC', 'DATA_PATH' )
BASE_FILE = config.get( 'RANSAC', 'BASE_FILE' )
BASE_DATE = config.get( 'RANSAC', 'BASE_DATE' )
BS_DATE = time.strptime( BASE_DATE, '%Y/%m/%d' )
except Exception as e:
exit(1)
LOG_FILE = LOCAL_PATH + 'log' + time.strftime( '%y%m%d.log', time.localtime())
n_inputs = 1
n_outputs = 1
fx = LOCAL_PATH + BASE_FILE
dataX, nameX = GetDataFromCSV( fx )
file_list = os.popen( 'ls %s*.txt'%LOCAL_PATH ).readlines()
lstResult = []
for fn in file_list:
File_Y = fn.rstrip('\n')
if File_Y == fx:
continue
dataY, nameY = GetDataFromCSV( File_Y )
dataXY = GetCleanData( dataX, dataY )
if len(dataXY) < 2*rs_d:
continue
all_data = numpy.array( dataXY )
dx = all_data[:,0]
mx = dx.mean()
if mx ==
|
leanData( da
|
identifier_name
|
myRansac.py
|
[idx] ) is None: continue
new_d = time.strptime( lstr[0], '%Y/%m/%d' )
if new_d < BS_DATE: continue
b = float( lstr[idx] )
data.append( [lstr[0], b] )
return data, csv_name
def GetCleanData( data_x, data_y ):
i = 0
j = 0
data_out = []
while i < len( data_x ) and j < len( data_y ):
item_x = data_x[i]
item_y = data_y[j]
tx = time.strptime( item_x[0], '%Y/%m/%d' )
ty = time.strptime( item_y[0], '%Y/%m/%d' )
if tx < ty:
i += 1
continue
if tx > ty:
j += 1
continue
data_out.append([item_x[1], item_y[1] ])
i += 1
j += 1
#end of while loop
return data_out
def log_msg( str = '' ):
if str == '': return
time_string = time.strftime( "%Y-%m-%d %X", time.localtime())
with open( LOG_FILE,'a' ) as log_file:
log_file.write( time_string + ': ' + str + '\r\n' )
return
#fit model parameters to data using the RANSAC algorithm
#This implementation written from pseudocode found at
#http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182
#Given:
# data - a set of observed data points
# model - a model that can be fitted to data points
# n - the minimum number of data values required to fit the model
# k - the maximum number of iterations allowed in the algorithm
# t - a threshold value for determining when a data point fits a model
# d - the number of close data values required to assert that a model fits well to data
#Return:
# bestfit - model parameters which best fit the data (or nil if no good model is found)
def ransac(data,model,n,k,t,d):
iterations = 0
bestfit = None
besterr = numpy.inf
best_inlier_idxs = None
best_d = d
while iterations < k:
maybe_idxs, test_idxs = random_partition(n,data.shape[0])
maybeinliers = data[maybe_idxs,:]
test_points = data[test_idxs]
maybemodel = model.fit(maybeinliers)
test_err = model.get_error( test_points, maybemodel)
also_idxs = test_idxs[test_err < t] # select indices of rows with accepted points
alsoinliers = data[also_idxs,:]
if len(alsoinliers) > d:
betterdata = numpy.concatenate( (maybeinliers, alsoinliers) )
bettermodel = model.fit(betterdata)
better_errs = model.get_error( betterdata, bettermodel)
thiserr = numpy.mean( better_errs )
this_d = len(alsoinliers)
if this_d > best_d:
best_d = this_d
bestfit = bettermodel
besterr = thiserr
best_inlier_idxs = numpy.concatenate( (maybe_idxs, also_idxs) )
iterations+=1
if bestfit is None:
log_msg("Did not meet fit acceptance criteria")
return bestfit, {'inliers':best_inlier_idxs, 'lenth': best_d}
#return n random rows of data (and also the other len(data)-n rows)
def random_partition(n,n_data):
all_idxs = numpy.arange( n_data )
numpy.random.shuffle(all_idxs)
idxs1 = all_idxs[:n]
idxs2 = all_idxs[n:]
return idxs1, idxs2
#linear system solved using linear least squares
#This class serves as an example that fulfills the model interface
#needed by the ransac() function.
class LinearLeastSquaresModel:
def __init__(self,input_columns,output_columns,debug=False):
self.input_columns = input_columns
self.output_columns = output_columns
self.debug = debug
def fit(self, data):
A0 = numpy.vstack([data[:,i] for i in self.input_columns])[0]
A = numpy.vstack([A0, numpy.ones(len(A0))]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
x,resids,rank,s = numpy.linalg.lstsq(A,B)
return x
def get_error( self, data, model):
A = numpy.vstack([data[:,i] for i in self.input_columns]).T
B = numpy.vstack([data[:,i] for i in self.output_columns]).T
#B_fit = numpy.dot(A,model)
B_fit = A * model[0][0] + model[1][0]
err_per_point = numpy.sum((B-B_fit)**2,axis=1) # sum squared error per row
return err_per_point
class qqExmail:
def __init__(self):
self.user = 'zsb@cuteguide.cn'
self.passwd = 'zhou111Qt'
self.to_list = ['sunber.chou@qq.com']
self.cc_list = ['zhousongbo@hanmingtech.com']
self.tag = 'Finally, Ransac get result!'
self.doc = None
return
def send(self):
ret = True
try:
mail_host = smtplib.SMTP_SSL('smtp.exmail.qq.com', port=465)
mail_host.login(self.user,self.passwd)
receiver = self.to_list + self.cc_list
mail_host.sendmail(self.user, receiver, self.get_attach())
mail_host.close()
except Exception as e:
ret = False
return ret
def get_attach(self):
message = MIMEMultipart()
my_ip = os.popen('hostname -I').readlines()
str = 'FYI: This mail is sent from a Ransac dev\r\n'
str += 'Which IP addr is %s'%my_ip[0]
txt = MIMEText(str)
message.attach(txt)
if self.tag is not None:
message['Subject'] = Header(self.tag,'utf-8')
if self.user is not None:
message['From'] = Header('RansacDev<%s>'%self.user, 'utf-8')
if len(self.to_list) > 0:
message['To'] = Header(';'.join(self.to_list), 'utf-8')
if len(self.cc_list) > 0:
|
), 'utf-8')
if self.doc:
fn = os.path.basename( self.doc )
with open(self.doc,'rb') as f:
doc = MIMEText(f.read(), 'base64', 'utf-8')
doc["Content-Type"] = 'application/octet-stream'
doc["Content-Disposition"] = 'attachment; filename="%s"'%fn
message.attach(doc)
return message.as_string()
if __name__=='__main__':
CONFIG_FILE = 'ransac.conf'
try: #Get configurations from the config file
config = ConfigParser()
config.read( CONFIG_FILE )
rs_n = config.getint( 'RANSAC', 'MIN_NUM' )
rs_k = config.getint( 'RANSAC', 'MAX_ITR' )
t_str = config.get( 'RANSAC', 'THRES' )
rs_t = float( t_str )
rs_d = config.getint( 'RANSAC', 'N_CLOSE' )
I_STR = config.get( 'RANSAC', 'I_CONST' )
I_CONST = float( I_STR )
LOCAL_PATH = config.get( 'RANSAC', 'DATA_PATH' )
BASE_FILE = config.get( 'RANSAC', 'BASE_FILE' )
BASE_DATE = config.get( 'RANSAC', 'BASE_DATE' )
BS_DATE = time.strptime( BASE_DATE, '%Y/%m/%d' )
except Exception as e:
exit(1)
LOG_FILE = LOCAL_PATH + 'log' + time.strftime( '%y%m%d.log', time.localtime())
n_inputs = 1
n_outputs = 1
fx = LOCAL_PATH + BASE_FILE
dataX, nameX = GetDataFromCSV( fx )
file_list = os.popen( 'ls %s*.txt'%LOCAL_PATH ).readlines()
lstResult = []
for fn in file_list:
File_Y = fn.rstrip('\n')
if File_Y == fx:
continue
dataY, nameY = GetDataFromCSV( File_Y )
dataXY = GetCleanData( dataX, dataY )
if len(dataXY) < 2*rs_d:
continue
all_data = numpy.array( dataXY )
dx = all_data[:,0]
mx = dx.mean()
if mx ==
|
message['Cc'] = Header(';'.join(self.cc_list
|
conditional_block
|
3DMain.js
|
*/
function setScreen(n, data, projManager, projectId, currentLayerObjList) {
planArr = data;
if (n == 1) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("s1 s2 s3 s4").addClass("hide");
$("#earthDiv0").removeClass("hide").addClass("s1");
for (var i = parent.earthArray.length - 1; i > 0; i--) {
parent.earthArray[i].Suicide();
parent.earthArray.pop();
}
if(bollonArr&&bollonArr.length>0){
for(var i=0;i<bollonArr.length;i++){
if( bollonArr[i].Guid!=""){
bollonArr[i].DestroyObject();
}
}
}
bollonArr = [];
$("#earthDiv2, #earthDiv1").empty();
document.getElementById("earthDiv0").style.width="100%";
document.getElementById("earthDiv0").style.height="100%";
} else if (n == 2) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("hide s1 s2 s3 s4");
//Shrink the first globe to the left
//$("#earthDiv0").addClass("s2");//This line sometimes has no effect (the div does not shrink), so the two lines below force the width/height percentages instead
document.getElementById("earthDiv0").style.width="50%";
document.getElementById("earthDiv0").style.height="100%";
document.getElementById("earthDiv1").style.width="50%";
document.getElementById("earthDiv1").style.height="100%";
//Load the second globe on the right
$("#earthDiv1").addClass("s2");
//Hide the third globe
$("#earthDiv2").addClass("hide");
createEarth("earth1", document.getElementById("earthDiv1"), data, projManager, projectId, currentLayerObjList);
} else if (n == 3) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("hide s1 s2 s4").addClass("s3");
document.getElementById("earthDiv0").style.width="33.3%";
document.getElementById("earthDiv0").style.height="100%";
document.getElementById("earthDiv1").style.width="33.3%";
document.getElementById("earthDiv1").style.height="100%";
document.getElementById("earthDiv2").style.width="33.3%";
document.getElementById("earthDiv2").style.height="100%";
createEarth("earth1", document.getElementById("earthDiv1"), data, projManager, projectId, currentLayerObjList, true);
}
};
function createEarth3(id, div, data, projManager, projectId, currentLayerObjList){
var earth = document.createElement("object");
earth.id = id;
earth.name = id;
earth.classid = "CLSID:EA3EA17C-5724-4104-94D8-4EECBD352964";
earth.style.width = "100%";
earth.style.height = "100%";
div.appendChild(earth);
earth.Event.OnCreateEarth = function (searth) {
earth.Event.OnCreateEarth = function () {};
parent.earthArray.push(searth);
searth.Event.OnDocumentChanged = function (){
searth.Event.OnDocumentChanged = function (){};
//First hide all layers; show only the database layers
if(parent.currentPrjGuid){
var layer = searth.LayerManager.LayerList;
if(layer){
var childCount = layer.GetChildCount();
for (var i = 0; i < childCount; i++) {
var childLayer = layer.GetChildAt(i);
if (childLayer.Guid == parent.currentPrjGuid) {
childLayer.Visibility = false;
}
}
}
}
//At this point earth.LayerManager and its child properties/methods are available
if(data && data.length){
//Load data into searth
var thirdId = data[2].id;
var xzId = parent.parcelLayerGuid2;
//parent.planLayerIDs;
if(thirdId == xzId){
//This means it is the current state
parent.loadXZLayers(true, earth2);
}else{
//This means it is a plan
setTimeout(function(){
//earth1 loads the plan-2 layers
var layerIDs = projManager.getLayerIdsByPlanId(thirdId);
parent.applyRecords(true, layerIDs, earth2, parent.parcelLayerGuid, false);
},400);
}
}
//cy 20150508 added
shareDigLayer(searth);
var pose = getPose(parent.earth);
searth.GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
};
searth.Load(CITYPLAN_config.server.ip, CITYPLAN_config.server.screen);
// searth.Environment.SetDatabaseLink(CITYPLAN_config.server.dataServerIP);
};
}
/**Reviewed
* Share the excavated terrain
* @return {[type]} [description]
*/
function shareDigLayer(curEarth){
//Share the excavation layer
if(parent.demObj && parent.demObj.length){
var guid = curEarth.Factory.CreateGuid();
var tempDemPath = curEarth.RootPath + "temp\\terr\\terrain\\";
var rect = parent.demObj[0];
var levelMin = parent.demObj[1];
var levelMax = parent.demObj[2];
var demTempLayer = curEarth.Factory.CreateDEMLayer(guid,
"TempTerrainLayer",
tempDemPath,
rect,
levelMin,
levelMax, 1000);
demTempLayer.Visibility = true;
curEarth.AttachObject(demTempLayer);
}
};
/**Reviewed y
/**Reviewed
/**
* Create an Earth object from the given id and div container, and return the created object
* @param id
* @param div
*/
function createEarth(id, div, data, projManager, projectId, currentLayerObjList, isThird) {
var earth = document.createElement("object");
earth.id = id;
earth.name = id;
earth.classid = "CLSID:EA3EA17C-5724-4104-94D8-4EECBD352964";
earth.style.width = "100%";
earth.style.height = "100%";
div.appendChild(earth);
earth.Event.OnCreateEarth = function (searth) {
earth.Event.OnCreateEarth = function () {};
parent.earthArray.push(searth);
searth.Event.OnDocumentChanged = function (){
searth.Event.OnDocumentChanged = function (){};
if(isThird){//Create the third globe
createEarth3("earth2", document.getElementById("earthDiv2"), data, projManager, projectId, currentLayerObjList);
}
//First hide all layers; show only the database layers
if(parent.currentPrjGuid){
var layer = searth.LayerManager.LayerList;
if(layer){
var childCount = layer.GetChildCount();
for (var i = 0; i < childCount; i++) {
var childLayer = layer.GetChildAt(i);
if (childLayer.Guid == parent.currentPrjGuid) {
childLayer.Visibility = false;
}
}
}
}
//At this point earth.LayerManager and its child properties/methods are available
//Control which data is displayed
if(data && data.length){
//Load data into searth
var firstId = data[0].id;
var secordId = data[1].id;
var xzId = parent.parcelLayerGuid2;
//parent.planLayerIDs;
if(firstId == xzId){
//The first one is the current state
}else if(secordId == xzId){
//Load the first plan, firstId
setTimeout(function(){
projManager.showAll(projectId, firstId, true, true, false, false,true);
},100);
//The second one is the current state (secordId); just add all of the current-state database layers
parent.loadXZLayers(true, earth1);
}else{
//Both are plans
setTimeout(function(){
projManager.showAll(projectId, firstId, true, true, false, false,true);
},100);
setTimeout(function(){
//earth1 loads the plan-2 layers
var layerIDs = projManager.getLayerIdsByPlanId(secordId);
parent.applyRecords(true, layerIDs, earth1, parent.parcelLayerGuid, false);
},200);
}
}
shareDigLayer(searth);
//Synchronize the viewpoint
var pose = getPose(parent.earth);
searth.GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
};
searth.Load(CITYPLAN_config.server.ip, CITYPLAN_config.server.screen);
// searth.Environment.SetDatabaseLink(CITYPLAN_config.server.dataServerIP);
};
}
var htmlArr=[];
function showPlan
|
* @param n number of screens
|
random_line_split
|
|
3DMain.js
|
, firstId, true, true, false, false,true);
},100);
//The second one is the current state (secordId); just add all of the current-state database layers
parent.loadXZLayers(true, earth1);
}else{
//Both are plans
setTimeout(function(){
projManager.showAll(projectId, firstId, true, true, false, false,true);
},100);
setTimeout(function(){
//earth1 loads the plan-2 layers
var layerIDs = projManager.getLayerIdsByPlanId(secordId);
parent.applyRecords(true, layerIDs, earth1, parent.parcelLayerGuid, false);
},200);
}
}
shareDigLayer(searth);
//Synchronize the viewpoint
var pose = getPose(parent.earth);
searth.GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
};
searth.Load(CITYPLAN_config.server.ip, CITYPLAN_config.server.screen);
// searth.Environment.SetDatabaseLink(CITYPLAN_config.server.dataServerIP);
};
}
var htmlArr=[];
function showPlanData(data,seearth,planData){
var path = location.pathname.substring(0, location.pathname.lastIndexOf("/"));
var url = location.protocol + "//" + location.hostname + path +
"/html/investigate/planData.html?id="+data.id;
var htmlBalloon = null;
var guid = seearth.Factory.CreateGuid();
htmlBalloon = seearth.Factory.CreateHtmlBalloon(guid, "balloon");
htmlBalloon.SetScreenLocation(0,0);
htmlBalloon.SetRectSize(290,290);
htmlBalloon.SetIsAddMargin(true);
htmlBalloon.SetIsAddBackgroundImage(true);
htmlBalloon.ShowNavigate(url);
bollonArr.push(htmlBalloon);
seearth.Event.OnHtmlNavigateCompleted = function (htmlId){
htmlArr.push({id:htmlId,obj:htmlBalloon});
setTimeout(function(){
htmlBalloon.InvokeScript("setTranScroll", planData);
},100);
//earth.Event.OnHtmlNavigateCompleted = function (){};
};
seearth.Event.OnHtmlBalloonFinished = function(){
if(htmlBalloon!=null){
htmlBalloon.DestroyObject();
htmlBalloon=null;
}
seearth.Event.OnHtmlBalloonFinished = function(){};
}
}
//Reviewed
function showIndex(tag,planData){
//earthArray
if(tag){
for(var i=0;i<planArr.length;i++){
showPlanData(planArr[i], parent.earthArray[i],planData);
}
} else{
if(bollonArr&&bollonArr.length>0){
for(var i=0;i<bollonArr.length;i++){
if( bollonArr[i].Guid!=""){
bollonArr[i].DestroyObject();
}
}
}
bollonArr = [];
}
}
/* Set linked navigation
* @param bSync true means the globes are linked (move in sync)
*/
function setSync(bSync) {
var i = 0;
var emptyFunction = function () {
};
if (bSync) { //linked
while (i < parent.earthArray.length) {
parent.earthArray[i].Event.OnLBDown = setFocus(i); // Register each globe's OnLBDown event [left button]
parent.earthArray[i].Event.OnMBDown = setFocus(i); // Register each globe's OnMBDown event [middle button]
i += 1;
}
gotoPose(0)(); // Move the other screens to the first screen's position
} else {
if(bollonArr&&bollonArr.length>0){
for(var s=0;s<bollonArr.length;s++){
if(bollonArr[s].Guid&&bollonArr[s].Guid!=""){
bollonArr[s].DestroyObject();
}
}
}
while (i < parent.earthArray.length) { // Unregister the events bound to each globe
parent.earthArray[i].Event.OnLBDown = emptyFunction;
parent.earthArray[i].Event.OnMBDown = emptyFunction;
parent.earthArray[i].Event.OnObserverChanged = emptyFunction;
i += 1;
}
gotoPose(0)(); // Move the other screens to the first screen's position
}
}
/**Reviewed
* Set linked navigation
* Register the current globe's OnObserverChanged event
* Unregister the other globes' OnObserverChanged events and bind their OnLBDown/OnMBDown, so that a left click makes that globe the current one
*/
function setFocus(i) {
return function () {
parent.earthArray[i].Event.OnObserverChanged = gotoPose(i);
for (var j = 0; j < parent.earthArray.length; j++) {
if (i != j) {
parent.earthArray[j].Event.OnObserverChanged = function () { };
parent.earthArray[j].Event.OnLBDown = setFocus(j);
parent.earthArray[j].Event.OnMBDown = setFocus(j);
}
}
};
}
/**Reviewed
* Move all non-primary globes to the current position of primary globe i
* @param i
* @return {Function}
*/
function gotoPose(i) {
setTimeout( function () {
var pose = getPose( parent.earthArray[i]);
var j = 0;
while (j < parent.earthArray.length) {
if (j != i) {
parent.earthArray[j].GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
}
j += 1;
}
setFocus(i);
},500);
return function () {
var pose = getPose( parent.earthArray[i]);
var j = 0;
while (j < parent.earthArray.length) {
if (j != i) {
parent.earthArray[j].GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 4);
}
j += 1;
}
setFocus(i);
}
}
/**
* Get the current position (pose) of earthObj
* @param earthObj
* @return {Object}
*/
function getPose(earthObj) {
var data = {};
if (earthObj) {
data.longitude = earthObj.GlobeObserver.Pose.Longitude;
data.latitude = earthObj.GlobeObserver.Pose.Latitude;
data.altitude = earthObj.GlobeObserver.Pose.Altitude;
data.heading = earthObj.GlobeObserver.Pose.heading;
data.tilt = earthObj.GlobeObserver.Pose.tilt;
data.roll = earthObj.GlobeObserver.Pose.roll;
}
return data;
}
/**
* Function: hide the profile-analysis chart
* Parameters: none
* Returns: nothing
*/
function hidenHtmlWindow() {
seearth.ShapeCreator.Clear();
testDiv.style.top = "55%";
testDiv.style.display = "none";
htmlWin.style.display = "none";
earthDiv0.style.height = "100%";
}
var chart = null; //profile-analysis chart object
var POINTARR = null; //profile-analysis data set
/**
* Function: show the profile-analysis chart
* Parameters: xCategories - numeric labels for the X axis; serieList - array of data series for the profile chart
* Returns: nothing
*/
function showProfileResult(xCategories,serieList,pointArr){
var v_rate = 1.0;
var v_height = document.body.clientHeight;
var v_flag = testDiv.style.top;
if (v_flag.indexOf("px") == -1) {
v_rate = parseFloat(v_flag) * 0.01;
} else {
v_rate = parseInt(v_flag) / v_height;
}
earthDiv0.style.height = v_rate * 100.0 + "%";
testDiv.style.display = "block";
var v_htmlwin_top = v_rate * v_height + 12;
htmlWin.style.top = v_htmlwin_top + "px";
var v_htmlwin_height = v_height - v_htmlwin_top;
htmlWin.style.height = v_htmlwin_height + "px";
htmlWin.style.display = "block";
if(chart != null){
chart.destroy();
}
chart = createChart(xCategories,serieList);
POINTARR = pointArr;
}
/**
* Function: create the profile-analysis chart
* Parameters: xCategories - numeric labels for the X axis; serieList - array of data series for the profile chart
* Returns: the profile-analysis chart object
*/
function createChart(xCategories,serieList){
var minValue = null;
var maxValue = null;
for(var i=0; i<serieList.length; i++){
var dataList = serieList[i].data;
for(var k=0; k<dataList.length; k++){
var dataValue = dataList[k];
if(minValue == null){
minValue = dataValue;
}el
|
se{
if(dataValue < minValue){
|
conditional_block
|
|
3DMain.js
|
if(htmlBalloons){
htmlBalloons.DestroyObject();
htmlBalloons=null;
}
var geoPoint = seearth.GlobeObserver.Pick(posX,posY);
var guid = seearth.Factory.CreateGuid();
htmlBalloons = seearth.Factory.CreateHtmlBalloon(guid, "balloon");
htmlBalloons.SetSphericalLocation(geoPoint.Longitude, geoPoint.Latitude, geoPoint.Altitude);
htmlBalloons.SetRectSize(380, 400);
var color = parseInt("0xffffff00");//0xccc0c0c0
htmlBalloons.SetTailColor(color);
htmlBalloons.SetIsAddCloseButton(true);
htmlBalloons.SetIsAddMargin(true);
htmlBalloons.SetIsAddBackgroundImage(true);
htmlBalloons.SetIsTransparence(true);
htmlBalloons.SetBackgroundAlpha(0xcc);
htmlBalloons.ShowHtml(html);
seearth.Event.OnHtmlBalloonFinished = function(){
if(htmlBalloons!=null){
htmlBalloons.DestroyObject();
htmlBalloons=null;
}
seearth.Event.OnHtmlBalloonFinished = function(){};
}
}
/**Reviewed
* Set up multi-screen display (plan comparison)
* @param n number of screens
*/
function setScreen(n, data, projManager, projectId, currentLayerObjList) {
planArr = data;
if (n == 1) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("s1 s2 s3 s4").addClass("hide");
$("#earthDiv0").removeClass("hide").addClass("s1");
for (var i = parent.earthArray.length - 1; i > 0; i--) {
parent.earthArray[i].Suicide();
parent.earthArray.pop();
}
if(bollonArr&&bollonArr.length>0){
for(var i=0;i<bollonArr.length;i++){
if( bollonArr[i].Guid!=""){
bollonArr[i].DestroyObject();
}
}
}
bollonArr = [];
$("#earthDiv2, #earthDiv1").empty();
document.getElementById("earthDiv0").style.width="100%";
document.getElementById("earthDiv0").style.height="100%";
} else if (n == 2) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("hide s1 s2 s3 s4");
//第一个球往左缩小
//$("#earthDiv0").addClass("s2");//此行代码有时不起作用 div并没有缩小 因此采用下面两行代码强行设置宽高比例!
document.getElementById("earthDiv0").style.width="50%";
document.getElementById("earthDiv0").style.height="100%";
document.getElementById("earthDiv1").style.width="50%";
document.getElementById("earthDiv1").style.height="100%";
//第二个球加载在右边
$("#earthDiv1").addClass("s2");
//隐藏第三个球
$("#earthDiv2").addClass("hide");
createEarth("earth1", document.getElementById("earthDiv1"), data, projManager, projectId, currentLayerObjList);
} else if (n == 3) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("hide s1 s2 s4").addClass("s3");
document.getElementById("earthDiv0").style.width="33.3%";
document.getElementById("earthDiv0").style.height="100%";
document.getElementById("earthDiv1").style.width="33.3%";
document.getElementById("earthDiv1").style.height="100%";
document.getElementById("earthDiv2").style.width="33.3%";
document.getElementById("earthDiv2").style.height="100%";
createEarth("earth1", document.getElementById("earthDiv1"), data, projManager, projectId, currentLayerObjList, true);
}
};
function createEarth3(id, div, data, projManager, projectId, currentLayerObjList){
var earth = document.createElement("object");
earth.id = id;
earth.name = id;
earth.classid = "CLSID:EA3EA17C-5724-4104-94D8-4EECBD352964";
earth.style.width = "100%";
earth.style.height = "100%";
div.appendChild(earth);
earth.Event.OnCreateEarth = function (searth) {
earth.Event.OnCreateEarth = function () {};
parent.earthArray.push(searth);
searth.Event.OnDocumentChanged = function (){
searth.Event.OnDocumentChanged = function (){};
//先隐藏所有的图层 只显示数据库图层
if(parent.currentPrjGuid){
var layer = searth.LayerManager.LayerList;
if(layer){
var childCount = layer.GetChildCount();
for (var i = 0; i < childCount; i++) {
var childLayer = layer.GetChildAt(i);
if (childLayer.Guid == parent.currentPrjGuid) {
childLayer.Visibility = false;
}
}
}
}
//这里面就可以获取到earth.LayerManager 及其下属的属性与方法
if(data && data.length){
//searth加载数据
var thirdId = data[2].id;
var xzId = parent.parcelLayerGuid2;
//parent.planLayerIDs;
if(thirdId == xzId){
//说明是现状
parent.loadXZLayers(true, earth2);
}else{
//说明是方案
setTimeout(function(){
//earth1 加载方案2图层
var layerIDs = projManager.getLayerIdsByPlanId(thirdId);
parent.applyRecords(true, layerIDs, earth2, parent.parcelLayerGuid, false);
},400);
}
}
//cy 20150508 加
shareDigLayer(searth);
var pose = getPose(parent.earth);
searth.GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
};
searth.Load(CITYPLAN_config.server.ip, CITYPLAN_config.server.screen);
// searth.Environment.SetDatabaseLink(CITYPLAN_config.server.dataServerIP);
};
}
/**Reviewed
* Share the excavated terrain
* @return {[type]} [description]
*/
function shareDigLayer(curEarth){
//开挖图层共享
if(parent.demObj && parent.demObj.length){
var guid = curEarth.Factory.CreateGuid();
var tempDemPath = curEarth.RootPath + "temp\\terr\\terrain\\";
var rect = parent.demObj[0];
var levelMin = parent.demObj[1];
var levelMax = parent.demObj[2];
var demTempLayer = curEarth.Factory.CreateDEMLayer(guid,
"TempTerrainLayer",
tempDemPath,
rect,
levelMin,
levelMax, 1000);
demTempLayer.Visibility = true;
curEarth.AttachObject(demTempLayer);
}
};
/**Reviewed y
/**Reviewed
/**
* Create an Earth object from the given id and div container, and return the created object
* @param id
* @param div
*/
function createEarth(id, div, data, projManager, projectId, currentLayerObjList, isThird) {
var earth = document.createElement("object");
earth.id = id;
earth.name = id;
earth.classid = "CLSID:EA3EA17C-5724-4104-94D8-4EECBD352964";
earth.style.width = "100%";
earth.style.height = "100%";
div.appendChild(earth);
earth.Event.OnCreateEarth = function (searth) {
earth.Event.OnCreateEarth = function () {};
parent.earthArray.push(searth);
searth.Event.OnDocumentChanged =
|
parent.htmlBalloon;
var posX = parseInt(pVal.substring(pVal.indexOf("<posX>")+6, pVal.indexOf("</posX>")));
var posY = parseInt(pVal.substring(pVal.indexOf("<posY>")+6, pVal.indexOf("</posY>")));
var loaclUrl = window.location.href.substring(0, window.location.href.lastIndexOf("/"));
var url = loaclUrl + "/res/content.htm";
var html = "";
html += "<html>";
html += "<bodys>";
html += "<table>";
html += "<tr><td align='right'>";
html += "<img id='dlg_close' src='" + loaclUrl + "/res/x.png'/>";
html += "</td></tr>";
html += "<tr><td>";
html += "<iframe src='" + url + "' width='320px' height='400px' border='1' frameborder='1' scrolling=auto></iframe>";
html += "</td></tr>";
html += "</table>";
//html += "<iframe src='" + url + "' width='320px' height='400px' border='1' frameborder='1' scrolling=auto></iframe>";
html += "</body>";
html += "</html>";
|
identifier_body
|
|
3DMain.js
|
shrinks, so the following two lines of code forcibly set the width/height ratio!
document.getElementById("earthDiv0").style.width="50%";
document.getElementById("earthDiv0").style.height="100%";
document.getElementById("earthDiv1").style.width="50%";
document.getElementById("earthDiv1").style.height="100%";
//load the second globe on the right
$("#earthDiv1").addClass("s2");
//hide the third globe
$("#earthDiv2").addClass("hide");
createEarth("earth1", document.getElementById("earthDiv1"), data, projManager, projectId, currentLayerObjList);
} else if (n == 3) {
$("#earthDiv0,#earthDiv1,#earthDiv2").removeClass("hide s1 s2 s4").addClass("s3");
document.getElementById("earthDiv0").style.width="33.3%";
document.getElementById("earthDiv0").style.height="100%";
document.getElementById("earthDiv1").style.width="33.3%";
document.getElementById("earthDiv1").style.height="100%";
document.getElementById("earthDiv2").style.width="33.3%";
document.getElementById("earthDiv2").style.height="100%";
createEarth("earth1", document.getElementById("earthDiv1"), data, projManager, projectId, currentLayerObjList, true);
}
};
function createEarth3(id, div, data, projManager, projectId, currentLayerObjList){
var earth = document.createElement("object");
earth.id = id;
ear
|
;
earth.classid = "CLSID:EA3EA17C-5724-4104-94D8-4EECBD352964";
earth.style.width = "100%";
earth.style.height = "100%";
div.appendChild(earth);
earth.Event.OnCreateEarth = function (searth) {
earth.Event.OnCreateEarth = function () {};
parent.earthArray.push(searth);
searth.Event.OnDocumentChanged = function (){
searth.Event.OnDocumentChanged = function (){};
//Hide all layers first; show only the database layers
if(parent.currentPrjGuid){
var layer = searth.LayerManager.LayerList;
if(layer){
var childCount = layer.GetChildCount();
for (var i = 0; i < childCount; i++) {
var childLayer = layer.GetChildAt(i);
if (childLayer.Guid == parent.currentPrjGuid) {
childLayer.Visibility = false;
}
}
}
}
//From here earth.LayerManager and its child properties/methods are available
if(data && data.length){
//searth loads the data
var thirdId = data[2].id;
var xzId = parent.parcelLayerGuid2;
//parent.planLayerIDs;
if(thirdId == xzId){
//it is the current-status (existing) data
parent.loadXZLayers(true, earth2);
}else{
//it is a plan
setTimeout(function(){
//earth1 loads the layers of plan 2
var layerIDs = projManager.getLayerIdsByPlanId(thirdId);
parent.applyRecords(true, layerIDs, earth2, parent.parcelLayerGuid, false);
},400);
}
}
//cy 2015-05-08 added
shareDigLayer(searth);
var pose = getPose(parent.earth);
searth.GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
};
searth.Load(CITYPLAN_config.server.ip, CITYPLAN_config.server.screen);
// searth.Environment.SetDatabaseLink(CITYPLAN_config.server.dataServerIP);
};
}
/** reviewed
* Share the excavation (dig) terrain layer
* @return {[type]} [description]
*/
function shareDigLayer(curEarth){
//share the excavation (dig) layer
if(parent.demObj && parent.demObj.length){
var guid = curEarth.Factory.CreateGuid();
var tempDemPath = curEarth.RootPath + "temp\\terr\\terrain\\";
var rect = parent.demObj[0];
var levelMin = parent.demObj[1];
var levelMax = parent.demObj[2];
var demTempLayer = curEarth.Factory.CreateDEMLayer(guid,
"TempTerrainLayer",
tempDemPath,
rect,
levelMin,
levelMax, 1000);
demTempLayer.Visibility = true;
curEarth.AttachObject(demTempLayer);
}
};
/** reviewed
* Create an Earth object by id inside the given div container and return the created object
* @param id
* @param div
*/
function createEarth(id, div, data, projManager, projectId, currentLayerObjList, isThird) {
var earth = document.createElement("object");
earth.id = id;
earth.name = id;
earth.classid = "CLSID:EA3EA17C-5724-4104-94D8-4EECBD352964";
earth.style.width = "100%";
earth.style.height = "100%";
div.appendChild(earth);
earth.Event.OnCreateEarth = function (searth) {
earth.Event.OnCreateEarth = function () {};
parent.earthArray.push(searth);
searth.Event.OnDocumentChanged = function (){
searth.Event.OnDocumentChanged = function (){};
if(isThird){//create the third globe
createEarth3("earth2", document.getElementById("earthDiv2"), data, projManager, projectId, currentLayerObjList);
}
//Hide all layers first; show only the database layers
if(parent.currentPrjGuid){
var layer = searth.LayerManager.LayerList;
if(layer){
var childCount = layer.GetChildCount();
for (var i = 0; i < childCount; i++) {
var childLayer = layer.GetChildAt(i);
if (childLayer.Guid == parent.currentPrjGuid) {
childLayer.Visibility = false;
}
}
}
}
//From here earth.LayerManager and its child properties/methods are available
//control which data is displayed
if(data && data.length){
//searth loads the data
var firstId = data[0].id;
var secordId = data[1].id;
var xzId = parent.parcelLayerGuid2;
//parent.planLayerIDs;
if(firstId == xzId){
//the first one is the current status
}else if(secordId == xzId){
//load the first plan, firstId
setTimeout(function(){
projManager.showAll(projectId, firstId, true, true, false, false,true);
},100);
//the second one is the current status (secordId); just add all the current-status database layers
parent.loadXZLayers(true, earth1);
}else{
//both are plans
setTimeout(function(){
projManager.showAll(projectId, firstId, true, true, false, false,true);
},100);
setTimeout(function(){
//earth1 loads the layers of plan 2
var layerIDs = projManager.getLayerIdsByPlanId(secordId);
parent.applyRecords(true, layerIDs, earth1, parent.parcelLayerGuid, false);
},200);
}
}
shareDigLayer(searth);
//synchronize the viewpoint
var pose = getPose(parent.earth);
searth.GlobeObserver.GotoLookat(pose.longitude, pose.latitude, pose.altitude,
pose.heading, pose.tilt, pose.roll, 0);
};
searth.Load(CITYPLAN_config.server.ip, CITYPLAN_config.server.screen);
// searth.Environment.SetDatabaseLink(CITYPLAN_config.server.dataServerIP);
};
}
var htmlArr=[];
function showPlanData(data,seearth,planData){
var path = location.pathname.substring(0, location.pathname.lastIndexOf("/"));
var url = location.protocol + "//" + location.hostname + path +
"/html/investigate/planData.html?id="+data.id;
var htmlBalloon = null;
var guid = seearth.Factory.CreateGuid();
htmlBalloon = seearth.Factory.CreateHtmlBalloon(guid, "balloon");
htmlBalloon.SetScreenLocation(0,0);
htmlBalloon.SetRectSize(290,290);
htmlBalloon.SetIsAddMargin(true);
htmlBalloon.SetIsAddBackgroundImage(true);
htmlBalloon.ShowNavigate(url);
bollonArr.push(htmlBalloon);
seearth.Event.OnHtmlNavigateCompleted = function (htmlId){
htmlArr.push({id:htmlId,obj:htmlBalloon});
setTimeout(function(){
htmlBalloon.InvokeScript("setTranScroll", planData);
},100);
//earth.Event.OnHtmlNavigateCompleted = function (){};
};
seearth.Event.OnHtmlBalloonFinished = function(){
if(htmlBalloon!=null){
htmlBalloon.DestroyObject();
htmlBalloon=null;
}
seearth.Event.OnHtmlBalloonFinished = function(){};
}
}
//reviewed
function showIndex(tag,planData){
//earthArray
if(tag){
for
|
th.name = id
|
identifier_name
|
pygit.py
|
list of repositories by running
master_directory : str
The absolute path to the directory
git_exec : str
The path to the git executable on the system
message : str
Commit message
Returns
--------
: Commands object
"""
def __str__(self):
return "Commands: {}: {}".format(self.name, self.dir)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __init__(self, repo_name, master_directory, git_exec=None, message="minor changes"):
self.name = repo_name
self.dir = master_directory
self.git_exec = git_exec
self.message = message
try:
os.chdir(self.dir)
except (FileNotFoundError, TypeError):
print("{} may have been moved.\n Run initialize() to update paths".format(self.name))
self.dir = os.getcwd()
def need_attention(self):
"""Return True if a repo status is not exactly same as that of remote"""
msg = ["not staged", "behind", "ahead", "Untracked"]
status_msg = self.status()
if any([each in status_msg for each in msg]):
return True
return False
def fetch(self):
"""git fetch"""
if self.git_exec:
process = Popen([self.git_exec, "git fetch"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git fetch", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
# output, error = process.communicate()
process.communicate()
def status(self):
"""git status"""
self.fetch() # always do a fetch before reporting status
if self.git_exec:
process = Popen([self.git_exec, " git status"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git status", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_file(self, file_name):
"""git add file"""
stage_file = 'git add {}'.format(file_name)
if self.git_exec:
process = Popen([self.git_exec, stage_file], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(stage_file, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_all(self, files="."):
"""git add all"""
files = "` ".join(files.split())
stage_file = 'git add {}'.format(files)
if self.git_exec:
process = Popen([self.git_exec, stage_file], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(stage_file, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def commit(self):
"""git commit"""
enter = input("Commit message.\nPress enter to use 'minor changes'")
if enter == "":
message = self.message
else:
message = enter
# message = "` ".join(message.split())
if self.git_exec:
process = Popen([self.git_exec, 'git', ' commit ', '-m ', message], stdin=PIPE, stdout=PIPE, stderr=PIPE,)
else:
process = Popen(['git', ' commit', ' -m ', message], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_and_commit(self):
"""git add followed by commit"""
self.stage_all()
self.commit()
def push(self):
"""git push"""
if self.git_exec:
process = Popen([self.git_exec, ' git push'], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(['git push'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str("Push completed.{}".format(str(output.decode("utf-8"))))
def pull(self):
"""git pull"""
if self.git_exec:
process = Popen([self.git_exec, ' git pull'], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(['git pull'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str("Pull completed.\n{}".format(str(output.decode("utf-8"))))
def reset(self, number='1'):
"""git reset"""
if self.git_exec:
process = Popen([self.git_exec, ' git reset HEAD~', number], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(['git reset HEAD~', number], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
# def branch(self):
# """Return the branch being tracked by local"""
# process = Popen([self.git_exec, 'git branch -vv'], shell=True,
# stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
# output, _ = process.communicate()
# out_text = str(output.decode("utf-8"))
# try:
# line = [each for each in out_text.split("\n") if each.startswith("*")][0]
# except IndexError: # no lines start with *
# return
# branch_name = re.search(r"\[origin\/(.*)\]", line)
# return branch_name.group(1)
def repos():
"""Show all available repositories, path, and unique ID"""
print("\nThe following repos are available.\n")
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
print("{:<4} {:<20} {:<}".format("Key", "| Name", "| Path"))
print("******************************************")
for key in INDEX_SHELF.keys():
name = INDEX_SHELF[key]
print("{:<4} {:<20} {:<}".format(key, name, str(NAME_SHELF[name])))
INDEX_SHELF.close()
NAME_SHELF.close()
def load(input_string): # id is string
"""Load a repository with specified id"""
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
input_string = str(input_string)
try:
int(input_string) # if not coercible into an integer, then it's probably a repo name rather than an ID
try:
name = INDEX_SHELF[input_string]
return Commands(name, str(NAME_SHELF[name]))
except KeyError:
raise Exception("That index does not exist.")
except ValueError:
try:
return Commands(input_string, NAME_SHELF[input_string])
except KeyError:
raise Exception("That repository name does not exist or is not indexed")
INDEX_SHELF.close()
NAME_SHELF.close()
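# Usage sketch (illustrative): the key "1" and the name "myproject" below are
# hypothetical; this assumes initialize() has already populated
# NAME_SHELF/INDEX_SHELF with at least one repository.
# repo = load("1")            # load by the numeric key printed by repos()
# repo = load("myproject")    # or load by repository name
# if repo.need_attention():
#     print(repo.status())    # show how the repo differs from its remote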
def load_multiple(*args, _all=False):
"""Create `commands` object for a set of repositories
Parameters
------------
args : str
Repository keys or names, passed as separate arguments
Yields
---------
Commands objects, one for each of the entered strings
"""
if _all:
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
for key in NAME_SHELF.keys():
yield load(key)
else:
for arg in args:
yield load(arg)
def pull(*args, _all=False):
for each in load_multiple(*args, _all=_all):
s = "*** {} ***\n{}".format(each.name, each.pull())
print(s)
def push(*args, _all=False):
for each in load_multiple(*args, _all=_all):
s = "*** {} ***\n{}".format(each.name, each.push())
print(s)
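# Batch usage sketch (illustrative): pull and push operate on whatever
# load_multiple yields; the keys "1" and "2" below are hypothetical entries
# from the index.
# pull(_all=True)    # pull every indexed repository
# push("1", "2")     # push only the repositories with keys 1 and 2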
def all_status():
|
"""Write status of all repositories to file in markdown format"""
print("Getting repo status.\n\nYou may be prompted for credentials...")
os.chdir(STATUS_DIR)
attention = ""
messages = []
TIME_STAMP = datetime.now().strftime("%a_%d_%b_%Y_%H_%M_%S_%p")
fname = "REPO_STATUS_@_{}.md".format(TIME_STAMP)
with open(fname, 'w+') as f:
f.write("# Repository status as at {}\n\n".format(TIME_STAMP))
for each in load_multiple(_all=True):
name = each.name
status = each.status()
messages.append("## {}\n\n```cmd\n{}```\n".format(name, status))
if need_attention(status):
attention += "1. {}\n".format(name)
|
identifier_body
|
|
pygit.py
|
(str(PurePath(SHELF_DIR / "MASTER_SHELF")))
MASTER_SHELF["master"] = master_directory
MASTER_SHELF.close()
def shelve_master_directory(master_directory, verbosity, rules):
"""Find and store the locations of git repos"""
if master_directory:
save_master(master_directory)
show_verbose_output(verbosity, "Master directory set to ", master_directory, "Now Shelving")
i = len(list(INDEX_SHELF.keys())) + 1
folder_paths = [x for x in Path(master_directory).iterdir() if x.is_dir()]
for f in folder_paths: # log folders
show_verbose_output(verbosity, f)
for folder_name in folder_paths:
path = Path(master_directory) / folder_name
if enforce_exclusion(folder_name, verbosity):
continue
if match_rule(rules, path, verbosity):
continue
directory_absolute_path = Path(path).resolve()
if is_git_repo(directory_absolute_path):
if sys.platform == 'win32':
name = PureWindowsPath(directory_absolute_path).parts[-1]
if sys.platform == 'linux':
name = PurePath(directory_absolute_path).parts[-1]
show_verbose_output(verbosity, directory_absolute_path, " is a git repository *** shelving\n")
NAME_SHELF[name] = directory_absolute_path
INDEX_SHELF[str(i)] = name
i += 1
# NAME_SHELF.close()
# INDEX_SHELF.close()
def shelve_simple_directory(simple_directory, verbosity):
if simple_directory:
i = len(list(INDEX_SHELF.keys())) + 1
for directory in simple_directory:
if is_git_repo(directory):
show_verbose_output(verbosity, " is a git repository *** shelving\n")
if sys.platform == 'win32':
name = directory.split("\\")[-1]
if sys.platform == 'linux':
name = directory.split("/")[-1]
NAME_SHELF[name] = directory
INDEX_SHELF[str(i)] = name
else:
show_verbose_output(verbosity, " is not a valid git repo.\nContinuing...\n")
continue
i += 1
def initialize():
"""Initialize the data necessary for pygit to operate"""
print("Initializing ...")
global NAME_SHELF, INDEX_SHELF
try:
Path.mkdir(SHELF_DIR)
except FileExistsError:
shutil.rmtree(SHELF_DIR)
Path.mkdir(SHELF_DIR)
try:
Path.mkdir(STATUS_DIR)
except FileExistsError:
pass
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF"))) # Use the string representation to open path to avoid errors
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
args = get_command_line_arguments()
verbosity = args.verbosity
rules = args.rules
shelve_git_path(args.gitPath, verbosity)
shelve_master_directory(args.masterDirectory, verbosity, rules)
shelve_simple_directory(args.simpleDirectory, verbosity)
INDEX_SHELF.close()
NAME_SHELF.close()
if verbosity:
print("\nIndexed git repos.\n")
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
print("Status saved in {}".format(STATUS_DIR))
print("{:<4} {:<20} {:<}".format("Key", "| Name", "| Path"))
print("*********************************")
for key in INDEX_SHELF.keys():
name = INDEX_SHELF[key]
print("{:<4} {:<20} {:<}".format(key, name, str(NAME_SHELF[name])))
else:
print("Indexing done")
return
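# Typical flow sketch (illustrative): after the command-line flags are parsed
# by initialize(), the shelves can be inspected and refreshed like this.
# initialize()   # index repos found via --masterDirectory / --simpleDirectory
# repos()        # list the indexed repositories and their keys
# update()       # re-scan the saved master directory for newly added repos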
def update():
"""Update INDEX_SHELF"""
MASTER_SHELF = shelve.open(str(PurePath(SHELF_DIR / "MASTER_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
master = MASTER_SHELF["master"]
print("Master ", master)
# shelve_master_directory(master, 0, "")
save_master(master)
i = len(list(INDEX_SHELF.keys())) + 1
folder_paths = [x for x in Path(master).iterdir() if x.is_dir()]
for folder_name in folder_paths:
path = Path(master) / folder_name
directory_absolute_path = Path(path).resolve()
if is_git_repo(directory_absolute_path):
if sys.platform == 'win32':
name = PureWindowsPath(directory_absolute_path).parts[-1]
if sys.platform == 'linux':
name = PurePath(directory_absolute_path).parts[-1]
NAME_SHELF[name] = directory_absolute_path
INDEX_SHELF[str(i)] = name
i += 1
print("Update completed successfully")
return
class Commands:
"""Commands class
Parameters
-----------
repo_name : str
The repository name. See list of repositories by running
master_directory : str
The absolute path to the directory
git_exec : str
The path to the git executable on the system
message : str
Commit message
Returns
--------
: Commands object
"""
def __str__(self):
return "Commands: {}: {}".format(self.name, self.dir)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __init__(self, repo_name, master_directory, git_exec=None, message="minor changes"):
self.name = repo_name
self.dir = master_directory
self.git_exec = git_exec
self.message = message
try:
os.chdir(self.dir)
except (FileNotFoundError, TypeError):
print("{} may have been moved.\n Run initialize() to update paths".format(self.name))
self.dir = os.getcwd()
def need_attention(self):
"""Return True if a repo status is not exactly same as that of remote"""
msg = ["not staged", "behind", "ahead", "Untracked"]
status_msg = self.status()
if any([each in status_msg for each in msg]):
return True
return False
def fetch(self):
"""git fetch"""
if self.git_exec:
process = Popen([self.git_exec, "git fetch"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git fetch", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
# output, error = process.communicate()
process.communicate()
def status(self):
"""git status"""
self.fetch() # always do a fetch before reporting status
if self.git_exec:
process = Popen([self.git_exec, " git status"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git status", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_file(self, file_name):
"""git add file"""
stage_file = 'git add {}'.format(file_name)
if self.git_exec:
process = Popen([self.git_exec, stage_file], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(stage_file, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_all(self, files="."):
"""git add all"""
files = "` ".join(files.split())
stage_file = 'git add {}'.format(files)
if self.git_exec:
process = Popen([self.git_exec, stage_file], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(stage_file, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def commit(self):
"""git commit"""
enter = input("Commit message.\nPress enter to use 'minor changes'")
if enter == "":
message = self.message
else:
message = enter
# message = "` ".join(message.split())
if self.git_exec:
process = Popen([self.git_exec, 'git', ' commit ', '-m ', message], stdin=PIPE, stdout=PIPE, stderr=PIPE,)
else:
process = Popen(['git', ' commit', ' -m ', message], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_and_commit(self):
"""git add followed by commit"""
self.stage_all()
self.commit()
def push(self):
"""git push"""
if self.git_exec:
|
process = Popen([self.git_exec, ' git push'], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
|
conditional_block
|
|
pygit.py
|
_DIR / "NAME_SHELF")))
master = MASTER_SHELF["master"]
print("Master ", master)
# shelve_master_directory(master, 0, "")
save_master(master)
i = len(list(INDEX_SHELF.keys())) + 1
folder_paths = [x for x in Path(master).iterdir() if x.is_dir()]
for folder_name in folder_paths:
path = Path(master) / folder_name
directory_absolute_path = Path(path).resolve()
if is_git_repo(directory_absolute_path):
if sys.platform == 'win32':
name = PureWindowsPath(directory_absolute_path).parts[-1]
if sys.platform == 'linux':
name = PurePath(directory_absolute_path).parts[-1]
NAME_SHELF[name] = directory_absolute_path
INDEX_SHELF[str(i)] = name
i += 1
print("Update completed successfully")
return
class Commands:
"""Commands class
Parameters
-----------
repo_name : str
The repository name. See list of repositories by running
master_directory : str
The absolute path to the directory
git_exec : str
The path to the git executable on the system
message : str
Commit message
Returns
--------
: Commands object
"""
def __str__(self):
return "Commands: {}: {}".format(self.name, self.dir)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __init__(self, repo_name, master_directory, git_exec=None, message="minor changes"):
self.name = repo_name
self.dir = master_directory
self.git_exec = git_exec
self.message = message
try:
os.chdir(self.dir)
except (FileNotFoundError, TypeError):
print("{} may have been moved.\n Run initialize() to update paths".format(self.name))
self.dir = os.getcwd()
def need_attention(self):
"""Return True if a repo status is not exactly same as that of remote"""
msg = ["not staged", "behind", "ahead", "Untracked"]
status_msg = self.status()
if any([each in status_msg for each in msg]):
return True
return False
def fetch(self):
"""git fetch"""
if self.git_exec:
process = Popen([self.git_exec, "git fetch"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git fetch", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
# output, error = process.communicate()
process.communicate()
def status(self):
"""git status"""
self.fetch() # always do a fetch before reporting status
if self.git_exec:
process = Popen([self.git_exec, " git status"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git status", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_file(self, file_name):
"""git add file"""
stage_file = 'git add {}'.format(file_name)
if self.git_exec:
process = Popen([self.git_exec, stage_file], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(stage_file, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_all(self, files="."):
"""git add all"""
files = "` ".join(files.split())
stage_file = 'git add {}'.format(files)
if self.git_exec:
process = Popen([self.git_exec, stage_file], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(stage_file, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def commit(self):
"""git commit"""
enter = input("Commit message.\nPress enter to use 'minor changes'")
if enter == "":
message = self.message
else:
message = enter
# message = "` ".join(message.split())
if self.git_exec:
process = Popen([self.git_exec, 'git', ' commit ', '-m ', message], stdin=PIPE, stdout=PIPE, stderr=PIPE,)
else:
process = Popen(['git', ' commit', ' -m ', message], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
def stage_and_commit(self):
"""git add followed by commit"""
self.stage_all()
self.commit()
def push(self):
"""git push"""
if self.git_exec:
process = Popen([self.git_exec, ' git push'], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(['git push'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str("Push completed.{}".format(str(output.decode("utf-8"))))
def pull(self):
"""git pull"""
if self.git_exec:
process = Popen([self.git_exec, ' git pull'], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(['git pull'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,)
output, _ = process.communicate()
return str("Pull completed.\n{}".format(str(output.decode("utf-8"))))
def reset(self, number='1'):
"""git reset"""
if self.git_exec:
process = Popen([self.git_exec, ' git reset HEAD~', number], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
else:
process = Popen(['git reset HEAD~', number], stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
output, _ = process.communicate()
return str(output.decode("utf-8"))
# def branch(self):
# """Return the branch being tracked by local"""
# process = Popen([self.git_exec, 'git branch -vv'], shell=True,
# stdin=PIPE, stdout=PIPE, stderr=STDOUT,)
# output, _ = process.communicate()
# out_text = str(output.decode("utf-8"))
# try:
# line = [each for each in out_text.split("\n") if each.startswith("*")][0]
# except IndexError: # no lines start with *
# return
# branch_name = re.search(r"\[origin\/(.*)\]", line)
# return branch_name.group(1)
def repos():
"""Show all available repositories, path, and unique ID"""
print("\nThe following repos are available.\n")
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
print("{:<4} {:<20} {:<}".format("Key", "| Name", "| Path"))
print("******************************************")
for key in INDEX_SHELF.keys():
name = INDEX_SHELF[key]
print("{:<4} {:<20} {:<}".format(key, name, str(NAME_SHELF[name])))
INDEX_SHELF.close()
NAME_SHELF.close()
def load(input_string): # id is string
"""Load a repository with specified id"""
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
input_string = str(input_string)
try:
int(input_string) # if not coercible into an integer, then it's probably a repo name rather than an ID
try:
name = INDEX_SHELF[input_string]
return Commands(name, str(NAME_SHELF[name]))
except KeyError:
raise Exception("That index does not exist.")
except ValueError:
try:
return Commands(input_string, NAME_SHELF[input_string])
except KeyError:
raise Exception("That repository name does not exist or is not indexed")
INDEX_SHELF.close()
NAME_SHELF.close()
def load_multiple(*args, _all=False):
"""Create `commands` object for a set of repositories
Parameters
------------
args : str
Repository keys or names, passed as separate arguments
Yields
---------
Commands objects, one for each of the entered strings
"""
if _all:
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
for key in NAME_SHELF.keys():
yield load(key)
else:
for arg in args:
yield load(arg)
def pull(*args, _all=False):
for each in load_multiple(*args, _all=_all):
s = "*** {} ***\n{}".format(each.name, each.pull())
print(s)
def
|
push
|
identifier_name
|
|
pygit.py
|
', '--masterDirectory', help="Full pathname to directory holding any number of git repos.")
parser.add_argument('-s', '--simpleDirectory', help="A list of full pathnames to any number of individual git repos.", nargs='+')
return parser.parse_args()
def shelve_git_path(git_path, verbosity):
"""Find and store the location of git executable"""
if check_git_support():
print("Your system is configured to work with git.\n")
elif "git" in os.environ['PATH']:
user_paths = os.environ['PATH'].split(os.pathsep)
for path in user_paths:
if "git-cmd.exe" in path:
NAME_SHELF['GIT_WINDOWS'] = path
return
if "git-bash.exe" in path:
NAME_SHELF['GIT_BASH'] = path
return
else:
print("Git was not found in your system path.\nYou may need to set the location manually using the -g flag.\n")
if git_path:
for _, __, files in os.walk(git_path):
if "git-cmd.exe" in files:
NAME_SHELF['GIT_WINDOWS'] = git_path
elif "git-bash.exe" in files:
NAME_SHELF['GIT_BASH'] = git_path
else:
print("A valid git executable was not found in the directory.\n")
return
def enforce_exclusion(folder_name, verbosity):
"""Return True if a folder starts with any character in exclusion_folder_start"""
exclusion_folder_start = [".", "_"] # skip folders that start with any of these characters
if any([str(PurePath(folder_name)).startswith(each) for each in exclusion_folder_start]):
if verbosity:
show_verbose_output(verbosity, folder_name, " starts with one of ", exclusion_folder_start, " skipping\n")
return True
return False
def match_rule(rules, path, verbosity):
"""Return True if a folder matches a rule in rules"""
if rules:
if any([rule in path for rule in rules]):
show_verbose_output(verbosity, path, " matches an exclusion rule. Skipping\n")
return True
return False
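# Filtering sketch (illustrative): how enforce_exclusion and match_rule are
# combined when shelving. The folder names and the "archive" rule are
# hypothetical, and plain strings stand in for the real Path objects.
# candidates = [".git", "_drafts", "archive_old", "myproject"]
# kept = [f for f in candidates
#         if not enforce_exclusion(f, 0) and not match_rule(["archive"], f, 0)]
# # kept == ["myproject"]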
def save_master(master_directory):
"""Saves the location of the master directory"""
global MASTER_SHELF
MASTER_SHELF = shelve.open(str(PurePath(SHELF_DIR / "MASTER_SHELF")))
MASTER_SHELF["master"] = master_directory
MASTER_SHELF.close()
def shelve_master_directory(master_directory, verbosity, rules):
"""Find and store the locations of git repos"""
if master_directory:
save_master(master_directory)
show_verbose_output(verbosity, "Master directory set to ", master_directory, "Now Shelving")
i = len(list(INDEX_SHELF.keys())) + 1
folder_paths = [x for x in Path(master_directory).iterdir() if x.is_dir()]
for f in folder_paths: # log folders
show_verbose_output(verbosity, f)
for folder_name in folder_paths:
path = Path(master_directory) / folder_name
if enforce_exclusion(folder_name, verbosity):
continue
if match_rule(rules, path, verbosity):
continue
directory_absolute_path = Path(path).resolve()
if is_git_repo(directory_absolute_path):
if sys.platform == 'win32':
name = PureWindowsPath(directory_absolute_path).parts[-1]
if sys.platform == 'linux':
name = PurePath(directory_absolute_path).parts[-1]
show_verbose_output(verbosity, directory_absolute_path, " is a git repository *** shelving\n")
NAME_SHELF[name] = directory_absolute_path
INDEX_SHELF[str(i)] = name
i += 1
# NAME_SHELF.close()
# INDEX_SHELF.close()
def shelve_simple_directory(simple_directory, verbosity):
if simple_directory:
i = len(list(INDEX_SHELF.keys())) + 1
for directory in simple_directory:
if is_git_repo(directory):
show_verbose_output(verbosity, " is a git repository *** shelving\n")
if sys.platform == 'win32':
name = directory.split("\\")[-1]
if sys.platform == 'linux':
name = directory.split("/")[-1]
NAME_SHELF[name] = directory
INDEX_SHELF[str(i)] = name
else:
show_verbose_output(verbosity, " is not a valid git repo.\nContinuing...\n")
continue
i += 1
def initialize():
"""Initialize the data necessary for pygit to operate"""
print("Initializing ...")
global NAME_SHELF, INDEX_SHELF
try:
Path.mkdir(SHELF_DIR)
except FileExistsError:
shutil.rmtree(SHELF_DIR)
Path.mkdir(SHELF_DIR)
try:
Path.mkdir(STATUS_DIR)
except FileExistsError:
pass
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF"))) # Use the string representation to open path to avoid errors
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
args = get_command_line_arguments()
verbosity = args.verbosity
rules = args.rules
shelve_git_path(args.gitPath, verbosity)
shelve_master_directory(args.masterDirectory, verbosity, rules)
shelve_simple_directory(args.simpleDirectory, verbosity)
INDEX_SHELF.close()
NAME_SHELF.close()
if verbosity:
print("\nIndexed git repos.\n")
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
print("Status saved in {}".format(STATUS_DIR))
print("{:<4} {:<20} {:<}".format("Key", "| Name", "| Path"))
print("*********************************")
for key in INDEX_SHELF.keys():
name = INDEX_SHELF[key]
print("{:<4} {:<20} {:<}".format(key, name, str(NAME_SHELF[name])))
else:
print("Indexing done")
return
def update():
"""Update INDEX_SHELF"""
MASTER_SHELF = shelve.open(str(PurePath(SHELF_DIR / "MASTER_SHELF")))
INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / "INDEX_SHELF")))
NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / "NAME_SHELF")))
master = MASTER_SHELF["master"]
print("Master ", master)
# shelve_master_directory(master, 0, "")
save_master(master)
|
i = len(list(INDEX_SHELF.keys())) + 1
folder_paths = [x for x in Path(master).iterdir() if x.is_dir()]
for folder_name in folder_paths:
path = Path(master) / folder_name
directory_absolute_path = Path(path).resolve()
if is_git_repo(directory_absolute_path):
if sys.platform == 'win32':
name = PureWindowsPath(directory_absolute_path).parts[-1]
if sys.platform == 'linux':
name = PurePath(directory_absolute_path).parts[-1]
NAME_SHELF[name] = directory_absolute_path
INDEX_SHELF[str(i)] = name
i += 1
print("Update completed successfully")
return
class Commands:
"""Commands class
Parameters
-----------
repo_name : str
The repository name. See list of repositories by running
master_directory : str
The absolute path to the directory
git_exec : str
The path to the git executable on the system
message : str
Commit message
Returns
--------
: Commands object
"""
def __str__(self):
return "Commands: {}: {}".format(self.name, self.dir)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __init__(self, repo_name, master_directory, git_exec=None, message="minor changes"):
self.name = repo_name
self.dir = master_directory
self.git_exec = git_exec
self.message = message
try:
os.chdir(self.dir)
except (FileNotFoundError, TypeError):
print("{} may have been moved.\n Run initialize() to update paths".format(self.name))
self.dir = os.getcwd()
def need_attention(self):
"""Return True if a repo status is not exactly same as that of remote"""
msg = ["not staged", "behind", "ahead", "Untracked"]
status_msg = self.status()
if any([each in status_msg for each in msg]):
return True
return False
def fetch(self):
"""git fetch"""
if self.git_exec:
process = Popen([self.git_exec, "git fetch"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
else:
process = Popen("git fetch", shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
# output, error = process.communicate()
process.communicate()
def status(self):
"""git status"""
self.fetch() # always do a fetch before reporting status
if self.git
|
random_line_split
|
|
merge.go
|
ux.Unlock()
if ETLMergeTaskPool == nil {
mp, err := mpool.NewMPool("etl_merge_task", 0, mpool.NoFixed)
if err != nil {
return nil, err
}
ETLMergeTaskPool = mp
}
return ETLMergeTaskPool, nil
}
func NewMerge(ctx context.Context, opts ...MergeOption) (*Merge, error) {
var err error
m := &Merge{
pathBuilder: table.NewAccountDatePathBuilder(),
MaxFileSize: defaultMaxFileSize,
MaxMergeJobs: 1,
logger: runtime.ProcessLevelRuntime().Logger().WithContext(ctx).Named(LoggerNameETLMerge),
}
m.ctx, m.cancelFunc = context.WithCancel(ctx)
for _, opt := range opts {
opt(m)
}
if m.mp, err = getMpool(); err != nil {
return nil, err
}
m.validate(ctx)
m.runningJobs = make(chan struct{}, m.MaxMergeJobs)
return m, nil
}
// validate checks for missing init elems. It panics if any elem is missing.
func (m *Merge) validate(ctx context.Context) {
if m.table == nil {
panic(moerr.NewInternalError(ctx, "merge task missing input 'table'"))
}
if m.fs == nil {
panic(moerr.NewInternalError(ctx, "merge task missing input 'FileService'"))
}
}
// Start runs the service loop
func (m *Merge) Start(ctx context.Context, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
m.Main(ctx)
case <-m.ctx.Done():
return
}
}
}
// Stop should be called only once
func (m *Merge) Stop() {
m.cancelFunc()
}
// =======================
// main logic
// =======================
type FileMeta struct {
FilePath string
FileSize int64
}
// Main lists all accounts and all dates that belong to m.table.GetName()
func (m *Merge) Main(ctx context.Context) error {
var files = make([]*FileMeta, 0, 1000)
var totalSize int64
accounts, err := m.fs.List(ctx, "/")
if err != nil {
return err
}
if len(accounts) == 0 {
m.logger.Info("merge find empty data")
return nil
}
m.logger.Debug(fmt.Sprintf("merge task with max file: %v MB", m.MaxFileSize/mpool.MB))
for _, account := range accounts {
if !account.IsDir {
m.logger.Warn(fmt.Sprintf("path is not dir: %s", account.Name))
continue
}
// build targetPath like "${account}/logs/*/*/*/${table_name}"
targetPath := m.pathBuilder.Build(account.Name, table.MergeLogTypeLogs, table.ETLParamTSAll, m.table.GetDatabase(), m.table.GetName())
// search all paths like:
// 0: ${account}/logs/2023/05/31/${table_name}
// 1: ${account}/logs/2023/06/01/${table_name}
// 2: ...
rootPaths, err := m.getAllTargetPath(ctx, targetPath)
if err != nil {
return err
}
// get all file entry
for _, rootPath := range rootPaths {
m.logger.Info("start merge", logutil.TableField(m.table.GetIdentify()), logutil.PathField(rootPath),
zap.String("metadata.ID", m.task.Metadata.ID))
fileEntrys, err := m.fs.List(ctx, rootPath)
if err != nil {
// fixme: m.logger.Error()
return err
}
files = files[:0]
totalSize = 0
for _, f := range fileEntrys {
filepath := path.Join(rootPath, f.Name)
totalSize += f.Size
files = append(files, &FileMeta{filepath, f.Size})
if totalSize > m.MaxFileSize {
if err = m.doMergeFiles(ctx, files); err != nil {
m.logger.Error(fmt.Sprintf("merge task meet error: %v", err))
}
files = files[:0]
totalSize = 0
}
}
if len(files) > 0 {
if err = m.doMergeFiles(ctx, files); err != nil {
m.logger.Warn(fmt.Sprintf("merge task meet error: %v", err))
}
}
}
}
return err
}
func (m *Merge) getAllTargetPath(ctx context.Context, filePath string) ([]string, error) {
sep := "/"
pathDir := strings.Split(filePath, sep)
l := list.New()
if pathDir[0] == "" {
l.PushBack(sep)
} else {
l.PushBack(pathDir[0])
}
for i := 1; i < len(pathDir); i++ {
length := l.Len()
for j := 0; j < length; j++ {
elem := l.Remove(l.Front())
prefix := elem.(string)
entries, err := m.fs.List(ctx, prefix)
if err != nil {
return nil, err
}
for _, entry := range entries {
if !entry.IsDir && i+1 != len(pathDir) {
continue
}
matched, err := path.Match(pathDir[i], entry.Name)
if err != nil {
return nil, err
}
if !matched {
continue
}
l.PushBack(path.Join(prefix, entry.Name))
}
}
}
length := l.Len()
fileList := make([]string, 0, length)
for idx := 0; idx < length; idx++ {
fileList = append(fileList, l.Remove(l.Front()).(string))
}
return fileList, nil
}
// doMergeFiles handles merge (read->write->delete) ops for all files in the target directory.
// Handle the files one by one: run uploadFile and do the deletion if the upload succeeds.
// Upload the files to the SQL table.
// Delete the files from the FileService.
func (m *Merge) doMergeFiles(ctx context.Context, files []*FileMeta) error {
ctx, span := trace.Start(ctx, "doMergeFiles")
defer span.End()
// Control task concurrency
m.runningJobs <- struct{}{}
defer func() {
<-m.runningJobs
}()
// Step 3. do simple merge
var uploadFile = func(ctx context.Context, fp *FileMeta) error {
row := m.table.GetRow(ctx)
defer row.Free()
// open reader
reader, err := newETLReader(ctx, m.table, m.fs, fp.FilePath, fp.FileSize, m.mp)
if err != nil {
m.logger.Error(fmt.Sprintf("merge file meet read failed: %v", err))
return err
}
defer reader.Close()
cacheFileData := &SliceCache{}
defer cacheFileData.Reset()
// read all content
var line []string
line, err = reader.ReadLine()
for ; line != nil && err == nil; line, err = reader.ReadLine() {
if err = row.ParseRow(line); err != nil {
m.logger.Error("pa
|
w)
}
if err != nil {
m.logger.Warn("failed to read file",
logutil.PathField(fp.FilePath), zap.Error(err))
return err
}
// sql insert
if cacheFileData.Size() > 0 {
if err = cacheFileData.Flush(m.table); err != nil {
return err
}
cacheFileData.Reset()
}
// delete empty file or file already uploaded
if cacheFileData.Size() == 0 {
if err = m.fs.Delete(ctx, fp.FilePath); err != nil {
m.logger.Warn("failed to delete file", zap.Error(err))
return err
}
}
return nil
}
var err error
for _, fp := range files {
if err = uploadFile(ctx, fp); err != nil {
// todo: adjust the sleep settings
// Sleep 10 seconds to wait for the database to recover
time.Sleep(10 * time.Second)
m.logger.Error("failed to upload file to MO",
logutil.TableField(m.table.GetIdentify()),
logutil.PathField(fp.FilePath),
zap.Error(err),
)
}
}
logutil.Debug("upload files success", logutil.TableField(m.table.GetIdentify()), zap.Int("file count", len(files)))
return err
}
func SubStringPrefixLimit(str string, length int) string {
if length <= 0 {
return ""
}
if len(str) < length {
return str
} else {
return str[:length] + "..."
}
}
type ContentReader struct {
ctx context.Context
path string
idx int
length int
content [][]string
logger *log.MOL
|
rse ETL rows failed",
logutil.TableField(m.table.GetIdentify()),
logutil.PathField(fp.FilePath),
logutil.VarsField(SubStringPrefixLimit(fmt.Sprintf("%v", line), 102400)),
)
return err
}
cacheFileData.Put(ro
|
conditional_block
|
merge.go
|
for idx := 0; idx < length; idx++ {
fileList = append(fileList, l.Remove(l.Front()).(string))
}
return fileList, nil
}
// doMergeFiles handles merge (read->write->delete) ops for all files in the target directory.
// Handle the files one by one: run uploadFile and do the deletion if the upload succeeds.
// Upload the files to the SQL table.
// Delete the files from the FileService.
func (m *Merge) doMergeFiles(ctx context.Context, files []*FileMeta) error {
ctx, span := trace.Start(ctx, "doMergeFiles")
defer span.End()
// Control task concurrency
m.runningJobs <- struct{}{}
defer func() {
<-m.runningJobs
}()
// Step 3. do simple merge
var uploadFile = func(ctx context.Context, fp *FileMeta) error {
row := m.table.GetRow(ctx)
defer row.Free()
// open reader
reader, err := newETLReader(ctx, m.table, m.fs, fp.FilePath, fp.FileSize, m.mp)
if err != nil {
m.logger.Error(fmt.Sprintf("merge file meet read failed: %v", err))
return err
}
defer reader.Close()
cacheFileData := &SliceCache{}
defer cacheFileData.Reset()
// read all content
var line []string
line, err = reader.ReadLine()
for ; line != nil && err == nil; line, err = reader.ReadLine() {
if err = row.ParseRow(line); err != nil {
m.logger.Error("parse ETL rows failed",
logutil.TableField(m.table.GetIdentify()),
logutil.PathField(fp.FilePath),
logutil.VarsField(SubStringPrefixLimit(fmt.Sprintf("%v", line), 102400)),
)
return err
}
cacheFileData.Put(row)
}
if err != nil {
m.logger.Warn("failed to read file",
logutil.PathField(fp.FilePath), zap.Error(err))
return err
}
// sql insert
if cacheFileData.Size() > 0 {
if err = cacheFileData.Flush(m.table); err != nil {
return err
}
cacheFileData.Reset()
}
// delete empty file or file already uploaded
if cacheFileData.Size() == 0 {
if err = m.fs.Delete(ctx, fp.FilePath); err != nil {
m.logger.Warn("failed to delete file", zap.Error(err))
return err
}
}
return nil
}
var err error
for _, fp := range files {
if err = uploadFile(ctx, fp); err != nil {
// todo: adjust the sleep settings
// Sleep 10 seconds to wait for the database to recover
time.Sleep(10 * time.Second)
m.logger.Error("failed to upload file to MO",
logutil.TableField(m.table.GetIdentify()),
logutil.PathField(fp.FilePath),
zap.Error(err),
)
}
}
logutil.Debug("upload files success", logutil.TableField(m.table.GetIdentify()), zap.Int("file count", len(files)))
return err
}
func SubStringPrefixLimit(str string, length int) string {
if length <= 0 {
return ""
}
if len(str) < length {
return str
} else {
return str[:length] + "..."
}
}
type ContentReader struct {
ctx context.Context
path string
idx int
length int
content [][]string
logger *log.MOLogger
reader *simdcsv.Reader
raw io.ReadCloser
}
// BatchReadRows: a ~20MB rawlog file has about 3700+ rows
const BatchReadRows = 4000
func NewContentReader(ctx context.Context, path string, reader *simdcsv.Reader, raw io.ReadCloser) *ContentReader {
logger := runtime.ProcessLevelRuntime().Logger().WithContext(ctx).Named(LoggerNameContentReader)
return &ContentReader{
ctx: ctx,
path: path,
length: 0,
content: make([][]string, BatchReadRows),
logger: logger,
reader: reader,
raw: raw,
}
}
func (s *ContentReader) ReadLine() ([]string, error) {
if s.idx == s.length && s.reader != nil {
var cnt int
var err error
s.content, cnt, err = s.reader.Read(BatchReadRows, s.ctx, s.content)
if err != nil {
return nil, err
} else if s.content == nil {
s.logger.Error("ContentReader.ReadLine.nil", logutil.PathField(s.path),
zap.Bool("nil", s.content == nil),
zap.Error(s.ctx.Err()),
zap.Bool("SupportedCPU", simdcsv.SupportedCPU()),
)
return nil, moerr.NewInternalError(s.ctx, "read files meet context Done")
}
if cnt < BatchReadRows {
//s.reader.Close() // DO NOT call: it spins in an endless loop doing nothing.
s.reader = nil
s.raw.Close()
s.raw = nil
s.logger.Debug("ContentReader.ReadLine.EOF", logutil.PathField(s.path), zap.Int("rows", cnt))
}
s.idx = 0
s.length = cnt
s.logger.Debug("ContentReader.ReadLine", logutil.PathField(s.path), zap.Int("rows", cnt),
zap.Bool("SupportedCPU", simdcsv.SupportedCPU()),
)
}
if s.idx < s.length {
idx := s.idx
s.idx++
if s.content == nil || len(s.content) == 0 {
s.logger.Error("ContentReader.ReadLine.nil",
logutil.PathField(s.path),
zap.Bool("nil", s.content == nil),
zap.Int("cached", len(s.content)),
zap.Int("idx", idx),
zap.Bool("SupportedCPU", simdcsv.SupportedCPU()),
)
}
return s.content[idx], nil
}
return nil, nil
}
func (s *ContentReader) ReadRow(row *table.Row) error {
panic("NOT implement")
}
func (s *ContentReader) Close() {
capLen := cap(s.content)
s.content = s.content[:capLen]
for idx := range s.content {
s.content[idx] = nil
}
if s.raw != nil {
_ = s.raw.Close()
s.raw = nil
}
}
func newETLReader(ctx context.Context, tbl *table.Table, fs fileservice.FileService, path string, size int64, mp *mpool.MPool) (ETLReader, error) {
if strings.LastIndex(path, table.CsvExtension) > 0 {
return NewCSVReader(ctx, fs, path)
} else if strings.LastIndex(path, table.TaeExtension) > 0 {
r, err := etl.NewTaeReader(ctx, tbl, path, size, fs, mp)
if err != nil {
r.Close()
return nil, err
}
_, err = r.ReadAll(ctx)
if err != nil {
r.Close()
return nil, err
}
return r, nil
} else {
panic("NOT Implements")
}
}
// NewCSVReader creates a new csv reader.
// success case returns: ok_reader, nil error
// failure case returns: nil_reader, error
func NewCSVReader(ctx context.Context, fs fileservice.FileService, path string) (ETLReader, error) {
// external.ReadFile
var reader io.ReadCloser
vec := fileservice.IOVector{
FilePath: path,
Entries: []fileservice.IOEntry{
0: {
Offset: 0,
Size: -1,
ReadCloserForRead: &reader,
},
},
}
// open file reader
if err := fs.Read(ctx, &vec); err != nil {
return nil, err
}
// parse csv content
simdCsvReader := simdcsv.NewReaderWithOptions(reader,
table.CommonCsvOptions.FieldTerminator,
'#',
true,
true)
// return content Reader
return NewContentReader(ctx, path, simdCsvReader, reader), nil
}
type Cache interface {
Put(*table.Row)
Size() int64
Flush(*table.Table) error
Reset()
IsEmpty() bool
}
type SliceCache struct {
m [][]string
size int64
}
func (c *SliceCache) Flush(tbl *table.Table) error {
_, err := db_holder.WriteRowRecords(c.m, tbl, MAX_MERGE_INSERT_TIME)
c.Reset()
return err
}
func (c *SliceCache) Reset() {
for idx := range c.m {
c.m[idx] = nil
}
c.m = c.m[:0]
c.size = 0
}
func (c *SliceCache) IsEmpty() bool {
return len(c.m) == 0
}
func (c *SliceCache) Put(r *table.Row) {
c.m = append(c.m, r.GetCsvStrings())
c.size += r.Size()
}
func (c *SliceCache) Size() int64 { return c.size }
func LongRunETLMerge(ctx cont
|
ext.Context, ta
|
identifier_name
|
|
merge.go
|
}
}
length := l.Len()
fileList := make([]string, 0, length)
for idx := 0; idx < length; idx++ {
fileList = append(fileList, l.Remove(l.Front()).(string))
}
return fileList, nil
}
// doMergeFiles handles merge (read->write->delete) ops for all files in the target directory.
// Handle the files one by one: run uploadFile and do the deletion if the upload succeeds.
// Upload the files to the SQL table.
// Delete the files from the FileService.
func (m *Merge) doMergeFiles(ctx context.Context, files []*FileMeta) error {
ctx, span := trace.Start(ctx, "doMergeFiles")
defer span.End()
// Control task concurrency
m.runningJobs <- struct{}{}
defer func() {
<-m.runningJobs
}()
// Step 3. do simple merge
var uploadFile = func(ctx context.Context, fp *FileMeta) error {
row := m.table.GetRow(ctx)
defer row.Free()
// open reader
reader, err := newETLReader(ctx, m.table, m.fs, fp.FilePath, fp.FileSize, m.mp)
if err != nil {
m.logger.Error(fmt.Sprintf("merge file meet read failed: %v", err))
return err
}
defer reader.Close()
cacheFileData := &SliceCache{}
defer cacheFileData.Reset()
// read all content
var line []string
line, err = reader.ReadLine()
for ; line != nil && err == nil; line, err = reader.ReadLine() {
if err = row.ParseRow(line); err != nil {
m.logger.Error("parse ETL rows failed",
logutil.TableField(m.table.GetIdentify()),
logutil.PathField(fp.FilePath),
logutil.VarsField(SubStringPrefixLimit(fmt.Sprintf("%v", line), 102400)),
)
return err
}
cacheFileData.Put(row)
}
if err != nil {
m.logger.Warn("failed to read file",
logutil.PathField(fp.FilePath), zap.Error(err))
return err
}
// sql insert
if cacheFileData.Size() > 0 {
if err = cacheFileData.Flush(m.table); err != nil {
return err
}
cacheFileData.Reset()
}
// delete empty file or file already uploaded
if cacheFileData.Size() == 0 {
if err = m.fs.Delete(ctx, fp.FilePath); err != nil {
m.logger.Warn("failed to delete file", zap.Error(err))
return err
}
}
return nil
}
var err error
for _, fp := range files {
if err = uploadFile(ctx, fp); err != nil {
// todo: adjust the sleep settings
// Sleep 10 seconds to wait for the database to recover
time.Sleep(10 * time.Second)
m.logger.Error("failed to upload file to MO",
logutil.TableField(m.table.GetIdentify()),
logutil.PathField(fp.FilePath),
zap.Error(err),
)
}
}
logutil.Debug("upload files success", logutil.TableField(m.table.GetIdentify()), zap.Int("file count", len(files)))
return err
}
func SubStringPrefixLimit(str string, length int) string {
if length <= 0 {
return ""
}
if len(str) < length {
return str
} else {
return str[:length] + "..."
}
}
type ContentReader struct {
ctx context.Context
path string
idx int
length int
content [][]string
logger *log.MOLogger
reader *simdcsv.Reader
raw io.ReadCloser
}
// BatchReadRows: a ~20MB rawlog file has about 3700+ rows
const BatchReadRows = 4000
func NewContentReader(ctx context.Context, path string, reader *simdcsv.Reader, raw io.ReadCloser) *ContentReader {
logger := runtime.ProcessLevelRuntime().Logger().WithContext(ctx).Named(LoggerNameContentReader)
return &ContentReader{
ctx: ctx,
path: path,
length: 0,
content: make([][]string, BatchReadRows),
logger: logger,
reader: reader,
raw: raw,
}
}
func (s *ContentReader) ReadLine() ([]string, error) {
if s.idx == s.length && s.reader != nil {
var cnt int
var err error
s.content, cnt, err = s.reader.Read(BatchReadRows, s.ctx, s.content)
if err != nil {
return nil, err
} else if s.content == nil {
s.logger.Error("ContentReader.ReadLine.nil", logutil.PathField(s.path),
zap.Bool("nil", s.content == nil),
zap.Error(s.ctx.Err()),
zap.Bool("SupportedCPU", simdcsv.SupportedCPU()),
)
return nil, moerr.NewInternalError(s.ctx, "read files meet context Done")
}
if cnt < BatchReadRows {
//s.reader.Close() // DO NOT call: it spins in an endless loop doing nothing.
s.reader = nil
s.raw.Close()
s.raw = nil
s.logger.Debug("ContentReader.ReadLine.EOF", logutil.PathField(s.path), zap.Int("rows", cnt))
}
s.idx = 0
s.length = cnt
s.logger.Debug("ContentReader.ReadLine", logutil.PathField(s.path), zap.Int("rows", cnt),
zap.Bool("SupportedCPU", simdcsv.SupportedCPU()),
)
}
if s.idx < s.length {
idx := s.idx
s.idx++
if s.content == nil || len(s.content) == 0 {
s.logger.Error("ContentReader.ReadLine.nil",
logutil.PathField(s.path),
zap.Bool("nil", s.content == nil),
zap.Int("cached", len(s.content)),
zap.Int("idx", idx),
zap.Bool("SupportedCPU", simdcsv.SupportedCPU()),
)
}
return s.content[idx], nil
}
return nil, nil
}
func (s *ContentReader) ReadRow(row *table.Row) error {
panic("NOT implement")
}
func (s *ContentReader) Close() {
capLen := cap(s.content)
s.content = s.content[:capLen]
for idx := range s.content {
s.content[idx] = nil
}
if s.raw != nil {
_ = s.raw.Close()
s.raw = nil
}
}
func newETLReader(ctx context.Context, tbl *table.Table, fs fileservice.FileService, path string, size int64, mp *mpool.MPool) (ETLReader, error) {
if strings.LastIndex(path, table.CsvExtension) > 0 {
return NewCSVReader(ctx, fs, path)
} else if strings.LastIndex(path, table.TaeExtension) > 0 {
r, err := etl.NewTaeReader(ctx, tbl, path, size, fs, mp)
if err != nil {
r.Close()
return nil, err
}
_, err = r.ReadAll(ctx)
if err != nil {
r.Close()
return nil, err
}
return r, nil
} else {
panic("NOT Implements")
}
}
// NewCSVReader creates a new csv reader.
// success case returns: ok_reader, nil error
// failure case returns: nil_reader, error
func NewCSVReader(ctx context.Context, fs fileservice.FileService, path string) (ETLReader, error) {
// external.ReadFile
var reader io.ReadCloser
vec := fileservice.IOVector{
FilePath: path,
Entries: []fileservice.IOEntry{
0: {
Offset: 0,
Size: -1,
ReadCloserForRead: &reader,
},
},
}
// open file reader
if err := fs.Read(ctx, &vec); err != nil {
return nil, err
}
// parse csv content
simdCsvReader := simdcsv.NewReaderWithOptions(reader,
table.CommonCsvOptions.FieldTerminator,
'#',
true,
true)
// return content Reader
return NewContentReader(ctx, path, simdCsvReader, reader), nil
}
type Cache interface {
Put(*table.Row)
Size() int64
Flush(*table.Table) error
Reset()
IsEmpty() bool
}
type SliceCache struct {
m [][]string
size int64
}
func (c *SliceCache) Flush(tbl *table.Table) error {
_, err := db_holder.WriteRowRecords(c.m, tbl, MAX_MERGE_INSERT_TIME)
c.Reset()
return err
}
func (c *SliceCache) Reset() {
for idx := range c.m {
c.m[idx] = nil
}
c.m = c.m[:0]
c.size = 0
}
func (c *SliceCache) IsEmpty() bool {
return len(c.m) == 0
}
func (c *SliceCache) Put(r *table.Row) {
c.m = append(c.m, r.G
|
etCsvStrings())
c.size += r.Size()
}
func (c *SliceCache) S
|
identifier_body
|
|
merge.go
|
const LoggerNameContentReader = "ETLContentReader"
const MAX_MERGE_INSERT_TIME = 10 * time.Second
const defaultMaxFileSize = 32 * mpool.MB
// ========================
// handle merge
// ========================
// Merge works like a compaction: it merges input files into one/two/... files.
// - NewMergeService init merge as service, with serviceInited to avoid multi init.
// - MergeTaskExecutorFactory drive by Cron TaskService.
// - NewMerge handle merge obj init.
// - Merge.Start() as service loop, trigger Merge.Main()
// - Merge.Main() handle main job.
// 1. foreach account, build `rootPath` with tuple {account, date, Table }
// 2. call Merge.doMergeFiles() with all files in `rootPath`, do merge job
//
// - Merge.doMergeFiles handles one job flow: read each file, merge in cache, write into file.
type Merge struct {
task task.Task // set by WithTask
table *table.Table // set by WithTable
fs fileservice.FileService // set by WithFileService
pathBuilder table.PathBuilder // const as table.NewAccountDatePathBuilder()
// MaxFileSize the total filesize to trigger doMergeFiles(),default: 32 MB
// Deprecated
MaxFileSize int64 // set by WithMaxFileSize
// MaxMergeJobs is the max number of merge jobs allowed to run concurrently, default: 1
MaxMergeJobs int64 // set by WithMaxMergeJobs
// logger
logger *log.MOLogger
// mp for TAEReader if needed.
mp *mpool.MPool
// runningJobs control task concurrency, init with MaxMergeJobs cnt
runningJobs chan struct{}
// flow ctrl
ctx context.Context
cancelFunc context.CancelFunc
}
type MergeOption func(*Merge)
func (opt MergeOption) Apply(m *Merge) {
opt(m)
}
func WithTask(task task.Task) MergeOption {
return MergeOption(func(m *Merge) {
m.task = task
})
}
func WithTable(tbl *table.Table) MergeOption {
return MergeOption(func(m *Merge) {
m.table = tbl
})
}
func WithFileService(fs fileservice.FileService) MergeOption {
return MergeOption(func(m *Merge) {
m.fs = fs
})
}
func WithMaxFileSize(filesize int64) MergeOption {
return MergeOption(func(m *Merge) {
m.MaxFileSize = filesize
})
}
func WithMaxMergeJobs(jobs int64) MergeOption {
return MergeOption(func(m *Merge) {
m.MaxMergeJobs = jobs
})
}
// serviceInited marks whether Merge has already been initialized as a service
var serviceInited uint32
func NewMergeService(ctx context.Context, opts ...MergeOption) (*Merge, bool, error) {
// fix multi-init in standalone
if !atomic.CompareAndSwapUint32(&serviceInited, 0, 1) {
return nil, true, nil
}
m, err := NewMerge(ctx, opts...)
return m, false, err
}
var poolMux sync.Mutex
var ETLMergeTaskPool *mpool.MPool
func getMpool() (*mpool.MPool, error) {
poolMux.Lock()
defer poolMux.Unlock()
if ETLMergeTaskPool == nil {
mp, err := mpool.NewMPool("etl_merge_task", 0, mpool.NoFixed)
if err != nil {
return nil, err
}
ETLMergeTaskPool = mp
}
return ETLMergeTaskPool, nil
}
func NewMerge(ctx context.Context, opts ...MergeOption) (*Merge, error) {
var err error
m := &Merge{
pathBuilder: table.NewAccountDatePathBuilder(),
MaxFileSize: defaultMaxFileSize,
MaxMergeJobs: 1,
logger: runtime.ProcessLevelRuntime().Logger().WithContext(ctx).Named(LoggerNameETLMerge),
}
m.ctx, m.cancelFunc = context.WithCancel(ctx)
for _, opt := range opts {
opt(m)
}
if m.mp, err = getMpool(); err != nil {
return nil, err
}
m.validate(ctx)
m.runningJobs = make(chan struct{}, m.MaxMergeJobs)
return m, nil
}
// validate checks for missing init elements and panics if any are missing.
func (m *Merge) validate(ctx context.Context) {
if m.table == nil {
panic(moerr.NewInternalError(ctx, "merge task missing input 'table'"))
}
if m.fs == nil {
panic(moerr.NewInternalError(ctx, "merge task missing input 'FileService'"))
}
}
// Start for service Loop
func (m *Merge) Start(ctx context.Context, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
m.Main(ctx)
case <-m.ctx.Done():
return
}
}
}
// Stop should be called only once
func (m *Merge) Stop() {
m.cancelFunc()
}
// =======================
// main logic
// =======================
type FileMeta struct {
FilePath string
FileSize int64
}
// Main lists all accounts and all dates which belong to m.table.GetName()
func (m *Merge) Main(ctx context.Context) error {
var files = make([]*FileMeta, 0, 1000)
var totalSize int64
accounts, err := m.fs.List(ctx, "/")
if err != nil {
return err
}
if len(accounts) == 0 {
m.logger.Info("merge find empty data")
return nil
}
m.logger.Debug(fmt.Sprintf("merge task with max file: %v MB", m.MaxFileSize/mpool.MB))
for _, account := range accounts {
if !account.IsDir {
m.logger.Warn(fmt.Sprintf("path is not dir: %s", account.Name))
continue
}
// build targetPath like "${account}/logs/*/*/*/${table_name}"
targetPath := m.pathBuilder.Build(account.Name, table.MergeLogTypeLogs, table.ETLParamTSAll, m.table.GetDatabase(), m.table.GetName())
// search all paths like:
// 0: ${account}/logs/2023/05/31/${table_name}
// 1: ${account}/logs/2023/06/01/${table_name}
// 2: ...
rootPaths, err := m.getAllTargetPath(ctx, targetPath)
if err != nil {
return err
}
// get all file entry
for _, rootPath := range rootPaths {
m.logger.Info("start merge", logutil.TableField(m.table.GetIdentify()), logutil.PathField(rootPath),
zap.String("metadata.ID", m.task.Metadata.ID))
fileEntrys, err := m.fs.List(ctx, rootPath)
if err != nil {
// fixme: m.logger.Error()
return err
}
files = files[:0]
totalSize = 0
for _, f := range fileEntrys {
filepath := path.Join(rootPath, f.Name)
totalSize += f.Size
files = append(files, &FileMeta{filepath, f.Size})
if totalSize > m.MaxFileSize {
if err = m.doMergeFiles(ctx, files); err != nil {
m.logger.Error(fmt.Sprintf("merge task meet error: %v", err))
}
files = files[:0]
totalSize = 0
}
}
if len(files) > 0 {
if err = m.doMergeFiles(ctx, files); err != nil {
m.logger.Warn(fmt.Sprintf("merge task meet error: %v", err))
}
}
}
}
return err
}
func (m *Merge) getAllTargetPath(ctx context.Context, filePath string) ([]string, error) {
sep := "/"
pathDir := strings.Split(filePath, sep)
l := list.New()
if pathDir[0] == "" {
l.PushBack(sep)
} else {
l.PushBack(pathDir[0])
}
for i := 1; i < len(pathDir); i++ {
length := l.Len()
for j := 0; j < length; j++ {
elem := l.Remove(l.Front())
prefix := elem.(string)
entries, err := m.fs.List(ctx, prefix)
if err != nil {
return nil, err
}
for _, entry := range entries {
if !entry.IsDir && i+1 != len(pathDir) {
continue
}
matched, err := path.Match(pathDir[i], entry.Name)
if err != nil {
return nil, err
}
if !matched {
continue
}
l.PushBack(path.Join(prefix, entry.Name))
}
}
}
length := l.Len()
fileList := make([]string, 0, length)
for idx := 0; idx < length; idx++ {
fileList = append(fileList, l.Remove(l.Front()).(string))
}
return fileList, nil
}
// doMergeFiles handles merge (read->write->delete) ops for all files in the target directory.
// Handle the files one
|
)
const LoggerNameETLMerge = "ETLMerge"
|
random_line_split
|
|
pg_v6.x.x.js
|
// `Function` types exist in this file because they come from another
// untyped npm lib.
/* Because flow <0.36 did not support `export type` very well,
// so copy the types from pg-pool
// https://github.com/flowtype/flow-typed/issues/16
// https://github.com/facebook/flow/commit/843389f89c69516506213e298096a14867a45061
const Pool = require('pg-pool');
import type {
PgPoolConfig,
PoolConnectCallback,
DoneCallback,
PoolClient
} from 'pg-pool';
*/
// ------------- copy from 'pg-pool' ------------>>
/*
* PgPoolConfig's properties are passed unchanged to both
* the node-postgres Client constructor and the node-pool constructor
* allowing you to fully configure the behavior of both
* node-pool (https://github.com/coopernurse/node-pool)
*/
declare type PgPoolConfig = {
// node-pool ----------------
name: string,
create: Function,
destroy: Function,
max: number,
min: number,
refreshIdle: boolean,
idleTimeoutMillis: number,
reapIntervalMillis: number,
returnToHead: boolean,
priorityRange: number,
validate: Function,
validateAsync: Function,
log: Function,
// node-postgres Client ------
//database user's name
user: string,
//name of database to connect
database: string,
//database user's password
password: string,
//database port
port: number,
// database host. defaults to localhost
host?: string,
// whether to try SSL/TLS to connect to server. default value: false
ssl?: boolean,
// name displayed in the pg_stat_activity view and included in CSV log entries
// default value: process.env.PGAPPNAME
application_name?: string,
// fallback value for the application_name configuration parameter
// default value: false
fallback_application_name?: string,
// pg-pool
Client: mixed,
Promise: mixed,
onCreate: Function,
};
/*
 * Does not extend Client, because some of Client's functions (e.g. connect and end)
 * should not be used by PoolClient (which is returned from Pool.connect).
*/
declare type PoolClient = {
release(error?: mixed): void,
query:
( (query: QueryConfig|string, callback?: QueryCallback) => Query ) &
( (text: string, values: Array<any>, callback?: QueryCallback) => Query ),
on:
((event: 'drain', listener: () => void) => events$EventEmitter )&
((event: 'error', listener: (err: PG_ERROR) => void) => events$EventEmitter )&
((event: 'notification', listener: (message: any) => void) => events$EventEmitter )&
((event: 'notice', listener: (message: any) => void) => events$EventEmitter )&
((event: 'end', listener: () => void) => events$EventEmitter ),
}
declare type PoolConnectCallback = (error: PG_ERROR|null,
client: PoolClient|null, done: DoneCallback) => void;
declare type DoneCallback = (error?: mixed) => void;
// https://github.com/facebook/flow/blob/master/lib/node.js#L581
// on() returns a events$EventEmitter
declare class Pool extends events$EventEmitter {
constructor(options: $Shape<PgPoolConfig>, Client?: Class<Client>): void;
connect(cb?: PoolConnectCallback): Promise<PoolClient>;
take(cb?: PoolConnectCallback): Promise<PoolClient>;
end(cb?: DoneCallback): Promise<void>;
// Note: unlike pg's Client, Pool.query returns a Promise,
// not the Thenable Query that Client returns.
// And there is a flow (<0.34) issue here: when using Array<mixed>,
// the overloading will not work
query:
( (query: QueryConfig|string, callback?: QueryCallback) => Promise<ResultSet> ) &
( (text: string, values: Array<any>, callback?: QueryCallback) => Promise<ResultSet>);
/* flow issue: https://github.com/facebook/flow/issues/2423
* When this fixed, this overloading can be used.
*/
/*
on:
((event: 'connect', listener: (client: PoolClient) => void) => events$EventEmitter )&
((event: 'acquire', listener: (client: PoolClient) => void) => events$EventEmitter )&
((event: "error", listener: (err: PG_ERROR) => void) => events$EventEmitter )&
((event: string, listener: Function) => events$EventEmitter);
*/
}
// <<------------- copy from 'pg-pool' ------------------------------
// error
declare type PG_ERROR = {
name: string,
length: number,
severity: string,
code: string,
detail: string|void,
hint: string|void,
position: string|void,
internalPosition: string|void,
internalQuery: string|void,
where: string|void,
schema: string|void,
table: string|void,
column: string|void,
dataType: string|void,
constraint: string|void,
file: string|void,
line: string|void,
routine: string|void
};
declare type ClientConfig = {
//database user's name
user?: string,
//name of database to connect
database?: string,
//database user's password
password?: string,
//database port
port?: number,
// database host. defaults to localhost
host?: string,
// whether to try SSL/TLS to connect to server. default value: false
ssl?: boolean,
// name displayed in the pg_stat_activity view and included in CSV log entries
// default value: process.env.PGAPPNAME
application_name?: string,
// fallback value for the application_name configuration parameter
// default value: false
fallback_application_name?: string,
}
declare type Row = {
[key: string]: mixed,
};
declare type ResultSet = {
command: string,
rowCount: number,
oid: number,
rows: Array<Row>,
};
declare type ResultBuilder = {
command: string,
rowCount: number,
oid: number,
rows: Array<Row>,
addRow: (row: Row) => void,
};
declare type QueryConfig = {
name?: string,
text: string,
values?: any[],
};
declare type QueryCallback = (err: PG_ERROR|null, result: ResultSet|void) => void;
declare type ClientConnectCallback = (err: PG_ERROR|null, client: Client|void) => void;
/*
* lib/query.js
 * Query extends EventEmitter in the source code,
 * but Flow has no multiple inheritance.
 * And in Flow, await is a `declare function $await<T>(p: Promise<T> | T): T;`,
 * which seemingly cannot resolve a Thenable's value type directly,
 * so `Query extends Promise` is used to make things work temporarily,
* like this:
* const q = client.query('select * from some');
* q.on('row',cb); // Event
* const result = await q; // or await
*
* ToDo: should find a better way.
*/
declare class Query extends Promise<ResultSet> {
then<U>(
onFulfill?: ?((value: ResultSet) => Promise<U> | U),
onReject?: ?((error: PG_ERROR) => Promise<U> | U)
): Promise<U>;
// Because then and catch return a Promise,
// .then.catch will lose catch's type information PG_ERROR.
catch<U>(
onReject?: ?((error: PG_ERROR) => Promise<U> | U)
): Promise<U>;
on :
((event: 'row', listener: (row: Row, result: ResultBuilder) => void) => events$EventEmitter )&
((event: 'end', listener: (result: ResultBuilder) => void) => events$EventEmitter )&
((event: 'error', listener: (err: PG_ERROR) => void) => events$EventEmitter );
}
/*
* lib/client.js
 * Note: does not extend EventEmitter, so that on() can return the This type.
 * Flow's EventEmitter forces on() to return an EventEmitter.
 * ToDo: not sure whether returning events$EventEmitter or this is more suitable in on():
 * returning this restricts the event to the given literal when chaining on().on().on(),
 * returning an events$EventEmitter falls back to the raw EventEmitter when chaining
*/
declare class Client {
constructor(config?: string | ClientConfig): void;
connect(callback?: ClientConnectCallback):void;
end(): void;
escapeLiteral(str: string): string;
escapeIdentifier(str: string): string;
query:
( (query: QueryConfig|string, callback?: QueryCallback) =>
|
random_line_split
|
||
domaintools.py
|
- mesh using marching cubes
- calculate volume and area of domains using mesh
'''
def __init__(self,coords,fields, density_field_index=0, density_threshold = 0.5):
''' Define and calculate useful variables for DomainAnalysis routines
'''
self.__coords = coords
self.__fields = fields
self.__density_field_index = density_field_index
self.__density_threshold = density_threshold
self.__ndim = len(coords.shape) - 1
self.__Nx = coords.shape[:self.__ndim]
self.__nfields = fields.shape[self.__ndim]
self.__M = np.prod(self.__Nx)
# assume box starts at (0,0,0) and ends at (lx,ly,lz)
if not np.all(self.__coords.ravel()[0:self.__ndim] == np.zeros(self.__ndim)):
raise ValueError("coords[0,0,0] != (0,0,0)")
if self.__ndim == 2:
#self.__boxl = tuple(self.__coords[-1,-1] + self.__coords[1,1])
#self.__boxh = tuple(np.array(self.__boxl)*0.5)
self.__gridspacing = (self.__coords[1,0][0], self.__coords[0,1][1])
self.__hvoxel = np.array([coords[1,0],coords[0,1]])
elif self.__ndim == 3:
#self.__boxl = tuple(self.__coords[-1,-1,-1] + self.__coords[1,1,1])
#self.__boxh = tuple(np.array(self.__boxl)*0.5)
self.__gridspacing = (self.__coords[1,0,0][0], self.__coords[0,1,0][1], self.__coords[0,0,1][2])
self.__hvoxel = np.array([coords[1,0,0],coords[0,1,0],coords[0,0,1]])
self.__hcell = self.__hvoxel * self.__Nx
self.__volvoxel = np.linalg.det(self.__hvoxel)
assert (np.abs(self.__volvoxel - np.linalg.det(self.__hcell) / self.__M) < 1e-5), "Volume of voxel != (Volume of cell / n voxels). This should be true!"
self.__boxl = tuple(np.sqrt(np.sum(np.square(self.__hcell),axis=1)))
self.__boxh = tuple(np.array(self.__boxl)*0.5)
# check if orthorhombic
self.__orthorhombic = True
hnorm = self.__hcell / np.linalg.norm(self.__hcell, axis=0)
if self.__ndim == 2 and np.dot(hnorm[0],hnorm[1]) != 0:
self.__orthorhombic = False
elif self.__ndim == 3 :
if np.dot(hnorm[0],[1,0,0]) != 0 or np.dot(hnorm[1],[0,1,0]) != 0 or np.dot(hnorm[2],[0,0,1]) != 0:
self.__orthorhombic = False
print("Warning! Cell is not orthorhombic. This code was written for orthorhombic cells and non-orthorhombic support is in progress. So be careful, and check that the code is doing what you think it should!")
# check if density field is reasonable between 0-1, if not throw warning
if self.__ndim == 2:
mindensity= np.min(self.__fields[:,:,self.__density_field_index])
maxdensity= np.max(self.__fields[:,:,self.__density_field_index])
elif self.__ndim == 3:
mindensity= np.min(self.__fields[:,:,:,self.__density_field_index])
maxdensity= np.max(self.__fields[:,:,:,self.__density_field_index])
if maxdensity > 1.0 or mindensity < 0.0:
print("Warning: The density field is not between 0-1 (min: {}, max: {}). The specified threshold of {} might be inappropriate.".format(mindensity,maxdensity,self.__density_threshold))
self.__needToIndexDomains = True
def setDensityThreshold(self, density_threshold):
self.__density_threshold = density_threshold
# if changing the Density threshold, will need to index domains again
self.__needToIndexDomains = True
def getNdim(self):
return self.__ndim
def getBoxl(self):
return self.__boxl
def getVolVoxel(self):
return self.__volvoxel
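# Editor's note: an illustrative usage sketch of the statistics routine defined
# below. It assumes the enclosing class is named DomainAnalysis (an assumption,
# not confirmed by this file) and uses a made-up single-blob density field; the
# array shapes follow the conventions described in __init__.
import numpy as np
n, box = (32, 32, 32), 10.0
axes = [np.arange(nx) * box / nx for nx in n]
coords = np.stack(np.meshgrid(*axes, indexing='ij'), axis=-1)   # (32,32,32,3), coords[0,0,0] = (0,0,0)
r2 = np.sum((coords - box / 2.0) ** 2, axis=-1)
fields = np.exp(-r2 / 4.0)[..., np.newaxis]                     # one smooth domain, values in (0, 1]
da = DomainAnalysis(coords, fields, density_field_index=0, density_threshold=0.5)
ndomains, com, area, vol, iq = da.getDomainStats(useMesh=True)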
def getDomainStats(self, useMesh=True, plotMesh=False,outputMesh=False,add_periodic_domains=False, applyPBC=True):
''' Calculate properties of each of the domains
return com, surface_area, volume, IQ
if useMesh == True, calculate a isosurface mesh to calculate the volumes and areas.
This is very accurate, but can have issues creating a good mesh if domains are poorly defined (as in certain CL systems)
(Specifically the issue is if two domains are only separated by a single grid point. When this happens,
the border around the domain belongs to two domains simultaneously and my current burning algorithm throws
an error. I use the border around a domain when applying PBC's to make sure a domain is continuous.
Eventually I might think of a better algorithm that will be robust to this edge case...
)
useMesh == False uses the less accurate approach of summing over the voxels to get the volume and area
the volume is still pretty accurate, the area...well, I'm not even going to implement it since in CL I only want volume
add periodic domains = true adds a center for mass at each of the locations for each periodic domain
'''
if useMesh and not self.__orthorhombic:
print("Warning: computing volume/area using mesh, but cell is not orthorhombic. This will lead to errors in the surface areas calculation of the domains")
# create boolean selector from density fields for region definition
if self.__ndim == 2:
isdomain_array = (self.__fields[:,:,self.__density_field_index] > self.__density_threshold)
elif self.__ndim == 3:
isdomain_array = (self.__fields[:,:,:,self.__density_field_index] > self.__density_threshold)
# FIXME, things break for non-cubic boxes. It must have to do with the vtk vs numpy indexing
# identify domains
if self.__needToIndexDomains:
self.__regionID = None # initially empty, created in computeRegionIDs
self.__ndomains = self.identifyAndIndexDomains(isdomain_array)
else:
print("Note: Using cached domain ID's")
#nstats = 1+ 3*getCenter + getArea + getVol + getIQ
#stats = np.zeros((self.__ndomains,nstats))
com = np.zeros((self.__ndomains, self.__ndim))
surface_area = np.zeros(self.__ndomains)
volume = np.zeros(self.__ndomains)
IQ = np.zeros(self.__ndomains)
#for each domain
for idomain in range(0,self.__ndomains):
# calc center of domain
com[idomain,:] = self.calcDomainCOM(idomain+1,units='coord')
if useMesh:
if self.__ndim == 2:
# mesh domain
contours,density_centered = self.meshSingleDomain(idomain+1,wrap_before_mesh=applyPBC)
assert (len(contours) == 1), "The contour should only be one curve, if not the area and volume calculations will be completely wrong!"
# get surface area (perimeter) and volume (area)
surface_area[idomain] = self.contour_perimeter(contours[0])
volume[idomain] = self.contour_area(contours[0])
if plotMesh:
# draw surface behind the mesh
self.plotContours2D(contours,filename="mesh.{}.png".format(idomain+1),surface=density_centered)
# dont draw surface behind the mesh
#self.plotContours2D(contours,filename="mesh.{}.png".format(idomain+1))
if self.__ndim == 3:
# mesh domain
verts, faces, normals, values = self.meshSingleDomain(idomain+1,wrap_before_mesh=applyPBC)
# get surface area, volume and isoperimetric quotient
surface_area[idomain] = measure.mesh_surface_area(verts, faces)
volume[idomain] = self.mesh_volume(verts,faces)
if plotMesh:
self.plotMesh3D(verts,faces, filename="mesh.{}.png".format(idomain+1))
if outputMesh:
self.writeMesh(verts,faces,fileprefix="mesh.{}.".format(idomain+1))
IQ[idomain] = self.calcIQ(surface_area[idomain], volume[idomain])
else:
surface_area[idomain] = -1.0 #FIXME surface_area is currently not calculated if no mesh
volume[idomain] = self
|
random_line_split
|
||
domaintools.py
|
entire domain is continuous (ie not split across boundaries)
2) Grab a little margin around each domain (the domain's "border") so that marching cubes can interpolate. The border is computed in identifyAndIndexDomains().
3) Mesh the domain using marching cubes
'''
isdomain = (self.__regionID == idomain)
#isborder = (self.__borderID == idomain)
isborder = np.zeros(self.__Nx,dtype=bool)
# convert to tuple to correctly set indices of isborder
isborder[tuple(self.__regionBorder[idomain-1])] = True
if self.__ndim == 2:
alldensity = self.__fields[:,:, self.__density_field_index]
elif self.__ndim == 3:
alldensity = self.__fields[:,:,:, self.__density_field_index]
# center box and properties around center of mass (so that domains don't cross pbc)
# np.roll is the key function here
# if domains percolate then this will break
com_box = self.calcDomainCOM(idomain,units='box')
com_coord = self.calcDomainCOM(idomain,units='coord')
#coords_tmp = np.copy(self.__coords)
for i in range(self.__ndim):
shift = int(0.5*self.__Nx[i] - com_box[i])
isdomain = np.roll(isdomain,shift,axis=i)
isborder = np.roll(isborder,shift,axis=i)
#coords_tmp = np.roll(coords_tmp,shift,axis=i)
alldensity = np.roll(alldensity,shift,axis=i)
# isolate the domain of interest
isdomain_or_isborder = isdomain + isborder # since both bool, sum is the union of the two fields
mydensity = np.zeros(self.__Nx)
mydensity[isdomain_or_isborder] = alldensity[isdomain_or_isborder]
#
#tmp =mydensity[:,:,:,np.newaxis]
#viz.writeVTK('test.vtk',self.__coords,tmp)
# plot for debugging
# import sys
# sys.path.append('/home/trent/Documents/college/polymers/ResearchTools/plot/')
# sys.path.append('../../')
# import PolyFTS_to_VTK
# AllCoords = np.reshape(coords_tmp,(self.__M, self.__ndim))
# AllCoords = AllCoords.T
# tmp = np.ravel(isdomain)
# tmp = np.resize(tmp,(1,len(tmp)))
# PolyFTS_to_VTK.writeVTK("isdomain.vtk", self.__Nx, True, self.__M, AllCoords,tmp)
# tmp = np.ravel(isborder)
# tmp = np.resize(tmp,(1,len(tmp)))
# PolyFTS_to_VTK.writeVTK("isborder.vtk", self.__Nx, True, self.__M, AllCoords,tmp)
# tmp = np.ravel(mydensity)
# tmp = np.resize(tmp,(1,len(tmp)))
# PolyFTS_to_VTK.writeVTK("mydensity.vtk", self.__Nx, True, self.__M, AllCoords,tmp)
# mesh! (using scikit-image)
if self.__ndim == 2:
# calculate contours in 'box' units
contours = measure.find_contours(mydensity, self.__density_threshold)
# convert 'box' units to 'coords' units (this is key for non-orthorhombic cells)
for i,c in enumerate(contours):
contours[i] = np.array((np.mat(self.__hvoxel).T * np.mat(c).T).T)
return contours,alldensity
elif self.__ndim == 3:
#from skimage import measure
#verts, faces, normals, values = measure.marching_cubes_lewiner(mydensity, self.__density_threshold, spacing = self.__gridspacing)
# do not use spacing=self.__gridspacing, let marching cubes calculate vertices in 'box' units (0,Nx)
verts, faces, normals, values = measure.marching_cubes_lewiner(mydensity, self.__density_threshold)
# convert 'box' units to 'coords' units (this is key for non-orthorhombic cells)
for i,v in enumerate(verts):
verts[i] = np.array((np.mat(self.__hvoxel).T * np.mat(v).T).T)
n = normals[i]
normals[i] = np.array((np.mat(self.__hvoxel).T * np.mat(n).T).T)
return verts, faces, normals, values, alldensity
else:
raise ValueError("Meshing makes no sense in 1 dimension!")
def contour_perimeter(self,contour):
'''calculate perimeter of contour by summing up the line-segment lengths
'''
assert (np.all(contour[0] == contour[-1])), "Contour must be closed! (1st point == last point)"
#TODO vectorize this for loop
p = 0.0
n=contour.shape[0]
for i in range(n-1):
v = contour[i+1] - contour[i]
p += np.sqrt(np.square(v).sum())
return p
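# Editor's note: a self-contained sketch of the vectorization the TODO above asks
# for; `square_contour` is a made-up example input, not data from this module.
import numpy as np
square_contour = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
segments = np.diff(square_contour, axis=0)                  # (n-1, 2) segment vectors
perimeter = np.sqrt(np.square(segments).sum(axis=1)).sum()  # == 4.0 for the unit square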
def contour_area(self,contour):
''' Calculate area of shape enclosed in contour
similar to calculating mesh volume
use trick from http://geomalgorithms.com/a01-_area.html
'''
assert (np.all(contour[0] == contour[-1])), "Contour must be closed! (1st point == last point)"
#TODO vectorize this for loop
area = 0.0
n=contour.shape[0]
for i in range(n-1):
area += np.cross(contour[i],contour[i+1])
return 0.5*np.abs(area)
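# Editor's note: the "trick" referenced above is the shoelace formula; in LaTeX,
# for a closed contour p_0, p_1, ..., p_{n-1} with p_n = p_0,
#   A = \frac{1}{2} \left| \sum_{i=0}^{n-1} (x_i y_{i+1} - x_{i+1} y_i) \right|
#     = \frac{1}{2} \left| \sum_{i=0}^{n-1} p_i \times p_{i+1} \right|
# which is exactly the cross-product sum accumulated in the loop above.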
def mesh_volume(self, verts, faces):
'''calculate volume of a mesh, using cross product trick
'''
actual_verts = verts[faces]
v0 = actual_verts[:,0,:]
v1 = actual_verts[:,1,:]
v2 = actual_verts[:,2,:]
# TODO: don't do the volume rescaling here; instead change the actual position of "verts" in getDomainStats by scaling each vert position by h (or something along these lines)
# introduce a factor to scale the volume if the box is non-orthorhombic
# this is because the mesh is generated assuming an orthorhombic voxel grid
if self.__orthorhombic:
factor=1.0
else:
factor = self.__volvoxel / np.prod(self.__gridspacing)
# 1/6 \sum v0 \cdot (v1 x v2)
return factor * 1.0/6.0 * np.abs( (v0*np.cross(v1,v2)).sum(axis=1).sum() )
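# Editor's note: the cross-product trick above is the divergence-theorem identity
# for a closed triangulated surface; in LaTeX, with triangle vertices v_0, v_1, v_2,
#   V = \frac{1}{6} \left| \sum_{\text{faces}} v_0 \cdot (v_1 \times v_2) \right|
# i.e. the sum of signed volumes of the tetrahedra formed by each face and the origin.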
def voxel_volume(self,idomain):
''' Get volume of idomain using voxels
'''
#v_voxel = np.prod(self.__gridspacing) # volume of single voxel
v_voxel = self.__volvoxel
n_voxel = np.sum(self.__regionID == idomain) # number of voxels in ith domain
return v_voxel*n_voxel
def writeContours(self, contours,filename):
''' write contours to data files
The format is built for using the gnuplot command "plot 'file' index 0 u 1:2"
Each individual contor is plotted in two x,y columns
Each contour is separated by two new lines (see gnuplot "index" for explanation)
'''
with open(filename,'wb') as f:
f.write(b"# NContours = %d\n" % len(contours))
for contour in contours:
#np.savetxt(f,contour,footer='\n',comments='')
np.savetxt(f,contour)
f.write(b"\n\n")
def plotContours2D(self, contours, surface=None, filename=None):
''' Plot a mesh from marching squares
'''
import matplotlib.pyplot as plt
# Display the image and plot all contours found
fig, ax = plt.subplots()
ax.set_aspect(1)
if surface is not None:
x = np.arange(self.__Nx[0])
y = np.arange(self.__Nx[1])
xx,yy = np.meshgrid(x,y)
# nice one-liner to rotate all of xx and yy using hvoxel
xxrot,yyrot = np.einsum('ji, mni -> jmn', self.__hvoxel.T, np.dstack([xx, yy]))
# using pcolormesh allows us to use non-orthorhombic boxes
im=ax.pcolormesh(xxrot,yyrot,surface.T)
fig.colorbar(im,ax=ax)
# imshow only worked for orthorhombic boxes
#ax.imshow(surface.T, interpolation='nearest')
for n, contour in enumerate(contours):
ax.plot(contour[:, 0], contour[:, 1], linewidth=2, color='k',ls='--',marker='o')
#ax.axis('image')
#ax.set_xticks([])
#ax.set_yticks([])
if not filename:
plt.show()
else:
|
plt.show()
plt.savefig(filename)
|
conditional_block
|
|
domaintools.py
|
volume = np.zeros(self.__ndomains)
IQ = np.zeros(self.__ndomains)
#for each domain
for idomain in range(0,self.__ndomains):
# calc center of domain
com[idomain,:] = self.calcDomainCOM(idomain+1,units='coord')
if useMesh:
if self.__ndim == 2:
# mesh domain
contours,density_centered = self.meshSingleDomain(idomain+1,wrap_before_mesh=applyPBC)
assert (len(contours) == 1), "The contour should only be one curve, if not the area and volume calculations will be completely wrong!"
# get surface area (perimeter) and volume (area)
surface_area[idomain] = self.contour_perimeter(contours[0])
volume[idomain] = self.contour_area(contours[0])
if plotMesh:
# draw surface behind the mesh
self.plotContours2D(contours,filename="mesh.{}.png".format(idomain+1),surface=density_centered)
# dont draw surface behind the mesh
#self.plotContours2D(contours,filename="mesh.{}.png".format(idomain+1))
if self.__ndim == 3:
# mesh domain
verts, faces, normals, values = self.meshSingleDomain(idomain+1,wrap_before_mesh=applyPBC)
# get surface area, volume and isoperimetric quotient
surface_area[idomain] = measure.mesh_surface_area(verts, faces)
volume[idomain] = self.mesh_volume(verts,faces)
if plotMesh:
self.plotMesh3D(verts,faces, filename="mesh.{}.png".format(idomain+1))
if outputMesh:
self.writeMesh(verts,faces,fileprefix="mesh.{}.".format(idomain+1))
IQ[idomain] = self.calcIQ(surface_area[idomain], volume[idomain])
else:
surface_area[idomain] = -1.0 #FIXME surface_area is currently not calculated if no mesh
volume[idomain] = self.voxel_volume(idomain+1) # get volume from voxels
IQ[idomain] = 0.0
if add_periodic_domains:
for idomain in range(1,self.__ndomains+1):
extracom = self.pbc_domain_locs(idomain,com[idomain-1])
if extracom:
com = np.concatenate((com,extracom))
extra_num = len(extracom)
IQ = np.concatenate((IQ,[IQ[idomain-1]]*extra_num))
surface_area = np.concatenate((surface_area,[surface_area[idomain-1]]*extra_num))
volume = np.concatenate((volume,[volume[idomain-1]]*extra_num))
return self.__ndomains, com, surface_area, volume, IQ
def
|
(self, area, vol):
'''returns isoperimetric coefficient. 1 for perfect circle or sphere, less for other shapes
note that in 2d "area" is actually perimeter, and "vol" is actually area
This difference didn't seem to warrant a completely different method though
'''
if self.__ndim == 2:
return 4.0*np.pi*vol / (area * area)
elif self.__ndim == 3:
return 36.0*np.pi * vol*vol / (area * area * area)
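# Editor's note: in LaTeX, the isoperimetric quotients computed above are
#   IQ_{2D} = \frac{4 \pi A}{P^{2}} \qquad IQ_{3D} = \frac{36 \pi V^{2}}{S^{3}}
# (P = perimeter, A = enclosed area, S = surface area, V = volume); both equal 1
# for a circle/sphere and are smaller for any other shape.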
def meshAllDomains(self,datafile=None,plotfile=None):
''' Mesh all domains using marching cubes or marching squares
Options:
- Save plot of mesh to plotfile if specified
- save mesh data to file if specified
'''
if self.__ndim == 2:
mydensity = self.__fields[:,:, self.__density_field_index]
# calculate contours in 'box' units
contours = measure.find_contours(mydensity, self.__density_threshold)
# convert 'box' units to 'coords' units (this is key for non-orthorhombic cells)
for i,c in enumerate(contours):
contours[i] = np.array((np.mat(self.__hvoxel).T * np.mat(c).T).T)
# this is old, only works for orthorhombic cells
# need to scale contours to be in terms of 'coords' dimensions
#for c in contours:
# c /= self.__Nx
# c *= self.__boxl
if datafile:
self.writeContours(contours,datafile)
if plotfile:
self.plotContours2D(contours,surface=mydensity,filename=plotfile)
return contours
elif self.__ndim == 3:
mydensity = self.__fields[:,:,:, self.__density_field_index]
#verts, faces, normals, values = measure.marching_cubes_lewiner(mydensity, self.__density_threshold, spacing = self.__gridspacing)
# do not use spacing=self.__gridspacing, let marching cubes calculate vertices in 'box' units (0,Nx)
verts, faces, normals, values = measure.marching_cubes_lewiner(mydensity, self.__density_threshold)
# convert 'box' units to 'coords' units (this is key for non-orthorhombic cells)
for i,v in enumerate(verts):
verts[i] = np.array((np.mat(self.__hvoxel).T * np.mat(v).T).T)
n = normals[i]
normals[i] = np.array((np.mat(self.__hvoxel).T * np.mat(n).T).T)
print('Warning: Rotating verts and normals from "box" units to "coords" units is untested! Check this before proceeding!')
pdb.set_trace()
if datafile:
raise NotImplementedError("Support for writing 3D mesh not implemented")
if plotfile:
self.plotMesh3D(verts,faces,filename=plotfile)
return verts,faces, normals, values
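# Editor's note: the method below re-centers a domain with np.roll so that it no
# longer straddles the periodic boundary before meshing. A tiny self-contained 1-D
# illustration of that step (the array and shift below are made up for the example):
import numpy as np
mask = np.array([1, 1, 0, 0, 0, 0, 0, 1])      # one "domain" wrapped across the edge
com_box = 0.0                                   # its centre of mass in box units (index 0)
shift = int(0.5 * mask.size - com_box)
print(np.roll(mask, shift))                     # -> [0 0 0 1 1 1 0 0], now contiguous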
def meshSingleDomain(self,idomain, wrap_before_mesh=True):
'''
Function to:
1) apply PBC to the domains so that an entire domain is continuous (ie not split across boundaries)
2) Grab a little margin around each domain (the domain's "border") so that marching cubes can interpolate. The border is computed in identifyAndIndexDomains().
3) Mesh the domain using marching cubes
'''
isdomain = (self.__regionID == idomain)
#isborder = (self.__borderID == idomain)
isborder = np.zeros(self.__Nx,dtype=bool)
# convert to tuple to correctly set indices of isborder
isborder[tuple(self.__regionBorder[idomain-1])] = True
if self.__ndim == 2:
alldensity = self.__fields[:,:, self.__density_field_index]
elif self.__ndim == 3:
alldensity = self.__fields[:,:,:, self.__density_field_index]
# center box and properties around center of mass (so that domains don't cross pbc)
# np.roll is the key function here
# if domains percolate then this will break
com_box = self.calcDomainCOM(idomain,units='box')
com_coord = self.calcDomainCOM(idomain,units='coord')
#coords_tmp = np.copy(self.__coords)
for i in range(self.__ndim):
shift = int(0.5*self.__Nx[i] - com_box[i])
isdomain = np.roll(isdomain,shift,axis=i)
isborder = np.roll(isborder,shift,axis=i)
#coords_tmp = np.roll(coords_tmp,shift,axis=i)
alldensity = np.roll(alldensity,shift,axis=i)
# isolate the domain of interest
isdomain_or_isborder = isdomain + isborder # since both bool, sum is the union of the two fields
mydensity = np.zeros(self.__Nx)
mydensity[isdomain_or_isborder] = alldensity[isdomain_or_isborder]
#
#tmp =mydensity[:,:,:,np.newaxis]
#viz.writeVTK('test.vtk',self.__coords,tmp)
# plot for debugging
# import sys
# sys.path.append('/home/trent/Documents/college/polymers/ResearchTools/plot/')
# sys.path.append('../../')
# import PolyFTS_to_VTK
# AllCoords = np.reshape(coords_tmp,(self.__M, self.__ndim))
# AllCoords = AllCoords.T
# tmp = np.ravel(isdomain)
# tmp = np.resize(tmp,(1,len(tmp)))
# PolyFTS_to_VTK.writeVTK("isdomain.vtk", self.__Nx, True, self.__M, AllCoords,tmp)
# tmp = np.ravel(isborder)
# tmp = np.resize(tmp,(1,len(tmp)))
# PolyFTS_to_VTK.writeVTK("isborder.vtk", self.__Nx, True, self.__M, AllCoords,tmp)
# tmp = np.ravel(mydensity)
# tmp = np.resize(tmp,(1,len(tmp)))
# PolyFTS_to_VTK.writeVTK("mydensity.vtk", self.__Nx, True, self.__M, AllCoords,tmp)
# mesh! (using scikit-image)
if self.__ndim ==
|
calcIQ
|
identifier_name
|
domaintools.py
|
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
ax.set_xlim(0, self.__boxl[0])
ax.set_ylim(0, self.__boxl[1])
ax.set_zlim(0, self.__boxl[2])
plt.tight_layout()
if not filename:
plt.show()
else:
plt.savefig(filename)
plt.close()
def writeMesh(self,verts,faces,fileprefix="mesh."):
'''save mesh to a file'''
np.savetxt(fileprefix + "verts.dat",verts,header='Autogenerated mesh file. Contains x y z positions of each vertex' )
np.savetxt(fileprefix + "faces.dat",faces, header='Autogenerated mesh file. Contains vertex indicies of each triangle in mesh')
def calcDomainCOM(self,idomain, units='box'):
''' given a domain index, apply PBC and return the center of mass
Can return result in 'box' units (0 to Nx) or in 'coord' units (0 to boxl)
'''
isdomain = (self.__regionID == idomain)
N = np.sum(isdomain)
indicies = np.transpose(np.nonzero(isdomain))
coords = np.zeros((N,self.__ndim))
#TODO could I do this without for loop? (will be faster)
for i in range(N):
index = tuple(indicies[i])
if units == "box":
coord = index + self.__image_flags[index] * self.__Nx
elif units == "coord":
coord = self.__coords[index] + self.__image_flags[index] * self.__boxl
else:
raise ValueError("Invalid units entry of \'%s\'" % units)
coords[i] = coord
# now average in order to get center of the domain (each point weighted evenly)
return np.average(coords,axis=0)
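# Editor's note: a hedged sketch of the loop-free version the TODO above suggests,
# using made-up 2-D inputs; `region_id` and `image_flags` play the roles of
# self.__regionID and self.__image_flags. An illustration only, not a tested drop-in.
import numpy as np
region_id = np.array([[1, 0], [1, 0]])                   # toy 2x2 grid, domain index 1
image_flags = np.zeros((2, 2, 2))                        # per-cell periodic image flags
idx = np.argwhere(region_id == 1)                        # (N, ndim) integer indices
unwrapped = idx + image_flags[region_id == 1] * np.array(region_id.shape)
com_box = unwrapped.mean(axis=0)                         # centre of mass in box units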
def identifyAndIndexDomains(self, isdomain_array):
''' This function populates the regionID member variable
if regionID == 0, it is the continuous domain
points with regionID == i, correspond to the ith domain
Also sets - image_flags (which PBC a domain belongs to) and
- isborder (whether a grid point is adjacent to a domain)
'''
# if regionID == -1, it has not been visited
self.__regionID = np.full(self.__Nx,-1,dtype=np.int32);
# image_flags are only for the domains themselves, the image flags of the border are not needed
self.__image_flags = np.zeros(list(self.__Nx) + [self.__ndim])
###self.__borderID = np.full(self.__Nx,0,dtype=np.int32);
self.__regionBorder = [[]]
region_number = 1;
#this is where the recursive magic happens
for i in np.ndindex(self.__Nx):
if (self.__regionID[i] == -1):
if (isdomain_array[i]):
current_image_flag = np.zeros(self.__ndim)
self.spread_region(i, region_number, isdomain_array,current_image_flag);
self.__regionBorder.append([])
region_number += 1;
else:
# note - don't assign borders here, this is accomplished inside of spread_region()
self.__regionID[i] = 0;
self.__image_flags[i]= np.zeros(self.__ndim)
# now cleaning up
nregions = region_number-1;
# remove last element from lists (should be empty)
assert (self.__regionBorder[-1] == [])
del self.__regionBorder[-1]
# check that lengths of region structs are correct
assert (len(self.__regionBorder) == nregions)
# convert border and imageflag lists to numpy arrays
for i in range(nregions):
self.__regionBorder[i] = np.array(self.__regionBorder[i]).transpose()
# change caching flag
self.__needToIndexDomains = False
return nregions
def spread_region(self, coord_center, region_number, isdomain_array,current_image_flag):
''' recursive function:
given a point, find the neighbors of that point,
for each neighbor, send back into function
'''
self.__regionID[coord_center] = region_number;
self.__image_flags[coord_center] = current_image_flag
neighbors,neigh_image_flags = self.getNeighbors(coord_center, current_image_flag);
for i in range(len(neighbors)):
neighbor = neighbors[i]
image_flag = tuple(neigh_image_flags[i])
if (self.__regionID[neighbor] == -1):
if (isdomain_array[neighbor]):
self.spread_region(neighbor, region_number, isdomain_array, image_flag);
else:
self.__regionID[neighbor] = 0;
if self.__regionID[neighbor] == 0:
# only append to list if neighbor isn't in there already
if neighbor not in self.__regionBorder[region_number-1]:
# must have neighbors that are domain (since spread region is only called
# if coord_center is a domain). Therefore, it's a border
self.__regionBorder[region_number-1].append(neighbor)
# set image flags of non-domain adjacent to domain according to the domain
# basically, I need the border to have the correct image flags
# NOTE: image flags of borders aren't used anymore
#self.__regionBorderImageFlags[region_number-1].append(image_flag)
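# Editor's note: spread_region above is recursive, which can hit Python's default
# recursion limit on large domains. A self-contained, stack-based sketch of the same
# flood fill (periodic neighbours, no image flags or borders) on a toy 2-D mask;
# note that non-domain cells stay at -1 here, whereas the class above sets them to 0.
import numpy as np
mask = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 0]], dtype=bool)
labels = np.full(mask.shape, -1, dtype=int)
region = 0
for start in zip(*np.nonzero(mask)):
    if labels[start] != -1:
        continue
    region += 1
    stack = [start]
    while stack:
        i, j = stack.pop()
        if labels[i, j] != -1:
            continue
        labels[i, j] = region
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ni, nj = (i + di) % mask.shape[0], (j + dj) % mask.shape[1]
            if mask[ni, nj] and labels[ni, nj] == -1:
                stack.append((ni, nj))
print(labels)   # connected True cells share a label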
def getNeighbors(self,coord_center,center_image_flag=[]):
''' given a coord (tuple), return
1) the neighbors of that coord (also tuple) AND
2) the image_flag (which PBC) that neighbor corresponds to
'''
# set default
if center_image_flag == []:
center_image_flag = np.zeros(self.__ndim)
neighbors = [];
neigh_image_flags = np.tile(center_image_flag, (2*self.__ndim,1))
for i in range(self.__ndim):
coord_neigh = np.copy(coord_center)
coord_neigh[i] -= 1;
self.applyPBC(coord_neigh, neigh_image_flags[2*i]);
neighbors.append(tuple(coord_neigh))
coord_neigh = np.copy(coord_center)
coord_neigh[i] += 1
self.applyPBC(coord_neigh,neigh_image_flags[2*i+1])
neighbors.append(tuple(coord_neigh))
return neighbors, neigh_image_flags
def applyPBC(self,coord,image_flag):
for i in range(self.__ndim):
if coord[i] >= self.__Nx[i]:
coord[i] = 0
image_flag[i] += 1
if coord[i] < 0:
coord[i] = self.__Nx[i] - 1
image_flag[i] -= 1
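# Editor's note: an equivalent, loop-free way to express the wrap above, shown on a
# made-up coordinate; divmod gives the image flag and the wrapped index in one step.
# (Illustration only; the in-place method above is what the rest of the class uses.)
import numpy as np
Nx = np.array([8, 8, 8])
coord = np.array([8, -1, 3])
extra_flag, wrapped = np.divmod(coord, Nx)   # extra_flag = [1, -1, 0], wrapped = [0, 7, 3]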
def pbc_domain_locs(self,idomain,local_com):
'''This function returns the locations of the other domains on the periodic boundary.
for example for a domain with its center on the corner of the box, it would return all
the other box corners'''
extra_com = []
domain = (self.__regionID == idomain)
local_flags = self.__image_flags[domain]
unique_flags = set([])
for i in range(np.shape(local_flags)[0]):
unique_flags.add(tuple(local_flags[i]))
unique_flags.discard(tuple(np.zeros(self.__ndim))) # remove the original (non-shifted) image so the com is not duplicated
for flag in unique_flags:
flag = np.array(flag)
new_com = -1*flag*self.__boxl+local_com
#find the location of the extra periodic com by adding the box length times the flag to the current com
extra_com.append(new_com)
num_extra = len(extra_com)
return extra_com
class DomainTracker:
def __init__(self, boxl, vol_threshold=0.2):
self.__boxl = boxl # stores max box position, (lower corner is at 0,0,0)
self.__boxh = 0.5*boxl
self.__ndim = len(boxl)
self.__vol_threshold = vol_threshold # volume threshold below which to ignore domains, percentage
self.__is_init_pos = False
#self.__msd = # stores average squared displacement (averaged over all micelles)
def setInitialPositions(self,ndomains, com):
''' Set initial positions of domains
'''
self.__ndomains = ndomains
self.__pos0 = np.copy(com) # initial position of each domain
self.__pos_prev = np.copy(com)
self.__imageflags = np.zeros((self.__ndomains,self.__ndim)) # which PBC image is the domain in (so that MSD can exceed the size of box)
self.__sqdisp = np.zeros(self.__ndomains) # stores squared displacement of each micelle
self.__is_init_pos = True
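# Editor's note: in LaTeX, the quantity returned by getMSD below is
#   \mathrm{MSD} = \frac{1}{N_\mathrm{domains}} \sum_i \left| \mathbf{r}_i(t) - \mathbf{r}_i(0) \right|^2
# i.e. the squared displacement of each tracked domain (unwrapped through the
# stored image flags) averaged over all domains at the current time.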
def getMSD(self):
|
''' Returns mean squared displacement (averaged over all micelles)
'''
assert(self.__is_init_pos)
return np.average(self.__sqdisp)
|
identifier_body
|
|
BackendMapping.ts
|
true,
draggable : true
};
// Team-related data
private teamSetting:any = [];
private teamData:any = [];
private teamMarker:any = [];
private teamRoot:any = [];
// Templates
private template = {
filterTeam: Cmn.dir + '/mus/filterTeam.mustache',
fukudashi : Cmn.dir + '/mus/fukidashi.mustache',
list: Cmn.dir + '/mus/backendList.mustache'
};
private category:any;
private resultList:any;
// Target selectors
private target = {
table: '#resultListBody',
result: '.resultList'
};
constructor() {
var _t = this,
div = $('#resultMap').get(0);
_t.areaMap = new google.maps.Map(div, _t.mapOption);
// Fetch the category data
$.ajax({
url: Cmn.dir + '/data/category.json',
dataType: 'json',
type: 'GET',
async: false,
success: (data)=>{
this.category = data;
}
});
this.markerBounds = new google.maps.LatLngBounds();
_t.getEntryData();
// Image click
$(document).on('click', '.jsClick-ModalImage', function(e){
e.preventDefault();
var file = $(this).data('image'),
w = Frame.getBrowserWidth(),
h = Frame.getBrowserHeight(),
tag = '<img src="' + Cmn.dir + '/uploads/' + file + '" alt="">';
// TODO: resize the image display area according to the browser size
$('#modal-contents').html(tag);
$('#modal').removeClass('hide');
});
// Close the modal
$('#modal-bg').on('click', ()=>{
$('#modal').addClass('hide');
$('#modal-contents').html('');
});
// Team filtering
$(document).on('change', '#filter-team', function(){
_t.resultList.removeClass('hide');
var selected = $(this).val();
if(selected === '0'){
// Show everything
_t.viewTeam = '';
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i];
obj.setVisible(true);
}
}else{
_t.viewTeam = selected;
// Hide list entries whose team does not match
_t.resultList.filter(function(index){
var self = $(this),
team = self.data('team');
if(team !== selected) self.addClass('hide');
});
// Hide map pins whose team does not match
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i],
team = obj.team;
if(team === selected){
obj.setVisible(true);
}else{
obj.setVisible(false);
}
}
}
});
// Button that shows each team's total entry count
$('#entryTotal-btn').on('click', (e)=>{
e.preventDefault();
$('#entryTotal-data').toggleClass('show');
});
$(document).on('click', '.jsClick-Move', function(e){
var self = $(this),
lat = self.data('lat'),
lng = self.data('lng'),
pos = new google.maps.LatLng(lat, lng);
_t.areaMap.setCenter(pos);
_t.areaMap.setZoom(18);
});
// Fetch data every minute (1000ms * 60s = 60000ms)
setInterval(()=> {
_t.getEntryData();
}, 60000);
// Update the displayed times every 10 seconds (1000ms * 10s = 10000ms)
setInterval(()=>{
if(this.isData === false) return false;
for (var i = 0, iLen = this.resultList.length; i < iLen; i++) {
var obj = $(this.resultList[i]),
created = obj.find('.created'),
__c = created.data('created'),
_c = this.getTimeDiff(__c);
created.text(_c);
}
}, 10000);
// Fetch deleted data every 5 minutes (1000ms * 60s * 5min = 300000ms)
setInterval(()=>{
$.ajax({
url: Cmn.dir + '/deleteData',
dataType: 'json',
type: 'GET',
success: (data)=>{
if(data.status === 'success'){
_t.resultList.filter(function(index){
var self = $(this),
data_id = String(self.data('id')),
isRemove = data.list.indexOf(data_id);
if(isRemove > -1){
// If deleted data exists
self.remove();
_t.resultList.splice(index, 1);
_t.marker[index].setMap(null);
}
});
}
},
error: ()=>{},
complete: ()=>{}
});
}, 300000); //300000
// Per-team entry counts
setInterval(()=>{
// Reset the totals first
for (var i = 0, iLen = this.teamSetting.length; i < iLen; i++) {
var obj = this.teamSetting[i];
obj.total = 0;
}
// Count the entries
for (var n = 0, nLen = this.teamData.length; n < nLen; n++) {
var obj1 = this.teamData[n],
isArr = this.teamSetting.filter((elem, index, arr)=>{
return (elem.name === obj1.team)
});
isArr[0].total++;
}
// Set the totals
var elem = '';
for (var j = 0, jLen = this.teamSetting.length; j < jLen; j++) {
var obj2 = this.teamSetting[j];
elem += '<li>' + obj2.name + ' : ' + obj2.total + '件</li>';
}
$('#entryTotal-data').html(elem);
}, 10000);
}
/**
* Fetch registered data starting from the current offset
*/
getEntryData() {
$.ajax({
url : Cmn.dir + '/allData?offset=' + this.offset,
dataType: 'json',
type : 'GET',
success : (data)=> {
switch (data.status) {
case 'success':
this.isData = true;
this.offset = data.count;
this.setTeamList(data.data);
break;
default:
break;
}
},
error : ()=> {
},
complete: ()=> {
}
|
/**
* Store the team data
* @param data
*/
setTeamList(data) {
var elem = '',
count = data.length;
$.get(this.template.filterTeam, (template)=> {
for (var i = 0, iLen = data.length; i < iLen; i++) {
var obj = data[i],
data_id = 0,
isArr = this.teamSetting.filter((elem, index, arr)=> { // check whether the team already exists in the array
return (elem.name === obj.team)
});
// If no matching team data exists, register the team info
if (isArr.length === 0) {
var len = this.teamSetting.length,
//color_code = Math.floor(Math.random() * 16777215).toString(16),
color_code = this.getRandomColor(this.teamSetting),
team = {
name : obj.team,
color: color_code,
id : len,
total: 0
};
this.teamSetting.push(team);
data_id = len;
// Insert into the <select> tag
elem += Mustache.render(template, obj);
} else {
data_id = isArr[0].id;
}
obj.team_id = data_id;
this.teamData.push(obj);
}
$('#filter-team').append(elem);
this.setMarker(count);
this.setList(count);
});
}
/**
* Display the registered data
* @param count
*/
setList(count){
var diff = this.teamData.length - count; // index of the first newly added record
$.get(this.template.list, (template)=>{
var elem = '';
for (var i = this.teamData.length - 1; i >= diff; i--) {
var obj = this.teamData[i],
visible = (this.viewTeam === obj.team || this.viewTeam === '') ? '' : ' hide';
obj.color = this.teamSetting[obj.team_id].color;
obj.category = this.category[obj.cat];
obj.cls = visible;
obj.created = this.getTimeDiff(obj.created_at);
elem += Mustache.render(template, obj);
}
$(this.target.table).prepend(elem);
this.resultList =
|
});
}
|
identifier_name
|
BackendMapping.ts
|
true,
draggable : true
};
// Team-related data
private teamSetting:any = [];
private teamData:any = [];
private teamMarker:any = [];
private teamRoot:any = [];
// Templates
private template = {
filterTeam: Cmn.dir + '/mus/filterTeam.mustache',
fukudashi : Cmn.dir + '/mus/fukidashi.mustache',
list: Cmn.dir + '/mus/backendList.mustache'
};
private category:any;
private resultList:any;
// Target selectors
private target = {
table: '#resultListBody',
result: '.resultList'
};
constructor() {
var _t = this,
div = $('#resultMap').get(0);
_t.areaMap = new google.maps.Map(div, _t.mapOption);
// Fetch the category data
$.ajax({
url: Cmn.dir + '/data/category.json',
dataType: 'json',
type: 'GET',
async: false,
success: (data)=>{
this.category = data;
}
});
this.markerBounds = new google.maps.LatLngBounds();
_t.getEntryData();
// Image click
$(document).on('click', '.jsClick-ModalImage', function(e){
e.preventDefault();
var file = $(this).data('image'),
w = Frame.getBrowserWidth(),
h = Frame.getBrowserHeight(),
tag = '<img src="' + Cmn.dir + '/uploads/' + file + '" alt="">';
// TODO: resize the image display area according to the browser size
$('#modal-contents').html(tag);
$('#modal').removeClass('hide');
});
// Close the modal
$('#modal-bg').on('click', ()=>{
$('#modal').addClass('hide');
$('#modal-contents').html('');
});
// Team filtering
$(document).on('change', '#filter-team', function(){
_t.resultList.removeClass('hide');
var selected = $(this).val();
if(selected === '0'){
// Show everything
_t.viewTeam = '';
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i];
obj.setVisible(true);
}
}else{
_t.viewTeam = selected;
// Hide list entries whose team does not match
_t.resultList.filter(function(index){
var self = $(this),
team = self.data('team');
if(team !== selected) self.addClass('hide');
});
// Hide map pins whose team does not match
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i],
team = obj.team;
if(team === selected){
obj.setVisible(true);
}else{
obj.setVisible(false);
}
}
}
});
// Button that shows each team's total entry count
$('#entryTotal-btn').on('click', (e)=>{
e.preventDefault();
$('#entryTotal-data').toggleClass('show');
});
$(document).on('click', '.jsClick-Move', function(e){
var self = $(this),
lat = self.data('lat'),
lng = self.data('lng'),
pos = new google.maps.LatLng(lat, lng);
_t.areaMap.setCenter(pos);
_t.areaMap.setZoom(18);
});
// Fetch data every minute (1000ms * 60s = 60000ms)
setInterval(()=> {
_t.getEntryData();
}, 60000);
// Update the displayed times every 10 seconds (1000ms * 10s = 10000ms)
setInterval(()=>{
if(this.isData === false) return false;
for (var i = 0, iLen = this.resultList.length; i < iLen; i++) {
var obj = $(this.resultList[i]),
created = obj.find('.created'),
__c = created.data('created'),
_c = this.getTimeDiff(__c);
created.text(_c);
}
}, 10000);
// Fetch deleted data every 5 minutes (1000ms * 60s * 5min = 300000ms)
setInterval(()=>{
$.ajax({
url: Cmn.dir + '/deleteData',
dataType: 'json',
type: 'GET',
success: (data)=>{
if(data.status === 'success'){
_t.resultList.filter(function(index){
var self = $(this),
data_id = String(self.data('id')),
isRemove = data.list.indexOf(data_id);
if(isRemove > -1){
// If deleted data exists
self.remove();
_t.resultList.splice(index, 1);
_t.marker[index].setMap(null);
}
});
}
},
error: ()=>{},
complete: ()=>{}
});
}, 300000); //300000
// Per-team entry counts
setInterval(()=>{
// Reset the totals first
for (var i = 0, iLen = this.teamSetting.length; i < iLen; i++) {
var obj = this.teamSetting[i];
obj.total = 0;
}
// Count the entries
for (var n = 0, nLen = this.teamData.length; n < nLen; n++) {
var obj1 = this.teamData[n],
isArr = this.teamSetting.filter((elem, index, arr)=>{
return (elem.name === obj1.team)
});
isArr[0].total++;
}
// Set the totals
var elem = '';
for (var j = 0, jLen = this.teamSetting.length; j < jLen; j++) {
var obj2 = this.teamSetting[j];
elem += '<li>' + obj2.name + ' : ' + obj2.total + '件</li>';
}
$('#entryTotal-data').html(elem);
}, 10000);
}
/**
* Fetch registered data starting from the current offset
*/
getEntryData() {
$.ajax({
url : Cmn.dir + '/allData?offset=' + this.offset,
dataType: 'json',
type : 'GET',
success : (data)=> {
switch (data.status) {
case 'success':
this.isData = true;
this.offset = data.count;
this.setTeamList(data.data);
break;
default:
break;
}
},
error : ()=> {
},
complete: ()=> {
}
});
}
/**
* Store the team data
* @param data
*/
setTeamList(data) {
var elem = '',
count = data.length;
$.get(this.template.filterTeam, (template)=> {
for (var i = 0, iLen = data.length; i < iLen; i++) {
var obj = data[i],
data_id = 0,
isArr = this.teamSetting.filter((elem, index, arr)=> { // check whether the team already exists in the array
return (elem.name === obj.team)
});
// If no matching team data exists, register the team info
if (isArr.length === 0) {
var len = this.teamSetting.length,
//color_code = Math.floor(Math.random() * 16777215).toString(16),
color_code = this.getRandomColor(this.teamSetting),
team = {
name : obj.team,
color: color_code,
id : len,
total: 0
};
this.teamSetting.push(team);
data_id = len;
// Insert into the <select> tag
elem += Mustache.render(template, obj);
} else {
data_id = isArr[0].id;
}
obj.team_id = data_id;
|
visible = (this.viewTeam === obj.team || this.viewTeam ==
= '') ? '' : ' hide';
obj.color = this.teamSetting[obj.team_id].color;
obj.category = this.category[obj.cat];
obj.cls = visible;
obj.created = this.getTimeDiff(obj.created_at);
elem += Mustache.render(template, obj);
}
$(this.target.table).prepend(elem);
this.resultList
|
this.teamData.push(obj);
}
$('#filter-team').append(elem);
this.setMarker(count);
this.setList(count);
});
}
/**
* Display the registered data
* @param count
*/
setList(count){
var diff = this.teamData.length - count; // index of the first newly added record
$.get(this.template.list, (template)=>{
var elem = '';
for (var i = this.teamData.length - 1; i >= diff; i--) {
var obj = this.teamData[i],
|
conditional_block
|
BackendMapping.ts
|
true,
draggable : true
};
// Team-related data
private teamSetting:any = [];
private teamData:any = [];
private teamMarker:any = [];
private teamRoot:any = [];
// Templates
private template = {
filterTeam: Cmn.dir + '/mus/filterTeam.mustache',
fukudashi : Cmn.dir + '/mus/fukidashi.mustache',
list: Cmn.dir + '/mus/backendList.mustache'
};
private category:any;
private resultList:any;
// Target selectors
private target = {
table: '#resultListBody',
result: '.resultList'
};
constructor() {
var _t = this,
div = $('#resultMap').get(0);
_t.areaMap = new google.maps.Map(div, _t.mapOption);
// Fetch the category data
$.ajax({
url: Cmn.dir + '/data/category.json',
dataType: 'json',
type: 'GET',
async: false,
success: (data)=>{
this.category = data;
}
});
this.markerBounds = new google.maps.LatLngBounds();
_t.getEntryData();
// Image click
$(document).on('click', '.jsClick-ModalImage', function(e){
e.preventDefault();
var file = $(this).data('image'),
w = Frame.getBrowserWidth(),
h = Frame.getBrowserHeight(),
tag = '<img src="' + Cmn.dir + '/uploads/' + file + '" alt="">';
// TODO: resize the image display area according to the browser size
$('#modal-contents').html(tag);
$('#modal').removeClass('hide');
});
// Close the modal
$('#modal-bg').on('click', ()=>{
$('#modal').addClass('hide');
$('#modal-contents').html('');
});
// Team filtering
$(document).on('change', '#filter-team', function(){
_t.resultList.removeClass('hide');
var selected = $(this).val();
if(selected === '0'){
// Show everything
_t.viewTeam = '';
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i];
obj.setVisible(true);
}
}else{
_t.viewTeam = selected;
// Hide list entries whose team does not match
_t.resultList.filter(function(index){
var self = $(this),
team = self.data('team');
if(team !== selected) self.addClass('hide');
});
// Hide map pins whose team does not match
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i],
team = obj.team;
if(team === selected){
obj.setVisible(true);
}else{
obj.setVisible(false);
}
}
}
});
// Button that shows each team's total entry count
$('#entryTotal-btn').on('click', (e)=>{
e.preventDefault();
$('#entryTotal-data').toggleClass('show');
});
$(document).on('click', '.jsClick-Move', function(e){
var self = $(this),
lat = self.data('lat'),
lng = self.data('lng'),
pos = new google.maps.LatLng(lat, lng);
_t.areaMap.setCenter(pos);
_t.areaMap.setZoom(18);
});
// Fetch data every minute (1000ms * 60s = 60000ms)
setInterval(()=> {
_t.getEntryData();
}, 60000);
// Update the displayed times every 10 seconds (1000ms * 10s = 10000ms)
setInterval(()=>{
if(this.isData === false) return false;
for (var i = 0, iLen = this.resultList.length; i < iLen; i++) {
var obj = $(this.resultList[i]),
created = obj.find('.created'),
__c = created.data('created'),
_c = this.getTimeDiff(__c);
created.text(_c);
}
}, 10000);
// Fetch deleted data every 5 minutes (1000ms * 60s * 5min = 300000ms)
setInterval(()=>{
$.ajax({
url: Cmn.dir + '/deleteData',
dataType: 'json',
type: 'GET',
success: (data)=>{
if(data.status === 'success'){
_t.resultList.filter(function(index){
var self = $(this),
data_id = String(self.data('id')),
isRemove = data.list.indexOf(data_id);
if(isRemove > -1){
// If deleted data exists
self.remove();
_t.resultList.splice(index, 1);
_t.marker[index].setMap(null);
}
});
}
},
error: ()=>{},
complete: ()=>{}
});
}, 300000); //300000
// Per-team entry counts
setInterval(()=>{
// Reset the totals first
for (var i = 0, iLen = this.teamSetting.length; i < iLen; i++) {
var obj = this.teamSetting[i];
obj.total = 0;
}
// Count the entries
for (var n = 0, nLen = this.teamData.length; n < nLen; n++) {
var obj1 = this.teamData[n],
isArr = this.teamSetting.filter((elem, index, arr)=>{
return (elem.name === obj1.team)
});
isArr[0].total++;
}
// Set the totals
var elem = '';
for (var j = 0, jLen = this.teamSetting.length; j < jLen; j++) {
var obj2 = this.teamSetting[j];
elem += '<li>' + obj2.name + ' : ' + obj2.total + '件</li>';
}
$('#entryTotal-data').html(elem);
}, 10000);
}
/**
* Fetch registered data starting from the current offset
*/
getEntryData() {
$.ajax({
url : Cmn.dir + '/allData?offset=' + this.offset,
dataType: 'json',
type : 'GET',
success : (data)=> {
switch (data.status) {
case 'success':
this.isData = true;
this.offset = data.count;
this.setTeamList(data.data);
break;
default:
break;
}
},
error : ()=> {
},
complete: ()=> {
|
}
/**
* Store the team data
* @param data
*/
setTeamList(data) {
var elem = '',
count = data.length;
$.get(this.template.filterTeam, (template)=> {
for (var i = 0, iLen = data.length; i < iLen; i++) {
var obj = data[i],
data_id = 0,
isArr = this.teamSetting.filter((elem, index, arr)=> { // check whether the team already exists in the array
return (elem.name === obj.team)
});
// If no matching team data exists, register the team info
if (isArr.length === 0) {
var len = this.teamSetting.length,
//color_code = Math.floor(Math.random() * 16777215).toString(16),
color_code = this.getRandomColor(this.teamSetting),
team = {
name : obj.team,
color: color_code,
id : len,
total: 0
};
this.teamSetting.push(team);
data_id = len;
// Insert into the <select> tag
elem += Mustache.render(template, obj);
} else {
data_id = isArr[0].id;
}
obj.team_id = data_id;
this.teamData.push(obj);
}
$('#filter-team').append(elem);
this.setMarker(count);
this.setList(count);
});
}
/**
* Display the registered data
* @param count
*/
setList(count){
var diff = this.teamData.length - count; // index of the first newly added record
$.get(this.template.list, (template)=>{
var elem = '';
for (var i = this.teamData.length - 1; i >= diff; i--) {
var obj = this.teamData[i],
visible = (this.viewTeam === obj.team || this.viewTeam === '') ? '' : ' hide';
obj.color = this.teamSetting[obj.team_id].color;
obj.category = this.category[obj.cat];
obj.cls = visible;
obj.created = this.getTimeDiff(obj.created_at);
elem += Mustache.render(template, obj);
}
$(this.target.table).prepend(elem);
this.resultList = $(
|
}
});
|
random_line_split
|
BackendMapping.ts
|
true,
draggable : true
};
// Team-related data
private teamSetting:any = [];
private teamData:any = [];
private teamMarker:any = [];
private teamRoot:any = [];
// Templates
private template = {
filterTeam: Cmn.dir + '/mus/filterTeam.mustache',
fukudashi : Cmn.dir + '/mus/fukidashi.mustache',
list: Cmn.dir + '/mus/backendList.mustache'
};
private category:any;
private resultList:any;
// Target selectors
private target = {
table: '#resultListBody',
result: '.resultList'
};
constructor() {
var _t = this,
div = $('#resultMap').get(0);
_t.areaMap = new google.maps.Map(div, _t.mapOption);
// Load the category data
$.ajax({
url: Cmn.dir + '/data/category.json',
dataType: 'json',
type: 'GET',
async: false,
success: (data)=>{
this.category = data;
}
});
this.markerBounds = new google.maps.LatLngBounds();
_t.getEntryData();
// Image click handler
$(document).on('click', '.jsClick-ModalImage', function(e){
e.preventDefault();
var file = $(this).data('image'),
w = Frame.getBrowserWidth(),
h = Frame.getBrowserHeight(),
tag = '<img src="' + Cmn.dir + '/uploads/' + file + '" alt="">';
// TODO: resize the image display area to fit the browser size
$('#modal-contents').html(tag);
$('#modal').removeClass('hide');
});
// Close the modal
$('#modal-bg').on('click', ()=>{
$('#modal').addClass('hide');
$('#modal-contents').html('');
});
// Team filtering
$(document).on('change', '#filter-team', function(){
_t.resultList.removeClass('hide');
var selected = $(this).val();
if(selected === '0'){
// Show all teams
_t.viewTeam = '';
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i];
obj.setVisible(true);
}
}else{
_t.viewTeam = selected;
// Hide list rows whose team does not match
_t.resultList.filter(function(index){
var self = $(this),
team = self.data('team');
if(team !== selected) self.addClass('hide');
});
// Hide map pins whose team does not match
for (var i = 0, iLen = _t.marker.length; i < iLen; i++) {
var obj = _t.marker[i],
team = obj.team;
if(team === selected){
obj.setVisible(true);
}else{
obj.setVisible(false);
}
}
}
});
// Button that toggles the per-team totals
$('#entryTotal-btn').on('click', (e)=>{
e.preventDefault();
$('#entryTotal-data').toggleClass('show');
});
$(document).on('click', '.jsClick-Move', function(e){
var self = $(this),
lat = self.data('lat'),
lng = self.data('lng'),
pos = new google.maps.LatLng(lat, lng);
_t.areaMap.setCenter(pos);
_t.areaMap.setZoom(18);
});
// Fetch new entries every minute (1000ms * 60s = 60000ms)
setInterval(()=> {
_t.getEntryData();
}, 60000);
// Refresh the relative timestamps every 10 seconds (1000ms * 10s = 10000ms)
setInterval(()=>{
if(this.isData === false) return false;
for (var i = 0, iLen = this.resultList.length; i < iLen; i++) {
var obj = $(this.resultList[i]),
created = obj.find('.created'),
__c = created.data('created'),
_c = this.getTimeDiff(__c);
created.text(_c);
}
}, 10000);
// Fetch deleted entries every 5 minutes (1000ms * 60s * 5min = 300000ms)
setInterval(()=>{
$.ajax({
url: Cmn.dir + '/deleteData',
dataType: 'json',
type: 'GET',
success: (data)=>{
if(data.status === 'success'){
_t.resultList.filter(function(index){
var self = $(this),
data_id = String(self.data('id')),
isRemove = data.list.indexOf(data_id);
if(isRemove > -1){
// This entry has been deleted on the server
self.remove();
_t.resultList.splice(index, 1);
_t.marker[index].setMap(null);
}
});
}
},
error: ()=>{},
complete: ()=>{}
});
}, 300000); //300000
// Per-team entry counts
setInterval(()=>{
// Reset the running totals first
for (var i = 0, iLen = this.teamSetting.length; i < iLen; i++) {
var obj = this.teamSetting[i];
obj.total = 0;
}
// Count entries per team
for (var n = 0, nLen = this.teamData.length; n < nLen; n++) {
var obj1 = this.teamData[n],
isArr = this.teamSetting.filter((elem, index, arr)=>{
return (elem.name === obj1.team)
});
isArr[0].total++;
}
// Render the totals
var elem = '';
for (var j = 0, jLen = this.teamSetting.length; j < jLen; j++) {
var obj2 = this.teamSetting[j];
elem += '<li>' + obj2.name + ' : ' + obj2.total + '件</li>';
}
$('#entryTotal-data').html(elem);
}, 10000);
}
/**
* Fetch registered entries from the current offset
*/
getEntryData() {
$.ajax({
url : Cmn.dir + '/allData?offset=' + this.offset,
dataType: 'json',
type : 'GET',
success : (data)=> {
switch (data.status) {
case 'success':
this.isData = true;
this.offset = data.count;
this.setTeamList(data.data);
break;
default:
break;
}
},
error : ()=> {
},
complete: ()=> {
}
});
}
/
|
f (isArr.length === 0) {
var len = this.teamSetting.length,
//color_code = Math.floor(Math.random() * 16777215).toString(16),
color_code = this.getRandomColor(this.teamSetting),
team = {
name : obj.team,
color: color_code,
id : len,
total: 0
};
this.teamSetting.push(team);
data_id = len;
// Append to the <select> element
elem += Mustache.render(template, obj);
} else {
data_id = isArr[0].id;
}
obj.team_id = data_id;
this.teamData.push(obj);
}
$('#filter-team').append(elem);
this.setMarker(count);
this.setList(count);
});
}
/**
* Render the registered entries
* @param count
*/
setList(count){
var diff = this.teamData.length - count; // count = number of newly added entries
$.get(this.template.list, (template)=>{
var elem = '';
for (var i = this.teamData.length - 1; i >= diff; i--) {
var obj = this.teamData[i],
visible = (this.viewTeam === obj.team || this.viewTeam === '') ? '' : ' hide';
obj.color = this.teamSetting[obj.team_id].color;
obj.category = this.category[obj.cat];
obj.cls = visible;
obj.created = this.getTimeDiff(obj.created_at);
elem += Mustache.render(template, obj);
}
$(this.target.table).prepend(elem);
this.resultList
|
**
* Store team data
* @param data
*/
setTeamList(data) {
var elem = '',
count = data.length;
$.get(this.template.filterTeam, (template)=> {
for (var i = 0, iLen = data.length; i < iLen; i++) {
var obj = data[i],
data_id = 0,
isArr = this.teamSetting.filter((elem, index, arr)=> { // check whether this team already exists in teamSetting
return (elem.name === obj.team)
});
// Register the team if it is not known yet
i
|
identifier_body
|
download_params_and_roslaunch_agent.py
|
ore
import boto3
import yaml
import rospy
from markov import utils_parse_model_metadata
from markov.utils import force_list
from markov.constants import DEFAULT_COLOR
from markov.architecture.constants import Input
from markov.utils import get_boto_config
from markov.log_handler.constants import (SIMAPP_EVENT_ERROR_CODE_400, SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_WORKER_EXCEPTION)
from markov.log_handler.logger import Logger
from markov.log_handler.exception_handler import log_and_exit
LOG = Logger(__name__, logging.INFO).get_logger()
# Pass a list with 2 values for CAR_COLOR, MODEL_S3_BUCKET, MODEL_S3_PREFIX, MODEL_METADATA_FILE_S3_KEY for multicar
CAR_COLOR_YAML_KEY = "CAR_COLOR"
RACE_TYPE_YAML_KEY = "RACE_TYPE"
HEAD_TO_MODEL_RACE_TYPE = "HEAD_TO_MODEL"
TIME_TRIAL_RACE_TYPE = "TIME_TRIAL"
MODEL_S3_BUCKET_YAML_KEY = "MODEL_S3_BUCKET"
MODEL_S3_PREFIX_YAML_KEY = "MODEL_S3_PREFIX"
MODEL_METADATA_FILE_S3_YAML_KEY = "MODEL_METADATA_FILE_S3_KEY"
# Amount of time to wait to guarantee that RoboMaker's network configuration is ready.
WAIT_FOR_ROBOMAKER_TIME = 10
def main():
""" Main function for downloading yaml params """
try:
# parse argument
s3_region = sys.argv[1]
s3_bucket = sys.argv[2]
s3_prefix = sys.argv[3]
s3_yaml_name = sys.argv[4]
launch_name = sys.argv[5]
# create boto3 session/client and download yaml/json file
session = boto3.session.Session()
s3_endpoint_url = os.environ.get("S3_ENDPOINT_URL", None)
if s3_endpoint_url is not None:
LOG.info('Endpoint URL {}'.format(s3_endpoint_url))
rospy.set_param('S3_ENDPOINT_URL', s3_endpoint_url)
else:
# create boto3 session/client and download yaml/json file
ec2_client = session.client('ec2', s3_region)
LOG.info('Checking internet connection...')
response = ec2_client.describe_vpcs()
if not response['Vpcs']:
log_and_exit("No VPC attached to instance", SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
LOG.info('Verified internet connection')
s3_client = session.client('s3', region_name=s3_region, endpoint_url=s3_endpoint_url, config=get_boto_config())
yaml_key = os.path.normpath(os.path.join(s3_prefix, s3_yaml_name))
local_yaml_path = os.path.abspath(os.path.join(os.getcwd(), s3_yaml_name))
s3_client.download_file(Bucket=s3_bucket, Key=yaml_key, Filename=local_yaml_path)
# Get values passed in yaml files. Default values are for backward compatibility and for single racecar racing
default_yaml_values = {RACE_TYPE_YAML_KEY: TIME_TRIAL_RACE_TYPE,
MODEL_S3_BUCKET_YAML_KEY: s3_bucket,
MODEL_S3_PREFIX_YAML_KEY: s3_prefix,
CAR_COLOR_YAML_KEY: DEFAULT_COLOR,
MODEL_METADATA_FILE_S3_YAML_KEY: None}
yaml_dict = get_yaml_dict(local_yaml_path)
yaml_values = get_yaml_values(yaml_dict, default_yaml_values)
# Forcing the yaml parameter to list
force_list_params = [MODEL_METADATA_FILE_S3_YAML_KEY, MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY,
CAR_COLOR_YAML_KEY]
for params in force_list_params:
yaml_values[params] = force_list(yaml_values[params])
# Populate the model_metadata_s3_key values to handle both training and evaluation for all race_formats
if None in yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY]:
# MODEL_METADATA_FILE_S3_KEY not passed as part of yaml file ==> This happens during evaluation
# Assume model_metadata.json is present in the s3_prefix/model/ folder
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY] = list()
for s3_prefix in yaml_values[MODEL_S3_PREFIX_YAML_KEY]:
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY].append(os.path.join(s3_prefix, 'model/model_metadata.json'))
# Set the multicar flag if this is a head-to-model race type
multicar = yaml_values[RACE_TYPE_YAML_KEY] == HEAD_TO_MODEL_RACE_TYPE
# Validate the yaml values
validate_yaml_values(yaml_values, multicar)
# List of racecar names that should include second camera while launching
racecars_with_stereo_cameras = list()
# List of racecar names that should include lidar while launching
racecars_with_lidars = list()
# List of SimApp versions
simapp_versions = list()
for agent_index, model_s3_bucket in enumerate(yaml_values[MODEL_S3_BUCKET_YAML_KEY]):
racecar_name = 'racecar_'+str(agent_index) if len(yaml_values[MODEL_S3_BUCKET_YAML_KEY]) > 1 else 'racecar'
# Make a local folder with the racecar name to download the model_metadata.json
if not os.path.exists(os.path.join(os.getcwd(), racecar_name)):
os.makedirs(os.path.join(os.getcwd(), racecar_name))
local_model_metadata_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), racecar_name),
'model_metadata.json'))
json_key = yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY][agent_index]
json_key = json_key.replace('s3://{}/'.format(model_s3_bucket), '')
s3_client.download_file(Bucket=model_s3_bucket, Key=json_key, Filename=local_model_metadata_path)
sensors, _, simapp_version = utils_parse_model_metadata.parse_model_metadata(local_model_metadata_path)
simapp_versions.append(simapp_version)
if Input.STEREO.value in sensors:
racecars_with_stereo_cameras.append(racecar_name)
if Input.LIDAR.value in sensors or Input.SECTOR_LIDAR.value in sensors:
racecars_with_lidars.append(racecar_name)
cmd = [''.join(("roslaunch deepracer_simulation_environment {} ".format(launch_name),
"local_yaml_path:={} ".format(local_yaml_path),
"racecars_with_stereo_cameras:={} ".format(','.join(racecars_with_stereo_cameras)),
"racecars_with_lidars:={} multicar:={} ".format(','.join(racecars_with_lidars), multicar),
"car_colors:={} simapp_versions:={}".format(','.join(yaml_values[CAR_COLOR_YAML_KEY]),
','.join(simapp_versions))))]
Popen(cmd, shell=True, executable="/bin/bash")
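# Illustrative note (not part of the original source): for a single-car time trial
# the assembled command looks roughly like
#   roslaunch deepracer_simulation_environment <launch_name> \
#       local_yaml_path:=/path/to/params.yaml racecars_with_stereo_cameras:= \
#       racecars_with_lidars:= multicar:=False car_colors:=Black \
#       simapp_versions:=<version>
# where the empty sensor lists, the colour and the paths are placeholders.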
except botocore.exceptions.ClientError as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except botocore.exceptions.EndpointConnectionError:
log_and_exit("No Internet connection or s3 service unavailable",
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
except Exception as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def
|
(yaml_values, multicar):
""" Validate that the parameter provided in the yaml file for configuration is correct.
Some of the params requires list of two values. This is mostly checked as part of this function
Arguments:
yaml_values {[dict]} -- [All the yaml parameter as a list]
multicar {[bool]} -- [Is multicar enabled (True), else False]
Raises:
Exception -- [Exception]
"""
# Verify if all the yaml keys required for launching models have same number of values
same_len_values = [MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY, MODEL_METADATA_FILE_S3_YAML_KEY,
CAR_COLOR_YAML_KEY]
LOG.info(yaml_values)
if not all(map(lambda param: len(yaml_values[param]) == len(yaml_values[same_len_values[0]]), same_len_values)):
raise Exception('Incorrect number of values for these yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 2 values for multi car racing
if multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 2:
raise Exception('Incorrect number of values for multicar racing yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 1 value for single car racing
if not multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 1:
raise Exception('Incorrect number of values for single car racing yaml parameters {}'.format(same_len_values))
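# Illustrative sketch (not part of the original source): how validate_yaml_values
# behaves for a hypothetical two-car configuration; every value below is made up.
def _demo_validate_yaml_values():
    sample = {MODEL_S3_BUCKET_YAML_KEY: ['demo-bucket-a', 'demo-bucket-b'],
              MODEL_S3_PREFIX_YAML_KEY: ['demo-prefix-a', 'demo-prefix-b'],
              MODEL_METADATA_FILE_S3_YAML_KEY: ['demo-prefix-a/model/model_metadata.json',
                                                'demo-prefix-b/model/model_metadata.json'],
              CAR_COLOR_YAML_KEY: ['Black', 'Grey']}
    # Two values per key and multicar=True -> passes.
    validate_yaml_values(sample, multicar=True)
    # The same dict with multicar=False would raise, because single-car racing
    # expects exactly one MODEL_S3_PREFIX value.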
def get_yaml_dict(local_yaml_path):
'''local_yaml_path - path to the local yaml file
'''
with open(local_yaml_path, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
log_and_exit("yaml read error:
|
validate_yaml_values
|
identifier_name
|
download_params_and_roslaunch_agent.py
|
markov.utils import force_list
from markov.constants import DEFAULT_COLOR
from markov.architecture.constants import Input
from markov.utils import get_boto_config
from markov.log_handler.constants import (SIMAPP_EVENT_ERROR_CODE_400, SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_WORKER_EXCEPTION)
from markov.log_handler.logger import Logger
from markov.log_handler.exception_handler import log_and_exit
LOG = Logger(__name__, logging.INFO).get_logger()
# Pass a list with 2 values for CAR_COLOR, MODEL_S3_BUCKET, MODEL_S3_PREFIX, MODEL_METADATA_FILE_S3_KEY for multicar
CAR_COLOR_YAML_KEY = "CAR_COLOR"
RACE_TYPE_YAML_KEY = "RACE_TYPE"
HEAD_TO_MODEL_RACE_TYPE = "HEAD_TO_MODEL"
TIME_TRIAL_RACE_TYPE = "TIME_TRIAL"
MODEL_S3_BUCKET_YAML_KEY = "MODEL_S3_BUCKET"
MODEL_S3_PREFIX_YAML_KEY = "MODEL_S3_PREFIX"
MODEL_METADATA_FILE_S3_YAML_KEY = "MODEL_METADATA_FILE_S3_KEY"
# Amount of time to wait to guarantee that RoboMaker's network configuration is ready.
WAIT_FOR_ROBOMAKER_TIME = 10
def main():
""" Main function for downloading yaml params """
try:
# parse argument
s3_region = sys.argv[1]
s3_bucket = sys.argv[2]
s3_prefix = sys.argv[3]
s3_yaml_name = sys.argv[4]
launch_name = sys.argv[5]
# create boto3 session/client and download yaml/json file
session = boto3.session.Session()
s3_endpoint_url = os.environ.get("S3_ENDPOINT_URL", None)
if s3_endpoint_url is not None:
LOG.info('Endpoint URL {}'.format(s3_endpoint_url))
rospy.set_param('S3_ENDPOINT_URL', s3_endpoint_url)
else:
# create boto3 session/client and download yaml/json file
ec2_client = session.client('ec2', s3_region)
LOG.info('Checking internet connection...')
response = ec2_client.describe_vpcs()
if not response['Vpcs']:
log_and_exit("No VPC attached to instance", SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
LOG.info('Verified internet connection')
s3_client = session.client('s3', region_name=s3_region, endpoint_url=s3_endpoint_url, config=get_boto_config())
yaml_key = os.path.normpath(os.path.join(s3_prefix, s3_yaml_name))
local_yaml_path = os.path.abspath(os.path.join(os.getcwd(), s3_yaml_name))
s3_client.download_file(Bucket=s3_bucket, Key=yaml_key, Filename=local_yaml_path)
# Get values passed in yaml files. Default values are for backward compatibility and for single racecar racing
default_yaml_values = {RACE_TYPE_YAML_KEY: TIME_TRIAL_RACE_TYPE,
MODEL_S3_BUCKET_YAML_KEY: s3_bucket,
MODEL_S3_PREFIX_YAML_KEY: s3_prefix,
CAR_COLOR_YAML_KEY: DEFAULT_COLOR,
MODEL_METADATA_FILE_S3_YAML_KEY: None}
yaml_dict = get_yaml_dict(local_yaml_path)
yaml_values = get_yaml_values(yaml_dict, default_yaml_values)
# Forcing the yaml parameter to list
force_list_params = [MODEL_METADATA_FILE_S3_YAML_KEY, MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY,
CAR_COLOR_YAML_KEY]
for params in force_list_params:
yaml_values[params] = force_list(yaml_values[params])
# Populate the model_metadata_s3_key values to handle both training and evaluation for all race_formats
if None in yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY]:
# MODEL_METADATA_FILE_S3_KEY not passed as part of yaml file ==> This happens during evaluation
# Assume model_metadata.json is present in the s3_prefix/model/ folder
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY] = list()
for s3_prefix in yaml_values[MODEL_S3_PREFIX_YAML_KEY]:
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY].append(os.path.join(s3_prefix, 'model/model_metadata.json'))
# Set the multicar flag if this is a head-to-model race type
multicar = yaml_values[RACE_TYPE_YAML_KEY] == HEAD_TO_MODEL_RACE_TYPE
# Validate the yaml values
validate_yaml_values(yaml_values, multicar)
# List of racecar names that should include second camera while launching
racecars_with_stereo_cameras = list()
# List of racecar names that should include lidar while launching
racecars_with_lidars = list()
# List of SimApp versions
simapp_versions = list()
for agent_index, model_s3_bucket in enumerate(yaml_values[MODEL_S3_BUCKET_YAML_KEY]):
racecar_name = 'racecar_'+str(agent_index) if len(yaml_values[MODEL_S3_BUCKET_YAML_KEY]) > 1 else 'racecar'
# Make a local folder with the racecar name to download the model_metadata.json
if not os.path.exists(os.path.join(os.getcwd(), racecar_name)):
os.makedirs(os.path.join(os.getcwd(), racecar_name))
local_model_metadata_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), racecar_name),
'model_metadata.json'))
json_key = yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY][agent_index]
json_key = json_key.replace('s3://{}/'.format(model_s3_bucket), '')
s3_client.download_file(Bucket=model_s3_bucket, Key=json_key, Filename=local_model_metadata_path)
sensors, _, simapp_version = utils_parse_model_metadata.parse_model_metadata(local_model_metadata_path)
simapp_versions.append(simapp_version)
if Input.STEREO.value in sensors:
racecars_with_stereo_cameras.append(racecar_name)
if Input.LIDAR.value in sensors or Input.SECTOR_LIDAR.value in sensors:
racecars_with_lidars.append(racecar_name)
cmd = [''.join(("roslaunch deepracer_simulation_environment {} ".format(launch_name),
"local_yaml_path:={} ".format(local_yaml_path),
"racecars_with_stereo_cameras:={} ".format(','.join(racecars_with_stereo_cameras)),
"racecars_with_lidars:={} multicar:={} ".format(','.join(racecars_with_lidars), multicar),
"car_colors:={} simapp_versions:={}".format(','.join(yaml_values[CAR_COLOR_YAML_KEY]),
','.join(simapp_versions))))]
Popen(cmd, shell=True, executable="/bin/bash")
except botocore.exceptions.ClientError as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except botocore.exceptions.EndpointConnectionError:
log_and_exit("No Internet connection or s3 service unavailable",
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
except Exception as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def validate_yaml_values(yaml_values, multicar):
""" Validate that the parameter provided in the yaml file for configuration is correct.
Some of the params requires list of two values. This is mostly checked as part of this function
Arguments:
yaml_values {[dict]} -- [All the yaml parameter as a list]
multicar {[bool]} -- [Is multicar enabled (True), else False]
Raises:
Exception -- [Exception]
"""
# Verify if all the yaml keys required for launching models have same number of values
same_len_values = [MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY, MODEL_METADATA_FILE_S3_YAML_KEY,
CAR_COLOR_YAML_KEY]
LOG.info(yaml_values)
if not all(map(lambda param: len(yaml_values[param]) == len(yaml_values[same_len_values[0]]), same_len_values)):
raise Exception('Incorrect number of values for these yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 2 values for multi car racing
if multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 2:
raise Exception('Incorrect number of values for multicar racing yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 1 value for single car racing
if not multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 1:
raise Exception('Incorrect number of values for single car racing yaml parameters {}'.format(same_len_values))
def get_yaml_dict(local_yaml_path):
|
'''local_yaml_path - path to the local yaml file
'''
with open(local_yaml_path, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
log_and_exit("yaml read error: {}".format(exc),
SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
|
identifier_body
|
|
download_params_and_roslaunch_agent.py
|
ore
import boto3
import yaml
import rospy
from markov import utils_parse_model_metadata
from markov.utils import force_list
from markov.constants import DEFAULT_COLOR
from markov.architecture.constants import Input
from markov.utils import get_boto_config
from markov.log_handler.constants import (SIMAPP_EVENT_ERROR_CODE_400, SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_WORKER_EXCEPTION)
from markov.log_handler.logger import Logger
from markov.log_handler.exception_handler import log_and_exit
LOG = Logger(__name__, logging.INFO).get_logger()
# Pass a list with 2 values for CAR_COLOR, MODEL_S3_BUCKET, MODEL_S3_PREFIX, MODEL_METADATA_FILE_S3_KEY for multicar
CAR_COLOR_YAML_KEY = "CAR_COLOR"
RACE_TYPE_YAML_KEY = "RACE_TYPE"
HEAD_TO_MODEL_RACE_TYPE = "HEAD_TO_MODEL"
TIME_TRIAL_RACE_TYPE = "TIME_TRIAL"
MODEL_S3_BUCKET_YAML_KEY = "MODEL_S3_BUCKET"
MODEL_S3_PREFIX_YAML_KEY = "MODEL_S3_PREFIX"
MODEL_METADATA_FILE_S3_YAML_KEY = "MODEL_METADATA_FILE_S3_KEY"
# Amount of time to wait to guarantee that RoboMaker's network configuration is ready.
WAIT_FOR_ROBOMAKER_TIME = 10
def main():
""" Main function for downloading yaml params """
try:
# parse argument
s3_region = sys.argv[1]
s3_bucket = sys.argv[2]
s3_prefix = sys.argv[3]
s3_yaml_name = sys.argv[4]
launch_name = sys.argv[5]
# create boto3 session/client and download yaml/json file
session = boto3.session.Session()
s3_endpoint_url = os.environ.get("S3_ENDPOINT_URL", None)
if s3_endpoint_url is not None:
LOG.info('Endpoint URL {}'.format(s3_endpoint_url))
rospy.set_param('S3_ENDPOINT_URL', s3_endpoint_url)
else:
# create boto3 session/client and download yaml/json file
ec2_client = session.client('ec2', s3_region)
LOG.info('Checking internet connection...')
response = ec2_client.describe_vpcs()
if not response['Vpcs']:
log_and_exit("No VPC attached to instance", SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
LOG.info('Verified internet connection')
s3_client = session.client('s3', region_name=s3_region, endpoint_url=s3_endpoint_url, config=get_boto_config())
yaml_key = os.path.normpath(os.path.join(s3_prefix, s3_yaml_name))
local_yaml_path = os.path.abspath(os.path.join(os.getcwd(), s3_yaml_name))
s3_client.download_file(Bucket=s3_bucket, Key=yaml_key, Filename=local_yaml_path)
# Get values passed in yaml files. Default values are for backward compatibility and for single racecar racing
default_yaml_values = {RACE_TYPE_YAML_KEY: TIME_TRIAL_RACE_TYPE,
MODEL_S3_BUCKET_YAML_KEY: s3_bucket,
MODEL_S3_PREFIX_YAML_KEY: s3_prefix,
CAR_COLOR_YAML_KEY: DEFAULT_COLOR,
MODEL_METADATA_FILE_S3_YAML_KEY: None}
yaml_dict = get_yaml_dict(local_yaml_path)
yaml_values = get_yaml_values(yaml_dict, default_yaml_values)
# Forcing the yaml parameter to list
force_list_params = [MODEL_METADATA_FILE_S3_YAML_KEY, MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY,
CAR_COLOR_YAML_KEY]
for params in force_list_params:
yaml_values[params] = force_list(yaml_values[params])
# Populate the model_metadata_s3_key values to handle both training and evaluation for all race_formats
if None in yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY]:
# MODEL_METADATA_FILE_S3_KEY not passed as part of yaml file ==> This happens during evaluation
# Assume model_metadata.json is present in the s3_prefix/model/ folder
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY] = list()
for s3_prefix in yaml_values[MODEL_S3_PREFIX_YAML_KEY]:
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY].append(os.path.join(s3_prefix, 'model/model_metadata.json'))
# Set the multicar flag if this is a head-to-model race type
multicar = yaml_values[RACE_TYPE_YAML_KEY] == HEAD_TO_MODEL_RACE_TYPE
# Validate the yaml values
validate_yaml_values(yaml_values, multicar)
# List of racecar names that should include second camera while launching
racecars_with_stereo_cameras = list()
# List of racecar names that should include lidar while launching
racecars_with_lidars = list()
# List of SimApp versions
simapp_versions = list()
for agent_index, model_s3_bucket in enumerate(yaml_values[MODEL_S3_BUCKET_YAML_KEY]):
racecar_name = 'racecar_'+str(agent_index) if len(yaml_values[MODEL_S3_BUCKET_YAML_KEY]) > 1 else 'racecar'
# Make a local folder with the racecar name to download the model_metadata.json
if not os.path.exists(os.path.join(os.getcwd(), racecar_name)):
os.makedirs(os.path.join(os.getcwd(), racecar_name))
local_model_metadata_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), racecar_name),
'model_metadata.json'))
json_key = yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY][agent_index]
json_key = json_key.replace('s3://{}/'.format(model_s3_bucket), '')
s3_client.download_file(Bucket=model_s3_bucket, Key=json_key, Filename=local_model_metadata_path)
sensors, _, simapp_version = utils_parse_model_metadata.parse_model_metadata(local_model_metadata_path)
simapp_versions.append(simapp_version)
if Input.STEREO.value in sensors:
racecars_with_stereo_cameras.append(racecar_name)
if Input.LIDAR.value in sensors or Input.SECTOR_LIDAR.value in sensors:
|
cmd = [''.join(("roslaunch deepracer_simulation_environment {} ".format(launch_name),
"local_yaml_path:={} ".format(local_yaml_path),
"racecars_with_stereo_cameras:={} ".format(','.join(racecars_with_stereo_cameras)),
"racecars_with_lidars:={} multicar:={} ".format(','.join(racecars_with_lidars), multicar),
"car_colors:={} simapp_versions:={}".format(','.join(yaml_values[CAR_COLOR_YAML_KEY]),
','.join(simapp_versions))))]
Popen(cmd, shell=True, executable="/bin/bash")
except botocore.exceptions.ClientError as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except botocore.exceptions.EndpointConnectionError:
log_and_exit("No Internet connection or s3 service unavailable",
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
except Exception as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def validate_yaml_values(yaml_values, multicar):
""" Validate that the parameter provided in the yaml file for configuration is correct.
Some of the params requires list of two values. This is mostly checked as part of this function
Arguments:
yaml_values {[dict]} -- [All the yaml parameter as a list]
multicar {[bool]} -- [Is multicar enabled (True), else False]
Raises:
Exception -- [Exception]
"""
# Verify if all the yaml keys required for launching models have same number of values
same_len_values = [MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY, MODEL_METADATA_FILE_S3_YAML_KEY,
CAR_COLOR_YAML_KEY]
LOG.info(yaml_values)
if not all(map(lambda param: len(yaml_values[param]) == len(yaml_values[same_len_values[0]]), same_len_values)):
raise Exception('Incorrect number of values for these yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 2 values for multi car racing
if multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 2:
raise Exception('Incorrect number of values for multicar racing yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 1 value for single car racing
if not multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 1:
raise Exception('Incorrect number of values for single car racing yaml parameters {}'.format(same_len_values))
def get_yaml_dict(local_yaml_path):
'''local_yaml_path - path to the local yaml file
'''
with open(local_yaml_path, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
log_and_exit("yaml read error:
|
racecars_with_lidars.append(racecar_name)
|
conditional_block
|
download_params_and_roslaunch_agent.py
|
APP_SIMULATION_WORKER_EXCEPTION)
from markov.log_handler.logger import Logger
from markov.log_handler.exception_handler import log_and_exit
LOG = Logger(__name__, logging.INFO).get_logger()
# Pass a list with 2 values for CAR_COLOR, MODEL_S3_BUCKET, MODEL_S3_PREFIX, MODEL_METADATA_FILE_S3_KEY for multicar
CAR_COLOR_YAML_KEY = "CAR_COLOR"
RACE_TYPE_YAML_KEY = "RACE_TYPE"
HEAD_TO_MODEL_RACE_TYPE = "HEAD_TO_MODEL"
TIME_TRIAL_RACE_TYPE = "TIME_TRIAL"
MODEL_S3_BUCKET_YAML_KEY = "MODEL_S3_BUCKET"
MODEL_S3_PREFIX_YAML_KEY = "MODEL_S3_PREFIX"
MODEL_METADATA_FILE_S3_YAML_KEY = "MODEL_METADATA_FILE_S3_KEY"
# Amount of time to wait to guarantee that RoboMaker's network configuration is ready.
WAIT_FOR_ROBOMAKER_TIME = 10
def main():
""" Main function for downloading yaml params """
try:
# parse argument
s3_region = sys.argv[1]
s3_bucket = sys.argv[2]
s3_prefix = sys.argv[3]
s3_yaml_name = sys.argv[4]
launch_name = sys.argv[5]
# create boto3 session/client and download yaml/json file
session = boto3.session.Session()
s3_endpoint_url = os.environ.get("S3_ENDPOINT_URL", None)
if s3_endpoint_url is not None:
LOG.info('Endpoint URL {}'.format(s3_endpoint_url))
rospy.set_param('S3_ENDPOINT_URL', s3_endpoint_url)
else:
# create boto3 session/client and download yaml/json file
ec2_client = session.client('ec2', s3_region)
LOG.info('Checking internet connection...')
response = ec2_client.describe_vpcs()
if not response['Vpcs']:
log_and_exit("No VPC attached to instance", SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
LOG.info('Verified internet connection')
s3_client = session.client('s3', region_name=s3_region, endpoint_url=s3_endpoint_url, config=get_boto_config())
yaml_key = os.path.normpath(os.path.join(s3_prefix, s3_yaml_name))
local_yaml_path = os.path.abspath(os.path.join(os.getcwd(), s3_yaml_name))
s3_client.download_file(Bucket=s3_bucket, Key=yaml_key, Filename=local_yaml_path)
# Get values passed in yaml files. Default values are for backward compatibility and for single racecar racing
default_yaml_values = {RACE_TYPE_YAML_KEY: TIME_TRIAL_RACE_TYPE,
MODEL_S3_BUCKET_YAML_KEY: s3_bucket,
MODEL_S3_PREFIX_YAML_KEY: s3_prefix,
CAR_COLOR_YAML_KEY: DEFAULT_COLOR,
MODEL_METADATA_FILE_S3_YAML_KEY: None}
yaml_dict = get_yaml_dict(local_yaml_path)
yaml_values = get_yaml_values(yaml_dict, default_yaml_values)
# Forcing the yaml parameter to list
force_list_params = [MODEL_METADATA_FILE_S3_YAML_KEY, MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY,
CAR_COLOR_YAML_KEY]
for params in force_list_params:
yaml_values[params] = force_list(yaml_values[params])
# Populate the model_metadata_s3_key values to handle both training and evaluation for all race_formats
if None in yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY]:
# MODEL_METADATA_FILE_S3_KEY not passed as part of yaml file ==> This happens during evaluation
# Assume model_metadata.json is present in the s3_prefix/model/ folder
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY] = list()
for s3_prefix in yaml_values[MODEL_S3_PREFIX_YAML_KEY]:
yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY].append(os.path.join(s3_prefix, 'model/model_metadata.json'))
# Set the multicar flag if this is a head-to-model race type
multicar = yaml_values[RACE_TYPE_YAML_KEY] == HEAD_TO_MODEL_RACE_TYPE
# Validate the yaml values
validate_yaml_values(yaml_values, multicar)
# List of racecar names that should include second camera while launching
racecars_with_stereo_cameras = list()
# List of racecar names that should include lidar while launching
racecars_with_lidars = list()
# List of SimApp versions
simapp_versions = list()
for agent_index, model_s3_bucket in enumerate(yaml_values[MODEL_S3_BUCKET_YAML_KEY]):
racecar_name = 'racecar_'+str(agent_index) if len(yaml_values[MODEL_S3_BUCKET_YAML_KEY]) > 1 else 'racecar'
# Make a local folder with the racecar name to download the model_metadata.json
if not os.path.exists(os.path.join(os.getcwd(), racecar_name)):
os.makedirs(os.path.join(os.getcwd(), racecar_name))
local_model_metadata_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), racecar_name),
'model_metadata.json'))
json_key = yaml_values[MODEL_METADATA_FILE_S3_YAML_KEY][agent_index]
json_key = json_key.replace('s3://{}/'.format(model_s3_bucket), '')
s3_client.download_file(Bucket=model_s3_bucket, Key=json_key, Filename=local_model_metadata_path)
sensors, _, simapp_version = utils_parse_model_metadata.parse_model_metadata(local_model_metadata_path)
simapp_versions.append(simapp_version)
if Input.STEREO.value in sensors:
racecars_with_stereo_cameras.append(racecar_name)
if Input.LIDAR.value in sensors or Input.SECTOR_LIDAR.value in sensors:
racecars_with_lidars.append(racecar_name)
cmd = [''.join(("roslaunch deepracer_simulation_environment {} ".format(launch_name),
"local_yaml_path:={} ".format(local_yaml_path),
"racecars_with_stereo_cameras:={} ".format(','.join(racecars_with_stereo_cameras)),
"racecars_with_lidars:={} multicar:={} ".format(','.join(racecars_with_lidars), multicar),
"car_colors:={} simapp_versions:={}".format(','.join(yaml_values[CAR_COLOR_YAML_KEY]),
','.join(simapp_versions))))]
Popen(cmd, shell=True, executable="/bin/bash")
except botocore.exceptions.ClientError as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_400)
except botocore.exceptions.EndpointConnectionError:
log_and_exit("No Internet connection or s3 service unavailable",
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
except Exception as ex:
log_and_exit("Download params and launch of agent node failed: s3_bucket: {}, yaml_key: {}, {}"
.format(s3_bucket, yaml_key, ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
def validate_yaml_values(yaml_values, multicar):
""" Validate that the parameter provided in the yaml file for configuration is correct.
Some of the params requires list of two values. This is mostly checked as part of this function
Arguments:
yaml_values {[dict]} -- [All the yaml parameter as a list]
multicar {[bool]} -- [Is multicar enabled (True), else False]
Raises:
Exception -- [Exception]
"""
# Verify if all the yaml keys required for launching models have same number of values
same_len_values = [MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY, MODEL_METADATA_FILE_S3_YAML_KEY,
CAR_COLOR_YAML_KEY]
LOG.info(yaml_values)
if not all(map(lambda param: len(yaml_values[param]) == len(yaml_values[same_len_values[0]]), same_len_values)):
raise Exception('Incorrect number of values for these yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 2 values for multi car racing
if multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 2:
raise Exception('Incorrect number of values for multicar racing yaml parameters {}'.format(same_len_values))
# Verify if all yaml keys have 1 value for single car racing
if not multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 1:
raise Exception('Incorrect number of values for single car racing yaml parameters {}'.format(same_len_values))
def get_yaml_dict(local_yaml_path):
'''local_yaml_path - path to the local yaml file
'''
with open(local_yaml_path, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
log_and_exit("yaml read error: {}".format(exc),
SIMAPP_SIMULATION_WORKER_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_500)
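# Illustrative sketch (not part of the original source): the kind of dict
# get_yaml_dict returns for a minimal time-trial configuration; the bucket and
# prefix names are placeholders, and yaml is the module imported at the top of
# this file.
def _demo_yaml_shape():
    demo_doc = ("RACE_TYPE: TIME_TRIAL\n"
                "MODEL_S3_BUCKET: demo-bucket\n"
                "MODEL_S3_PREFIX: demo-prefix\n"
                "CAR_COLOR: Black\n")
    # safe_load yields {'RACE_TYPE': 'TIME_TRIAL', 'MODEL_S3_BUCKET': 'demo-bucket', ...}
    return yaml.safe_load(demo_doc)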
def get_yaml_values(yaml_dict, default_vals=None):
'''yaml_dict - dict containing yaml configs
default_vals - Dictionary of the default values to be used if key is not present
'''
try:
return_values = dict()
|
default_val_keys = default_vals.keys() if default_vals else []
|
random_line_split
|
|
2_tracking.py
|
]]
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0.0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
return float(intersect) / (sum_area - intersect)
class Box(object):
"""
match_state: whether this box has been matched to a track; if not, a new track should be created
"""
def __init__(self, frame_index, id, box, score, gps_coor, feature):
self.frame_index = frame_index
self.id = id
self.box = box
self.score = score
self.gps_coor = gps_coor
self.feature = feature
self.center = (self.box[0] + self.box[2] / 2, self.box[1] + self.box[3] / 2)
self.match_state = NO_MATCHED
class Track(object):
def __init__(self, id, sequence):
self.id = id
self.sequence = sequence
self.match_state = MATCHED
# self.leak_time = int(0)  # number of missed frames (e.g. if two consecutive frames go unmatched)
def append(self, box):
self.sequence.append(box)
def get_last(self):
return self.sequence[-1]
def get_last_feature(self):
return self.sequence[-1].feature
def get_last_gps(self):
return self.sequence[-1].gps_coor
def show(self):
print("For track-" + str(self.id) + ' : ', "length-" + len(self.sequence), ", matchState-", self.match_state)
class Frame(object):
def __init__(self, index, boxes):
self.index = index
self.boxes = boxes
def append(self, box):
self.boxes.append(box)
def show(self):
print("For frame index-" + str(self.index) + ' : ', "length-" + len(self.boxes))
for i in range(len(self.boxes)):
|
def Hungary(task_matrix):
b = task_matrix.copy()
# subtract the minimum from every row and column
for i in range(len(b)):
row_min = np.min(b[i])
for j in range(len(b[i])):
b[i][j] -= row_min
for i in range(len(b[0])):
col_min = np.min(b[:, i])
for j in range(len(b)):
b[j][i] -= col_min
line_count = 0
# loop while the number of covering lines is less than the matrix size
while (line_count < len(b)):
line_count = 0
row_zero_count = []
col_zero_count = []
for i in range(len(b)):
row_zero_count.append(np.sum(b[i] == 0))
for i in range(len(b[0])):
col_zero_count.append((np.sum(b[:, i] == 0)))
# order in which to draw the covering lines (rows or columns)
line_order = []
row_or_col = []
for i in range(len(b[0]), 0, -1):
while (i in row_zero_count):
line_order.append(row_zero_count.index(i))
row_or_col.append(0)
row_zero_count[row_zero_count.index(i)] = 0
while (i in col_zero_count):
line_order.append(col_zero_count.index(i))
row_or_col.append(1)
col_zero_count[col_zero_count.index(i)] = 0
# cover the zeros with lines, then subtract the minimum from uncovered rows and add it to covered columns
delete_count_of_row = []
delete_count_of_rol = []
row_and_col = [i for i in range(len(b))]
for i in range(len(line_order)):
if row_or_col[i] == 0:
delete_count_of_row.append(line_order[i])
else:
delete_count_of_rol.append(line_order[i])
c = np.delete(b, delete_count_of_row, axis=0)
c = np.delete(c, delete_count_of_rol, axis=1)
line_count = len(delete_count_of_row) + len(delete_count_of_rol)
# stop once the number of lines equals the matrix size
if line_count == len(b):
break
# if the drawn lines cover every zero, apply the add/subtract adjustment
if 0 not in c:
row_sub = list(set(row_and_col) - set(delete_count_of_row))
min_value = np.min(c)
for i in row_sub:
b[i] = b[i] - min_value
for i in delete_count_of_rol:
b[:, i] = b[:, i] + min_value
break
row_ind, col_ind = linear_sum_assignment(b)
min_cost = task_matrix[row_ind, col_ind].sum()
best_solution = list(task_matrix[row_ind, col_ind])
return best_solution, col_ind
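# Illustrative sketch (not part of the original source): the final assignment in
# Hungary comes from scipy's linear_sum_assignment (assumed to be imported at the
# top of this module alongside numpy). On a toy 3x3 cost matrix:
def _demo_linear_assignment():
    demo_cost = np.array([[4.0, 1.0, 3.0],
                          [2.0, 0.0, 5.0],
                          [3.0, 2.0, 2.0]])
    row_ind, col_ind = linear_sum_assignment(demo_cost)
    # col_ind == [1, 0, 2]; total cost = 1.0 + 2.0 + 2.0 = 5.0
    return demo_cost[row_ind, col_ind].sum()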
# dict: key = frame index, value = list of Boxes
def analysis_to_frame_dict(file_path):
frame_dict = {}
lines = open(file_path, 'r').readlines()
for line in lines:
words = line.strip('\n').split(',')
# print 'what: ', words[0], len(words)
index = int(words[0])
id = int(words[1])
box = [int(float(words[2])), int(float(words[3])), int(float(words[4])), int(float(words[5]))]
score = float(words[6])
gps_x = float(words[8])
gps_y = float(words[9])
ft = np.zeros(len(words) - 10)
for i in range(10, len(words)):
ft[i - 10] = float(words[i])
cur_box = Box(index, id, box, score, (gps_x, gps_y), ft)
if index not in frame_dict:
frame_dict[index] = Frame(index, [])
frame_dict[index].append(cur_box)
return frame_dict
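# Illustrative sketch (not part of the original source): one line of
# det_reid_features.txt as the parser above expects it --
# frame,id,x,y,w,h,score,<unused>,gps_x,gps_y,feature_0,feature_1,...
# All of the concrete numbers below are made up.
def _demo_parse_line():
    words = '1,3,100,200,50,80,0.97,-1,42.49,-90.68,0.12,0.34,0.56'.split(',')
    return Box(int(words[0]), int(words[1]),
               [int(float(w)) for w in words[2:6]],
               float(words[6]),
               (float(words[8]), float(words[9])),
               np.array([float(w) for w in words[10:]]))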
def get_num_frames(video_path):
cap = cv2.VideoCapture(video_path)
return int(cap.get(7))
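# Illustrative note (not part of the original source): cap.get(7) reads the frame
# count property; cv2.CAP_PROP_FRAME_COUNT is the clearer spelling of the same id.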
def process_a_video(camera_dir):
det_reid_ft_path = camera_dir + "/det_reid_features.txt"
result_path = camera_dir + "/det_reid_track.txt"
roi_path = camera_dir + '/roi.jpg'
video_path = camera_dir + '/vdo.avi'
all_frames = get_num_frames(video_path)
roi_src = cv2.imread(roi_path)
frame_dict = analysis_to_frame_dict(det_reid_ft_path)  # dict: key = frame index, value = Frame
result_dict = {}  # final results: key = track id, value = Track
flowing_track = []  # tracks currently being followed
print(all_frames)
all_frames_num = len(frame_dict)
count = 0
for k in range(1, all_frames+1):
cur_frame = frame_dict.get(k)
if cur_frame is None:
continue
# print k, '**************************************************'
# print len(cur_frame.boxes)
processed_boxes = preprocess_boxes(cur_frame.boxes, roi_src)
cur_frame.boxes = processed_boxes
# print len(cur_frame.boxes)
# try to match every box in the current frame against the existing tracks
track_features = []
# if a track is not similar to any box in the current frame, remove it from later matching
delete_tks = []
for tk in flowing_track:
tk_ft = tk.get_last_feature()
tk_gps = tk.get_last_gps()
no_matched_flag = True
for box in cur_frame.boxes:
# appearance-feature distance
box_ft = box.feature
feature_dis_vec = box_ft - tk_ft
feature_dis = np.dot(feature_dis_vec.T, feature_dis_vec)
# GPS distance
box_gps = box.gps_coor
gps_dis_vec = ((tk_gps[0]-box_gps[0]), (tk_gps[1]-box_gps[1]))
gps_dis = (gps_dis_vec[0]*100000)**2 + (gps_dis_vec[1]*100000)**2
# print feature_dis, gps_dis_vec, gps_dis
total_dis = gps_dis*WIGHTS + feature_dis
# if feature_dis < 50:
# print 'near: ', gps_dis*WIGHTS, feature_dis, total_dis
# if feature_dis > 150:
# print 'far: ', gps_dis*WIGHTS, feature_dis, total_dis
|
box = self.boxes[i]
print('box', i, ': ', box)
|
random_line_split
|
2_tracking.py
|
]]
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0.0
else:
intersect = (right_line - left_
|
ottom_line - top_line)
return float(intersect) / (sum_area - intersect)
class Box(object):
"""
match_state: whether this box has been matched to a track; if not, a new track should be created
"""
def __init__(self, frame_index, id, box, score, gps_coor, feature):
self.frame_index = frame_index
self.id = id
self.box = box
self.score = score
self.gps_coor = gps_coor
self.feature = feature
self.center = (self.box[0] + self.box[2] / 2, self.box[1] + self.box[3] / 2)
self.match_state = NO_MATCHED
class Track(object):
def __init__(self, id, sequence):
self.id = id
self.sequence = sequence
self.match_state = MATCHED
# self.leak_time = int(0) # 镂空次数,如果连续两帧
def append(self, box):
self.sequence.append(box)
def get_last(self):
return self.sequence[-1]
def get_last_feature(self):
return self.sequence[-1].feature
def get_last_gps(self):
return self.sequence[-1].gps_coor
def show(self):
print("For track-" + str(self.id) + ' : ', "length-" + len(self.sequence), ", matchState-", self.match_state)
class Frame(object):
def __init__(self, index, boxes):
self.index = index
self.boxes = boxes
def append(self, box):
self.boxes.append(box)
def show(self):
print("For frame index-" + str(self.index) + ' : ', "length-" + len(self.boxes))
for i in range(len(self.boxes)):
box = self.boxes[i]
print('box', i, ': ', box)
def Hungary(task_matrix):
b = task_matrix.copy()
# 行和列减0
for i in range(len(b)):
row_min = np.min(b[i])
for j in range(len(b[i])):
b[i][j] -= row_min
for i in range(len(b[0])):
col_min = np.min(b[:, i])
for j in range(len(b)):
b[j][i] -= col_min
line_count = 0
# 线数目小于矩阵长度时,进行循环
while (line_count < len(b)):
line_count = 0
row_zero_count = []
col_zero_count = []
for i in range(len(b)):
row_zero_count.append(np.sum(b[i] == 0))
for i in range(len(b[0])):
col_zero_count.append((np.sum(b[:, i] == 0)))
# 划线的顺序(分行或列)
line_order = []
row_or_col = []
for i in range(len(b[0]), 0, -1):
while (i in row_zero_count):
line_order.append(row_zero_count.index(i))
row_or_col.append(0)
row_zero_count[row_zero_count.index(i)] = 0
while (i in col_zero_count):
line_order.append(col_zero_count.index(i))
row_or_col.append(1)
col_zero_count[col_zero_count.index(i)] = 0
# 画线覆盖0,并得到行减最小值,列加最小值后的矩阵
delete_count_of_row = []
delete_count_of_rol = []
row_and_col = [i for i in range(len(b))]
for i in range(len(line_order)):
if row_or_col[i] == 0:
delete_count_of_row.append(line_order[i])
else:
delete_count_of_rol.append(line_order[i])
c = np.delete(b, delete_count_of_row, axis=0)
c = np.delete(c, delete_count_of_rol, axis=1)
line_count = len(delete_count_of_row) + len(delete_count_of_rol)
# 线数目等于矩阵长度时,跳出
if line_count == len(b):
break
# 判断是否画线覆盖所有0,若覆盖,进行加减操作
if 0 not in c:
row_sub = list(set(row_and_col) - set(delete_count_of_row))
min_value = np.min(c)
for i in row_sub:
b[i] = b[i] - min_value
for i in delete_count_of_rol:
b[:, i] = b[:, i] + min_value
break
row_ind, col_ind = linear_sum_assignment(b)
min_cost = task_matrix[row_ind, col_ind].sum()
best_solution = list(task_matrix[row_ind, col_ind])
return best_solution, col_ind
# dict:key-帧序列数,value-Box_list
def analysis_to_frame_dict(file_path):
frame_dict = {}
lines = open(file_path, 'r').readlines()
for line in lines:
words = line.strip('\n').split(',')
# print 'what: ', words[0], len(words)
index = int(words[0])
id = int(words[1])
box = [int(float(words[2])), int(float(words[3])), int(float(words[4])), int(float(words[5]))]
score = float(words[6])
gps_x = float(words[8])
gps_y = float(words[9])
ft = np.zeros(len(words) - 10)
for i in range(10, len(words)):
ft[i - 10] = float(words[i])
cur_box = Box(index, id, box, score, (gps_x, gps_y), ft)
if index not in frame_dict:
frame_dict[index] = Frame(index, [])
frame_dict[index].append(cur_box)
return frame_dict
def get_num_frames(video_path):
cap = cv2.VideoCapture(video_path)
return int(cap.get(7))
def process_a_video(camera_dir):
det_reid_ft_path = camera_dir + "/det_reid_features.txt"
result_path = camera_dir + "/det_reid_track.txt"
roi_path = camera_dir + '/roi.jpg'
video_path = camera_dir + '/vdo.avi'
all_frames = get_num_frames(video_path)
roi_src = cv2.imread(roi_path)
frame_dict = analysis_to_frame_dict(det_reid_ft_path) # dict:key-帧序列数,value-Frame
result_dict = {} # 记录最后结果的字典:key-id,value-track
flowing_track = [] # 记录当前正在跟踪的tracks
print(all_frames)
all_frames_num = len(frame_dict)
count = 0
for k in range(1, all_frames+1):
cur_frame = frame_dict.get(k)
if cur_frame is None:
continue
# print k, '**************************************************'
# print len(cur_frame.boxes)
processed_boxes = preprocess_boxes(cur_frame.boxes, roi_src)
cur_frame.boxes = processed_boxes
# print len(cur_frame.boxes)
# 当前帧的所有box试图与现有的track匹配
track_features = []
# 如果一个track,它和当前帧所有的box都不相似,那么它应该被移除,不参与后面的匹配
delete_tks = []
for tk in flowing_track:
tk_ft = tk.get_last_feature()
tk_gps = tk.get_last_gps()
no_matched_flag = True
for box in cur_frame.boxes:
# 计算特征差
box_ft = box.feature
feature_dis_vec = box_ft - tk_ft
feature_dis = np.dot(feature_dis_vec.T, feature_dis_vec)
# 计算gps差
box_gps = box.gps_coor
gps_dis_vec = ((tk_gps[0]-box_gps[0]), (tk_gps[1]-box_gps[1]))
gps_dis = (gps_dis_vec[0]*100000)**2 + (gps_dis_vec[1]*100000)**2
# print feature_dis, gps_dis_vec, gps_dis
total_dis = gps_dis*WIGHTS + feature_dis
# if feature_dis < 50:
# print 'near: ', gps_dis*WIGHTS, feature_dis, total_dis
# if feature_dis > 150:
# print 'far: ', gps_dis*WIGHTS, feature_dis,
|
line) * (b
|
conditional_block
|
2_tracking.py
|
]]
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0.0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
return float(intersect) / (sum_area - intersect)
class Box(object):
"""
match_state: whether this box has been matched to a track; if not, a new track should be created
"""
def __init__(self, frame_index, id, box, score, gps_coor, feature):
self.frame_index = frame_index
self.id = id
self.box = box
self.score = score
self.gps_coor = gps_coor
self.feature = feature
self.center = (self.box[0] + self.box[2] / 2, self.box[1] + self.box[3] / 2)
self.match_state = NO_MATCHED
class Track(object):
def __init__(self, id, sequence):
self.id = id
self.sequence = sequence
self.match_state = MATCHED
# self.leak_time = int(0) # 镂空次数,如果连续两帧
def append(self, box):
self.sequence.append(box)
def get_last(self):
return self.sequence[-1]
def get_last_feature(self):
return self.sequence[-1].feature
def get_last_gps(self):
return self.sequence[-1].gps_coor
def show(self):
print("For track-" + str(self.id) + ' : ', "length-" + len(self.sequence), ", matchState-", self.match_state)
class Frame(object):
def __init__(self, index, boxes):
self.index = index
self.boxes = boxes
def append(self, box):
self.boxes.append(box)
def show(self)
|
.index) + ' : ', "length-" + str(len(self.boxes)))
for i in range(len(self.boxes)):
box = self.boxes[i]
print('box', i, ': ', box)
def Hungary(task_matrix):
b = task_matrix.copy()
# 行和列减0
for i in range(len(b)):
row_min = np.min(b[i])
for j in range(len(b[i])):
b[i][j] -= row_min
for i in range(len(b[0])):
col_min = np.min(b[:, i])
for j in range(len(b)):
b[j][i] -= col_min
line_count = 0
# 线数目小于矩阵长度时,进行循环
while (line_count < len(b)):
line_count = 0
row_zero_count = []
col_zero_count = []
for i in range(len(b)):
row_zero_count.append(np.sum(b[i] == 0))
for i in range(len(b[0])):
col_zero_count.append((np.sum(b[:, i] == 0)))
# 划线的顺序(分行或列)
line_order = []
row_or_col = []
for i in range(len(b[0]), 0, -1):
while (i in row_zero_count):
line_order.append(row_zero_count.index(i))
row_or_col.append(0)
row_zero_count[row_zero_count.index(i)] = 0
while (i in col_zero_count):
line_order.append(col_zero_count.index(i))
row_or_col.append(1)
col_zero_count[col_zero_count.index(i)] = 0
# 画线覆盖0,并得到行减最小值,列加最小值后的矩阵
delete_count_of_row = []
delete_count_of_rol = []
row_and_col = [i for i in range(len(b))]
for i in range(len(line_order)):
if row_or_col[i] == 0:
delete_count_of_row.append(line_order[i])
else:
delete_count_of_rol.append(line_order[i])
c = np.delete(b, delete_count_of_row, axis=0)
c = np.delete(c, delete_count_of_rol, axis=1)
line_count = len(delete_count_of_row) + len(delete_count_of_rol)
# 线数目等于矩阵长度时,跳出
if line_count == len(b):
break
# 判断是否画线覆盖所有0,若覆盖,进行加减操作
if 0 not in c:
row_sub = list(set(row_and_col) - set(delete_count_of_row))
min_value = np.min(c)
for i in row_sub:
b[i] = b[i] - min_value
for i in delete_count_of_rol:
b[:, i] = b[:, i] + min_value
break
row_ind, col_ind = linear_sum_assignment(b)
min_cost = task_matrix[row_ind, col_ind].sum()
best_solution = list(task_matrix[row_ind, col_ind])
return best_solution, col_ind
# dict:key-帧序列数,value-Box_list
def analysis_to_frame_dict(file_path):
frame_dict = {}
lines = open(file_path, 'r').readlines()
for line in lines:
words = line.strip('\n').split(',')
# print 'what: ', words[0], len(words)
index = int(words[0])
id = int(words[1])
box = [int(float(words[2])), int(float(words[3])), int(float(words[4])), int(float(words[5]))]
score = float(words[6])
gps_x = float(words[8])
gps_y = float(words[9])
ft = np.zeros(len(words) - 10)
for i in range(10, len(words)):
ft[i - 10] = float(words[i])
cur_box = Box(index, id, box, score, (gps_x, gps_y), ft)
if index not in frame_dict:
frame_dict[index] = Frame(index, [])
frame_dict[index].append(cur_box)
return frame_dict
def get_num_frames(video_path):
cap = cv2.VideoCapture(video_path)
return int(cap.get(7))
def process_a_video(camera_dir):
det_reid_ft_path = camera_dir + "/det_reid_features.txt"
result_path = camera_dir + "/det_reid_track.txt"
roi_path = camera_dir + '/roi.jpg'
video_path = camera_dir + '/vdo.avi'
all_frames = get_num_frames(video_path)
roi_src = cv2.imread(roi_path)
frame_dict = analysis_to_frame_dict(det_reid_ft_path) # dict:key-帧序列数,value-Frame
result_dict = {} # 记录最后结果的字典:key-id,value-track
flowing_track = [] # 记录当前正在跟踪的tracks
print(all_frames)
all_frames_num = len(frame_dict)
count = 0
for k in range(1, all_frames+1):
cur_frame = frame_dict.get(k)
if cur_frame is None:
continue
# print k, '**************************************************'
# print len(cur_frame.boxes)
processed_boxes = preprocess_boxes(cur_frame.boxes, roi_src)
cur_frame.boxes = processed_boxes
# print len(cur_frame.boxes)
# 当前帧的所有box试图与现有的track匹配
track_features = []
# 如果一个track,它和当前帧所有的box都不相似,那么它应该被移除,不参与后面的匹配
delete_tks = []
for tk in flowing_track:
tk_ft = tk.get_last_feature()
tk_gps = tk.get_last_gps()
no_matched_flag = True
for box in cur_frame.boxes:
# 计算特征差
box_ft = box.feature
feature_dis_vec = box_ft - tk_ft
feature_dis = np.dot(feature_dis_vec.T, feature_dis_vec)
# 计算gps差
box_gps = box.gps_coor
gps_dis_vec = ((tk_gps[0]-box_gps[0]), (tk_gps[1]-box_gps[1]))
gps_dis = (gps_dis_vec[0]*100000)**2 + (gps_dis_vec[1]*100000)**2
# print feature_dis, gps_dis_vec, gps_dis
total_dis = gps_dis*WIGHTS + feature_dis
# if feature_dis < 50:
# print 'near: ', gps_dis*WIGHTS, feature_dis, total_dis
# if feature_dis > 150:
# print 'far: ', gps_dis*WIGHTS, feature_dis, total
|
:
print("For frame index-" + str(self
|
identifier_body
|
2_tracking.py
|
, x0, y1, x1), which reflects
(top, left, bottom, right)
:param rec2: (y0, x0, y1, x1)
:return: scalar value of IoU
"""
rec1 = [box1[0], box1[1], box1[0] + box1[2], box1[1] + box1[3]]
rec2 = [box2[0], box2[1], box2[0] + box2[2], box2[1] + box2[3]]
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0.0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
return float(intersect) / (sum_area - intersect)
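# Illustrative worked example (not part of the original source): for
# box1 = (0, 0, 10, 10) and box2 = (5, 5, 10, 10) in (x, y, w, h) form, each box
# covers 100 px^2 and they share a 5x5 corner, so
# IoU = 25 / (100 + 100 - 25) = 1/7 ~= 0.143.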
class Box(object):
"""
match_state: whether this box has been matched to a track; if not, a new track should be created
"""
def __init__(self, frame_index, id, box, score, gps_coor, feature):
self.frame_index = frame_index
self.id = id
self.box = box
self.score = score
self.gps_coor = gps_coor
self.feature = feature
self.center = (self.box[0] + self.box[2] / 2, self.box[1] + self.box[3] / 2)
self.match_state = NO_MATCHED
class Track(object):
def __init__(self, id, sequence):
self.id = id
self.sequence = sequence
self.match_state = MATCHED
# self.leak_time = int(0) # 镂空次数,如果连续两帧
def append(self, box):
self.sequence.append(box)
def get_last(self):
return self.sequence[-1]
def get_last_feature(self):
return self.sequence[-1].feature
def get_last_gps(self):
return self.sequence[-1].gps_coor
def show(self):
print("For track-" + str(self.id) + ' : ', "length-" + len(self.sequence), ", matchState-", self.match_state)
class Frame(object):
def __init__(self, index, boxes):
self.index = index
self.boxes = boxes
def append(self, box):
self.boxes.append(box)
def show(self):
print("For frame index-" + str(self.index) + ' : ', "length-" + len(self.boxes))
for i in range(len(self.boxes)):
box = self.boxes[i]
print('box', i, ': ', box)
def Hungary(task_matrix):
b = task_matrix.copy()
# 行和列减0
for i in range(len(b)):
row_min = np.min(b[i])
for j in range(len(b[i])):
b[i][j] -= row_min
for i in range(len(b[0])):
col_min = np.min(b[:, i])
for j in range(len(b)):
b[j][i] -= col_min
line_count = 0
# 线数目小于矩阵长度时,进行循环
while (line_count < len(b)):
line_count = 0
row_zero_count = []
col_zero_count = []
for i in range(len(b)):
row_zero_count.append(np.sum(b[i] == 0))
for i in range(len(b[0])):
col_zero_count.append((np.sum(b[:, i] == 0)))
# 划线的顺序(分行或列)
line_order = []
row_or_col = []
for i in range(len(b[0]), 0, -1):
while (i in row_zero_count):
line_order.append(row_zero_count.index(i))
row_or_col.append(0)
row_zero_count[row_zero_count.index(i)] = 0
while (i in col_zero_count):
line_order.append(col_zero_count.index(i))
row_or_col.append(1)
col_zero_count[col_zero_count.index(i)] = 0
# 画线覆盖0,并得到行减最小值,列加最小值后的矩阵
delete_count_of_row = []
delete_count_of_rol = []
row_and_col = [i for i in range(len(b))]
for i in range(len(line_order)):
if row_or_col[i] == 0:
delete_count_of_row.append(line_order[i])
else:
delete_count_of_rol.append(line_order[i])
c = np.delete(b, delete_count_of_row, axis=0)
c = np.delete(c, delete_count_of_rol, axis=1)
line_count = len(delete_count_of_row) + len(delete_count_of_rol)
# stop once the number of lines equals the matrix size
if line_count == len(b):
break
# check whether the drawn lines cover all the zeros; if so, apply the subtract/add adjustment
if 0 not in c:
row_sub = list(set(row_and_col) - set(delete_count_of_row))
min_value = np.min(c)
for i in row_sub:
b[i] = b[i] - min_value
for i in delete_count_of_rol:
b[:, i] = b[:, i] + min_value
break
row_ind, col_ind = linear_sum_assignment(b)
min_cost = task_matrix[row_ind, col_ind].sum()
best_solution = list(task_matrix[row_ind, col_ind])
return best_solution, col_ind
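# A small sanity-check sketch for the assignment step above. It assumes `Hungary`, numpy
# and scipy.optimize.linear_sum_assignment are importable exactly as they are used above:
#
#     cost = np.array([[4.0, 1.0, 3.0],
#                      [2.0, 0.0, 5.0],
#                      [3.0, 2.0, 2.0]])
#     best_costs, col_ind = Hungary(cost)
#     # col_ind maps each row (track) to a column (box); the optimal assignment here is
#     # rows 0, 1, 2 -> columns 1, 0, 2 with total cost 1 + 2 + 2 = 5.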
# dict: key - frame index, value - list of Boxes
def analysis_to_frame_dict(file_path):
frame_dict = {}
lines = open(file_path, 'r').readlines()
for line in lines:
words = line.strip('\n').split(',')
# print 'what: ', words[0], len(words)
index = int(words[0])
id = int(words[1])
box = [int(float(words[2])), int(float(words[3])), int(float(words[4])), int(float(words[5]))]
score = float(words[6])
gps_x = float(words[8])
gps_y = float(words[9])
ft = np.zeros(len(words) - 10)
for i in range(10, len(words)):
ft[i - 10] = float(words[i])
cur_box = Box(index, id, box, score, (gps_x, gps_y), ft)
if index not in frame_dict:
frame_dict[index] = Frame(index, [])
frame_dict[index].append(cur_box)
return frame_dict
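# Input format assumed by the parser above (inferred from the indices it reads; column 7
# is present in the file but unused here). Each line of det_reid_features.txt is:
#   frame_index,id,x,y,w,h,score,<unused>,gps_x,gps_y,feature_0,...,feature_N
# e.g. a made-up line with a two-dimensional feature vector:
#   1,3,100,200,50,80,0.97,0,13.7649,100.5383,0.12,0.34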
def get_num_frames(video_path):
cap = cv2.VideoCapture(video_path)
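# 7 is cv2.CAP_PROP_FRAME_COUNT: the total number of frames reported by the container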
return int(cap.get(7))
def process_a_video(camera_dir):
det_reid_ft_path = camera_dir + "/det_reid_features.txt"
result_path = camera_dir + "/det_reid_track.txt"
roi_path = camera_dir + '/roi.jpg'
video_path = camera_dir + '/vdo.avi'
all_frames = get_num_frames(video_path)
roi_src = cv2.imread(roi_path)
frame_dict = analysis_to_frame_dict(det_reid_ft_path) # dict: key - frame index, value - Frame
result_dict = {} # dict holding the final result: key - id, value - track
flowing_track = [] # tracks that are currently being followed
print(all_frames)
all_frames_num = len(frame_dict)
count = 0
for k in range(1, all_frames+1):
cur_frame = frame_dict.get(k)
if cur_frame is None:
continue
# print k, '**************************************************'
# print len(cur_frame.boxes)
processed_boxes = preprocess_boxes(cur_frame.boxes, roi_src)
cur_frame.boxes = processed_boxes
# print len(cur_frame.boxes)
# try to match every box of the current frame against the existing tracks
track_features = []
# if a track is not similar to any box in the current frame, it should be removed and excluded from the later matching
delete_tks = []
for tk in flowing_track:
tk_ft = tk.get_last_feature()
tk_gps = tk.get_last_gps()
no_matched_flag = True
for box in cur_frame.boxes:
# compute the feature (appearance) distance
box_ft = box.feature
feature_dis_vec = box_ft - tk_ft
feature_dis = np.dot(feature_dis_vec.T, feature_dis_vec)
# compute the GPS distance
box_gps = box.gps_coor
gps_dis_vec = ((tk_gps[0]-box_gps[0]), (tk
|
m rec1: (y0
|
identifier_name
|
|
add-action-form.component.ts
|
boolean = false;
resultValue: any = {};
appAttributeParams: any = {};
validateForm: FormGroup;
code: any;
nameverification: any;//name
conturi: any; //function URI
contauthorizationUri: any = '只能输入数字、26个英文字母(大小写)、:/?&#-_{}.=,多个URL以英文逗号分隔';
private checkPwd: any = CheckRegExp(this.regService.getPwd())
idNum: number;
id: number;
subscription: Subscription;//subscription handle
appIconFile: any = {
list: [],
number: 1,
apiUrl: `${document.location.origin}/console-api/attachmentController/uploadImage`,
};
componentChange(value: any, fieldName: string) {
if (this.checkHasFieldName(fieldName)) {
this.validateForm.controls[fieldName].setValue(value);
}
}
guid() {
return (this.S4() + this.S4() + this.S4() + this.S4() + this.S4() + this.S4() + this.S4() + this.S4());
}
S4() {
return (((1 + Math.random()) * 0x10000) | 0).toString(16).substring(1);
}
checkHasFieldName(fieldName: string) {
let has = false;
for (let o in this.validateForm.controls) {
if (fieldName && fieldName == o) {
has = true;
break;
}
}
return has;
}
_submitForm() {
for (const i in this.validateForm.controls) {
this.validateForm.controls[i].markAsDirty();
}
// console.log(this.validateForm);
if (this.validateForm.invalid) {
return;
}
this.resultData.emit(this.resultValue);
this.onSubmit.emit(this.validateForm);
}
constructor(private regService: RegexpSService, private fb: FormBuilder, private service: AddActionFormService, private route: ActivatedRoute
) {
this.subscription = this.service.editGrabble$.subscribe((grabble: any) => {
this.id = grabble;
});
|
rols['checkPassword'].updateValueAndValidity();
});
}
getCaptcha(e: MouseEvent) {
e.preventDefault();
}
// confirmationValidator = (control: FormControl): { [s: string]: boolean } => {
// if (!control.value) {
// return { required: true };
// } else if (control.value !== this.validateForm.controls['password'].value) {
// return { confirm: true, error: true };
// }
// };
//
//
//
confirmationSerialNumber(): ValidatorFn {
return (control: FormControl) => {
let forbiddenName: boolean = false;
// let controlV = control.value ? control.value : '';
// controlV && (controlV = controlV.trim());
if (/\D/g.test(control.value) || control.value.length > 6) {
forbiddenName = true;
}
return forbiddenName ? { 'forbiddenName': { value: control.value } } : null;
}
};
initValidateForm() {
//listen to the id value in the service; the id is passed in when editing
if (this.validateForm) {
return false;
}
const that = this;
if (this.id) {//when the operation is an edit, include the id value
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(768), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: [null, [Validators.required]],
parentId: [(this.action && this.action.parentId ? this.action.parentId : 0)],
id: [this.id],
parentName: [(this.action && this.action.parentName ? this.action.parentName : 'root')]
});
} else {
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(256), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: this.defaultType,
parentId: [0],
id: null,
parentName: ['root']
});
}
}
resetValidateForm(data: any) {
if (data.icon == null || data.icon == '' || data.icon == undefined) { // clear the icon list when there is no icon
this.appIconFile.list = [];
this.appIconFile = Object.assign({}, this.appIconFile);
}
const that = this;
if (this.editOrAdd == 'select' || this.editOrAdd == 'update') {
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root',
});
} else {//create new
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root'
});
}
}
space() {
}
/**
* Check whether the name is duplicated.
* An arrow function is used here because 'this' would otherwise not be accessible inside.
* @param {FormControl} control [description]
* @return {[type]} [description]
*/
checkRepeatCodeAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([\u4E00-\u9FA5]|[A-Za-z]|[0-9]|[ ]|[-_&])+$/).test(control.value || '')
if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'features': { value: cont } })
this.nameverification = "只能输入中文、数字、26个英文字母(大小写)、-_&、空格"
})
} else {
return new Promise((resolve: any, reject: any) => {
let controlV: string = control.value || ''
controlV && (controlV = controlV.trim())
let params: any = {
isname: controlV,
}
params.appId = this.route.snapshot.params['id'];
if (this.id) {
params.id = this.id;
}
this.service.checkRepeat(params).subscribe((data: any) => {
resolve(!data.success ? { 'features': { value: control.value } } : null)
this.nameverification = "应用名称已存在"
})
})
}
}
/**
* Validate the uri
*/
checkRepeaturiAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([A-Za-z]|[0-9]|[:/?&#-_{}.])+$/).test(control.value || '')
if (control.value && control.value.length < 10) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': { value: true } })
this.conturi = "输入的功能地址长度不得小于10位"
})
} else if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi
|
}
updateConfirmValidator() {
/** wait for refresh value */
setTimeout(() => {
this.validateForm.cont
|
identifier_body
|
add-action-form.component.ts
|
refresh value */
setTimeout(() => {
this.validateForm.controls['checkPassword'].updateValueAndValidity();
});
}
getCaptcha(e: MouseEvent) {
e.preventDefault();
}
// confirmationValidator = (control: FormControl): { [s: string]: boolean } => {
// if (!control.value) {
// return { required: true };
// } else if (control.value !== this.validateForm.controls['password'].value) {
// return { confirm: true, error: true };
// }
// };
//
//
//
confirmationSerialNumber(): ValidatorFn {
return (control: FormControl) => {
let forbiddenName: boolean = false;
// let controlV = control.value ? control.value : '';
// controlV && (controlV = controlV.trim());
if (/\D/g.test(control.value) || control.value.length > 6) {
forbiddenName = true;
}
return forbiddenName ? { 'forbiddenName': { value: control.value } } : null;
}
};
initValidateForm() {
//listen to the id value in the service; the id is passed in when editing
if (this.validateForm) {
return false;
}
const that = this;
if (this.id) {//when the operation is an edit, include the id value
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(768), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: [null, [Validators.required]],
parentId: [(this.action && this.action.parentId ? this.action.parentId : 0)],
id: [this.id],
parentName: [(this.action && this.action.parentName ? this.action.parentName : 'root')]
});
} else {
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(256), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: this.defaultType,
parentId: [0],
id: null,
parentName: ['root']
});
}
}
resetValidateForm(data: any) {
if (data.icon == null || data.icon == '' || data.icon == undefined) { // clear the icon list when there is no icon
this.appIconFile.list = [];
this.appIconFile = Object.assign({}, this.appIconFile);
}
const that = this;
if (this.editOrAdd == 'select' || this.editOrAdd == 'update') {
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root',
});
} else {//create new
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root'
});
}
}
space() {
}
/**
* Check whether the name is duplicated.
* An arrow function is used here because 'this' would otherwise not be accessible inside.
* @param {FormControl} control [description]
* @return {[type]} [description]
*/
checkRepeatCodeAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([\u4E00-\u9FA5]|[A-Za-z]|[0-9]|[ ]|[-_&])+$/).test(control.value || '')
if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'features': { value: cont } })
this.nameverification = "只能输入中文、数字、26个英文字母(大小写)、-_&、空格"
})
} else {
return new Promise((resolve: any, reject: any) => {
let controlV: string = control.value || ''
controlV && (controlV = controlV.trim())
let params: any = {
isname: controlV,
}
params.appId = this.route.snapshot.params['id'];
if (this.id) {
params.id = this.id;
}
this.service.checkRepeat(params).subscribe((data: any) => {
resolve(!data.success ? { 'features': { value: control.value } } : null)
this.nameverification = "应用名称已存在"
})
})
}
}
/**
* Validate the uri
*/
checkRepeaturiAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([A-Za-z]|[0-9]|[:/?&#-_{}.])+$/).test(control.value || '')
if (control.value && control.value.length < 10) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': { value: true } })
this.conturi = "输入的功能地址长度不得小于10位"
})
} else if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': { value: cont } })
this.conturi = "只能输入数字、26个英文字母(大小写)、':/?&#-_{}.'"
})
}
}
ngOnInit() {
this.code = this.guid();
this.action = {};
this.initValidateForm();
}
selectSearchAdditionalAppId(value: any, fieldName: string) {
this.componentChange(value, fieldName);
}
getFormControl(name: string) {
return this.validateForm.controls[name];
}
initIcon() {
/* this.appIconFile.list = [{
uid: 146,
name: 'yhgj.png',
status: 'done',
url: 'https://zos.alipayobjects.com/rmsportal/jkjgkEfvpUPVyRjUImniVslZfWPnJuuZ.png',
//thumbUrl: icon
}];*/
/*let icon=this.action.icon;
if(icon){
this.appIconFile.list = [{
uid: 146,
name: 'yhgj.png',
status: 'done',
//url: 'https://zos.alipayobjects.com/rmsportal/jkjgkEfvpUPVyRjUImniVslZfWPnJuuZ.png',
thumbUrl: this.action.icon
}];
this.resultValue.icon = this.appIconFile.list[0].thumbUrl;
}*/
}
initActionFormData() {
if (this.action) {
for (let o in this.action) {
this.componentChange(this.action[o], o);
}
}
}
showAddUserModal() {
this.isShowAddUerModal = true;
}
onSearchUserList(params: any) {
this.userTableFieldParams = params;
}
onUploadAppIconFile(files: any[]) {
if (files.length > 0 && files[0].thumbUrl) {
this.resultValue.icon = files[0].thumbUrl;
} else {
this.resultValue.icon = "";
}
}
ngOnChanges(changes: SimpleChanges) {
this.initValidateForm();
if (changes.action && changes.action.currentValue) {
this.action = {};
|
this.action =
|
identifier_name
|
|
add-action-form.component.ts
|
if (this.validateForm.invalid) {
return;
}
this.resultData.emit(this.resultValue);
this.onSubmit.emit(this.validateForm);
}
constructor(private regService: RegexpSService, private fb: FormBuilder, private service: AddActionFormService, private route: ActivatedRoute
) {
this.subscription = this.service.editGrabble$.subscribe((grabble: any) => {
this.id = grabble;
});
}
updateConfirmValidator() {
/** wait for refresh value */
setTimeout(() => {
this.validateForm.controls['checkPassword'].updateValueAndValidity();
});
}
getCaptcha(e: MouseEvent) {
e.preventDefault();
}
// confirmationValidator = (control: FormControl): { [s: string]: boolean } => {
// if (!control.value) {
// return { required: true };
// } else if (control.value !== this.validateForm.controls['password'].value) {
// return { confirm: true, error: true };
// }
// };
//
//
//
confirmationSerialNumber(): ValidatorFn {
return (control: FormControl) => {
let forbiddenName: boolean = false;
// let controlV = control.value ? control.value : '';
// controlV && (controlV = controlV.trim());
if (/\D/g.test(control.value) || control.value.length > 6) {
forbiddenName = true;
}
return forbiddenName ? { 'forbiddenName': { value: control.value } } : null;
}
};
initValidateForm() {
//listen to the id value in the service; the id is passed in when editing
if (this.validateForm) {
return false;
}
const that = this;
if (this.id) {//when the operation is an edit, include the id value
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(768), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: [null, [Validators.required]],
parentId: [(this.action && this.action.parentId ? this.action.parentId : 0)],
id: [this.id],
parentName: [(this.action && this.action.parentName ? this.action.parentName : 'root')]
});
} else {
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(256), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: this.defaultType,
parentId: [0],
id: null,
parentName: ['root']
});
}
}
resetValidateForm(data: any) {
if (data.icon == null || data.icon == '' || data.icon == undefined) { // clear the icon list when there is no icon
this.appIconFile.list = [];
this.appIconFile = Object.assign({}, this.appIconFile);
}
const that = this;
if (this.editOrAdd == 'select' || this.editOrAdd == 'update') {
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root',
});
} else {//create new
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root'
});
}
}
space() {
}
/**
* Check whether the name is duplicated.
* An arrow function is used here because 'this' would otherwise not be accessible inside.
* @param {FormControl} control [description]
* @return {[type]} [description]
*/
checkRepeatCodeAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([\u4E00-\u9FA5]|[A-Za-z]|[0-9]|[ ]|[-_&])+$/).test(control.value || '')
if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'features': { value: cont } })
this.nameverification = "只能输入中文、数字、26个英文字母(大小写)、-_&、空格"
})
} else {
return new Promise((resolve: any, reject: any) => {
let controlV: string = control.value || ''
controlV && (controlV = controlV.trim())
let params: any = {
isname: controlV,
}
params.appId = this.route.snapshot.params['id'];
if (this.id) {
params.id = this.id;
}
this.service.checkRepeat(params).subscribe((data: any) => {
resolve(!data.success ? { 'features': { value: control.value } } : null)
this.nameverification = "应用名称已存在"
})
})
}
}
/**
* Validate the uri
*/
checkRepeaturiAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([A-Za-z]|[0-9]|[:/?&#-_{}.])+$/).test(control.value || '')
if (control.value && control.value.length < 10) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': { value: true } })
this.conturi = "输入的功能地址长度不得小于10位"
})
} else if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': { value: cont } })
this.conturi = "只能输入数字、26个英文字母(大小写)、':/?&#-_{}.'"
})
}
}
ngOnInit() {
this.code = this.guid();
this.action = {};
this.initValidateForm();
}
selectSearchAdditionalAppId(value: any, fieldName: string) {
this.componentChange(value, fieldName);
}
getFormControl(name: string) {
return this.validateForm.controls[name];
}
initIcon() {
/* this.appIconFile.list = [{
uid: 146,
name: 'yhgj.png',
status: 'done',
url: 'https://zos.alipayobjects.com/rmsportal/jkjgkEfvpUPVyRjUImniVslZfWPnJuuZ.png',
//thumbUrl: icon
}];*/
/*let icon=this.action.icon;
if(icon){
this.appIconFile.list = [{
uid: 146,
name: 'yhgj.png',
status: 'done',
//url: 'https://zos.alipayobjects.com/rmsportal/jkjgkEfvpUPVyRjUImniVslZfWPnJuuZ.png',
thumbUrl: this.action.icon
}];
this.resultValue.icon = this.appIconFile.list[0].thumbUrl;
}*/
}
initActionFormData() {
if (this.action) {
for (let o in this.action) {
this.componentChange(this.action[o], o);
}
}
}
showAddUserModal() {
this.isShowAddUerModal = true;
}
onSearchUserList(params: any) {
|
random_line_split
|
||
add-action-form.component.ts
|
boolean = false;
resultValue: any = {};
appAttributeParams: any = {};
validateForm: FormGroup;
code: any;
nameverification: any;//name
conturi: any; //function URI
contauthorizationUri: any = '只能输入数字、26个英文字母(大小写)、:/?&#-_{}.=,多个URL以英文逗号分隔';
private checkPwd: any = CheckRegExp(this.regService.getPwd())
idNum: number;
id: number;
subscription: Subscription;//subscription handle
appIconFile: any = {
list: [],
number: 1,
apiUrl: `${document.location.origin}/console-api/attachmentController/uploadImage`,
};
componentChange(value: any, fieldName: string) {
if (this.checkHasFieldName(fieldName)) {
this.validateForm.controls[fieldName].setValue(value);
}
}
guid() {
return (this.S4() + this.S4() + this.S4() + this.S4() + this.S4() + this.S4() + this.S4() + this.S4());
}
S4() {
return (((1 + Math.random()) * 0x10000) | 0).toString(16).substring(1);
}
checkHasFieldName(fieldName: string) {
let has = false;
for (let o in this.validateForm.controls) {
if (fieldName && fieldName == o) {
has = true;
break;
}
}
return has;
}
_submitForm() {
for (const i in this.validateForm.controls) {
this.validateForm.controls[i].markAsDirty();
}
// console.log(this.validateForm);
if (this.validateForm.invalid) {
return;
}
this.resultData.emit(this.resultValue);
this.onSubmit.emit(this.validateForm);
}
constructor(private regService: RegexpSService, private fb: FormBuilder, private service: AddActionFormService, private route: ActivatedRoute
) {
this.subscription = this.service.editGrabble$.subscribe((grabble: any) => {
this.id = grabble;
});
}
updateConfirmValidator() {
/** wait for refresh value */
setTimeout(() => {
this.validateForm.controls['checkPassword'].updateValueAndValidity();
});
}
getCaptcha(e: MouseEvent) {
e.preventDefault();
}
// confirmationValidator = (control: FormControl): { [s: string]: boolean } => {
// if (!control.value) {
// return { required: true };
// } else if (control.value !== this.validateForm.controls['password'].value) {
// return { confirm: true, error: true };
// }
// };
//
//
//
confirmationSerialNumber(): ValidatorFn {
return (control: FormControl) => {
let forbiddenName: boolean = false;
// let controlV = control.value ? control.value : '';
// controlV && (controlV = controlV.trim());
if (/\D/g.test(control.value) || control.value.length > 6) {
forbiddenName = true;
}
return forbiddenName ? { 'forbiddenName': { value: control.value } } :
|
() {
//listen to the id value in the service; the id is passed in when editing
if (this.validateForm) {
return false;
}
const that = this;
if (this.id) {//when the operation is an edit, include the id value
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(768), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: [null, [Validators.required]],
parentId: [(this.action && this.action.parentId ? this.action.parentId : 0)],
id: [this.id],
parentName: [(this.action && this.action.parentName ? this.action.parentName : 'root')]
});
} else {
this.validateForm = this.fb.group({
name: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(100), Validators.pattern(/^\S.*\S$|^\S$/)], this.checkRepeatCodeAs],
code: that.code,
authorizationUri: [null, [Validators.maxLength(256), Validators.minLength(2), Validators.pattern(/^([A-Za-z0-9 | : | \? | \= | \. | # | & | \- | \/ | _ | \{ | \}]+\,*)+$/)]],
uri: [null, [Validators.required, Validators.minLength(2), Validators.maxLength(256), Validators.pattern(/^[A-Za-z0-9\-_:#\?\=&\.{}/]+$/)]],
// serialNumber: [null, [Validators.required, this.checkPwd]],
serialNumber: 0,
// serialNumber: [null, [Validators.required, this.confirmationSerialNumber()]],
desc: [null, [Validators.maxLength(256)]],
fucTypeDicId: this.defaultType,
parentId: [0],
id: null,
parentName: ['root']
});
}
}
resetValidateForm(data: any) {
if (data.icon == null || data.icon == '' || data.icon == undefined) { // clear the icon list when there is no icon
this.appIconFile.list = [];
this.appIconFile = Object.assign({}, this.appIconFile);
}
const that = this;
if (this.editOrAdd == 'select' || this.editOrAdd == 'update') {
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root',
});
} else {//create new
this.validateForm.reset({
name: null,
code: that.code,
authorizationUri: null,
uri: null,
desc: null,
fucTypeDicId: this.defaultType,
id: null,
serialNumber: 0,
parentId: data.parentId ? data.parentId : 0,
parentName: data.parentName ? data.parentName : 'root'
});
}
}
space() {
}
/**
* Check whether the name is duplicated.
* An arrow function is used here because 'this' would otherwise not be accessible inside.
* @param {FormControl} control [description]
* @return {[type]} [description]
*/
checkRepeatCodeAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([\u4E00-\u9FA5]|[A-Za-z]|[0-9]|[ ]|[-_&])+$/).test(control.value || '')
if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'features': { value: cont } })
this.nameverification = "只能输入中文、数字、26个英文字母(大小写)、-_&、空格"
})
} else {
return new Promise((resolve: any, reject: any) => {
let controlV: string = control.value || ''
controlV && (controlV = controlV.trim())
let params: any = {
isname: controlV,
}
params.appId = this.route.snapshot.params['id'];
if (this.id) {
params.id = this.id;
}
this.service.checkRepeat(params).subscribe((data: any) => {
resolve(!data.success ? { 'features': { value: control.value } } : null)
this.nameverification = "应用名称已存在"
})
})
}
}
/**
* Validate the uri
*/
checkRepeaturiAs = async (control: FormControl): Promise<any> => {
let cont = !(/^([A-Za-z]|[0-9]|[:/?&#-_{}.])+$/).test(control.value || '')
if (control.value && control.value.length < 10) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': { value: true } })
this.conturi = "输入的功能地址长度不得小于10位"
})
} else if (cont) {
return new Promise((resolve: any, reject: any) => {
resolve({ 'conturi': {
|
null;
}
};
initValidateForm
|
conditional_block
|
__init__.py
|
process.communicate()
if stdout:
stdout = stdout.rstrip(' \n')
else:
stdout = ""
if stderr:
stderr = stderr.rstrip(' \n')
else:
stderr = ""
return (process.returncode, stdout, stderr)
def detectSystem():
(returncode, stdout, stderr) = runCommand("hostname")
if returncode != 0:
raise BatchelorException("runCommand(\"hostname\") failed")
hostname = stdout
if hostname.startswith("gridka"):
raise BatchelorException("hostname '" + hostname + "' seems to indicate gridka, but the wrong host")
elif hostname == "compass-kit.gridka.de":
return "gridka"
elif hostname.startswith("lxplus") or hostname.endswith(".cern.ch"):
return "lxplus"
elif hostname.endswith(".e18.physik.tu-muenchen.de"):
return "e18"
elif hostname.startswith("ccage"):
return "lyon"
elif hostname.startswith("login") and runCommand("which llsubmit")[0] == 0:
return "c2pap"
return "UNKNOWN"
def _getRealPath(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def _checkForSpecialCharacters(string):
if string is None:
string = ""
specialCharacters = [' ', ':', ';', '"', '\'', '@', '!', '?', '$', '\\', '/',
'#', '(', ')', '{', '}', '[', ']', '.', ',', '*']
foundChars = []
for char in specialCharacters:
if string.find(char) >= 0:
foundChars.append(char)
if foundChars:
msg = "forbidden characters in job name ("
for char in foundChars:
msg += repr(char) + ", "
msg = msg[:-2]
msg += ")"
raise BatchelorException(msg)
def checkConfig(configFileName, system = ""):
config = ConfigParser.RawConfigParser()
if not config.read(os.path.abspath(configFileName)):
|
error = False
if system != "" and not config.has_section(system):
print("ERROR: System set but corresponding section is missing in config file.")
error = True
requiredOptions = { "c2pap": [ "group", "notification", "notify_user", "node_usage", "wall_clock_limit", "resources", "job_type", "class" ],
"e18": [ "shortqueue", "memory", "header_file", "arch" ],
"gridka": [ "queue", "project", "memory", "header_file" ],
"lxplus": [ "queue", "pool", "header_file" ],
"lyon": [],
"local": [ "shell", "cores" ],
"simulator": [ "lifetime" ] }
filesToTest = { "gridka": [ "header_file" ],
"e18": [ "header_file" ],
"lxplus": [ "header_file" ],
"c2pap": [ "header_file" ],
"local": [ "shell" ] }
for section in requiredOptions.keys():
if config.has_section(section):
options = requiredOptions[section]
for option in options:
if not config.has_option(section, option):
print("ERROR: '" + section + "' section is missing option '" + option + "'.")
error = True
continue
if section in filesToTest.keys() and option in filesToTest[section] and (system == "" or system == section):
path = _getRealPath(config.get(section, option))
if not os.path.exists(path):
print("ERROR: Could not find required file '" + path + "'.")
error = True
if error:
return False
return True
class Batchelor:
debug = False
bprintTicker = ""
batchFunctions = None
def __init__(self):
self._config = ConfigParser.RawConfigParser()
def bprint(self, msg):
self.bprintTicker += ('' if self.bprintTicker == '' else '\n') + msg
if self.debug:
print(msg)
def initialize(self, configFileName, systemOverride = ""):
self.bprint("Initializing...")
if not self._config.read(os.path.abspath(configFileName)):
self.bprint("Could not read config file '" + configFileName + "'. Initialization failed...")
return False
if systemOverride == "":
self._system = detectSystem()
if self._system == "UNKNOWN":
self.bprint("Could not determine on which system we are. Initialization failed...")
return False
self.bprint("Detected system '" + self._system + "'.")
else:
self._system = systemOverride
self.bprint("System manually set to '" + self._system + "'.")
if not self._config.has_section(self._system):
self.bprint("Could not find section describing '" + self._system +
"' in config file '" + configFileName + "'. Initialization failed...")
return False
if not checkConfig(configFileName, self._system):
self.bprint("Config file contains errors. Initialization failed...")
return False
self.bprint("Importing appropriate submodule.")
if self._system == "c2pap":
import batchelor._batchelorC2PAP as batchFunctions
elif self._system == "gridka":
import batchelor._batchelorGridka as batchFunctions
elif self._system == "e18":
import batchelor._batchelorE18 as batchFunctions
elif self._system == "lxplus":
import batchelor._batchelorLxplus as batchFunctions
elif self._system == "lyon":
import batchelor._batchelorLyon as batchFunctions
elif self._system == "local":
import batchelor._batchelorLocal as batchFunctions
batchFunctions.initialize(self._config)
elif self._system == "simulator":
import batchelor._batchelorSimulator as batchFunctions
else:
self.bprint("Unknown system '" + self._system + "', cannot load appropriate submodule. Initialization failed...")
return False
self.batchFunctions = batchFunctions
self.bprint("Imported " + batchFunctions.submoduleIdentifier() + " submodule.")
self.bprint("Initialized.")
return True
def initialized(self):
if self.batchFunctions:
return True
else:
return False
def shutdown(self):
if not self.initialized():
raise BatchelorException("not initialized")
if "shutdown" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.shutdown()
def submitJob(self, command, outputFile, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJob" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.submitJob(self._config, command, outputFile, jobName)
else:
raise BatchelorException("not implemented")
def submitJobs(self, jobs):
# 'jobs' should be a list of arguments as they need to be specified for
# 'submitJob', e.g.:
# [ [ "command 1", "output file 1", "name 1" ],
# [ "command 2", "output file 2", None ],
# ... ]
# The return value is a list of job IDs in the same order as the jobs.
# A job ID of -1 indicates an error during submission of this job.
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJobs" in self.batchFunctions.__dict__.keys():
for i in range(len(jobs)):
if len(jobs[i]) == 3:
_checkForSpecialCharacters(jobs[i][2])
elif len(jobs[i]) == 2:
# the 'submitJob' method of the 'Batchelor' class
# has a default argument for the job name, do
# something similar here
jobs[i].append(None)
else:
raise BatchelorException("wrong number of arguments")
return self.batchFunctions.submitJobs(self._config, jobs)
else:
jobIds = []
for job in jobs:
try:
jobId = self.submitJob(*job)
except batchelor.BatchelorException as exc:
jobId = -1
jobIds.append(jobId)
return jobIds
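# A usage sketch for the submission API above; the config file name and the commands are
# made up for illustration, and "local" must be a section in that config file:
#
#     b = Batchelor()
#     if b.initialize("batchelor.cfg", systemOverride="local"):
#         job_ids = b.submitJobs([["echo one", "one.log", "job_one"],
#                                 ["echo two", "two.log"]])
#         # entries of -1 in job_ids mark jobs whose submission failed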
def getListOfActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getListOfActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getListOfActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def getNActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getNActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getNActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def jobStillRunning(self, jobId):
if not self.initialized():
raise BatchelorException("not initialized")
if "jobStillRunning" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.jobStillRunning(jobId)
else:
raise BatchelorException("not implemented")
def getListOf
|
print("ERROR: Could not read config file '" + configFileName + "'.")
return False
|
conditional_block
|
__init__.py
|
process.communicate()
if stdout:
stdout = stdout.rstrip(' \n')
else:
stdout = ""
if stderr:
stderr = stderr.rstrip(' \n')
else:
stderr = ""
return (process.returncode, stdout, stderr)
def detectSystem():
(returncode, stdout, stderr) = runCommand("hostname")
if returncode != 0:
raise BatchelorException("runCommand(\"hostname\") failed")
hostname = stdout
if hostname.startswith("gridka"):
raise BatchelorException("hostname '" + hostname + "' seems to indicate gridka, but the wrong host")
elif hostname == "compass-kit.gridka.de":
return "gridka"
elif hostname.startswith("lxplus") or hostname.endswith(".cern.ch"):
return "lxplus"
elif hostname.endswith(".e18.physik.tu-muenchen.de"):
return "e18"
elif hostname.startswith("ccage"):
return "lyon"
elif hostname.startswith("login") and runCommand("which llsubmit")[0] == 0:
return "c2pap"
return "UNKNOWN"
def _getRealPath(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def _checkForSpecialCharacters(string):
if string is None:
string = ""
specialCharacters = [' ', ':', ';', '"', '\'', '@', '!', '?', '$', '\\', '/',
'#', '(', ')', '{', '}', '[', ']', '.', ',', '*']
foundChars = []
for char in specialCharacters:
if string.find(char) >= 0:
foundChars.append(char)
if foundChars:
msg = "forbidden characters in job name ("
for char in foundChars:
msg += repr(char) + ", "
msg = msg[:-2]
msg += ")"
raise BatchelorException(msg)
def checkConfig(configFileName, system = ""):
config = ConfigParser.RawConfigParser()
if not config.read(os.path.abspath(configFileName)):
print("ERROR: Could not read config file '" + configFileName + "'.")
return False
error = False
if system != "" and not config.has_section(system):
print("ERROR: System set but corresponding section is missing in config file.")
error = True
requiredOptions = { "c2pap": [ "group", "notification", "notify_user", "node_usage", "wall_clock_limit", "resources", "job_type", "class" ],
"e18": [ "shortqueue", "memory", "header_file", "arch" ],
"gridka": [ "queue", "project", "memory", "header_file" ],
"lxplus": [ "queue", "pool", "header_file" ],
"lyon": [],
"local": [ "shell", "cores" ],
"simulator": [ "lifetime" ] }
filesToTest = { "gridka": [ "header_file" ],
"e18": [ "header_file" ],
"lxplus": [ "header_file" ],
"c2pap": [ "header_file" ],
"local": [ "shell" ] }
for section in requiredOptions.keys():
if config.has_section(section):
options = requiredOptions[section]
for option in options:
if not config.has_option(section, option):
print("ERROR: '" + section + "' section is missing option '" + option + "'.")
error = True
continue
if section in filesToTest.keys() and option in filesToTest[section] and (system == "" or system == section):
path = _getRealPath(config.get(section, option))
if not os.path.exists(path):
print("ERROR: Could not find required file '" + path + "'.")
error = True
if error:
return False
return True
class Batchelor:
debug = False
|
self._config = ConfigParser.RawConfigParser()
def bprint(self, msg):
self.bprintTicker += ('' if self.bprintTicker == '' else '\n') + msg
if self.debug:
print(msg)
def initialize(self, configFileName, systemOverride = ""):
self.bprint("Initializing...")
if not self._config.read(os.path.abspath(configFileName)):
self.bprint("Could not read config file '" + configFileName + "'. Initialization failed...")
return False
if systemOverride == "":
self._system = detectSystem()
if self._system == "UNKNOWN":
self.bprint("Could not determine on which system we are. Initialization failed...")
return False
self.bprint("Detected system '" + self._system + "'.")
else:
self._system = systemOverride
self.bprint("System manually set to '" + self._system + "'.")
if not self._config.has_section(self._system):
self.bprint("Could not find section describing '" + self._system +
"' in config file '" + configFileName + "'. Initialization failed...")
return False
if not checkConfig(configFileName, self._system):
self.bprint("Config file contains errors. Initialization failed...")
return False
self.bprint("Importing appropriate submodule.")
if self._system == "c2pap":
import batchelor._batchelorC2PAP as batchFunctions
elif self._system == "gridka":
import batchelor._batchelorGridka as batchFunctions
elif self._system == "e18":
import batchelor._batchelorE18 as batchFunctions
elif self._system == "lxplus":
import batchelor._batchelorLxplus as batchFunctions
elif self._system == "lyon":
import batchelor._batchelorLyon as batchFunctions
elif self._system == "local":
import batchelor._batchelorLocal as batchFunctions
batchFunctions.initialize(self._config)
elif self._system == "simulator":
import batchelor._batchelorSimulator as batchFunctions
else:
self.bprint("Unknown system '" + self._system + "', cannot load appropriate submodule. Initialization failed...")
return False
self.batchFunctions = batchFunctions
self.bprint("Imported " + batchFunctions.submoduleIdentifier() + " submodule.")
self.bprint("Initialized.")
return True
def initialized(self):
if self.batchFunctions:
return True
else:
return False
def shutdown(self):
if not self.initialized():
raise BatchelorException("not initialized")
if "shutdown" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.shutdown()
def submitJob(self, command, outputFile, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJob" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.submitJob(self._config, command, outputFile, jobName)
else:
raise BatchelorException("not implemented")
def submitJobs(self, jobs):
# 'jobs' should be a list of arguments as they need to be specified for
# 'submitJob', e.g.:
# [ [ "command 1", "output file 1", "name 1" ],
# [ "command 2", "output file 2", None ],
# ... ]
# The return value is a list of job IDs in the same order as the jobs.
# A job ID of -1 indicates an error during submission of this job.
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJobs" in self.batchFunctions.__dict__.keys():
for i in range(len(jobs)):
if len(jobs[i]) == 3:
_checkForSpecialCharacters(jobs[i][2])
elif len(jobs[i]) == 2:
# the 'submitJob' method of the 'Batchelor' class
# has a default argument for the job name, do
# something similar here
jobs[i].append(None)
else:
raise BatchelorException("wrong number of arguments")
return self.batchFunctions.submitJobs(self._config, jobs)
else:
jobIds = []
for job in jobs:
try:
jobId = self.submitJob(*job)
except batchelor.BatchelorException as exc:
jobId = -1
jobIds.append(jobId)
return jobIds
def getListOfActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getListOfActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getListOfActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def getNActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getNActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getNActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def jobStillRunning(self, jobId):
if not self.initialized():
raise BatchelorException("not initialized")
if "jobStillRunning" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.jobStillRunning(jobId)
else:
raise BatchelorException("not implemented")
def getListOfError
|
bprintTicker = ""
batchFunctions = None
def __init__(self):
|
random_line_split
|
__init__.py
|
process.communicate()
if stdout:
stdout = stdout.rstrip(' \n')
else:
stdout = ""
if stderr:
stderr = stderr.rstrip(' \n')
else:
stderr = ""
return (process.returncode, stdout, stderr)
def detectSystem():
(returncode, stdout, stderr) = runCommand("hostname")
if returncode != 0:
raise BatchelorException("runCommand(\"hostname\") failed")
hostname = stdout
if hostname.startswith("gridka"):
raise BatchelorException("hostname '" + hostname + "' seems to indicate gridka, but the wrong host")
elif hostname == "compass-kit.gridka.de":
return "gridka"
elif hostname.startswith("lxplus") or hostname.endswith(".cern.ch"):
return "lxplus"
elif hostname.endswith(".e18.physik.tu-muenchen.de"):
return "e18"
elif hostname.startswith("ccage"):
return "lyon"
elif hostname.startswith("login") and runCommand("which llsubmit")[0] == 0:
return "c2pap"
return "UNKNOWN"
def _getRealPath(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def _checkForSpecialCharacters(string):
if string is None:
string = ""
specialCharacters = [' ', ':', ';', '"', '\'', '@', '!', '?', '$', '\\', '/',
'#', '(', ')', '{', '}', '[', ']', '.', ',', '*']
foundChars = []
for char in specialCharacters:
if string.find(char) >= 0:
foundChars.append(char)
if foundChars:
msg = "forbidden characters in job name ("
for char in foundChars:
msg += repr(char) + ", "
msg = msg[:-2]
msg += ")"
raise BatchelorException(msg)
def checkConfig(configFileName, system = ""):
config = ConfigParser.RawConfigParser()
if not config.read(os.path.abspath(configFileName)):
print("ERROR: Could not read config file '" + configFileName + "'.")
return False
error = False
if system != "" and not config.has_section(system):
print("ERROR: System set but corresponding section is missing in config file.")
error = True
requiredOptions = { "c2pap": [ "group", "notification", "notify_user", "node_usage", "wall_clock_limit", "resources", "job_type", "class" ],
"e18": [ "shortqueue", "memory", "header_file", "arch" ],
"gridka": [ "queue", "project", "memory", "header_file" ],
"lxplus": [ "queue", "pool", "header_file" ],
"lyon": [],
"local": [ "shell", "cores" ],
"simulator": [ "lifetime" ] }
filesToTest = { "gridka": [ "header_file" ],
"e18": [ "header_file" ],
"lxplus": [ "header_file" ],
"c2pap": [ "header_file" ],
"local": [ "shell" ] }
for section in requiredOptions.keys():
if config.has_section(section):
options = requiredOptions[section]
for option in options:
if not config.has_option(section, option):
print("ERROR: '" + section + "' section is missing option '" + option + "'.")
error = True
continue
if section in filesToTest.keys() and option in filesToTest[section] and (system == "" or system == section):
path = _getRealPath(config.get(section, option))
if not os.path.exists(path):
print("ERROR: Could not find required file '" + path + "'.")
error = True
if error:
return False
return True
class Batchelor:
debug = False
bprintTicker = ""
batchFunctions = None
def __init__(self):
self._config = ConfigParser.RawConfigParser()
def bprint(self, msg):
self.bprintTicker += ('' if self.bprintTicker == '' else '\n') + msg
if self.debug:
print(msg)
def initialize(self, configFileName, systemOverride = ""):
self.bprint("Initializing...")
if not self._config.read(os.path.abspath(configFileName)):
self.bprint("Could not read config file '" + configFileName + "'. Initialization failed...")
return False
if systemOverride == "":
self._system = detectSystem()
if self._system == "UNKNOWN":
self.bprint("Could not determine on which system we are. Initialization failed...")
return False
self.bprint("Detected system '" + self._system + "'.")
else:
self._system = systemOverride
self.bprint("System manually set to '" + self._system + "'.")
if not self._config.has_section(self._system):
self.bprint("Could not find section describing '" + self._system +
"' in config file '" + configFileName + "'. Initialization failed...")
return False
if not checkConfig(configFileName, self._system):
self.bprint("Config file contains errors. Initialization failed...")
return False
self.bprint("Importing appropriate submodule.")
if self._system == "c2pap":
import batchelor._batchelorC2PAP as batchFunctions
elif self._system == "gridka":
import batchelor._batchelorGridka as batchFunctions
elif self._system == "e18":
import batchelor._batchelorE18 as batchFunctions
elif self._system == "lxplus":
import batchelor._batchelorLxplus as batchFunctions
elif self._system == "lyon":
import batchelor._batchelorLyon as batchFunctions
elif self._system == "local":
import batchelor._batchelorLocal as batchFunctions
batchFunctions.initialize(self._config)
elif self._system == "simulator":
import batchelor._batchelorSimulator as batchFunctions
else:
self.bprint("Unknown system '" + self._system + "', cannot load appropriate submodule. Initialization failed...")
return False
self.batchFunctions = batchFunctions
self.bprint("Imported " + batchFunctions.submoduleIdentifier() + " submodule.")
self.bprint("Initialized.")
return True
def initialized(self):
if self.batchFunctions:
return True
else:
return False
def shutdown(self):
if not self.initialized():
raise BatchelorException("not initialized")
if "shutdown" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.shutdown()
def submitJob(self, command, outputFile, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJob" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.submitJob(self._config, command, outputFile, jobName)
else:
raise BatchelorException("not implemented")
def
|
(self, jobs):
# 'jobs' should be a list of arguments as they need to be specified for
# 'submitJob', e.g.:
# [ [ "command 1", "output file 1", "name 1" ],
# [ "command 2", "output file 2", None ],
# ... ]
# The return value is a list of job IDs in the same order as the jobs.
# A job ID of -1 indicates an error during submission of this job.
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJobs" in self.batchFunctions.__dict__.keys():
for i in range(len(jobs)):
if len(jobs[i]) == 3:
_checkForSpecialCharacters(jobs[i][2])
elif len(jobs[i]) == 2:
# the 'submitJob' method of the 'Batchelor' class
# has a default argument for the job name, do
# something similar here
jobs[i].append(None)
else:
raise BatchelorException("wrong number of arguments")
return self.batchFunctions.submitJobs(self._config, jobs)
else:
jobIds = []
for job in jobs:
try:
jobId = self.submitJob(*job)
except batchelor.BatchelorException as exc:
jobId = -1
jobIds.append(jobId)
return jobIds
def getListOfActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getListOfActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getListOfActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def getNActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getNActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getNActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def jobStillRunning(self, jobId):
if not self.initialized():
raise BatchelorException("not initialized")
if "jobStillRunning" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.jobStillRunning(jobId)
else:
raise BatchelorException("not implemented")
def getList
|
submitJobs
|
identifier_name
|
__init__.py
|
def runCommand(commandString):
commandString = "errHandler() { (( errcount++ )); }; trap errHandler ERR\n" + commandString.rstrip('\n') + "\nexit $errcount"
process = subprocess.Popen(commandString,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable="/bin/bash")
(stdout, stderr) = process.communicate()
if stdout:
stdout = stdout.rstrip(' \n')
else:
stdout = ""
if stderr:
stderr = stderr.rstrip(' \n')
else:
stderr = ""
return (process.returncode, stdout, stderr)
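# A quick usage sketch for runCommand (relies on /bin/bash being available, as above):
#
#     returncode, stdout, stderr = runCommand("echo hello && ls /nonexistent")
#     # stdout == "hello"; the ERR trap makes the exit code count the commands that failed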
def detectSystem():
(returncode, stdout, stderr) = runCommand("hostname")
if returncode != 0:
raise BatchelorException("runCommand(\"hostname\") failed")
hostname = stdout
if hostname.startswith("gridka"):
raise BatchelorException("hostname '" + hostname + "' seems to indicate gridka, but the wrong host")
elif hostname == "compass-kit.gridka.de":
return "gridka"
elif hostname.startswith("lxplus") or hostname.endswith(".cern.ch"):
return "lxplus"
elif hostname.endswith(".e18.physik.tu-muenchen.de"):
return "e18"
elif hostname.startswith("ccage"):
return "lyon"
elif hostname.startswith("login") and runCommand("which llsubmit")[0] == 0:
return "c2pap"
return "UNKNOWN"
def _getRealPath(path):
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def _checkForSpecialCharacters(string):
if string is None:
string = ""
specialCharacters = [' ', ':', ';', '"', '\'', '@', '!', '?', '$', '\\', '/',
'#', '(', ')', '{', '}', '[', ']', '.', ',', '*']
foundChars = []
for char in specialCharacters:
if string.find(char) >= 0:
foundChars.append(char)
if foundChars:
msg = "forbidden characters in job name ("
for char in foundChars:
msg += repr(char) + ", "
msg = msg[:-2]
msg += ")"
raise BatchelorException(msg)
def checkConfig(configFileName, system = ""):
config = ConfigParser.RawConfigParser()
if not config.read(os.path.abspath(configFileName)):
print("ERROR: Could not read config file '" + configFileName + "'.")
return False
error = False
if system != "" and not config.has_section(system):
print("ERROR: System set but corresponding section is missing in config file.")
error = True
requiredOptions = { "c2pap": [ "group", "notification", "notify_user", "node_usage", "wall_clock_limit", "resources", "job_type", "class" ],
"e18": [ "shortqueue", "memory", "header_file", "arch" ],
"gridka": [ "queue", "project", "memory", "header_file" ],
"lxplus": [ "queue", "pool", "header_file" ],
"lyon": [],
"local": [ "shell", "cores" ],
"simulator": [ "lifetime" ] }
filesToTest = { "gridka": [ "header_file" ],
"e18": [ "header_file" ],
"lxplus": [ "header_file" ],
"c2pap": [ "header_file" ],
"local": [ "shell" ] }
for section in requiredOptions.keys():
if config.has_section(section):
options = requiredOptions[section]
for option in options:
if not config.has_option(section, option):
print("ERROR: '" + section + "' section is missing option '" + option + "'.")
error = True
continue
if section in filesToTest.keys() and option in filesToTest[section] and (system == "" or system == section):
path = _getRealPath(config.get(section, option))
if not os.path.exists(path):
print("ERROR: Could not find required file '" + path + "'.")
error = True
if error:
return False
return True
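# A minimal config sketch that checkConfig would accept for the "local" backend; the
# option names come from requiredOptions above, the values are made up:
#
#     [local]
#     shell = /bin/bash
#     cores = 4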
class Batchelor:
debug = False
bprintTicker = ""
batchFunctions = None
def __init__(self):
self._config = ConfigParser.RawConfigParser()
def bprint(self, msg):
self.bprintTicker += ('' if self.bprintTicker == '' else '\n') + msg
if self.debug:
print(msg)
def initialize(self, configFileName, systemOverride = ""):
self.bprint("Initializing...")
if not self._config.read(os.path.abspath(configFileName)):
self.bprint("Could not read config file '" + configFileName + "'. Initialization failed...")
return False
if systemOverride == "":
self._system = detectSystem()
if self._system == "UNKNOWN":
self.bprint("Could not determine on which system we are. Initialization failed...")
return False
self.bprint("Detected system '" + self._system + "'.")
else:
self._system = systemOverride
self.bprint("System manually set to '" + self._system + "'.")
if not self._config.has_section(self._system):
self.bprint("Could not find section describing '" + self._system +
"' in config file '" + configFileName + "'. Initialization failed...")
return False
if not checkConfig(configFileName, self._system):
self.bprint("Config file contains errors. Initialization failed...")
return False
self.bprint("Importing appropriate submodule.")
if self._system == "c2pap":
import batchelor._batchelorC2PAP as batchFunctions
elif self._system == "gridka":
import batchelor._batchelorGridka as batchFunctions
elif self._system == "e18":
import batchelor._batchelorE18 as batchFunctions
elif self._system == "lxplus":
import batchelor._batchelorLxplus as batchFunctions
elif self._system == "lyon":
import batchelor._batchelorLyon as batchFunctions
elif self._system == "local":
import batchelor._batchelorLocal as batchFunctions
batchFunctions.initialize(self._config)
elif self._system == "simulator":
import batchelor._batchelorSimulator as batchFunctions
else:
self.bprint("Unknown system '" + self._system + "', cannot load appropriate submodule. Initialization failed...")
return False
self.batchFunctions = batchFunctions
self.bprint("Imported " + batchFunctions.submoduleIdentifier() + " submodule.")
self.bprint("Initialized.")
return True
def initialized(self):
if self.batchFunctions:
return True
else:
return False
def shutdown(self):
if not self.initialized():
raise BatchelorException("not initialized")
if "shutdown" in self.batchFunctions.__dict__.keys():
return self.batchFunctions.shutdown()
def submitJob(self, command, outputFile, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJob" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.submitJob(self._config, command, outputFile, jobName)
else:
raise BatchelorException("not implemented")
def submitJobs(self, jobs):
# 'jobs' should be a list of arguments as they need to be specified for
# 'submitJob', e.g.:
# [ [ "command 1", "output file 1", "name 1" ],
# [ "command 2", "output file 2", None ],
# ... ]
# The return value is a list of job IDs in the same order as the jobs.
# A job ID of -1 indicates an error during submission of this job.
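# Usage sketch (assumption: `b` is an initialized Batchelor instance; the
# commands, output files and job names are illustrative only):
#   jobs = [ [ "echo run1", "/tmp/run1.out", "run1" ],
#            [ "echo run2", "/tmp/run2.out" ] ]    # job name defaults to None
#   jobIds = b.submitJobs(jobs)                    # -1 marks a failed submission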
if not self.initialized():
raise BatchelorException("not initialized")
if "submitJobs" in self.batchFunctions.__dict__.keys():
for i in range(len(jobs)):
if len(jobs[i]) == 3:
_checkForSpecialCharacters(jobs[i][2])
elif len(jobs[i]) == 2:
# the 'submitJob' method of the 'Batchelor' class
# has a default argument for the job name, do
# something similar here
jobs[i].append(None)
else:
raise BatchelorException("wrong number of arguments")
return self.batchFunctions.submitJobs(self._config, jobs)
else:
jobIds = []
for job in jobs:
try:
jobId = self.submitJob(*job)
except BatchelorException:
jobId = -1
jobIds.append(jobId)
return jobIds
def getListOfActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getListOfActiveJobs" in self.batchFunctions.__dict__.keys():
_checkForSpecialCharacters(jobName)
return self.batchFunctions.getListOfActiveJobs(jobName)
else:
raise BatchelorException("not implemented")
def getNActiveJobs(self, jobName = None):
if not self.initialized():
raise BatchelorException("not initialized")
if "getNActiveJobs" in self.batch
|
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
identifier_body
|
|
clob.rs
|
/// Creates a reader for this character object. Each call to the reader's `read` method reads the next chunk of data.
/// Data is read from the CLOB in the `UTF-8` encoding.
#[inline]
pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> {
self.new_reader_with_charset(Charset::AL32UTF8)
}
/// Creates a reader for this character object. Each call to the reader's `read` method reads the next chunk of data.
/// Data is read from the CLOB in the specified encoding.
///
/// Each call to `read` fills the buffer with bytes in the requested encoding. Because the standard Rust adapters
/// that treat a byte reader as a text reader assume the data is UTF-8, they cannot be used with this reader,
/// since the text is extracted in the specified encoding.
#[inline]
pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::ReadOnly));
Ok(ClobReader { lob: self, piece: Piece::First, charset: charset })
}
/// Creates a writer into this character object. The advantage of using a writer instead of writing directly
/// to the object is that the database's functional and domain indexes (if any) for this large object are
/// updated only when the writer is destroyed, rather than on every write to the object, which improves
/// performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> {
self.new_writer_with_charset(Charset::AL32UTF8)
}
/// Creates a writer into this character object that writes text data represented in the specified encoding.
///
/// The advantage of using a writer instead of writing directly to the object is that the database's functional
/// and domain indexes (if any) for this large object are updated only when the writer is destroyed, rather than
/// on every write to the object, which improves performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::WriteOnly));
Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset })
}
/// Gets the database character set of this large character object.
#[inline]
pub fn charset(&self) -> Result<Charset> {
self.impl_.charset().map_err(Into::into)
}
/// If the CLOB has not been read or written completely, tells the database that no further reading/writing
/// is required and closes the CLOB.
fn close(&mut self, piece: Piece) -> DbResult<()> {
// If the LOB was not read/written completely, cancel the pending read/write requests and recover
if piece != Piece::Last {
try!(self.impl_.break_());
try!(self.impl_.reset());
}
self.impl_.close()
}
}
impl<'conn> LobPrivate<'conn> for Clob<'conn> {
fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> {
let p = raw.as_ptr() as *const *mut Lob;
let locator = unsafe { *p as *mut Lob };
let impl_ = LobImpl::from(conn, locator);
let form = try!(impl_.form());
Ok(Clob { impl_: impl_, form: form })
}
}
impl<'conn> io::Read for Clob<'conn> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0
}
}
impl<'conn> io::Write for Clob<'conn> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
//-------------------------------------------------------------------------------------------------
/// Allows writing to a large character object without triggering index recalculation after every write.
/// The indexes are recalculated only when this object is destroyed.
#[derive(Debug)]
pub struct ClobWriter<'lob, 'conn: 'lob> {
lob: &'lob mut Clob<'conn>,
piece: Piece,
charset: Charset,
}
impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> {
/// Gets the `CLOB` written by this writer.
pub fn lob(&mut self) -> &mut Clob<'conn> {
self.lob
}
/// Truncates this object to the specified length. If the new length is greater than the current one,
/// an error is returned (so this function cannot be used to grow a LOB).
#[inline]
pub fn trim(&mut self, len: Chars) -> Result<()> {
self.lob.trim(len)
}
/// Fills the LOB, starting at the given offset, with the given number of spaces. On completion,
/// `count` holds the actual number of erased characters.
#[inline]
pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> {
self.lob.erase(offset, count)
}
}
impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf);
self.piece = piece;
res
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> {
fn drop(&mut self) {
// We cannot panic from here, because that would crash the application
let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer");
}
}
//-------------------------------------------------------------------------------------------------
/// Allows reading from a large character object in streaming mode. Each call to `read` reads the next chunk of data.
#[derive(Debug)]
pub struct ClobReader<'lob, 'conn: 'lob> {
lob: &'lob mut Clob<'conn>,
/// Indicates which piece of data is being fetched from the database (the first one or not).
piece: Piece,
/// The encoding in which the bytes received from the database should be interpreted.
charset: Charset,
}
impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> {
/// Gets the `CLOB` read by this reader.
pub fn lob(&mut self) -> &mut Clob<'conn> {
self.lob
}
}
impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (
|
res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.for
|
conditional_block
|
|
clob.rs
|
character object. Each call to the reader's `read` method reads the next chunk of data.
/// Data is read from the CLOB in the `UTF-8` encoding.
#[inline]
pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> {
self.new_reader_with_charset(Charset::AL32UTF8)
}
/// Creates a reader for this character object. Each call to the reader's `read` method reads the next chunk of data.
/// Data is read from the CLOB in the specified encoding.
///
/// Each call to `read` fills the buffer with bytes in the requested encoding. Because the standard Rust adapters
/// that treat a byte reader as a text reader assume the data is UTF-8, they cannot be used with this reader,
/// since the text is extracted in the specified encoding.
#[inline]
pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::ReadOnly));
Ok(ClobReader { lob: self, piece: Piece::First, charset: charset })
}
/// Creates a writer into this character object. The advantage of using a writer instead of writing directly
/// to the object is that the database's functional and domain indexes (if any) for this large object are
/// updated only when the writer is destroyed, rather than on every write to the object, which improves
/// performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> {
self.new_writer_with_charset(Charset::AL32UTF8)
}
/// Creates a writer into this character object that writes text data represented in the specified encoding.
///
/// The advantage of using a writer instead of writing directly to the object is that the database's functional
/// and domain indexes (if any) for this large object are updated only when the writer is destroyed, rather than
/// on every write to the object, which improves performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::WriteOnly));
Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset })
}
/// Gets the database character set of this large character object.
#[inline]
pub fn charset(&self) -> Result<Charset> {
self.impl_.charset().map_err(Into::into)
}
/// If the CLOB has not been read or written completely, tells the database that no further reading/writing
/// is required and closes the CLOB.
fn close(&mut self, piece: Piece) -> DbResult<()> {
// If the LOB was not read/written completely, cancel the pending read/write requests and recover
if piece != Piece::Last {
try!(self.impl_.break_());
try!(self.impl_.reset());
}
self.impl_.close()
}
}
impl<'conn> LobPrivate<'conn> for Clob<'conn> {
fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> {
let p = raw.as_ptr() as *const *mut Lob;
let locator = unsafe { *p as *mut Lob };
let impl_ = LobImpl::from(conn, locator);
let form = try!(impl_.form());
Ok(Clob { impl_: impl_, form: form })
}
}
impl<'conn> io::Read for Clob<'conn> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0
}
}
impl<'conn> io::Write for Clob<'conn> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
//-------------------------------------------------------------------------------------------------
/// Allows writing to a large character object without triggering index recalculation after every write.
/// The indexes are recalculated only when this object is destroyed.
#[derive(Debug)]
pub struct ClobWriter<'lob, 'conn: 'lob> {
lob: &'lob mut Clob<'conn>,
piece: Piece,
charset: Charset,
}
impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> {
/// Gets the `CLOB` written by this writer.
pub fn lob(&mut self) -> &mut Clob<'conn> {
self.lob
}
/// Truncates this object to the specified length. If the new length is greater than the current one,
/// an error is returned (so this function cannot be used to grow a LOB).
#[inline]
pub fn trim(&mut self, len: Chars) -> Result<()> {
self.lob.trim(len)
}
/// Fills the LOB, starting at the given offset, with the given number of spaces. On completion,
/// `count` holds the actual number of erased characters.
#[inline]
pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> {
self.lob.erase(offset, count)
}
}
impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf);
self.piece = piece;
res
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> {
fn drop(&mut self) {
// We cannot panic from here, because that would crash the application
let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer");
}
}
//-------------------------------------------------------------------------------------------------
/// Allows reading from a large character object in streaming mode. Each call to `read` reads the next chunk of data.
#[derive(Debug)]
pub struct ClobReader<'lob, 'conn: 'lob> {
lob: &'lob mut Clob<'conn>,
/// Indicates which piece of data is being fetched from the database (the first one or not).
piece: Piece,
/// The encoding in which the bytes received from the database should be interpreted.
charset: Charset,
}
impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> {
/// Gets the `CLOB` read by this
|
reader.
pub fn lob(&mut self) -> &mut Clob<'conn> {
self.lob
}
}
impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.form, buf);
self.piece = p
|
identifier_body
|
|
clob.rs
|
/// Depending on the database server configuration, this value can range from
/// 8 to 128 terabytes (TB).
#[inline]
pub fn capacity(&self) -> Result<Bytes> {
let len = try!(self.impl_.capacity());
Ok(Bytes(len))
}
/// For LOBs with storage parameter `BASICFILE`, the amount of a chunk's space that is used to store
/// the internal LOB value. This is the amount that users should use when reading or writing the LOB
/// value. If possible, users should start their writes at chunk boundaries, such as the beginning of
/// a chunk, and write a chunk at a time.
///
/// For LOBs with storage parameter `SECUREFILE`, chunk size is an advisory size and is provided for
/// backward compatibility.
///
/// When creating a table that contains an internal LOB, the user can specify the chunking factor,
/// which can be a multiple of Oracle Database blocks. This corresponds to the chunk size used by
/// the LOB data layer when accessing and modifying the LOB value. Part of the chunk is used to store
/// system-related information, and the rest stores the LOB value. This function returns the amount
/// of space used in the LOB chunk to store the LOB value. Performance is improved if the application
/// issues read or write requests using a multiple of this chunk size. For writes, there is an added
/// benefit because LOB chunks are versioned and, if all writes are done on a chunk basis, no extra
/// versioning is done or duplicated. Users could batch up the write until they have enough for a chunk
/// instead of issuing several write calls for the same chunk.
#[inline]
pub fn get_chunk_size(&self) -> Result<Bytes> {
let size = try!(self.impl_.get_chunk_size());
Ok(Bytes(size as u64))
}
/// Truncates this object to the specified length. If the new length is greater than the current one,
/// an error is returned (so this function cannot be used to grow a LOB).
///
/// # Performance
/// Note that for frequent writes it is preferable to go through the dedicated writer object, which can be
/// obtained from this object by calling [`new_writer()`](#function.new_writer). If you do so, the update of
/// the database's functional and domain indexes (if any) for this large object is deferred until the writer
/// object is destroyed. When calling this function directly, those indexes are updated as soon as the call
/// completes, which can significantly reduce performance.
#[inline]
pub fn trim(&mut self, len: Chars) -> Result<()> {
self.impl_.trim(len.0).map_err(Into::into)
}
/// Fills the LOB, starting at the given offset, with the given number of spaces. On completion,
/// `count` holds the actual number of erased characters.
///
/// # Performance
/// Note that for frequent writes it is preferable to go through the dedicated writer object, which can be
/// obtained from this object by calling [`new_writer()`](#function.new_writer). If you do so, the update of
/// the database's functional and domain indexes (if any) for this large object is deferred until the writer
/// object is destroyed. When calling this function directly, those indexes are updated as soon as the call
/// completes, which can significantly reduce performance.
#[inline]
pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> {
self.impl_.erase(offset.0, &mut count.0).map_err(Into::into)
}
/// Creates a reader for this character object. Each call to the reader's `read` method reads the next chunk of data.
|
}
/// Creates a reader for this character object. Each call to the reader's `read` method reads the next chunk of data.
/// Data is read from the CLOB in the specified encoding.
///
/// Each call to `read` fills the buffer with bytes in the requested encoding. Because the standard Rust adapters
/// that treat a byte reader as a text reader assume the data is UTF-8, they cannot be used with this reader,
/// since the text is extracted in the specified encoding.
#[inline]
pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::ReadOnly));
Ok(ClobReader { lob: self, piece: Piece::First, charset: charset })
}
/// Creates a writer into this character object. The advantage of using a writer instead of writing directly
/// to the object is that the database's functional and domain indexes (if any) for this large object are
/// updated only when the writer is destroyed, rather than on every write to the object, which improves
/// performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> {
self.new_writer_with_charset(Charset::AL32UTF8)
}
/// Creates a writer into this character object that writes text data represented in the specified encoding.
///
/// The advantage of using a writer instead of writing directly to the object is that the database's functional
/// and domain indexes (if any) for this large object are updated only when the writer is destroyed, rather than
/// on every write to the object, which improves performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::WriteOnly));
Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset })
}
/// Gets the database character set of this large character object.
#[inline]
pub fn charset(&self) -> Result<Charset> {
self.impl_.charset().map_err(Into::into)
}
/// If the CLOB has not been read or written completely, tells the database that no further reading/writing
/// is required and closes the CLOB.
fn close(&mut self, piece: Piece) -> DbResult<()> {
// If the LOB was not read/written completely, cancel the pending read/write requests and recover
if piece != Piece::Last {
try!(self.impl_.break_());
try!(self.impl_.reset());
}
self.impl_.close()
}
}
impl<'conn> LobPrivate<'conn> for Clob<'conn> {
fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> {
let p = raw.as_ptr() as *const *mut Lob;
let locator = unsafe { *p as *mut Lob };
let impl_ = LobImpl
|
/// Data is read from the CLOB in the `UTF-8` encoding.
#[inline]
pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> {
self.new_reader_with_charset(Charset::AL32UTF8)
|
random_line_split
|
clob.rs
|
2UTF8)
}
/// Creates a reader for this character object. Each call to the reader's `read` method reads the next chunk of data.
/// Data is read from the CLOB in the specified encoding.
///
/// Each call to `read` fills the buffer with bytes in the requested encoding. Because the standard Rust adapters
/// that treat a byte reader as a text reader assume the data is UTF-8, they cannot be used with this reader,
/// since the text is extracted in the specified encoding.
#[inline]
pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::ReadOnly));
Ok(ClobReader { lob: self, piece: Piece::First, charset: charset })
}
/// Creates a writer into this character object. The advantage of using a writer instead of writing directly
/// to the object is that the database's functional and domain indexes (if any) for this large object are
/// updated only when the writer is destroyed, rather than on every write to the object, which improves
/// performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> {
self.new_writer_with_charset(Charset::AL32UTF8)
}
/// Creates a writer into this character object that writes text data represented in the specified encoding.
///
/// The advantage of using a writer instead of writing directly to the object is that the database's functional
/// and domain indexes (if any) for this large object are updated only when the writer is destroyed, rather than
/// on every write to the object, which improves performance.
///
/// Within a single transaction a CLOB may be opened only once, regardless of how many locators
/// (which this class represents) exist for it.
#[inline]
pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> {
try!(self.impl_.open(LobOpenMode::WriteOnly));
Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset })
}
/// Gets the database character set of this large character object.
#[inline]
pub fn charset(&self) -> Result<Charset> {
self.impl_.charset().map_err(Into::into)
}
/// If the CLOB has not been read or written completely, tells the database that no further reading/writing
/// is required and closes the CLOB.
fn close(&mut self, piece: Piece) -> DbResult<()> {
// If the LOB was not read/written completely, cancel the pending read/write requests and recover
if piece != Piece::Last {
try!(self.impl_.break_());
try!(self.impl_.reset());
}
self.impl_.close()
}
}
impl<'conn> LobPrivate<'conn> for Clob<'conn> {
fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> {
let p = raw.as_ptr() as *const *mut Lob;
let locator = unsafe { *p as *mut Lob };
let impl_ = LobImpl::from(conn, locator);
let form = try!(impl_.form());
Ok(Clob { impl_: impl_, form: form })
}
}
impl<'conn> io::Read for Clob<'conn> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0
}
}
impl<'conn> io::Write for Clob<'conn> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
//-------------------------------------------------------------------------------------------------
/// Allows writing to a large character object without triggering index recalculation after every write.
/// The indexes are recalculated only when this object is destroyed.
#[derive(Debug)]
pub struct ClobWriter<'lob, 'conn: 'lob> {
lob: &'lob mut Clob<'conn>,
piece: Piece,
charset: Charset,
}
impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> {
/// Gets the `CLOB` written by this writer.
pub fn lob(&mut self) -> &mut Clob<'conn> {
self.lob
}
/// Truncates this object to the specified length. If the new length is greater than the current one,
/// an error is returned (so this function cannot be used to grow a LOB).
#[inline]
pub fn trim(&mut self, len: Chars) -> Result<()> {
self.lob.trim(len)
}
/// Fills the LOB, starting at the given offset, with the given number of spaces. On completion,
/// `count` holds the actual number of erased characters.
#[inline]
pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> {
self.lob.erase(offset, count)
}
}
impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf);
self.piece = piece;
res
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> {
fn drop(&mut self) {
// We cannot panic from here, because that would crash the application
let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer");
}
}
//-------------------------------------------------------------------------------------------------
/// Allows reading from a large character object in streaming mode. Each call to `read` reads the next chunk of data.
#[derive(Debug)]
pub struct ClobReader<'lob, 'conn: 'lob> {
lob: &'lob mut Clob<'conn>,
/// Indicates which piece of data is being fetched from the database (the first one or not).
piece: Piece,
/// The encoding in which the bytes received from the database should be interpreted.
charset: Charset,
}
impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> {
/// Gets the `CLOB` read by this reader.
pub fn lob(&mut self) -> &mut Clob<'conn> {
self.lob
}
}
impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let (res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.form, buf);
self.piece = piece;
res
}
}
impl<'lob, 'conn: 'lob> Drop for ClobReader<'lob, 'conn> {
fn drop(&mut self) {
// We cannot panic from here, because that would crash the application
let _ = self.lob.close(self.piece);//.expect("Error when close CLOB reader");
}
}
|
identifier_name
|
||
visualization.py
|
(x):
if '__' in x:
return [int(x) for x in x.split('__')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0) + 1
var_array = df['mode'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mode'].values, -1)], axis=1))
df_out.columns = list(df_out.columns[:-1]) + [var]
return df_out
def extract_mean_as_array(trace, var='z', astype='array'):
df = pm.summary(trace)
df = df.reset_index()
def split_fun(x):
if '__' in x:
return [int(x) for x in x.split('__')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0)+1
var_array = df['mean'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mean'].values, -1)], axis=1))
idx_cols = [str(i) for i in df_out.columns[:-1]]
df_out.columns = idx_cols+[var]
if astype == 'xarray':
return pd_to_xarray_datacube(df_out, idx_cols, value_col=var)
else:
return df_out
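# Illustrative note (the variable names are assumptions): pm.summary() labels the
# elements of a multi-dimensional variable as 'z__i_j', which split_fun above turns
# back into an index tuple. For a trace holding a (2, 3)-shaped variable 'z':
#
#   z_mean = extract_mean_as_array(trace, var='z', astype='array')   # ndarray, shape (2, 3)
#   z_df   = extract_mean_as_array(trace, var='z', astype='df')      # one row per index pair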
def gen_data_for_plot(data, x, z=None, rand_sample_vars=[], mean_sample_vars=[], const_vars={}, stages='balanced', nstages=5, samples_per_x_range=500, truncate_to_percentile=0):
"""
Generate some data that we can use to plot poterior/param values for
:param data: data used to train model, so that levels of x are known
:param x: continous data for x axis
:param z: catergorical data for y axis
:param rand_sample_vars:
:return:
"""
data_points = data.copy()
unq_x = data[x].unique()
if len(unq_x) < 7: # categorical
x_data = data[x].sample(samples_per_x_range).values
else:
if truncate_to_percentile:
x_data = np.linspace(np.percentile(data[x],truncate_to_percentile), np.percentile(data[x],100-truncate_to_percentile), samples_per_x_range)
else:
x_data = np.linspace(data[x].min(), data[x].max(), samples_per_x_range)
df = pd.DataFrame({x:x_data})
for var in mean_sample_vars:
var_mean = data[var].mean(skipna=True)
var_std = data[var].std(skipna=True)
df[var] = var_mean
data_points = data_points.loc[(var_mean-var_std<data_points[var]) & (data_points[var]<var_mean+var_std),:]
for var in rand_sample_vars:
df[var] = np.random.choice(data[var], size=(samples_per_x_range, ))
for var, val in const_vars.items():
df[var] = [val] * samples_per_x_range
if 'consider' not in var:
var_std = data[var].std(skipna=True)
data_points = data_points.loc[(val - var_std < data_points[var]) & (data_points[var] < val + var_std), :]
if stages == 'balanced':
df_stages = pd.DataFrame({'current_epoch':list(range(nstages))})
n_reps = int(np.ceil(df.shape[0]/df_stages.shape[0]))
df_stages = pd.concat([df_stages]*n_reps, axis=0).iloc[0:samples_per_x_range,:].reset_index(drop=True)
df_stages = df_stages.sample(frac=1).reset_index(drop=True)
df = pd.concat([df, df_stages], axis=1, sort=False)
if z is not None:
data_cont = []
unique_z = data[z].unique()
if len(unique_z) >= 7: # make cont into categorical
unique_z = np.linspace(data[z].min(), data[z].max(), 7)
unique_z += (unique_z[1] - unique_z[0])/2
unique_z = unique_z[:-1]
for z_val in unique_z:
new_df = df.copy()
new_df[z] = z_val
data_cont.append(new_df)
df = pd.concat(data_cont, axis=0)
return df, data_points
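# Usage sketch (assumptions: `data` is the training DataFrame and the column names
# below are illustrative):
#
#   plot_df, points = gen_data_for_plot(data, x='heart_rate', z='age',
#                                       mean_sample_vars=['movement'],
#                                       nstages=5, samples_per_x_range=200)
#
# `plot_df` spans the observed range of 'heart_rate' with balanced 'current_epoch'
# stages, while `points` keeps only the raw rows within one standard deviation of
# the conditioned-on variables, which is useful as a scatter overlay.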
def pairplot_divergence(trace, ax=None, divergence=True, color='C3', divergence_color='C2'):
theta = trace.get_values(varname='theta', combine=True)[:, 0]
logtau = trace.get_values(varname='tau_log__', combine=True)
if not ax:
_, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(theta, logtau, 'o', color=color, alpha=.5)
if divergence:
divergent = trace['diverging']
ax.plot(theta[divergent], logtau[divergent], 'o', color=divergence_color)
ax.set_xlabel('theta[0]')
ax.set_ylabel('log(tau)')
ax.set_title('scatter plot between log(tau) and theta[0]');
return ax
def plot_vars(mod, data, x, y, facet_row=None, facet_col=None, hue=None, style=None, y_levels=None, y_level_name='set_y_level_name',
maps=None, data_points=None, mean_center_means=None, vars_to_label=None,
num_draws_from_params=100, out_of_sample=True, combine_trace=False, legend='full', points_alpha=0.01):
for var_name in mod.input_vars:
if 'consider' in var_name:
mod.input_vars[var_name].set_value(data[var_name].iloc[0])
else:
mod.input_vars[var_name].set_value(data[var_name])
vars_ppc = [v for v in [x, y, hue, facet_col, facet_row, style] if v is not None and v != y_level_name]
pps = mod.sample_posterior_predictive(vars=vars_ppc, num_draws_from_params=num_draws_from_params, out_of_sample=out_of_sample)
df_ppc_cont = []
for var in vars_ppc:
label = [var] if (y_levels is None) or (var!=y) else y_levels
df_ppc_var_cont = []
for ppc_idx, ppc_sample in enumerate(pps[var]):
df_ppc_var = pd.DataFrame(ppc_sample, columns=label)
df_ppc_var['ppc_idx'] = ppc_idx
df_ppc_var_cont.append(df_ppc_var)
df_ppc = pd.concat(df_ppc_var_cont, axis=0)
if var != vars_ppc[-1]:
df_ppc = df_ppc.drop('ppc_idx', axis=1)
df_ppc_cont.append(df_ppc)
df = pd.concat(df_ppc_cont, axis=1)
if maps:
for col in df.columns:
if col in maps:
df[col] = df[col].map({v:k for k,v in maps[col].items()})
if y_levels is not None:
vars_ppc.remove(y)
df = df.melt(id_vars=['ppc_idx']+vars_ppc, value_vars=y_levels, var_name=y_level_name, value_name=y).reset_index()
hue = hue if y_level_name == facet_row or y_level_name == facet_col else y_level_name
# if mean_center_means is not None:
# for var in mean_center_means:
# df[var] += df[var]*mean_center_means[var]['sd']+mean_center_means['mean']
# df_prev = df.drop(['index', 'ppc_idx'], axis=1).groupby(
# ['previous_bout', 'current_epoch', 'feature']).mean().reset_index()
# df_prev.to_csv(
# '../../data/processed/previous_bout_feature.csv')
# df_prev = pd.read_csv('../../data/processed/previous_bout_feature.csv')
#
# df_current = df.drop(['index', 'ppc_idx'], axis=1).groupby(
# ['current_epoch', 'feature']).mean().reset_index()
# df_current.to_csv('../../data/output/current_bout_feature.csv')
#
# df_merged = pd.merge(df_current,df_prev, on=['current_epoch','feature'])
# df_merged['Difference when inc previous stage'] = df_merged['feature_rate_p_x'] - df_merged['feature_rate_p_y']
# df_merged['Trans P when marginalizing over previous stage'] = df_merged['feature
|
split_fun
|
identifier_name
|
|
visualization.py
|
def stage_parameters(trace, stage_param_names, stage_map, label_plot=True):
stage_map = {v:k for k,v in stage_map.items()}
_, axs = model_parameters(trace, stage_param_names)
for param in stage_param_names:
if trace[param].dtype == np.float64:
means = extract_mean_as_array(trace, param, 'df')
print(param, ':\n', sep='')
for idx, row in means.iterrows():
stage_str = [stage_map[row[level]] for level in row.index if level != param]
print(stage_str, row[param])
if label_plot:
axs[0, 0].axvline(row[param], linewidth=0.5, linestyle='--', color='r')
axs[0,0].text(row[param],
(axs[0,0].get_ylim()[1] - axs[0,0].get_ylim()[0])/np.random.normal(loc=2, scale=0.5),
'_'.join(stage_str), rotation=45)
plt.show()
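# Usage sketch (assumptions: `trace` is a PyMC3 trace and the parameter / stage
# names below are illustrative):
#
#   stage_map = {'Wake': 0, 'N1': 1, 'N2': 2, 'N3': 3, 'REM': 4}
#   stage_parameters(trace, ['p_transition'], stage_map, label_plot=True)
#
# This prints the posterior mean of each stage-indexed parameter and marks it on
# the trace plot produced by model_parameters().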
def extract_mode_as_array(trace, var='z', astype='array'):
def trace_mode(x):
return pd.Series(mode(x).mode[0], name='mode')
df = pm.summary(trace, stat_funcs=[trace_mode], varnames=[var])
df = df.reset_index()
def split_fun(x):
if '__' in x:
return [int(x) for x in x.split('__')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0) + 1
var_array = df['mode'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mode'].values, -1)], axis=1))
df_out.columns = list(df_out.columns[:-1]) + [var]
return df_out
def extract_mean_as_array(trace, var='z', astype='array'):
df = pm.summary(trace)
df = df.reset_index()
def split_fun(x):
if '__' in x:
return [int(x) for x in x.split('__')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0)+1
var_array = df['mean'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mean'].values, -1)], axis=1))
idx_cols = [str(i) for i in df_out.columns[:-1]]
df_out.columns = idx_cols+[var]
if astype == 'xarray':
return pd_to_xarray_datacube(df_out, idx_cols, value_col=var)
else:
return df_out
def gen_data_for_plot(data, x, z=None, rand_sample_vars=[], mean_sample_vars=[], const_vars={}, stages='balanced', nstages=5, samples_per_x_range=500, truncate_to_percentile=0):
"""
Generate some data that we can use to plot poterior/param values for
:param data: data used to train model, so that levels of x are known
:param x: continous data for x axis
:param z: catergorical data for y axis
:param rand_sample_vars:
:return:
"""
data_points = data.copy()
unq_x = data[x].unique()
if len(unq_x) < 7: # categorical
x_data = data[x].sample(samples_per_x_range).values
else:
if truncate_to_percentile:
x_data = np.linspace(np.percentile(data[x],truncate_to_percentile), np.percentile(data[x],100-truncate_to_percentile), samples_per_x_range)
else:
x_data = np.linspace(data[x].min(), data[x].max(), samples_per_x_range)
df = pd.DataFrame({x:x_data})
for var in mean_sample_vars:
var_mean = data[var].mean(skipna=True)
var_std = data[var].std(skipna=True)
df[var] = var_mean
data_points = data_points.loc[(var_mean-var_std<data_points[var]) & (data_points[var]<var_mean+var_std),:]
for var in rand_sample_vars:
df[var] = np.random.choice(data[var], size=(samples_per_x_range, ))
for var, val in const_vars.items():
df[var] = [val] * samples_per_x_range
if 'consider' not in var:
var_std = data[var].std(skipna=True)
data_points = data_points.loc[(val - var_std < data_points[var]) & (data_points[var] < val + var_std), :]
if stages == 'balanced':
df_stages = pd.DataFrame({'current_epoch':list(range(nstages))})
n_reps = int(np.ceil(df.shape[0]/df_stages.shape[0]))
df_stages = pd.concat([df_stages]*n_reps, axis=0).iloc[0:samples_per_x_range,:].reset_index(drop=True)
df_stages = df_stages.sample(frac=1).reset_index(drop=True)
df = pd.concat([df, df_stages], axis=1, sort=False)
if z is not None:
data_cont = []
unique_z = data[z].unique()
if len(unique_z) >= 7: # make cont into categorical
unique_z = np.linspace(data[z].min(), data[z].max(), 7)
unique_z += (unique_z[1] - unique_z[0])/2
unique_z = unique_z[:-1]
for z_val in unique_z:
new_df = df.copy()
new_df[z] = z_val
data_cont.append(new_df)
df = pd.concat(data_cont, axis=0)
return df, data_points
def pairplot_divergence(trace, ax=None, divergence=True, color='C3', divergence_color='C2'):
theta = trace.get_values(varname='theta', combine=True)[:, 0]
logtau = trace.get_values(varname='tau_log__', combine=True)
if not ax:
_, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(theta, logtau, 'o', color=color, alpha=.5)
if divergence:
divergent = trace['diverging']
ax.plot(theta[divergent], logtau[divergent], 'o', color=divergence_color)
ax.set_xlabel('theta[0]')
ax.set_ylabel('log(tau)')
ax.set_title('scatter plot between log(tau) and theta[0]');
return ax
def plot_vars(mod, data, x, y, facet_row=None, facet_col=None, hue=None, style=None, y_levels=None, y_level_name='set_y_level_name',
maps=None, data_points=None, mean_center_means=None, vars_to_label=None,
num_draws_from_params=100, out_of_sample=True, combine_trace=False, legend='full', points_alpha=0.01):
for var_name in mod.input_vars:
if 'consider' in var_name:
mod.input_vars[var_name].set_value(data[var_name].iloc[0])
else:
mod.input_vars[var_name].set_value(data[var_name])
vars_ppc = [v for v in [x, y, hue, facet_col, facet_row, style] if v is not None and v != y_level_name]
pps = mod.sample_posterior_predictive(vars=vars_ppc, num_draws_from_params=num_draws_from_params, out_of_sample=out_of_sample)
df_ppc_cont = []
for var in vars_ppc:
label = [var] if (y_levels is None) or (var!=y) else y_levels
df_ppc_var_cont = []
for ppc_idx, ppc_sample in enumerate(pps[var]):
df_ppc_var = pd.DataFrame(ppc_sample, columns=label)
df_ppc_var['ppc_idx'] = ppc_idx
df_ppc_var_cont.append(df_ppc_var)
df_ppc = pd.concat(df_ppc_var_cont, axis=0)
if var != vars_ppc[-1]:
df_ppc = df_ppc.drop('ppc_idx', axis=1)
df_ppc_cont.append(df_ppc)
df = pd.concat(df_ppc_cont, axis=1)
if maps:
for col in df.columns:
if col in maps:
df[col] = df[col].map({v:k for k,v
|
summary_df = pm.summary(trace, varnames=varnames)
print(summary_df)
axs = pm.traceplot(trace, varnames=varnames)
return summary_df, axs
|
identifier_body
|
|
visualization.py
|
('__')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0) + 1
var_array = df['mode'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mode'].values, -1)], axis=1))
df_out.columns = list(df_out.columns[:-1]) + [var]
return df_out
def extract_mean_as_array(trace, var='z', astype='array'):
df = pm.summary(trace)
df = df.reset_index()
def split_fun(x):
if '__' in x:
return [int(x) for x in x.split('__')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0)+1
var_array = df['mean'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mean'].values, -1)], axis=1))
idx_cols = [str(i) for i in df_out.columns[:-1]]
df_out.columns = idx_cols+[var]
if astype == 'xarray':
return pd_to_xarray_datacube(df_out, idx_cols, value_col=var)
else:
return df_out
def gen_data_for_plot(data, x, z=None, rand_sample_vars=[], mean_sample_vars=[], const_vars={}, stages='balanced', nstages=5, samples_per_x_range=500, truncate_to_percentile=0):
"""
Generate some data that we can use to plot poterior/param values for
:param data: data used to train model, so that levels of x are known
:param x: continous data for x axis
:param z: catergorical data for y axis
:param rand_sample_vars:
:return:
"""
data_points = data.copy()
unq_x = data[x].unique()
if len(unq_x) < 7: # categorical
x_data = data[x].sample(samples_per_x_range).values
|
x_data = np.linspace(data[x].min(), data[x].max(), samples_per_x_range)
df = pd.DataFrame({x:x_data})
for var in mean_sample_vars:
var_mean = data[var].mean(skipna=True)
var_std = data[var].std(skipna=True)
df[var] = var_mean
data_points = data_points.loc[(var_mean-var_std<data_points[var]) & (data_points[var]<var_mean+var_std),:]
for var in rand_sample_vars:
df[var] = np.random.choice(data[var], size=(samples_per_x_range, ))
for var, val in const_vars.items():
df[var] = [val] * samples_per_x_range
if 'consider' not in var:
var_std = data[var].std(skipna=True)
data_points = data_points.loc[(val - var_std < data_points[var]) & (data_points[var] < val + var_std), :]
if stages == 'balanced':
df_stages = pd.DataFrame({'current_epoch':list(range(nstages))})
n_reps = int(np.ceil(df.shape[0]/df_stages.shape[0]))
df_stages = pd.concat([df_stages]*n_reps, axis=0).iloc[0:samples_per_x_range,:].reset_index(drop=True)
df_stages = df_stages.sample(frac=1).reset_index(drop=True)
df = pd.concat([df, df_stages], axis=1, sort=False)
if z is not None:
data_cont = []
unique_z = data[z].unique()
if len(unique_z) >= 7: # make cont into categorical
unique_z = np.linspace(data[z].min(), data[z].max(), 7)
unique_z += (unique_z[1] - unique_z[0])/2
unique_z = unique_z[:-1]
for z_val in unique_z:
new_df = df.copy()
new_df[z] = z_val
data_cont.append(new_df)
df = pd.concat(data_cont, axis=0)
return df, data_points
def pairplot_divergence(trace, ax=None, divergence=True, color='C3', divergence_color='C2'):
theta = trace.get_values(varname='theta', combine=True)[:, 0]
logtau = trace.get_values(varname='tau_log__', combine=True)
if not ax:
_, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(theta, logtau, 'o', color=color, alpha=.5)
if divergence:
divergent = trace['diverging']
ax.plot(theta[divergent], logtau[divergent], 'o', color=divergence_color)
ax.set_xlabel('theta[0]')
ax.set_ylabel('log(tau)')
ax.set_title('scatter plot between log(tau) and theta[0]');
return ax
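# Usage sketch (assumption: `trace` comes from a model that contains 'theta' and a
# log-transformed 'tau', e.g. a centered hierarchical model):
#
#   ax = pairplot_divergence(trace, divergence=True)
#   plt.show()
#
# Divergent transitions are drawn in a second colour and typically cluster in the
# narrow funnel at small tau.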
def plot_vars(mod, data, x, y, facet_row=None, facet_col=None, hue=None, style=None, y_levels=None, y_level_name='set_y_level_name',
maps=None, data_points=None, mean_center_means=None, vars_to_label=None,
num_draws_from_params=100, out_of_sample=True, combine_trace=False, legend='full', points_alpha=0.01):
for var_name in mod.input_vars:
if 'consider' in var_name:
mod.input_vars[var_name].set_value(data[var_name].iloc[0])
else:
mod.input_vars[var_name].set_value(data[var_name])
vars_ppc = [v for v in [x, y, hue, facet_col, facet_row, style] if v is not None and v != y_level_name]
pps = mod.sample_posterior_predictive(vars=vars_ppc, num_draws_from_params=num_draws_from_params, out_of_sample=out_of_sample)
df_ppc_cont = []
for var in vars_ppc:
label = [var] if (y_levels is None) or (var!=y) else y_levels
df_ppc_var_cont = []
for ppc_idx, ppc_sample in enumerate(pps[var]):
df_ppc_var = pd.DataFrame(ppc_sample, columns=label)
df_ppc_var['ppc_idx'] = ppc_idx
df_ppc_var_cont.append(df_ppc_var)
df_ppc = pd.concat(df_ppc_var_cont, axis=0)
if var != vars_ppc[-1]:
df_ppc = df_ppc.drop('ppc_idx', axis=1)
df_ppc_cont.append(df_ppc)
df = pd.concat(df_ppc_cont, axis=1)
if maps:
for col in df.columns:
if col in maps:
df[col] = df[col].map({v:k for k,v in maps[col].items()})
if y_levels is not None:
vars_ppc.remove(y)
df = df.melt(id_vars=['ppc_idx']+vars_ppc, value_vars=y_levels, var_name=y_level_name, value_name=y).reset_index()
hue = hue if y_level_name == facet_row or y_level_name == facet_col else y_level_name
# if mean_center_means is not None:
# for var in mean_center_means:
# df[var] += df[var]*mean_center_means[var]['sd']+mean_center_means['mean']
# df_prev = df.drop(['index', 'ppc_idx'], axis=1).groupby(
# ['previous_bout', 'current_epoch', 'feature']).mean().reset_index()
# df_prev.to_csv(
# '../../data/processed/previous_bout_feature.csv')
# df_prev = pd.read_csv('../../data/processed/previous_bout_feature.csv')
#
# df_current = df.drop(['index', 'ppc_idx'], axis=1).groupby(
# ['current_epoch', 'feature']).mean().reset_index()
# df_current.to_csv('../../data/output/current_bout_feature.csv')
#
# df_merged = pd.merge(df_current,df_prev, on=['current_epoch','feature'])
# df_merged['Difference when inc previous stage'] = df_merged['feature_rate_p_x'] - df_merged['feature_rate_p_y']
# df_merged['Trans P when marginalizing over previous stage'] = df_merged['feature_rate_p_x']
# df_merged['Trans P inc previous stage'] = df_merged['feature_rate_p_y
|
else:
if truncate_to_percentile:
x_data = np.linspace(np.percentile(data[x],truncate_to_percentile), np.percentile(data[x],100-truncate_to_percentile), samples_per_x_range)
else:
|
random_line_split
|
visualization.py
|
')[1].split('_')]
else:
return [0]
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0) + 1
var_array = df['mode'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mode'].values, -1)], axis=1))
df_out.columns = list(df_out.columns[:-1]) + [var]
return df_out
def extract_mean_as_array(trace, var='z', astype='array'):
df = pm.summary(trace)
df = df.reset_index()
def split_fun(x):
if '__' in x:
return [int(x) for x in x.split('__')[1].split('_')]
else:
|
df['var type'] = df['index'].apply(lambda x: x.split('__')[0])
df = df.loc[df['var type'] == var, :]
var_idxs = df['index'].apply(split_fun)
indexs = np.stack(var_idxs)
if astype == 'array':
sizes = indexs.max(axis=0)+1
var_array = df['mean'].copy().values.reshape(sizes)
return var_array
else:
df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mean'].values, -1)], axis=1))
idx_cols = [str(i) for i in df_out.columns[:-1]]
df_out.columns = idx_cols+[var]
if astype == 'xarray':
return pd_to_xarray_datacube(df_out, idx_cols, value_col=var)
else:
return df_out
def gen_data_for_plot(data, x, z=None, rand_sample_vars=[], mean_sample_vars=[], const_vars={}, stages='balanced', nstages=5, samples_per_x_range=500, truncate_to_percentile=0):
"""
Generate some data that we can use to plot poterior/param values for
:param data: data used to train model, so that levels of x are known
:param x: continous data for x axis
:param z: catergorical data for y axis
:param rand_sample_vars:
:return:
"""
data_points = data.copy()
unq_x = data[x].unique()
if len(unq_x) < 7: # categorical
x_data = data[x].sample(samples_per_x_range).values
else:
if truncate_to_percentile:
x_data = np.linspace(np.percentile(data[x],truncate_to_percentile), np.percentile(data[x],100-truncate_to_percentile), samples_per_x_range)
else:
x_data = np.linspace(data[x].min(), data[x].max(), samples_per_x_range)
df = pd.DataFrame({x:x_data})
for var in mean_sample_vars:
var_mean = data[var].mean(skipna=True)
var_std = data[var].std(skipna=True)
df[var] = var_mean
data_points = data_points.loc[(var_mean-var_std<data_points[var]) & (data_points[var]<var_mean+var_std),:]
for var in rand_sample_vars:
df[var] = np.random.choice(data[var], size=(samples_per_x_range, ))
for var, val in const_vars.items():
df[var] = [val] * samples_per_x_range
if 'consider' not in var:
var_std = data[var].std(skipna=True)
data_points = data_points.loc[(val - var_std < data_points[var]) & (data_points[var] < val + var_std), :]
if stages == 'balanced':
df_stages = pd.DataFrame({'current_epoch':list(range(nstages))})
n_reps = int(np.ceil(df.shape[0]/df_stages.shape[0]))
df_stages = pd.concat([df_stages]*n_reps, axis=0).iloc[0:samples_per_x_range,:].reset_index(drop=True)
df_stages = df_stages.sample(frac=1).reset_index(drop=True)
df = pd.concat([df, df_stages], axis=1, sort=False)
if z is not None:
data_cont = []
unique_z = data[z].unique()
if len(unique_z) >= 7: # make cont into categorical
unique_z = np.linspace(data[z].min(), data[z].max(), 7)
unique_z += (unique_z[1] - unique_z[0])/2
unique_z = unique_z[:-1]
for z_val in unique_z:
new_df = df.copy()
new_df[z] = z_val
data_cont.append(new_df)
df = pd.concat(data_cont, axis=0)
return df, data_points
def pairplot_divergence(trace, ax=None, divergence=True, color='C3', divergence_color='C2'):
theta = trace.get_values(varname='theta', combine=True)[:, 0]
logtau = trace.get_values(varname='tau_log__', combine=True)
if not ax:
_, ax = plt.subplots(1, 1, figsize=(10, 5))
ax.plot(theta, logtau, 'o', color=color, alpha=.5)
if divergence:
divergent = trace['diverging']
ax.plot(theta[divergent], logtau[divergent], 'o', color=divergence_color)
ax.set_xlabel('theta[0]')
ax.set_ylabel('log(tau)')
ax.set_title('scatter plot between log(tau) and theta[0]');
return ax
def plot_vars(mod, data, x, y, facet_row=None, facet_col=None, hue=None, style=None, y_levels=None, y_level_name='set_y_level_name',
maps=None, data_points=None, mean_center_means=None, vars_to_label=None,
num_draws_from_params=100, out_of_sample=True, combine_trace=False, legend='full', points_alpha=0.01):
for var_name in mod.input_vars:
if 'consider' in var_name:
mod.input_vars[var_name].set_value(data[var_name].iloc[0])
else:
mod.input_vars[var_name].set_value(data[var_name])
vars_ppc = [v for v in [x, y, hue, facet_col, facet_row, style] if v is not None and v != y_level_name]
pps = mod.sample_posterior_predictive(vars=vars_ppc, num_draws_from_params=num_draws_from_params, out_of_sample=out_of_sample)
df_ppc_cont = []
for var in vars_ppc:
label = [var] if (y_levels is None) or (var!=y) else y_levels
df_ppc_var_cont = []
for ppc_idx, ppc_sample in enumerate(pps[var]):
df_ppc_var = pd.DataFrame(ppc_sample, columns=label)
df_ppc_var['ppc_idx'] = ppc_idx
df_ppc_var_cont.append(df_ppc_var)
df_ppc = pd.concat(df_ppc_var_cont, axis=0)
if var != vars_ppc[-1]:
df_ppc = df_ppc.drop('ppc_idx', axis=1)
df_ppc_cont.append(df_ppc)
df = pd.concat(df_ppc_cont, axis=1)
if maps:
for col in df.columns:
if col in maps:
df[col] = df[col].map({v:k for k,v in maps[col].items()})
if y_levels is not None:
vars_ppc.remove(y)
df = df.melt(id_vars=['ppc_idx']+vars_ppc, value_vars=y_levels, var_name=y_level_name, value_name=y).reset_index()
hue = hue if y_level_name == facet_row or y_level_name == facet_col else y_level_name
# if mean_center_means is not None:
# for var in mean_center_means:
# df[var] += df[var]*mean_center_means[var]['sd']+mean_center_means['mean']
# df_prev = df.drop(['index', 'ppc_idx'], axis=1).groupby(
# ['previous_bout', 'current_epoch', 'feature']).mean().reset_index()
# df_prev.to_csv(
# '../../data/processed/previous_bout_feature.csv')
# df_prev = pd.read_csv('../../data/processed/previous_bout_feature.csv')
#
# df_current = df.drop(['index', 'ppc_idx'], axis=1).groupby(
# ['current_epoch', 'feature']).mean().reset_index()
# df_current.to_csv('../../data/output/current_bout_feature.csv')
#
# df_merged = pd.merge(df_current,df_prev, on=['current_epoch','feature'])
# df_merged['Difference when inc previous stage'] = df_merged['feature_rate_p_x'] - df_merged['feature_rate_p_y']
# df_merged['Trans P when marginalizing over previous stage'] = df_merged['feature_rate_p_x']
# df_merged['Trans P inc previous stage'] = df_merged['feature_rate_p
|
return [0]
|
conditional_block
|
train_ddpg_selfplay.py
|
os.path.join('runs', current_time + '_'
+ hp.ENV_NAME + '_' + hp.EXP_NAME)
pi_atk = DDPGActor(hp.N_OBS, hp.N_ACTS).to(device)
Q_atk = DDPGCritic(hp.N_OBS, hp.N_ACTS).to(device)
pi_gk = DDPGActor(hp.N_OBS, hp.N_ACTS).to(device)
Q_gk = DDPGCritic(hp.N_OBS, hp.N_ACTS).to(device)
# Playing
pi_atk.share_memory()
pi_gk.share_memory()
exp_queue = mp.Queue(maxsize=hp.EXP_GRAD_RATIO)
finish_event = mp.Event()
sigma_m = mp.Value('f', hp.NOISE_SIGMA_INITIAL)
gif_req_m = mp.Value('i', -1)
data_proc_list = []
for _ in range(hp.N_ROLLOUT_PROCESSES):
data_proc = mp.Process(
target=data_func,
args=(
{'pi_atk': pi_atk, 'pi_gk': pi_gk},
device,
exp_queue,
finish_event,
sigma_m,
gif_req_m,
hp
)
)
data_proc.start()
data_proc_list.append(data_proc)
# Training
tgt_pi_atk = TargetActor(pi_atk)
tgt_Q_atk = TargetCritic(Q_atk)
tgt_pi_gk = TargetActor(pi_gk)
tgt_Q_gk = TargetCritic(Q_gk)
pi_opt_atk = optim.Adam(pi_atk.parameters(), lr=hp.LEARNING_RATE)
Q_opt_atk = optim.Adam(Q_atk.parameters(), lr=hp.LEARNING_RATE)
pi_opt_gk = optim.Adam(pi_gk.parameters(), lr=hp.LEARNING_RATE)
Q_opt_gk = optim.Adam(Q_gk.parameters(), lr=hp.LEARNING_RATE)
buffer_atk = ReplayBuffer(buffer_size=hp.REPLAY_SIZE,
observation_space=hp.observation_space,
action_space=hp.action_space,
device=hp.DEVICE
)
buffer_gk = ReplayBuffer(buffer_size=hp.REPLAY_SIZE,
observation_space=hp.observation_space,
action_space=hp.action_space,
device=hp.DEVICE
)
n_grads = 0
n_samples = 0
n_episodes = 0
best_reward_atk = None
best_reward_gk = None
last_gif = None
try:
while n_grads < hp.TOTAL_GRAD_STEPS:
metrics = {}
ep_infos = list()
st_time = time.perf_counter()
# Collect EXP_GRAD_RATIO sample for each grad step
new_samples = 0
while new_samples < hp.EXP_GRAD_RATIO:
exp = exp_queue.get()
if exp is None:
raise Exception("got None value in queue")
safe_exp = copy.deepcopy(exp)
del(exp)
# Dict is returned with end of episode info
if 'exp_atk' not in safe_exp:
logs = {"ep_info/"+key: value for key,
value in safe_exp.items() if 'truncated' not in key}
ep_infos.append(logs)
n_episodes += 1
else:
buffer_atk.add(
obs=safe_exp['exp_atk'].state,
next_obs=safe_exp['exp_atk'].last_state if safe_exp['exp_atk'].last_state is not None
else safe_exp['exp_atk'].state,
action=safe_exp['exp_atk'].action,
reward=safe_exp['exp_atk'].reward,
done=False if safe_exp['exp_atk'].last_state is not None else True
)
buffer_gk.add(
obs=safe_exp['exp_gk'].state,
next_obs=safe_exp['exp_gk'].last_state if safe_exp['exp_gk'].last_state is not None
else safe_exp['exp_gk'].state,
action=safe_exp['exp_gk'].action,
reward=safe_exp['exp_gk'].reward,
done=False if safe_exp['exp_gk'].last_state is not None else True
)
new_samples += 1
n_samples += new_samples
sample_time = time.perf_counter()
# Only start training after buffer is larger than initial value
if buffer_atk.size() < hp.REPLAY_INITIAL or buffer_gk.size() < hp.REPLAY_INITIAL:
continue
# Sample a batch and load it as a tensor on device
batch_atk = buffer_atk.sample(hp.BATCH_SIZE)
S_v_atk = batch_atk.observations
A_v_atk = batch_atk.actions
r_v_atk = batch_atk.rewards
dones_atk = batch_atk.dones
S_next_v_atk = batch_atk.next_observations
batch_gk = buffer_gk.sample(hp.BATCH_SIZE)
S_v_gk = batch_gk.observations
A_v_gk = batch_gk.actions
r_v_gk = batch_gk.rewards
dones_gk = batch_gk.dones
S_next_v_gk = batch_gk.next_observations
# train critic
Q_opt_atk.zero_grad()
Q_v_atk = Q_atk(S_v_atk, A_v_atk) # expected Q for S,A
            A_next_v_atk = tgt_pi_atk(S_next_v_atk)  # Get a bootstrap action for S_next
            Q_next_v_atk = tgt_Q_atk(S_next_v_atk, A_next_v_atk)  # Bootstrap Q_next
            Q_next_v_atk[dones_atk == 1.] = 0.0  # No bootstrap if transition is terminal
            # Calculate a reference Q value using the bootstrap Q (attacker)
Q_ref_v_atk = r_v_atk + Q_next_v_atk * (hp.GAMMA**hp.REWARD_STEPS)
Q_loss_v_atk = F.mse_loss(Q_v_atk, Q_ref_v_atk.detach())
Q_loss_v_atk.backward()
Q_opt_atk.step()
metrics["train/loss_Q_atk"] = Q_loss_v_atk.cpu().detach().numpy()
Q_opt_gk.zero_grad()
Q_v_gk = Q_gk(S_v_gk, A_v_gk) # expected Q for S,A
            A_next_v_gk = tgt_pi_gk(S_next_v_gk)  # Get a bootstrap action for S_next
            Q_next_v_gk = tgt_Q_gk(S_next_v_gk, A_next_v_gk)  # Bootstrap Q_next
            Q_next_v_gk[dones_gk == 1.] = 0.0  # No bootstrap if transition is terminal
            # Calculate a reference Q value using the bootstrap Q (goalkeeper)
Q_ref_v_gk = r_v_gk + Q_next_v_gk * (hp.GAMMA**hp.REWARD_STEPS)
Q_loss_v_gk = F.mse_loss(Q_v_gk, Q_ref_v_gk.detach())
Q_loss_v_gk.backward()
Q_opt_gk.step()
metrics["train/loss_Q_gk"] = Q_loss_v_gk.cpu().detach().numpy()
# train actor - Maximize Q value received over every S
pi_opt_atk.zero_grad()
A_cur_v_atk = pi_atk(S_v_atk)
pi_loss_v_atk = -Q_atk(S_v_atk, A_cur_v_atk)
pi_loss_v_atk = pi_loss_v_atk.mean()
pi_loss_v_atk.backward()
pi_opt_atk.step()
metrics["train/loss_pi_atk"] = pi_loss_v_atk.cpu().detach().numpy()
pi_opt_gk.zero_grad()
A_cur_v_gk = pi_gk(S_v_gk)
pi_loss_v_gk = -Q_gk(S_v_gk, A_cur_v_gk)
pi_loss_v_gk = pi_loss_v_gk.mean()
pi_loss_v_gk.backward()
pi_opt_gk.step()
metrics["train/loss_pi_gk"] = pi_loss_v_gk.cpu().detach().numpy()
# Sync target networks
tgt_pi_atk.sync(alpha=1 - 1e-3)
tgt_Q_atk.sync(alpha=1 - 1e-3)
tgt_pi_gk.sync(alpha=1 - 1e-3)
tgt_Q_gk.sync(alpha=1 - 1e-3)
n_grads += 1
grad_time = time.perf_counter()
metrics['speed/samples'] = new_samples/(sample_time - st_time)
metrics['speed/grad'] = 1/(grad_time - sample_time)
metrics['speed/total'] = 1/(grad_time - st_time)
metrics['counters/samples'] = n_samples
metrics['counters/grads'] = n_grads
metrics['counters/episodes'] = n_episodes
metrics['counters/buffer_len_atk'] = buffer_atk.size()
metrics['counters/buffer_len_gk'] = buffer_gk.size()
if ep_infos:
for key in ep_infos[0].keys():
|
metrics[key] = np.mean([info[key] for info in ep_infos])
|
conditional_block
|
|
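The critic update in the train_ddpg_selfplay.py fragment above forms a bootstrapped TD target, Q_ref = r + gamma**n * Q_tgt(s', pi_tgt(s')), zeroes the bootstrap on terminal transitions, and regresses Q(s, a) onto it with an MSE loss. A stripped-down, self-contained version of that single step (tiny linear stand-ins for the networks; shapes and hyperparameters are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

obs_dim, act_dim, batch = 4, 2, 8
Q = nn.Linear(obs_dim + act_dim, 1)          # stand-in critic
tgt_Q = nn.Linear(obs_dim + act_dim, 1)      # stand-in target critic
tgt_pi = nn.Linear(obs_dim, act_dim)         # stand-in target actor
opt = torch.optim.Adam(Q.parameters(), lr=1e-4)

S = torch.randn(batch, obs_dim)
A = torch.randn(batch, act_dim)
r = torch.randn(batch, 1)
dones = torch.randint(0, 2, (batch, 1)).float()
S_next = torch.randn(batch, obs_dim)
gamma, reward_steps = 0.95, 1

opt.zero_grad()
Q_v = Q(torch.cat([S, A], dim=1))                       # Q(s, a)
with torch.no_grad():
    A_next = tgt_pi(S_next)                             # bootstrap action
    Q_next = tgt_Q(torch.cat([S_next, A_next], dim=1))  # bootstrap value
    Q_next[dones == 1.] = 0.0                           # no bootstrap past terminal states
    Q_ref = r + Q_next * (gamma ** reward_steps)        # TD target
loss = F.mse_loss(Q_v, Q_ref)
loss.backward()
opt.step()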
train_ddpg_selfplay.py
|
import numpy as np
import rc_gym
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.optim as optim
import wandb
from agents.ddpg_selfplay import (DDPGHP, DDPGActor, DDPGCritic, TargetActor,
TargetCritic, data_func)
from agents.utils import ReplayBuffer, save_checkpoint, unpack_batch, ExperienceFirstLast
if __name__ == "__main__":
mp.set_start_method('spawn')
os.environ['OMP_NUM_THREADS'] = "1"
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False,
action="store_true", help="Enable cuda")
parser.add_argument("-n", "--name", required=True,
help="Name of the run")
parser.add_argument("-e", "--env", required=True,
help="Name of the gym environment")
args = parser.parse_args()
device = "cuda" if args.cuda else "cpu"
# Input Experiment Hyperparameters
hp = DDPGHP(
EXP_NAME=args.name,
DEVICE=device,
ENV_NAME=args.env,
N_ROLLOUT_PROCESSES=3,
LEARNING_RATE=0.0001,
EXP_GRAD_RATIO=10,
BATCH_SIZE=256,
GAMMA=0.95,
REWARD_STEPS=1,
NOISE_SIGMA_INITIAL=0.8,
NOISE_THETA=0.15,
NOISE_SIGMA_DECAY=0.99,
NOISE_SIGMA_MIN=0.15,
NOISE_SIGMA_GRAD_STEPS=3000,
REPLAY_SIZE=1000000,
REPLAY_INITIAL=100000,
SAVE_FREQUENCY=100000,
GIF_FREQUENCY=100000,
TOTAL_GRAD_STEPS=2000000,
MULTI_AGENT=False,
N_AGENTS=1
)
wandb.init(project='RoboCIn-RL', entity='matheusalb',
name=hp.EXP_NAME, config=hp.to_dict())
current_time = datetime.datetime.now().strftime('%b-%d_%H-%M-%S')
tb_path = os.path.join('runs', current_time + '_'
+ hp.ENV_NAME + '_' + hp.EXP_NAME)
pi_atk = DDPGActor(hp.N_OBS, hp.N_ACTS).to(device)
Q_atk = DDPGCritic(hp.N_OBS, hp.N_ACTS).to(device)
pi_gk = DDPGActor(hp.N_OBS, hp.N_ACTS).to(device)
Q_gk = DDPGCritic(hp.N_OBS, hp.N_ACTS).to(device)
# Playing
pi_atk.share_memory()
pi_gk.share_memory()
exp_queue = mp.Queue(maxsize=hp.EXP_GRAD_RATIO)
finish_event = mp.Event()
sigma_m = mp.Value('f', hp.NOISE_SIGMA_INITIAL)
gif_req_m = mp.Value('i', -1)
data_proc_list = []
for _ in range(hp.N_ROLLOUT_PROCESSES):
data_proc = mp.Process(
target=data_func,
args=(
{'pi_atk': pi_atk, 'pi_gk': pi_gk},
device,
exp_queue,
finish_event,
sigma_m,
gif_req_m,
hp
)
)
data_proc.start()
data_proc_list.append(data_proc)
# Training
tgt_pi_atk = TargetActor(pi_atk)
tgt_Q_atk = TargetCritic(Q_atk)
tgt_pi_gk = TargetActor(pi_gk)
tgt_Q_gk = TargetCritic(Q_gk)
pi_opt_atk = optim.Adam(pi_atk.parameters(), lr=hp.LEARNING_RATE)
Q_opt_atk = optim.Adam(Q_atk.parameters(), lr=hp.LEARNING_RATE)
pi_opt_gk = optim.Adam(pi_gk.parameters(), lr=hp.LEARNING_RATE)
Q_opt_gk = optim.Adam(Q_gk.parameters(), lr=hp.LEARNING_RATE)
buffer_atk = ReplayBuffer(buffer_size=hp.REPLAY_SIZE,
observation_space=hp.observation_space,
action_space=hp.action_space,
device=hp.DEVICE
)
buffer_gk = ReplayBuffer(buffer_size=hp.REPLAY_SIZE,
observation_space=hp.observation_space,
action_space=hp.action_space,
device=hp.DEVICE
)
n_grads = 0
n_samples = 0
n_episodes = 0
best_reward_atk = None
best_reward_gk = None
last_gif = None
try:
while n_grads < hp.TOTAL_GRAD_STEPS:
metrics = {}
ep_infos = list()
st_time = time.perf_counter()
            # Collect EXP_GRAD_RATIO samples for each grad step
new_samples = 0
while new_samples < hp.EXP_GRAD_RATIO:
exp = exp_queue.get()
if exp is None:
                    raise Exception("got None value in queue")
safe_exp = copy.deepcopy(exp)
                del exp
                # A dict (rather than an experience pair) carries end-of-episode info
                if 'exp_atk' not in safe_exp:
logs = {"ep_info/"+key: value for key,
value in safe_exp.items() if 'truncated' not in key}
ep_infos.append(logs)
n_episodes += 1
else:
buffer_atk.add(
obs=safe_exp['exp_atk'].state,
next_obs=safe_exp['exp_atk'].last_state if safe_exp['exp_atk'].last_state is not None
else safe_exp['exp_atk'].state,
action=safe_exp['exp_atk'].action,
reward=safe_exp['exp_atk'].reward,
done=False if safe_exp['exp_atk'].last_state is not None else True
)
buffer_gk.add(
obs=safe_exp['exp_gk'].state,
next_obs=safe_exp['exp_gk'].last_state if safe_exp['exp_gk'].last_state is not None
else safe_exp['exp_gk'].state,
action=safe_exp['exp_gk'].action,
reward=safe_exp['exp_gk'].reward,
done=False if safe_exp['exp_gk'].last_state is not None else True
)
new_samples += 1
n_samples += new_samples
sample_time = time.perf_counter()
# Only start training after buffer is larger than initial value
if buffer_atk.size() < hp.REPLAY_INITIAL or buffer_gk.size() < hp.REPLAY_INITIAL:
continue
# Sample a batch and load it as a tensor on device
batch_atk = buffer_atk.sample(hp.BATCH_SIZE)
S_v_atk = batch_atk.observations
A_v_atk = batch_atk.actions
r_v_atk = batch_atk.rewards
dones_atk = batch_atk.dones
S_next_v_atk = batch_atk.next_observations
batch_gk = buffer_gk.sample(hp.BATCH_SIZE)
S_v_gk = batch_gk.observations
A_v_gk = batch_gk.actions
r_v_gk = batch_gk.rewards
dones_gk = batch_gk.dones
S_next_v_gk = batch_gk.next_observations
# train critic
Q_opt_atk.zero_grad()
Q_v_atk = Q_atk(S_v_atk, A_v_atk) # expected Q for S,A
            A_next_v_atk = tgt_pi_atk(S_next_v_atk)  # Get a bootstrap action for S_next
            Q_next_v_atk = tgt_Q_atk(S_next_v_atk, A_next_v_atk)  # Bootstrap Q_next
            Q_next_v_atk[dones_atk == 1.] = 0.0  # No bootstrap if transition is terminal
            # Calculate a reference Q value using the bootstrap Q (attacker)
Q_ref_v_atk = r_v_atk + Q_next_v_atk * (hp.GAMMA**hp.REWARD_STEPS)
Q_loss_v_atk = F.mse_loss(Q_v_atk, Q_ref_v_atk.detach())
Q_loss_v_atk.backward()
Q_opt_atk.step()
metrics["train/loss_Q_atk"] = Q_loss_v_atk.cpu().detach().numpy()
Q_opt_gk.zero_grad()
Q_v_gk = Q_gk(S_v_gk, A_v_gk) # expected Q for S,A
            A_next_v_gk = tgt_pi_gk(S_next_v_gk)  # Get a bootstrap action for S_next
            Q_next_v_gk = tgt_Q_gk(S_next_v_gk, A_next_v_gk)  # Bootstrap Q_next
            Q_next_v_gk[dones_gk == 1.] = 0.0  # No bootstrap if transition is terminal
            # Calculate a reference Q value using the bootstrap Q (goalkeeper)
Q_ref_v_gk = r_v_gk + Q_next_v_gk * (hp.GAMMA**hp.REWARD_STEPS)
Q_loss_v_gk = F.mse_loss(Q_v_gk, Q_ref_v_gk.detach
|
import gym
|
random_line_split
|
|
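TargetActor and TargetCritic above are synced with sync(alpha=1 - 1e-3) after every gradient step; their implementation is not shown in this fragment, but a common choice for such a helper is a Polyak (soft) parameter update. The sketch below shows that assumed behaviour, not the project's actual code:

import torch

def soft_update(target_net, online_net, alpha=1 - 1e-3):
    # target <- alpha * target + (1 - alpha) * online, applied parameter-wise.
    with torch.no_grad():
        for t_param, o_param in zip(target_net.parameters(), online_net.parameters()):
            t_param.mul_(alpha).add_(o_param, alpha=1 - alpha)

# Hypothetical usage with any pair of identically shaped networks:
# soft_update(tgt_Q, Q, alpha=1 - 1e-3)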
spatialfr.py
|
length-cropped_length)/2)
return matrix[i1:i2, i1:i2]
def determine_radius(df):
# Determine the radius of the primary beam.
# Fit a Gaussian to the distribution of RA and Dec positions.
# Use 2x the determined sigma as a cutoff for sources, in steps of 2.5 deg.
ra_hist, ra_bins = np.histogram(df.ra, bins=50)
dec_hist, dec_bins = np.histogram(df.dec, bins=50)
ra_p0 = [max(ra_hist), np.mean(ra_bins), 8]
dec_p0 = [max(dec_hist), np.mean(dec_bins), 8]
def gaussian(x, a, b, c):
return a * np.exp(-(x-b)**2 / (2*c**2))
try:
ra_popt, _ = curve_fit(gaussian, ra_bins[:-1], ra_hist, p0=ra_p0)
dec_popt, _ = curve_fit(gaussian, dec_bins[:-1], dec_hist, p0=dec_p0)
radius = np.ceil(
(2*np.mean([abs(ra_popt[2]), abs(dec_popt[2])]))/2.5)*2.5
        # Check this radius against the extent of sources available.
if radius > max(df.ra) - min(df.ra) or radius > max(df.dec) - min(df.dec):
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
print('gaussian fit done')
except:
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
# print(radius)
return radius
def interpfr(
radius, ra, dec, fr, fr_err, ra_centre, dec_centre, obsid, hr,
interp_method='linear', resolution=200):
grid_x, grid_y = np.meshgrid(
np.linspace(-radius+5.25, radius-5.25, resolution),
np.linspace(-radius+5.25, radius-5.25, resolution))
grid_x += ra_centre
grid_y += dec_centre
grid_fr = np.fliplr(
griddata(
np.vstack((ra, dec)).T, fr,
(grid_x, grid_y), method=interp_method,
fill_value=0))
beam_extent = (
ra_centre+radius,
ra_centre-radius,
dec_centre-radius,
dec_centre+radius)
print(beam_extent)
crop_factor = 1./np.sqrt(2)
cropped_beam_extent = (
ra_centre+radius*crop_factor,
(ra_centre-radius*crop_factor),
dec_centre-radius*crop_factor,
dec_centre+radius*crop_factor)
print(cropped_beam_extent)
grid_fr = cropper(grid_fr)
# np.savetxt("%s_%shr_interpfr_grid.csv" % (obsid, hr), grid_fr, delimiter=",")
return grid_fr, cropped_beam_extent
def plot_interp_fr(grid, beam_extent):
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
img1 = ax.imshow(
grid, cmap="plasma", extent=beam_extent, origin="lower")
ax.set_xlabel("RA [deg]")
ax.set_ylabel("Dec [deg]")
fig.colorbar(img1, ax=ax, format="%.2f", fraction=0.046, pad=0.04)
fig.suptitle('Interpolated Faraday depth at %shrs %s' % (hr, str(obsid)))
plt.savefig('%s_%shrs_ionfr_interped.png' % (obsid, hr))
def plane_fit(grid, obsid, hr):
print('shape', grid.shape)
m = grid.shape[0] # size of the matrix
X1, X2 = np.mgrid[:m, :m]
# Regression
X = np.hstack((np.reshape(X1, (m*m, 1)), np.reshape(X2, (m*m, 1))))
X = np.hstack((np.ones((m*m, 1)), X))
YY = np.reshape(grid, (m*m, 1))
theta = np.dot(np.dot(np.linalg.pinv(np.dot(X.transpose(), X)), X.transpose()), YY)
# ax.scatter(grid[:, 0], grid[:, 1], grid[:, 2], c='r', s=20)
plane = np.reshape(np.dot(X, theta), (m, m))
# Subtraction
grid_sub = grid - plane
return X1, X2, grid, plane, grid_sub
def plot_3d_plane_fit(X1, X2, grid, grid_sub, plane, obsid, hr):
fig = plt.figure(figsize=(18, 15))
ax = fig.add_subplot(3, 1, 1, projection='3d')
# jet = plt.get_cmap('jet')
plasma = plt.get_cmap('plasma')
ff = ax.plot_surface(X1, X2, grid, rstride=1, cstride=1, cmap=plasma, linewidth=0)
fig.colorbar(ff, shrink=0.8)
ax = fig.add_subplot(3, 1, 2, projection='3d')
surf = ax.plot_surface(X1, X2, plane) # , cmap=jet)
ax.plot_surface(X1, X2, grid, rstride=1, cstride=1, cmap=plasma, linewidth=0)
fig.colorbar(surf, shrink=0.8)
ax = fig.add_subplot(3, 1, 3, projection='3d')
subt = ax.plot_surface(X1, X2, grid_sub, rstride=1, cstride=1, cmap=plasma, linewidth=0)
fig.colorbar(subt, shrink=0.8)
plt.savefig('%s_%shrs_ionfr_plane_fit_resids.png' % (obsid, hr))
plt.show()
def linefit(grid, obsid, beam_lim):
slicepoints = np.linspace(60, 76, 8)
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
for i in slicepoints:
y = grid[int(i), :]
ras = np.linspace(beam_lim[0], beam_lim[1], len(y))
x = np.linspace(beam_lim[2], beam_lim[3], len(y))
ax.plot(x, y, label=str(round(ras[int(i)], 1)))
# regplot = sns.regplot(x=x, y=y)
# yy = sns.residplot(x, y, lowess=True, scatter_kws={"color": "black"}, line_kws={"color": "red"})
# yy.set(xlabel='Dec (deg)', ylabel='Residuals', title='Fitted rotation measure curve and residuals (constant Ra)')
ax.legend(loc="upper center", title='RA [deg]')
ax.set_title('Faraday depth residuals along constant RA')
ax.set_xlabel('DEC [deg]')
ax.set_ylabel("r'$\phi$' Residuals")
plt.savefig('%s_14hrs_residuals_constdeg.png' % (obsid))
#fig = regplot.get_figure()
#fig.savefig('1065880128_14hrs_1ra_ionfr.png')
def spatial_fr_plot(ra, dec, fr, fr_err, obsid, title='xx'):
print('Using plotly')
size = fr_err
fig = go.Figure(data=[go.Scatter(
x=ra,
y=dec,
mode='markers',
text=fr,
hoverinfo='text',
marker=dict(
color=fr,
colorscale='Magma_r',
size=fr_err,
# sizemode = 'diameter',
showscale=True,
sizeref=2. * max(size) / (5**2)
)
)])
fig.update_layout(
autosize=False,
width=1000,
height=800,
title='colour=Faraday rotation, size=error',
xaxis=dict(title='Ra [deg]'),
yaxis=dict(title='Dec [deg]'))
# xaxis=dict(range=[beam_lim[0], beam_lim[1]], title='Ra [deg]'),
# yaxis=dict(range=[beam_lim[2], beam_lim[3]], title='Dec [deg]'))
fig.update_xaxes(autorange="reversed")
#fig.show()
fig.write_image(title)
def fr_resids_plot(grid_sub, ra, dec, beam_extent, obsid):
|
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
img1 = ax.imshow(grid_sub, extent=beam_extent, cmap="plasma")
ax.set_xlabel("RA [deg]")
ax.set_ylabel("Dec [deg]")
ax.set_title("Faraday depth residuals after plane surface fit")
fig.colorbar(img1, ax=ax, format="%.2f", fraction=0.046, pad=0.04)
plt.savefig('%s_14hrs_fr_resids.png' % (obsid))
# plt.show()
|
identifier_body
|
|
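plane_fit() in spatialfr.py above fits a plane to the interpolated Faraday-depth grid by solving the normal equations theta = pinv(X^T X) X^T y with a [1, x1, x2] design matrix, then subtracts the plane to get residuals. A compact standalone version on a synthetic tilted grid (grid size and noise level are illustrative):

import numpy as np

m = 50
X1, X2 = np.mgrid[:m, :m]
grid = 0.3 * X1 - 0.1 * X2 + 5.0 + np.random.normal(0, 0.05, (m, m))  # synthetic tilted plane

# Design matrix [1, x1, x2] and flattened response, as in plane_fit().
X = np.column_stack([np.ones(m * m), X1.ravel(), X2.ravel()])
y = grid.reshape(m * m, 1)
theta = np.linalg.pinv(X.T @ X) @ X.T @ y   # least-squares plane coefficients
plane = (X @ theta).reshape(m, m)
residuals = grid - plane                    # what fr_resids_plot() would display
print(theta.ravel(), residuals.std())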
spatialfr.py
|
    file whose path is given.
'''
yaml_file = '/home/chege/Desktop/curtin_work/vega/%s.yaml' % (obsid)
ras = []
decs = []
fluxes = []
ampscales = []
stds = []
with open(yaml_file, 'r') as f:
unpacked = yaml.load(f, Loader=SafeLoader)
for sos in sources:
for soc in unpacked['sources']:
if unpacked['sources'][soc]['name'] == sos:
print('.........found source')
print('................getting its ra & dec')
ras.append(float(unpacked['sources'][soc]['ra']))
decs.append(float(unpacked['sources'][soc]['dec']))
ampscales.append(float(np.nanmedian(unpacked['sources'][soc]['amp_scales'][1:13])))
stds.append(float(np.nanstd(unpacked['sources'][soc]['amp_scales'][1:13])))
fluxes.append(float(unpacked['sources'][soc]['flux_density']))
return ras, decs, fluxes, ampscales, stds
def make_df(hour, obsid, csvfyl=None):
if csvfyl is not None: # if we have a csv file with ra dec, fr, and fr_err
df = pd.read_csv(csvfyl)
df = df.drop(df.columns[0], axis=1)
else:
txtdir = '/home/chege/Desktop/curtin_work/run_ionfr/singleLOScomparisons/1066568224_frtxts'
fyls = sorted([fyl for fyl in os.listdir(txtdir) if fyl.split('.')[-1] == 'txt'])
sources = [fyl.split('_')[0] for fyl in fyls]
print(len(sources))
ras, decs, fluxes, ampscales, stds = get_radec(sources, obsid)
frs = []
frs_errs = []
i = 1
for fyl in fyls:
print(i)
i += 1
fylpath = txtdir + '/' + fyl
fr_value, fr_value_err = get_fr_value(fylpath, hour)
frs.append(float(fr_value))
frs_errs.append(float(fr_value_err))
df = pd.DataFrame(
list(zip(ras, decs, fluxes, ampscales, stds, frs, frs_errs)),
columns=['ra', 'dec', 'flux', 'ampscales', 'stds', 'fr', 'fr_err'])
print('made dataframe with radec and fr values')
df = df.dropna(axis=0)
# blacklist = df[((df.stds - df.stds.median()) / df.stds.std()).abs() > 3]
# print(blacklist)
# blacklist.to_csv('blacklist_sources.csv', mode='a', header=False)
df = df[((df.stds - df.stds.median()) / df.stds.std()).abs() < 3]
print(df.head())
        df = df.nlargest(700, 'flux', keep='all')
# df.to_csv('%s_%shrs_ionfr.csv' % (obsid, hour))
return df
def get_center(df):
bulk_centre_ra = np.mean(df.ra)
bulk_centre_dec = np.mean(df.dec)
radius = determine_radius(df)
# Recalculate the centre, based on the sources within the radius,
# and specify the sources to be used for analysis.
filtered = np.array(
[[a, b, c, d, e, f, g] for a, b, c, d, e, f, g
in zip(df.ra, df.dec, df.flux, df.ampscales, df.stds, df.fr, df.fr_err)
if abs(a-bulk_centre_ra) < radius
and abs(b-bulk_centre_dec) < radius])
fra = filtered[:, 0]
fdec = filtered[:, 1]
fflux = filtered[:, 2]
fampscales = filtered[:, 3]
fstds = filtered[:, 4]
f_fr = filtered[:, 5]
f_fr_err = filtered[:, 6]
ra_centre = fra.mean()
dec_centre = fdec.mean()
return radius, fra, fdec, fflux, fampscales, fstds, f_fr, f_fr_err, ra_centre, dec_centre
def cropper(matrix, crop_factor=1./np.sqrt(2)):
length = len(matrix)
cropped_length = int(length * crop_factor)
i1 = int((length-cropped_length)/2)
i2 = length-int((length-cropped_length)/2)
return matrix[i1:i2, i1:i2]
def
|
(df):
# Determine the radius of the primary beam.
# Fit a Gaussian to the distribution of RA and Dec positions.
# Use 2x the determined sigma as a cutoff for sources, in steps of 2.5 deg.
ra_hist, ra_bins = np.histogram(df.ra, bins=50)
dec_hist, dec_bins = np.histogram(df.dec, bins=50)
ra_p0 = [max(ra_hist), np.mean(ra_bins), 8]
dec_p0 = [max(dec_hist), np.mean(dec_bins), 8]
def gaussian(x, a, b, c):
return a * np.exp(-(x-b)**2 / (2*c**2))
try:
ra_popt, _ = curve_fit(gaussian, ra_bins[:-1], ra_hist, p0=ra_p0)
dec_popt, _ = curve_fit(gaussian, dec_bins[:-1], dec_hist, p0=dec_p0)
radius = np.ceil(
(2*np.mean([abs(ra_popt[2]), abs(dec_popt[2])]))/2.5)*2.5
        # Check this radius against the extent of sources available.
if radius > max(df.ra) - min(df.ra) or radius > max(df.dec) - min(df.dec):
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
print('gaussian fit done')
except:
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
# print(radius)
return radius
def interpfr(
radius, ra, dec, fr, fr_err, ra_centre, dec_centre, obsid, hr,
interp_method='linear', resolution=200):
grid_x, grid_y = np.meshgrid(
np.linspace(-radius+5.25, radius-5.25, resolution),
np.linspace(-radius+5.25, radius-5.25, resolution))
grid_x += ra_centre
grid_y += dec_centre
grid_fr = np.fliplr(
griddata(
np.vstack((ra, dec)).T, fr,
(grid_x, grid_y), method=interp_method,
fill_value=0))
beam_extent = (
ra_centre+radius,
ra_centre-radius,
dec_centre-radius,
dec_centre+radius)
print(beam_extent)
crop_factor = 1./np.sqrt(2)
cropped_beam_extent = (
ra_centre+radius*crop_factor,
(ra_centre-radius*crop_factor),
dec_centre-radius*crop_factor,
dec_centre+radius*crop_factor)
print(cropped_beam_extent)
grid_fr = cropper(grid_fr)
# np.savetxt("%s_%shr_interpfr_grid.csv" % (obsid, hr), grid_fr, delimiter=",")
return grid_fr, cropped_beam_extent
def plot_interp_fr(grid, beam_extent):
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
img1 = ax.imshow(
grid, cmap="plasma", extent=beam_extent, origin="lower")
ax.set_xlabel("RA [deg]")
ax.set_ylabel("Dec [deg]")
fig.colorbar(img1, ax=ax, format="%.2f", fraction=0.046, pad=0.04)
fig.suptitle('Interpolated Faraday depth at %shrs %s' % (hr, str(obsid)))
plt.savefig('%s_%shrs_ionfr_interped.png' % (obsid, hr))
def plane_fit(grid, obsid, hr):
print('shape', grid.shape)
m = grid.shape[0] # size of the matrix
X1, X2 = np.mgrid[:m, :m]
# Regression
X = np.hstack((np.reshape(X1, (m*m, 1)), np.reshape(X2, (m*m, 1))))
X = np.hstack((np.ones((m*m, 1)), X))
YY = np.reshape(grid, (m*m, 1))
theta = np.dot(np.dot(np.linalg.pinv(np.dot(X.transpose(), X)), X.transpose()), YY)
# ax.scatter(grid[:, 0], grid[:, 1], grid[:, 2], c='r', s=20)
plane = np.reshape(np.dot(X, theta), (m, m
|
determine_radius
|
identifier_name
|
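interpfr() above relies on scipy's griddata to resample scattered (RA, Dec, Faraday depth) measurements onto a regular grid before cropping. A minimal version of that interpolation step with synthetic points (coordinates, resolution and the fill value are illustrative):

import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
ra = rng.uniform(-10, 10, 200)
dec = rng.uniform(-10, 10, 200)
fr = np.sin(ra / 3.0) + 0.1 * dec                  # synthetic Faraday-depth-like values

grid_x, grid_y = np.meshgrid(np.linspace(-8, 8, 100), np.linspace(-8, 8, 100))
grid_fr = griddata(np.vstack((ra, dec)).T, fr, (grid_x, grid_y),
                   method='linear', fill_value=0)  # same call pattern as interpfr()
print(grid_fr.shape)                               # (100, 100)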
spatialfr.py
|
    yaml file whose path is given.
'''
yaml_file = '/home/chege/Desktop/curtin_work/vega/%s.yaml' % (obsid)
ras = []
decs = []
fluxes = []
ampscales = []
stds = []
with open(yaml_file, 'r') as f:
unpacked = yaml.load(f, Loader=SafeLoader)
for sos in sources:
for soc in unpacked['sources']:
if unpacked['sources'][soc]['name'] == sos:
print('.........found source')
print('................getting its ra & dec')
ras.append(float(unpacked['sources'][soc]['ra']))
decs.append(float(unpacked['sources'][soc]['dec']))
ampscales.append(float(np.nanmedian(unpacked['sources'][soc]['amp_scales'][1:13])))
stds.append(float(np.nanstd(unpacked['sources'][soc]['amp_scales'][1:13])))
fluxes.append(float(unpacked['sources'][soc]['flux_density']))
return ras, decs, fluxes, ampscales, stds
def make_df(hour, obsid, csvfyl=None):
if csvfyl is not None: # if we have a csv file with ra dec, fr, and fr_err
df = pd.read_csv(csvfyl)
df = df.drop(df.columns[0], axis=1)
else:
txtdir = '/home/chege/Desktop/curtin_work/run_ionfr/singleLOScomparisons/1066568224_frtxts'
fyls = sorted([fyl for fyl in os.listdir(txtdir) if fyl.split('.')[-1] == 'txt'])
sources = [fyl.split('_')[0] for fyl in fyls]
print(len(sources))
ras, decs, fluxes, ampscales, stds = get_radec(sources, obsid)
frs = []
frs_errs = []
i = 1
for fyl in fyls:
print(i)
i += 1
fylpath = txtdir + '/' + fyl
fr_value, fr_value_err = get_fr_value(fylpath, hour)
frs.append(float(fr_value))
frs_errs.append(float(fr_value_err))
df = pd.DataFrame(
list(zip(ras, decs, fluxes, ampscales, stds, frs, frs_errs)),
columns=['ra', 'dec', 'flux', 'ampscales', 'stds', 'fr', 'fr_err'])
print('made dataframe with radec and fr values')
df = df.dropna(axis=0)
# blacklist = df[((df.stds - df.stds.median()) / df.stds.std()).abs() > 3]
# print(blacklist)
# blacklist.to_csv('blacklist_sources.csv', mode='a', header=False)
df = df[((df.stds - df.stds.median()) / df.stds.std()).abs() < 3]
print(df.head())
        df = df.nlargest(700, 'flux', keep='all')
# df.to_csv('%s_%shrs_ionfr.csv' % (obsid, hour))
return df
def get_center(df):
bulk_centre_ra = np.mean(df.ra)
bulk_centre_dec = np.mean(df.dec)
radius = determine_radius(df)
# Recalculate the centre, based on the sources within the radius,
# and specify the sources to be used for analysis.
filtered = np.array(
[[a, b, c, d, e, f, g] for a, b, c, d, e, f, g
in zip(df.ra, df.dec, df.flux, df.ampscales, df.stds, df.fr, df.fr_err)
if abs(a-bulk_centre_ra) < radius
and abs(b-bulk_centre_dec) < radius])
fra = filtered[:, 0]
fdec = filtered[:, 1]
fflux = filtered[:, 2]
fampscales = filtered[:, 3]
fstds = filtered[:, 4]
f_fr = filtered[:, 5]
f_fr_err = filtered[:, 6]
ra_centre = fra.mean()
dec_centre = fdec.mean()
return radius, fra, fdec, fflux, fampscales, fstds, f_fr, f_fr_err, ra_centre, dec_centre
def cropper(matrix, crop_factor=1./np.sqrt(2)):
length = len(matrix)
cropped_length = int(length * crop_factor)
i1 = int((length-cropped_length)/2)
i2 = length-int((length-cropped_length)/2)
return matrix[i1:i2, i1:i2]
def determine_radius(df):
# Determine the radius of the primary beam.
# Fit a Gaussian to the distribution of RA and Dec positions.
# Use 2x the determined sigma as a cutoff for sources, in steps of 2.5 deg.
ra_hist, ra_bins = np.histogram(df.ra, bins=50)
dec_hist, dec_bins = np.histogram(df.dec, bins=50)
ra_p0 = [max(ra_hist), np.mean(ra_bins), 8]
dec_p0 = [max(dec_hist), np.mean(dec_bins), 8]
def gaussian(x, a, b, c):
return a * np.exp(-(x-b)**2 / (2*c**2))
try:
ra_popt, _ = curve_fit(gaussian, ra_bins[:-1], ra_hist, p0=ra_p0)
dec_popt, _ = curve_fit(gaussian, dec_bins[:-1], dec_hist, p0=dec_p0)
radius = np.ceil(
(2*np.mean([abs(ra_popt[2]), abs(dec_popt[2])]))/2.5)*2.5
        # Check this radius against the extent of sources available.
if radius > max(df.ra) - min(df.ra) or radius > max(df.dec) - min(df.dec):
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
print('gaussian fit done')
except:
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
# print(radius)
return radius
def interpfr(
radius, ra, dec, fr, fr_err, ra_centre, dec_centre, obsid, hr,
interp_method='linear', resolution=200):
grid_x, grid_y = np.meshgrid(
np.linspace(-radius+5.25, radius-5.25, resolution),
np.linspace(-radius+5.25, radius-5.25, resolution))
grid_x += ra_centre
grid_y += dec_centre
|
np.vstack((ra, dec)).T, fr,
(grid_x, grid_y), method=interp_method,
fill_value=0))
beam_extent = (
ra_centre+radius,
ra_centre-radius,
dec_centre-radius,
dec_centre+radius)
print(beam_extent)
crop_factor = 1./np.sqrt(2)
cropped_beam_extent = (
ra_centre+radius*crop_factor,
(ra_centre-radius*crop_factor),
dec_centre-radius*crop_factor,
dec_centre+radius*crop_factor)
print(cropped_beam_extent)
grid_fr = cropper(grid_fr)
# np.savetxt("%s_%shr_interpfr_grid.csv" % (obsid, hr), grid_fr, delimiter=",")
return grid_fr, cropped_beam_extent
def plot_interp_fr(grid, beam_extent):
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
img1 = ax.imshow(
grid, cmap="plasma", extent=beam_extent, origin="lower")
ax.set_xlabel("RA [deg]")
ax.set_ylabel("Dec [deg]")
fig.colorbar(img1, ax=ax, format="%.2f", fraction=0.046, pad=0.04)
fig.suptitle('Interpolated Faraday depth at %shrs %s' % (hr, str(obsid)))
plt.savefig('%s_%shrs_ionfr_interped.png' % (obsid, hr))
def plane_fit(grid, obsid, hr):
print('shape', grid.shape)
m = grid.shape[0] # size of the matrix
X1, X2 = np.mgrid[:m, :m]
# Regression
X = np.hstack((np.reshape(X1, (m*m, 1)), np.reshape(X2, (m*m, 1))))
X = np.hstack((np.ones((m*m, 1)), X))
YY = np.reshape(grid, (m*m, 1))
theta = np.dot(np.dot(np.linalg.pinv(np.dot(X.transpose(), X)), X.transpose()), YY)
# ax.scatter(grid[:, 0], grid[:, 1], grid[:, 2], c='r', s=20)
plane = np.reshape(np.dot(X, theta), (m, m))
|
grid_fr = np.fliplr(
griddata(
|
random_line_split
|
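make_df() above discards sources whose amp-scale scatter lies more than three standard deviations from the median via ((df.stds - df.stds.median()) / df.stds.std()).abs() < 3. A small self-contained demonstration of that filter on toy data (values are made up):

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
stds = np.concatenate([rng.normal(0.1, 0.02, 98), [1.5, 2.0]])  # two injected outliers
df = pd.DataFrame({'stds': stds})

mask = ((df.stds - df.stds.median()) / df.stds.std()).abs() < 3
print(len(df), '->', int(mask.sum()))  # 100 -> 98: the injected outliers are dropped
df = df[mask]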
spatialfr.py
|
return fr_value, fr_value_err
def get_radec(sources, obsid):
'''
    Gets the RA and Dec of a source from the yaml file whose path is given.
'''
yaml_file = '/home/chege/Desktop/curtin_work/vega/%s.yaml' % (obsid)
ras = []
decs = []
fluxes = []
ampscales = []
stds = []
with open(yaml_file, 'r') as f:
unpacked = yaml.load(f, Loader=SafeLoader)
for sos in sources:
for soc in unpacked['sources']:
if unpacked['sources'][soc]['name'] == sos:
print('.........found source')
print('................getting its ra & dec')
ras.append(float(unpacked['sources'][soc]['ra']))
decs.append(float(unpacked['sources'][soc]['dec']))
ampscales.append(float(np.nanmedian(unpacked['sources'][soc]['amp_scales'][1:13])))
stds.append(float(np.nanstd(unpacked['sources'][soc]['amp_scales'][1:13])))
fluxes.append(float(unpacked['sources'][soc]['flux_density']))
return ras, decs, fluxes, ampscales, stds
def make_df(hour, obsid, csvfyl=None):
if csvfyl is not None: # if we have a csv file with ra dec, fr, and fr_err
df = pd.read_csv(csvfyl)
df = df.drop(df.columns[0], axis=1)
else:
txtdir = '/home/chege/Desktop/curtin_work/run_ionfr/singleLOScomparisons/1066568224_frtxts'
fyls = sorted([fyl for fyl in os.listdir(txtdir) if fyl.split('.')[-1] == 'txt'])
sources = [fyl.split('_')[0] for fyl in fyls]
print(len(sources))
ras, decs, fluxes, ampscales, stds = get_radec(sources, obsid)
frs = []
frs_errs = []
i = 1
for fyl in fyls:
print(i)
i += 1
fylpath = txtdir + '/' + fyl
fr_value, fr_value_err = get_fr_value(fylpath, hour)
frs.append(float(fr_value))
frs_errs.append(float(fr_value_err))
df = pd.DataFrame(
list(zip(ras, decs, fluxes, ampscales, stds, frs, frs_errs)),
columns=['ra', 'dec', 'flux', 'ampscales', 'stds', 'fr', 'fr_err'])
print('made dataframe with radec and fr values')
df = df.dropna(axis=0)
# blacklist = df[((df.stds - df.stds.median()) / df.stds.std()).abs() > 3]
# print(blacklist)
# blacklist.to_csv('blacklist_sources.csv', mode='a', header=False)
df = df[((df.stds - df.stds.median()) / df.stds.std()).abs() < 3]
print(df.head())
        df = df.nlargest(700, 'flux', keep='all')
# df.to_csv('%s_%shrs_ionfr.csv' % (obsid, hour))
return df
def get_center(df):
bulk_centre_ra = np.mean(df.ra)
bulk_centre_dec = np.mean(df.dec)
radius = determine_radius(df)
# Recalculate the centre, based on the sources within the radius,
# and specify the sources to be used for analysis.
filtered = np.array(
[[a, b, c, d, e, f, g] for a, b, c, d, e, f, g
in zip(df.ra, df.dec, df.flux, df.ampscales, df.stds, df.fr, df.fr_err)
if abs(a-bulk_centre_ra) < radius
and abs(b-bulk_centre_dec) < radius])
fra = filtered[:, 0]
fdec = filtered[:, 1]
fflux = filtered[:, 2]
fampscales = filtered[:, 3]
fstds = filtered[:, 4]
f_fr = filtered[:, 5]
f_fr_err = filtered[:, 6]
ra_centre = fra.mean()
dec_centre = fdec.mean()
return radius, fra, fdec, fflux, fampscales, fstds, f_fr, f_fr_err, ra_centre, dec_centre
def cropper(matrix, crop_factor=1./np.sqrt(2)):
length = len(matrix)
cropped_length = int(length * crop_factor)
i1 = int((length-cropped_length)/2)
i2 = length-int((length-cropped_length)/2)
return matrix[i1:i2, i1:i2]
def determine_radius(df):
# Determine the radius of the primary beam.
# Fit a Gaussian to the distribution of RA and Dec positions.
# Use 2x the determined sigma as a cutoff for sources, in steps of 2.5 deg.
ra_hist, ra_bins = np.histogram(df.ra, bins=50)
dec_hist, dec_bins = np.histogram(df.dec, bins=50)
ra_p0 = [max(ra_hist), np.mean(ra_bins), 8]
dec_p0 = [max(dec_hist), np.mean(dec_bins), 8]
def gaussian(x, a, b, c):
return a * np.exp(-(x-b)**2 / (2*c**2))
try:
ra_popt, _ = curve_fit(gaussian, ra_bins[:-1], ra_hist, p0=ra_p0)
dec_popt, _ = curve_fit(gaussian, dec_bins[:-1], dec_hist, p0=dec_p0)
radius = np.ceil(
(2*np.mean([abs(ra_popt[2]), abs(dec_popt[2])]))/2.5)*2.5
        # Check this radius against the extent of sources available.
if radius > max(df.ra) - min(df.ra) or radius > max(df.dec) - min(df.dec):
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
print('gaussian fit done')
except:
radius = max([df.ra.max() - df.ra.min(), df.dec.max() - df.dec.min()])/2
# print(radius)
return radius
def interpfr(
radius, ra, dec, fr, fr_err, ra_centre, dec_centre, obsid, hr,
interp_method='linear', resolution=200):
grid_x, grid_y = np.meshgrid(
np.linspace(-radius+5.25, radius-5.25, resolution),
np.linspace(-radius+5.25, radius-5.25, resolution))
grid_x += ra_centre
grid_y += dec_centre
grid_fr = np.fliplr(
griddata(
np.vstack((ra, dec)).T, fr,
(grid_x, grid_y), method=interp_method,
fill_value=0))
beam_extent = (
ra_centre+radius,
ra_centre-radius,
dec_centre-radius,
dec_centre+radius)
print(beam_extent)
crop_factor = 1./np.sqrt(2)
cropped_beam_extent = (
ra_centre+radius*crop_factor,
(ra_centre-radius*crop_factor),
dec_centre-radius*crop_factor,
dec_centre+radius*crop_factor)
print(cropped_beam_extent)
grid_fr = cropper(grid_fr)
# np.savetxt("%s_%shr_interpfr_grid.csv" % (obsid, hr), grid_fr, delimiter=",")
return grid_fr, cropped_beam_extent
def plot_interp_fr(grid, beam_extent):
fig, ax = plt.subplots(1, 1, figsize=(15, 12))
img1 = ax.imshow(
grid, cmap="plasma", extent=beam_extent, origin="lower")
ax.set_xlabel("RA [deg]")
ax.set_ylabel("Dec [deg]")
fig.colorbar(img1, ax=ax, format="%.2f", fraction=0.046, pad=0.04)
fig.suptitle('Interpolated Faraday depth at %shrs %s' % (hr, str(obsid)))
plt.savefig('%s_%shrs_ionfr_interped.png' % (obsid, hr))
def plane_fit(grid, obsid, hr):
print('shape', grid.shape)
m = grid.shape[0] # size of the matrix
X1, X2 = np.mgrid[:m, :m]
# Regression
X = np.hstack((np.reshape(X1, (m*m, 1)), np.reshape(X2, (m*m, 1))))
X = np.hstack((np.ones((m*m, 1)), X))
YY = np.reshape(grid, (m*m, 1))
theta = np.dot(np
|
print('found the hr, reading its fr value')
fr_value = row[3]
fr_value_err = row[4]
|
conditional_block
|
|
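get_text_from_clean() in functions.py above grabs the token immediately to the left or right of a label inside the flattened page text. A short usage example, assuming the definitions above are in scope; the listing string is made up:

text = "350 Miete 80 Nebenkosten 700 Kaution frei ab: 01.05.2021"

# Value printed just before each cost label:
print(get_text_from_clean(text, ["Miete", "Nebenkosten", "Kaution"], "left"))
# [['Miete', '350'], ['Nebenkosten', '80'], ['Kaution', '700']]

# Token just after a date label:
print(get_text_from_clean(text, ["frei ab: "], "right"))
# [['frei ab: ', '01.05.2021']]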
functions.py
|
)/3)
plt.rc('axes', labelsize=22)
plt.grid(b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
trans1 = Affine2D().translate(-0.1, 0.0) + ax.transData
trans2 = Affine2D().translate(+0.1, 0.0) + ax.transData
er1 = ax.errorbar(y1, x, xerr=yerr1, marker="o", linestyle="none", transform=trans1)
ax.axvline(x=0, color="black")
ax.set_ylim(-0.3, len(df) - 1 + 0.3)
return plt.savefig('static/'+name + '.png', bbox_inches='tight')
def clean_string2(liste):
liste = re.sub(r"[\W\_]|\d+", ' ', liste)
liste = " ".join(liste.split())
liste = liste.lower()
return liste
def get_main_text(soup):
text = soup.find(id="ad_description_text").text
text = clean_string2(text)
cut_string = "ihr wg gesucht team"
try:
return text.split(cut_string, 1)[1]
except:
return text
def get_text(link):
bs = get_bs_from_http(link)
text = get_main_text(bs)
return clean_string2(text)
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_text_from_clean(text, liste, direction="right"):
pairs = []
if direction == "right":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[1].split()[0]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
if direction == "left":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[0].split()[-1]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
return pairs
def clean_string(liste):
liste = Flatten(liste)
liste = " ".join(liste)
liste = " ".join(liste.split())
return liste
def get_text_from_html(bs, class_name):
string_list = []
soup = bs.find_all(class_=class_name)
for entry in soup:
string_list.append(entry.text)
return string_list
def Flatten(ul):
fl = []
for i in ul:
if type(i) is list:
fl += Flatten(i)
else:
fl += [i]
return fl
def get_all_data_from_site(bs, link):
names = ["Wohnung", "Zimmergröße", "Sonstige", "Nebenkosten", "Miete", "Gesamtmiete", "Kaution",
"Ablösevereinbarung"]
my_list = get_text_from_html(bs, "col-sm-12 hidden-xs")
my_list = clean_string(my_list)
dict1 = dict(get_text_from_clean(my_list, names, "left"))
names = ["frei ab: ", "frei bis: "]
my_list = get_text_from_html(bs, "col-sm-3")
my_list = clean_string(my_list)
dict2 = dict(get_text_from_clean(my_list, names, "right"))
names = [" Zimmer in "]
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
dict3 = dict(get_text_from_clean(my_list, names, "right"))
names = ["Malmännliche", "weiblich", 'height="17"']
count = []
for name in names:
try:
string = str(bs.find(
class_="mr5 detail-list-fav-button display-inline-block hidden-xs create_favourite").next_sibling.next_sibling)
count.append(string.count(name))
except:
count.append("none")
dict4 = dict(zip(names, count))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict5 = dict(get_text_from_clean(my_list[1], ["zwischen"], "left"))
except:
dict5 = dict(get_text_from_clean(my_list, ["zwischen"], "left"))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict8 = dict(get_text_from_clean(my_list[1], ["Geschlecht"], "right"))
except:
dict8 = dict(get_text_from_clean(my_list, ["Geschlecht"], "right"))
item_list = ["glyphicons glyphicons-bath-bathtub noprint",
"glyphicons glyphicons-wifi-alt noprint",
"glyphicons glyphicons-car noprint",
"glyphicons glyphicons-fabric noprint",
"glyphicons glyphicons-display noprint",
"glyphicons glyphicons-folder-closed noprint",
"glyphicons glyphicons-mixed-buildings noprint",
"glyphicons glyphicons-building noprint",
"glyphicons glyphicons-bus noprint",
"glyphicons glyphicons-bed noprint",
"glyphicons glyphicons-fire noprint"]
data_list = []
for item in item_list:
try:
data_list.append([item[22:-8], clean_string([bs.find(class_=item).next_sibling.next_sibling.next_sibling])])
except:
data_list.append([item[22:-8], "none"])
dict6 = dict(data_list)
liste = get_text_from_html(bs, "col-sm-4 mb10")
adress_string = clean_string(liste).replace("Adresse ", "").replace("Umzugsfirma beauftragen1", "").replace(
"Umzugsfirma beauftragen 1", "")
dict7 = {"Adresse": adress_string, "Link": link}
names = "Miete pro Tag: "
my_list = get_text_from_html(bs, "col-sm-5")
my_list = clean_string(my_list)
if names in my_list:
dict9 = {"taeglich": 1}
else:
dict9 = {"taeglich": 0}
div_id = 'popover-energy-certification'
try:
cs = clean_string([bs.find(id=div_id).next_sibling])
dict10 = {"baujahr": cs}
except:
dict10 = {"baujahr": "none"}
rauchen = "Rauchen nicht erwünscht"
nichrauchen = "Rauchen überall erlaubt"
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if rauchen in my_list:
dict11 = {"rauchen": "raucher"}
if nichrauchen in my_list:
dict11 = {"rauchen": "nichtraucher"}
if rauchen not in my_list and nichrauchen not in my_list:
dict11 = {"rauchen": "keine_Angabe"}
wg_list = ["Zweck-WG", "keine Zweck-WG", "Berufstätigen-WG", "gemischte WG", "Studenten-WG", "Frauen-WG",
"Azubi-WG"]
dict12 = []
for wg in wg_list:
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if wg in my_list:
dict12.append([wg, 1])
else:
dict12.append([wg, 0])
dict12 = dict(dict12)
dict_list = [dict1, dict2, dict3, dict4, dict5, dict8, dict6, dict7, dict7, dict9, dict10, dict11, dict12]
for item in dict_list:
dict1.update(item)
return dict1
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
|
def get_bs_from_http(link):
html = requests.get(link)
return BeautifulSoup(html.text, "html.parser")
def get_html_request(link):
return requests.get(link)
def merge_dicts(dic1, dic2):
try:
dic3 = dict(dic2)
for k, v in dic1.items():
dic3[k] = Flatten([dic3[k], v]) if k in dic3 else v
return dic3
except:
return dic1
def replace_viertel(x, viertel_liste):
if x in viertel_liste:
return x
elif any([i in x for i in viertel_liste]):
return [i for (i, v) in zip(viertel_liste, [i in x for i in viertel_liste]) if v][0]
else:
return x
def link_to_pandas(full_link, df_saved):
stem = full_link[:57]
link = full_link[57:]
bs = get_bs_from_http(stem + link)
data = get_all_data
|
random_line_split
|
|
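Flatten() and merge_dicts() above are the glue that accumulates scraped listings into one record per key, turning repeated keys into flat lists. A quick illustration with toy dictionaries, assuming the definitions above are in scope:

print(Flatten([1, [2, [3, 4]], 5]))
# [1, 2, 3, 4, 5]

d1 = {'Miete': '350', 'Kaution': '700'}
d2 = {'Miete': '360', 'Adresse': 'Musterstrasse 1'}
print(merge_dicts(d1, d2))
# {'Miete': ['360', '350'], 'Adresse': 'Musterstrasse 1', 'Kaution': '700'}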
functions.py
|
)/3)
plt.rc('axes', labelsize=22)
plt.grid(b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
trans1 = Affine2D().translate(-0.1, 0.0) + ax.transData
trans2 = Affine2D().translate(+0.1, 0.0) + ax.transData
er1 = ax.errorbar(y1, x, xerr=yerr1, marker="o", linestyle="none", transform=trans1)
ax.axvline(x=0, color="black")
ax.set_ylim(-0.3, len(df) - 1 + 0.3)
return plt.savefig('static/'+name + '.png', bbox_inches='tight')
def clean_string2(liste):
liste = r
|
_main_text(soup):
text = soup.find(id="ad_description_text").text
text = clean_string2(text)
cut_string = "ihr wg gesucht team"
try:
return text.split(cut_string, 1)[1]
except:
return text
def get_text(link):
bs = get_bs_from_http(link)
text = get_main_text(bs)
return clean_string2(text)
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_text_from_clean(text, liste, direction="right"):
pairs = []
if direction == "right":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[1].split()[0]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
if direction == "left":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[0].split()[-1]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
return pairs
def clean_string(liste):
liste = Flatten(liste)
liste = " ".join(liste)
liste = " ".join(liste.split())
return liste
def get_text_from_html(bs, class_name):
string_list = []
soup = bs.find_all(class_=class_name)
for entry in soup:
string_list.append(entry.text)
return string_list
def Flatten(ul):
fl = []
for i in ul:
if type(i) is list:
fl += Flatten(i)
else:
fl += [i]
return fl
def get_all_data_from_site(bs, link):
names = ["Wohnung", "Zimmergröße", "Sonstige", "Nebenkosten", "Miete", "Gesamtmiete", "Kaution",
"Ablösevereinbarung"]
my_list = get_text_from_html(bs, "col-sm-12 hidden-xs")
my_list = clean_string(my_list)
dict1 = dict(get_text_from_clean(my_list, names, "left"))
names = ["frei ab: ", "frei bis: "]
my_list = get_text_from_html(bs, "col-sm-3")
my_list = clean_string(my_list)
dict2 = dict(get_text_from_clean(my_list, names, "right"))
names = [" Zimmer in "]
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
dict3 = dict(get_text_from_clean(my_list, names, "right"))
names = ["Malmännliche", "weiblich", 'height="17"']
count = []
for name in names:
try:
string = str(bs.find(
class_="mr5 detail-list-fav-button display-inline-block hidden-xs create_favourite").next_sibling.next_sibling)
count.append(string.count(name))
except:
count.append("none")
dict4 = dict(zip(names, count))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict5 = dict(get_text_from_clean(my_list[1], ["zwischen"], "left"))
except:
dict5 = dict(get_text_from_clean(my_list, ["zwischen"], "left"))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict8 = dict(get_text_from_clean(my_list[1], ["Geschlecht"], "right"))
except:
dict8 = dict(get_text_from_clean(my_list, ["Geschlecht"], "right"))
item_list = ["glyphicons glyphicons-bath-bathtub noprint",
"glyphicons glyphicons-wifi-alt noprint",
"glyphicons glyphicons-car noprint",
"glyphicons glyphicons-fabric noprint",
"glyphicons glyphicons-display noprint",
"glyphicons glyphicons-folder-closed noprint",
"glyphicons glyphicons-mixed-buildings noprint",
"glyphicons glyphicons-building noprint",
"glyphicons glyphicons-bus noprint",
"glyphicons glyphicons-bed noprint",
"glyphicons glyphicons-fire noprint"]
data_list = []
for item in item_list:
try:
data_list.append([item[22:-8], clean_string([bs.find(class_=item).next_sibling.next_sibling.next_sibling])])
except:
data_list.append([item[22:-8], "none"])
dict6 = dict(data_list)
liste = get_text_from_html(bs, "col-sm-4 mb10")
adress_string = clean_string(liste).replace("Adresse ", "").replace("Umzugsfirma beauftragen1", "").replace(
"Umzugsfirma beauftragen 1", "")
dict7 = {"Adresse": adress_string, "Link": link}
names = "Miete pro Tag: "
my_list = get_text_from_html(bs, "col-sm-5")
my_list = clean_string(my_list)
if names in my_list:
dict9 = {"taeglich": 1}
else:
dict9 = {"taeglich": 0}
div_id = 'popover-energy-certification'
try:
cs = clean_string([bs.find(id=div_id).next_sibling])
dict10 = {"baujahr": cs}
except:
dict10 = {"baujahr": "none"}
rauchen = "Rauchen nicht erwünscht"
nichrauchen = "Rauchen überall erlaubt"
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if rauchen in my_list:
dict11 = {"rauchen": "raucher"}
if nichrauchen in my_list:
dict11 = {"rauchen": "nichtraucher"}
if rauchen not in my_list and nichrauchen not in my_list:
dict11 = {"rauchen": "keine_Angabe"}
wg_list = ["Zweck-WG", "keine Zweck-WG", "Berufstätigen-WG", "gemischte WG", "Studenten-WG", "Frauen-WG",
"Azubi-WG"]
dict12 = []
for wg in wg_list:
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if wg in my_list:
dict12.append([wg, 1])
else:
dict12.append([wg, 0])
dict12 = dict(dict12)
dict_list = [dict1, dict2, dict3, dict4, dict5, dict8, dict6, dict7, dict7, dict9, dict10, dict11, dict12]
for item in dict_list:
dict1.update(item)
return dict1
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_bs_from_http(link):
html = requests.get(link)
return BeautifulSoup(html.text, "html.parser")
def get_html_request(link):
return requests.get(link)
def merge_dicts(dic1, dic2):
try:
dic3 = dict(dic2)
for k, v in dic1.items():
dic3[k] = Flatten([dic3[k], v]) if k in dic3 else v
return dic3
except:
return dic1
def replace_viertel(x, viertel_liste):
if x in viertel_liste:
return x
elif any([i in x for i in viertel_liste]):
return [i for (i, v) in zip(viertel_liste, [i in x for i in viertel_liste]) if v][0]
else:
return x
def link_to_pandas(full_link, df_saved):
stem = full_link[:57]
link = full_link[57:]
bs = get_bs_from_http(stem + link)
data = get_all_data
|
e.sub(r"[\W\_]|\d+", ' ', liste)
liste = " ".join(liste.split())
liste = liste.lower()
return liste
def get
|
identifier_body
|
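replace_viertel() above maps free-text district strings onto a known district list by exact or substring match, leaving unknown strings untouched. A short example with an illustrative district list, assuming the definition above is in scope:

viertel_liste = ['Neustadt', 'Altstadt', 'Hafen']
print(replace_viertel('Neustadt', viertel_liste))                 # exact match -> 'Neustadt'
print(replace_viertel('Bremen Altstadt Zentrum', viertel_liste))  # substring match -> 'Altstadt'
print(replace_viertel('Unbekannt', viertel_liste))                # no match -> returned unchanged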
functions.py
|
)/3)
plt.rc('axes', labelsize=22)
plt.grid(b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
trans1 = Affine2D().translate(-0.1, 0.0) + ax.transData
trans2 = Affine2D().translate(+0.1, 0.0) + ax.transData
er1 = ax.errorbar(y1, x, xerr=yerr1, marker="o", linestyle="none", transform=trans1)
ax.axvline(x=0, color="black")
ax.set_ylim(-0.3, len(df) - 1 + 0.3)
return plt.savefig('static/'+name + '.png', bbox_inches='tight')
def clean_string2(liste):
liste = re.sub(r"[\W\_]|\d+", ' ', liste)
liste = " ".join(liste.split())
liste = liste.lower()
return liste
def get_main_text(soup):
text = soup.find(id="ad_description_text").text
text = clean_string2(text)
cut_string = "ihr wg gesucht team"
try:
return text.split(cut_string, 1)[1]
except:
return text
def get_text(link):
bs = get_bs_from_http(link)
text = get_main_text(bs)
return clean_string2(text)
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_text_from_clean(text, liste, direction="right"):
pairs = []
if direction == "right":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[1].split()[0]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
if direction == "left":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[0].split()[-1]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
return pairs
def clean_string(liste):
liste = Flatten(liste)
liste = " ".join(liste)
liste = " ".join(liste.split())
return liste
def get_text_from_html(bs, class_name):
string_list = []
soup = bs.find_all(class_=class_name)
for entry in soup:
string_list.append(entry.text)
return string_list
def Flatten(ul):
fl = []
for i in ul:
if type(i) is list:
fl += Flatten(i)
else:
fl += [i]
return fl
def get_all_data_from_site(bs, link):
names = ["Wohnung", "Zimmergröße", "Sonstige", "Nebenkosten", "Miete", "Gesamtmiete", "Kaution",
"Ablösevereinbarung"]
my_list = get_text_from_html(bs, "col-sm-12 hidden-xs")
my_list = clean_string(my_list)
dict1 = dict(get_text_from_clean(my_list, names, "left"))
names = ["frei ab: ", "frei bis: "]
my_list = get_text_from_html(bs, "col-sm-3")
my_list = clean_string(my_list)
dict2 = dict(get_text_from_clean(my_list, names, "right"))
names = [" Zimmer in "]
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
dict3 = dict(get_text_from_clean(my_list, names, "right"))
names = ["Malmännliche", "weiblich", 'height="17"']
count = []
for name in names:
try:
string = str(bs.find(
class_="mr5 detail-list-fav-button display-inline-block hidden-xs create_favourite").next_sibling.next_sibling)
count.append(string.count(name))
except:
count.append("none")
dict4 = dict(zip(names, count))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict5 = dict(get_text_from_clean(my_list[1], ["zwischen"], "left"))
except:
dict5 = dict(get_text_from_clean(my_list, ["zwischen"], "left"))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict8 = dict(get_text_from_clean(my_list[1], ["Geschlecht"], "right"))
except:
dict8 = dict(get_text_from_clean(my_list, ["Geschlecht"], "right"))
item_list = ["glyphicons glyphicons-bath-bathtub noprint",
"glyphicons glyphicons-wifi-alt noprint",
"glyphicons glyphicons-car noprint",
"glyphicons glyphicons-fabric noprint",
"glyphicons glyphicons-display noprint",
"glyphicons glyphicons-folder-closed noprint",
"glyphicons glyphicons-mixed-buildings noprint",
"glyphicons glyphicons-building noprint",
"glyphicons glyphicons-bus noprint",
"glyphicons glyphicons-bed noprint",
"glyphicons glyphicons-fire noprint"]
data_list = []
for item in item_list:
try:
data_list.append([item[22:-8], clean_string([bs.find(class_=item).next_sibling.next_sibling.next_sibling])])
except:
data_list.append([item[22:-8], "none"])
dict6 = dict(data_list)
liste = get_text_from_html(bs, "col-sm-4 mb10")
adress_string = clean_string(liste).replace("Adresse ", "").replace("Umzugsfirma beauftragen1", "").replace(
"Umzugsfirma beauftragen 1", "")
dict7 = {"Adresse": adress_string, "Link": link}
names = "Miete pro Tag: "
my_list = get_text_from_html(bs, "col-sm-5")
my_list = clean_string(my_list)
if names in my_list:
dict9 = {"taeglich": 1}
else:
dict9 = {"taeglich": 0}
div_id = 'popover-energy-certification'
try:
cs = clean_string([bs.find(id=div_id).next_sibling])
dict10 = {"baujahr": cs}
except:
dict10 = {"baujahr": "none"}
rauchen = "Rauchen nicht erwünscht"
nichrauchen = "Rauchen überall erlaubt"
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if rauchen in my_list:
dict11 = {"rauchen": "raucher"}
if nichrauchen in my_list:
dict11 = {"rauchen": "nichtraucher"}
if rauchen not in my_list and nichrauchen not in my_list:
dict11 = {"rauchen": "keine_Angabe"}
wg_list = ["Zweck-WG", "keine Zweck-WG", "Berufstätigen-WG", "gemischte WG", "Studenten-WG", "Frauen-WG",
"Azubi-WG"]
dict12 = []
for wg in wg_list:
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if wg in my_list:
dict12.append([wg, 1])
else:
dict12.append([w
|
ct(dict12)
dict_list = [dict1, dict2, dict3, dict4, dict5, dict8, dict6, dict7, dict7, dict9, dict10, dict11, dict12]
for item in dict_list:
dict1.update(item)
return dict1
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_bs_from_http(link):
html = requests.get(link)
return BeautifulSoup(html.text, "html.parser")
def get_html_request(link):
return requests.get(link)
def merge_dicts(dic1, dic2):
try:
dic3 = dict(dic2)
for k, v in dic1.items():
dic3[k] = Flatten([dic3[k], v]) if k in dic3 else v
return dic3
except:
return dic1
def replace_viertel(x, viertel_liste):
if x in viertel_liste:
return x
elif any([i in x for i in viertel_liste]):
return [i for (i, v) in zip(viertel_liste, [i in x for i in viertel_liste]) if v][0]
else:
return x
def link_to_pandas(full_link, df_saved):
stem = full_link[:57]
link = full_link[57:]
bs = get_bs_from_http(stem + link)
data = get_all
|
g, 0])
dict12 = di
|
conditional_block
|
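The plotting fragment at the top of each functions.py row shifts two errorbar series with an Affine2D translation composed onto ax.transData so their markers do not overlap. A minimal complete version of that pattern (synthetic data; the offsets and axis orientation are illustrative):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D

y = np.arange(5)                                           # categorical positions
x1, x2 = np.random.normal(0, 1, 5), np.random.normal(0.5, 1, 5)
err1, err2 = np.full(5, 0.3), np.full(5, 0.4)

fig, ax = plt.subplots()
trans1 = Affine2D().translate(0.0, -0.1) + ax.transData    # nudge first series down
trans2 = Affine2D().translate(0.0, +0.1) + ax.transData    # nudge second series up
ax.errorbar(x1, y, xerr=err1, marker="o", linestyle="none", transform=trans1)
ax.errorbar(x2, y, xerr=err2, marker="o", linestyle="none", transform=trans2)
ax.axvline(x=0, color="black")
plt.savefig('offset_errorbars.png', bbox_inches='tight')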
functions.py
|
)/3)
plt.rc('axes', labelsize=22)
plt.grid(b=True, which='major', color='#666666', linestyle='-', alpha=0.2)
trans1 = Affine2D().translate(-0.1, 0.0) + ax.transData
trans2 = Affine2D().translate(+0.1, 0.0) + ax.transData
er1 = ax.errorbar(y1, x, xerr=yerr1, marker="o", linestyle="none", transform=trans1)
ax.axvline(x=0, color="black")
ax.set_ylim(-0.3, len(df) - 1 + 0.3)
return plt.savefig('static/'+name + '.png', bbox_inches='tight')
def clean_string2(liste):
liste = re.sub(r"[\W\_]|\d+", ' ', liste)
liste = " ".join(liste.split())
liste = liste.lower()
return liste
def get_main_
|
text = soup.find(id="ad_description_text").text
text = clean_string2(text)
cut_string = "ihr wg gesucht team"
try:
return text.split(cut_string, 1)[1]
except:
return text
def get_text(link):
bs = get_bs_from_http(link)
text = get_main_text(bs)
return clean_string2(text)
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_text_from_clean(text, liste, direction="right"):
pairs = []
if direction == "right":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[1].split()[0]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
if direction == "left":
for item in liste:
try:
if item in text:
pairs.append([item, text.split(item)[0].split()[-1]])
else:
pairs.append([item, "none"])
except:
pairs.append([item, "none"])
return pairs
def clean_string(liste):
liste = Flatten(liste)
liste = " ".join(liste)
liste = " ".join(liste.split())
return liste
def get_text_from_html(bs, class_name):
string_list = []
soup = bs.find_all(class_=class_name)
for entry in soup:
string_list.append(entry.text)
return string_list
def Flatten(ul):
fl = []
for i in ul:
if type(i) is list:
fl += Flatten(i)
else:
fl += [i]
return fl
def get_all_data_from_site(bs, link):
names = ["Wohnung", "Zimmergröße", "Sonstige", "Nebenkosten", "Miete", "Gesamtmiete", "Kaution",
"Ablösevereinbarung"]
my_list = get_text_from_html(bs, "col-sm-12 hidden-xs")
my_list = clean_string(my_list)
dict1 = dict(get_text_from_clean(my_list, names, "left"))
names = ["frei ab: ", "frei bis: "]
my_list = get_text_from_html(bs, "col-sm-3")
my_list = clean_string(my_list)
dict2 = dict(get_text_from_clean(my_list, names, "right"))
names = [" Zimmer in "]
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
dict3 = dict(get_text_from_clean(my_list, names, "right"))
names = ["Malmännliche", "weiblich", 'height="17"']
count = []
for name in names:
try:
string = str(bs.find(
class_="mr5 detail-list-fav-button display-inline-block hidden-xs create_favourite").next_sibling.next_sibling)
count.append(string.count(name))
except:
count.append("none")
dict4 = dict(zip(names, count))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict5 = dict(get_text_from_clean(my_list[1], ["zwischen"], "left"))
except:
dict5 = dict(get_text_from_clean(my_list, ["zwischen"], "left"))
my_list = get_text_from_html(bs, "ul-detailed-view-datasheet print_text_left")
my_list = [x.strip() for x in my_list]
try:
dict8 = dict(get_text_from_clean(my_list[1], ["Geschlecht"], "right"))
except:
dict8 = dict(get_text_from_clean(my_list, ["Geschlecht"], "right"))
item_list = ["glyphicons glyphicons-bath-bathtub noprint",
"glyphicons glyphicons-wifi-alt noprint",
"glyphicons glyphicons-car noprint",
"glyphicons glyphicons-fabric noprint",
"glyphicons glyphicons-display noprint",
"glyphicons glyphicons-folder-closed noprint",
"glyphicons glyphicons-mixed-buildings noprint",
"glyphicons glyphicons-building noprint",
"glyphicons glyphicons-bus noprint",
"glyphicons glyphicons-bed noprint",
"glyphicons glyphicons-fire noprint"]
data_list = []
for item in item_list:
try:
data_list.append([item[22:-8], clean_string([bs.find(class_=item).next_sibling.next_sibling.next_sibling])])
except:
data_list.append([item[22:-8], "none"])
dict6 = dict(data_list)
liste = get_text_from_html(bs, "col-sm-4 mb10")
adress_string = clean_string(liste).replace("Adresse ", "").replace("Umzugsfirma beauftragen1", "").replace(
"Umzugsfirma beauftragen 1", "")
dict7 = {"Adresse": adress_string, "Link": link}
names = "Miete pro Tag: "
my_list = get_text_from_html(bs, "col-sm-5")
my_list = clean_string(my_list)
if names in my_list:
dict9 = {"taeglich": 1}
else:
dict9 = {"taeglich": 0}
div_id = 'popover-energy-certification'
try:
cs = clean_string([bs.find(id=div_id).next_sibling])
dict10 = {"baujahr": cs}
except:
dict10 = {"baujahr": "none"}
rauchen = "Rauchen nicht erwünscht"
nichrauchen = "Rauchen überall erlaubt"
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if rauchen in my_list:
dict11 = {"rauchen": "raucher"}
if nichrauchen in my_list:
dict11 = {"rauchen": "nichtraucher"}
if rauchen not in my_list and nichrauchen not in my_list:
dict11 = {"rauchen": "keine_Angabe"}
wg_list = ["Zweck-WG", "keine Zweck-WG", "Berufstätigen-WG", "gemischte WG", "Studenten-WG", "Frauen-WG",
"Azubi-WG"]
dict12 = []
for wg in wg_list:
my_list = get_text_from_html(bs, "col-sm-6")
my_list = clean_string(my_list)
if wg in my_list:
dict12.append([wg, 1])
else:
dict12.append([wg, 0])
dict12 = dict(dict12)
dict_list = [dict1, dict2, dict3, dict4, dict5, dict8, dict6, dict7, dict7, dict9, dict10, dict11, dict12]
for item in dict_list:
dict1.update(item)
return dict1
def get_bs_from_html(html):
return BeautifulSoup(html.text, "html.parser")
def get_bs_from_http(link):
html = requests.get(link)
return BeautifulSoup(html.text, "html.parser")
def get_html_request(link):
return requests.get(link)
def merge_dicts(dic1, dic2):
try:
dic3 = dict(dic2)
for k, v in dic1.items():
dic3[k] = Flatten([dic3[k], v]) if k in dic3 else v
return dic3
except:
return dic1
def replace_viertel(x, viertel_liste):
if x in viertel_liste:
return x
elif any([i in x for i in viertel_liste]):
return [i for (i, v) in zip(viertel_liste, [i in x for i in viertel_liste]) if v][0]
else:
return x
def link_to_pandas(full_link, df_saved):
stem = full_link[:57]
link = full_link[57:]
bs = get_bs_from_http(stem + link)
data = get_all
|
text(soup):
|
identifier_name
|
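Most of the scraping in functions.py funnels through get_text_from_html(), which collects the text of every element carrying a given CSS class. A compact standalone equivalent on an inline HTML snippet (the markup is made up):

from bs4 import BeautifulSoup

html = ('<div class="col-sm-6">3 Zimmer in Bremen</div>'
        '<div class="col-sm-6">frei ab: 01.05.2021</div>')
bs = BeautifulSoup(html, 'html.parser')
texts = [entry.text for entry in bs.find_all(class_='col-sm-6')]
print(texts)  # ['3 Zimmer in Bremen', 'frei ab: 01.05.2021']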
agent_sample_stats.py
|
s)))
any_ungrounded_ratio = 100 * (any_ungrounded / float(len(stmts)))
return all_ungrounded_ratio, any_ungrounded_ratio
def get_agent_counts(stmts):
agents = gm.ungrounded_texts(stmts)
agent_counts = [t[1] for t in agents]
return agent_counts
fname = '../step3_sample_training_test/famplex_test_stmts_mapped.pkl'
stmts = ac.load_statements(fname)
allu_test, anyu_test = get_ungrounded_stats(stmts)
counts_test = get_agent_counts(stmts)
fname = '../step3_sample_training_test/training_pmid_stmts.pkl'
stmts = ac.load_statements(fname)
allu_train, anyu_train = get_ungrounded_stats(stmts)
counts_train = get_agent_counts(stmts)
return (allu_test, anyu_test, allu_train, anyu_train,
counts_train, counts_test)
def plot_ungrounded_stats(allu_test, anyu_test, allu_train, anyu_train):
"""Plot training vs test corpus any and all arguments ungrounded pcts."""
pf.set_fig_params()
plt.figure(figsize=(2, 2.2), dpi=300)
xticks = np.array([0, 1])
col_width = 0.3
btrain = plt.bar(xticks - 0.5*col_width, [allu_train, anyu_train],
col_width, align='center', color=pf.ORANGE)
btest = plt.bar(xticks + 0.5*col_width, [allu_test, anyu_test], col_width,
align='center', color=pf.GREEN)
plt.xticks(xticks, ('All args\nungrounded', 'Any args\nungrounded'))
plt.ylabel('Pct. Extracted Events')
plt.ylim((0, 35))
ax = plt.gca()
pf.format_axis(ax)
plt.subplots_adjust(left=0.17, bottom=0.14, top=0.94, right=0.93)
plt.legend((btrain, btest), (without_fplx_label, with_fplx_label),
loc='upper left', frameon=False, fontsize=pf.fontsize)
plt.savefig('ungrounded_stats.pdf')
def plot_ungrounded_frequencies(counts_list, labels, colors, plot_filename):
|
plt.ion()
ax = fig.gca()
for i, (bin_starts, fracs_total) in \
enumerate(zip(bin_starts_list, fracs_total_list)):
xvals = np.array(bin_starts) / len(counts_list[i])
yvals = fracs_total / float(np.sum(counts_list[i]))
ax.plot(xvals, yvals, color=colors[i])
ax.plot(xvals, xvals, color='gray', linestyle='dotted')
labels = list(labels)
labels.append('Uniform distribution')
pf.format_axis(ax)
ax.legend(labels, loc='lower right', frameon=False, fontsize=pf.fontsize)
plt.xlim([0,1])
plt.ylim([0,1])
plt.subplots_adjust(left=0.18, bottom=0.15, right=0.96, top=0.92)
ax.set_xlabel('String rank (normalized)')
ax.set_ylabel('Rel. freq. of occurrences')
plt.savefig(plot_filename)
cats = (['P'], ['F', 'C', 'X'], ['S'], ['B'], ['U'], ['M'])
cat_names = ('Protein/gene', 'Family/complex', 'Small molecule',
'Biological process', 'Other/unknown', 'microRNA')
def grounding_stats(data, plot=False):
rows = []
num_agents = len(data)
if plot:
plt.figure(figsize=(2.2, 2), dpi=300)
for ix, cat in enumerate(cats):
cat_rows = data[data.EntityType.apply(lambda et: et in cat)]
cat_number = len(cat_rows)
cat_pct = (100 * cat_number / float(num_agents))
cat_pct_str = '%.1f' % cat_pct
correct_rows = cat_rows[cat_rows.Grounding == 1]
correct_number = len(correct_rows)
correct_pct = (100 * correct_number / float(cat_number)) if \
cat_number > 0 else 0
correct_pct_of_total = (100 * correct_number) / float(num_agents)
correct_pct_str = '%.1f' % correct_pct
def stderr(k, n):
return np.sqrt(((k/float(n)) * (1-(k/float(n)))) / float(n))
stderr_inc = 100 * stderr(cat_number - correct_number, cat_number)
stderr_corr = 100 * stderr(correct_number, cat_number)
rows.append((cat, cat_number, cat_pct, correct_number,
correct_pct, stderr_corr))
if plot:
inc_handle = plt.bar(ix, cat_pct, color=pf.ORANGE, align='center',
yerr=stderr_inc, linewidth=0.5)
corr_handle = plt.bar(ix, correct_pct_of_total, color=pf.GREEN,
align='center', yerr=stderr_corr,
linewidth=0.5)
if plot:
plt.xticks(range(len(cats)), cat_names, rotation=90)
plt.ylabel('Pct. Curated Entities')
plt.subplots_adjust(left=0.18, bottom=0.43, top=0.96)
ax = plt.gca()
pf.format_axis(ax)
plt.legend((corr_handle, inc_handle), ('Correct', 'Incorrect'),
loc='upper right', frameon=False, fontsize=pf.fontsize)
plt.show()
write_unicode_csv('agents_sample_stats.csv', rows)
return rows
def combined_graph(results):
prot_bef, prot_bef_err = results['training'][0][4:6]
fam_bef, fam_bef_err = results['training'][1][4:6]
prot_aft, prot_aft_err = results['test'][0][4:6]
fam_aft, fam_aft_err = results['test'][1][4:6]
plt.figure(figsize=(2.8, 2.2), dpi=300)
width = 0.3
bef_color = pf.ORANGE
aft_color = pf.GREEN
ax = plt.gca()
error_kw = dict(ecolor='black', lw=1, capsize=2, capthick=1)
befh = plt.bar(-0.5*width, prot_bef, width=width, yerr=prot_bef_err,
color=bef_color, error_kw=error_kw)
afth = plt.bar(0.5*width, prot_aft, width=width, yerr=prot_aft_err,
color=aft_color, error_kw=error_kw)
plt.bar(1 - 0.5*width, fam_bef, width=width, yerr=fam_bef_err,
color=bef_color, error_kw=error_kw)
plt.bar(1 + 0.5*width, fam_aft, width=width, yerr=fam_aft_err,
color=aft_color, error_kw=error_kw)
plt.xticks((0+(width/2.), 1+(width/2.)),
('Protein/\ngene', 'Family/\ncomplex'))
plt.ylabel('Grounding accuracy')
pf.format_axis(ax, tick_padding=3)
plt.legend((befh, afth), (without_fplx_label, with_fplx_label),
loc='upper right',
frameon=False, fontsize=pf.fontsize)
plt.subplots_adjust(left=0.22, bottom=0.15, top=0.94, right=0.94)
plt.savefig('combined_results.pdf')
plt.show()
def print_combined_table(results):
rows = []
header = ['\\#', 'Entity \\%', '\\# Corr.', '\\% Corr.',
'\\#', 'Entity \\%', '\\# Corr.', '\\% Corr.']
rows.append(header)
r_tr = results['training']
r_te = results['test']
def format(res):
return (res[1], '%.1f' % res[2], res[3],
r'%.1f $\pm$ %.1f' % (res[4], res[5]))
for row_ix in range(6):
row =
|
"""Plot the distribution of ungrounded strings in training vs test corpus.
"""
bin_interval = 1
fracs_total_list = []
bin_starts_list = []
for counts in counts_list:
freq_dist = []
bin_starts = list(range(0, len(counts), bin_interval))
bin_starts_list.append(bin_starts)
for bin_start_ix in bin_starts:
bin_end_ix = bin_start_ix + bin_interval
if bin_end_ix < len(counts):
freq_dist.append(np.sum(counts[bin_start_ix:bin_end_ix]))
else:
freq_dist.append(np.sum(counts[bin_start_ix:]))
freq_dist = np.array(freq_dist)
fracs_total = np.cumsum(freq_dist)
fracs_total_list.append(fracs_total)
fig = plt.figure(figsize=(2.3, 2.2), dpi=300)
|
identifier_body
|
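As a side note, the stderr helper inside grounding_stats above is the standard error of a binomial proportion, sqrt(p*(1-p)/n) with p = k/n. A quick worked example under assumed counts (45 correctly grounded entities out of a hypothetical sample of 60):

import numpy as np

def proportion_stderr(k, n):
    # Standard error of a proportion: sqrt(p*(1-p)/n) with p = k/n.
    p = k / float(n)
    return np.sqrt(p * (1.0 - p) / float(n))

k, n = 45, 60
print('%.1f +/- %.1f' % (100 * k / float(n), 100 * proportion_stderr(k, n)))
# prints roughly 75.0 +/- 5.6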
agent_sample_stats.py
|
s)))
any_ungrounded_ratio = 100 * (any_ungrounded / float(len(stmts)))
return all_ungrounded_ratio, any_ungrounded_ratio
def get_agent_counts(stmts):
agents = gm.ungrounded_texts(stmts)
agent_counts = [t[1] for t in agents]
return agent_counts
fname = '../step3_sample_training_test/famplex_test_stmts_mapped.pkl'
stmts = ac.load_statements(fname)
allu_test, anyu_test = get_ungrounded_stats(stmts)
counts_test = get_agent_counts(stmts)
fname = '../step3_sample_training_test/training_pmid_stmts.pkl'
stmts = ac.load_statements(fname)
allu_train, anyu_train = get_ungrounded_stats(stmts)
counts_train = get_agent_counts(stmts)
return (allu_test, anyu_test, allu_train, anyu_train,
counts_train, counts_test)
def plot_ungrounded_stats(allu_test, anyu_test, allu_train, anyu_train):
"""Plot training vs test corpus any and all arguments ungrounded pcts."""
pf.set_fig_params()
plt.figure(figsize=(2, 2.2), dpi=300)
xticks = np.array([0, 1])
col_width = 0.3
btrain = plt.bar(xticks - 0.5*col_width, [allu_train, anyu_train],
col_width, align='center', color=pf.ORANGE)
btest = plt.bar(xticks + 0.5*col_width, [allu_test, anyu_test], col_width,
align='center', color=pf.GREEN)
plt.xticks(xticks, ('All args\nungrounded', 'Any args\nungrounded'))
plt.ylabel('Pct. Extracted Events')
plt.ylim((0, 35))
ax = plt.gca()
pf.format_axis(ax)
plt.subplots_adjust(left=0.17, bottom=0.14, top=0.94, right=0.93)
plt.legend((btrain, btest), (without_fplx_label, with_fplx_label),
loc='upper left', frameon=False, fontsize=pf.fontsize)
plt.savefig('ungrounded_stats.pdf')
def plot_ungrounded_frequencies(counts_list, labels, colors, plot_filename):
"""Plot the distribution of ungrounded strings in training vs test corpus.
"""
bin_interval = 1
fracs_total_list = []
bin_starts_list = []
for counts in counts_list:
freq_dist = []
bin_starts = list(range(0, len(counts), bin_interval))
bin_starts_list.append(bin_starts)
for bin_start_ix in bin_starts:
bin_end_ix = bin_start_ix + bin_interval
if bin_end_ix < len(counts):
freq_dist.append(np.sum(counts[bin_start_ix:bin_end_ix]))
else:
freq_dist.append(np.sum(counts[bin_start_ix:]))
freq_dist = np.array(freq_dist)
fracs_total = np.cumsum(freq_dist)
fracs_total_list.append(fracs_total)
fig = plt.figure(figsize=(2.3, 2.2), dpi=300)
plt.ion()
ax = fig.gca()
for i, (bin_starts, fracs_total) in \
enumerate(zip(bin_starts_list, fracs_total_list)):
xvals = np.array(bin_starts) / len(counts_list[i])
yvals = fracs_total / float(np.sum(counts_list[i]))
ax.plot(xvals, yvals, color=colors[i])
ax.plot(xvals, xvals, color='gray', linestyle='dotted')
labels = list(labels)
labels.append('Uniform distribution')
pf.format_axis(ax)
ax.legend(labels, loc='lower right', frameon=False, fontsize=pf.fontsize)
plt.xlim([0,1])
plt.ylim([0,1])
plt.subplots_adjust(left=0.18, bottom=0.15, right=0.96, top=0.92)
ax.set_xlabel('String rank (normalized)')
ax.set_ylabel('Rel. freq. of occurrences')
plt.savefig(plot_filename)
cats = (['P'], ['F', 'C', 'X'], ['S'], ['B'], ['U'], ['M'])
cat_names = ('Protein/gene', 'Family/complex', 'Small molecule',
'Biological process', 'Other/unknown', 'microRNA')
def grounding_stats(data, plot=False):
rows = []
num_agents = len(data)
if plot:
plt.figure(figsize=(2.2, 2), dpi=300)
for ix, cat in enumerate(cats):
cat_rows = data[data.EntityType.apply(lambda et: et in cat)]
cat_number = len(cat_rows)
cat_pct = (100 * cat_number / float(num_agents))
cat_pct_str = '%.1f' % cat_pct
correct_rows = cat_rows[cat_rows.Grounding == 1]
correct_number = len(correct_rows)
correct_pct = (100 * correct_number / float(cat_number)) if \
cat_number > 0 else 0
correct_pct_of_total = (100 * correct_number) / float(num_agents)
correct_pct_str = '%.1f' % correct_pct
def stderr(k, n):
return np.sqrt(((k/float(n)) * (1-(k/float(n)))) / float(n))
stderr_inc = 100 * stderr(cat_number - correct_number, cat_number)
stderr_corr = 100 * stderr(correct_number, cat_number)
rows.append((cat, cat_number, cat_pct, correct_number,
correct_pct, stderr_corr))
if plot:
inc_handle = plt.bar(ix, cat_pct, color=pf.ORANGE, align='center',
yerr=stderr_inc, linewidth=0.5)
corr_handle = plt.bar(ix, correct_pct_of_total, color=pf.GREEN,
align='center', yerr=stderr_corr,
linewidth=0.5)
if plot:
plt.xticks(range(len(cats)), cat_names, rotation=90)
plt.ylabel('Pct. Curated Entities')
plt.subplots_adjust(left=0.18, bottom=0.43, top=0.96)
ax = plt.gca()
pf.format_axis(ax)
plt.legend((corr_handle, inc_handle), ('Correct', 'Incorrect'),
loc='upper right', frameon=False, fontsize=pf.fontsize)
plt.show()
write_unicode_csv('agents_sample_stats.csv', rows)
return rows
def combined_graph(results):
prot_bef, prot_bef_err = results['training'][0][4:6]
fam_bef, fam_bef_err = results['training'][1][4:6]
prot_aft, prot_aft_err = results['test'][0][4:6]
fam_aft, fam_aft_err = results['test'][1][4:6]
plt.figure(figsize=(2.8, 2.2), dpi=300)
|
width = 0.3
bef_color = pf.ORANGE
aft_color = pf.GREEN
ax = plt.gca()
error_kw = dict(ecolor='black', lw=1, capsize=2, capthick=1)
befh = plt.bar(-0.5*width, prot_bef, width=width, yerr=prot_bef_err,
color=bef_color, error_kw=error_kw)
afth = plt.bar(0.5*width, prot_aft, width=width, yerr=prot_aft_err,
color=aft_color, error_kw=error_kw)
plt.bar(1 - 0.5*width, fam_bef, width=width, yerr=fam_bef_err,
color=bef_color, error_kw=error_kw)
plt.bar(1 + 0.5*width, fam_aft, width=width, yerr=fam_aft_err,
color=aft_color, error_kw=error_kw)
plt.xticks((0+(width/2.), 1+(width/2.)),
('Protein/\ngene', 'Family/\ncomplex'))
plt.ylabel('Grounding accuracy')
pf.format_axis(ax, tick_padding=3)
plt.legend((befh, afth), (without_fplx_label, with_fplx_label),
loc='upper right',
frameon=False, fontsize=pf.fontsize)
plt.subplots_adjust(left=0.22, bottom=0.15, top=0.94, right=0.94)
plt.savefig('combined_results.pdf')
plt.show()
def print_combined_table(results):
rows = []
header = ['\\#', 'Entity \\%', '\\# Corr.', '\\% Corr.',
'\\#', 'Entity \\%', '\\# Corr.', '\\% Corr.']
rows.append(header)
r_tr = results['training']
r_te = results['test']
def format(res):
return (res[1], '%.1f' % res[2], res[3],
r'%.1f $\pm$ %.1f' % (res[4], res[5]))
for row_ix in range(6):
row =
|
random_line_split
|
|
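The curve built by plot_ungrounded_frequencies above is simply the cumulative share of occurrences plotted against normalized string rank. A toy version of that computation, with hypothetical agent-text counts assumed to be sorted most frequent first:

import numpy as np

counts = np.array([50, 20, 10, 5, 5, 5, 3, 1, 1])   # hypothetical counts
xvals = np.arange(len(counts)) / float(len(counts))  # normalized string rank
yvals = np.cumsum(counts) / float(counts.sum())      # cumulative fraction of occurrences
for x, y in zip(xvals, yvals):
    print('%.2f %.2f' % (x, y))
# A heavily skewed distribution rises well above the dotted y = x "uniform" reference line.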
agent_sample_stats.py
|
all_ungrounded_ratio = 100 * (all_ungrounded / float(len(stmts)))
any_ungrounded_ratio = 100 * (any_ungrounded / float(len(stmts)))
return all_ungrounded_ratio, any_ungrounded_ratio
def get_agent_counts(stmts):
agents = gm.ungrounded_texts(stmts)
agent_counts = [t[1] for t in agents]
return agent_counts
fname = '../step3_sample_training_test/famplex_test_stmts_mapped.pkl'
stmts = ac.load_statements(fname)
allu_test, anyu_test = get_ungrounded_stats(stmts)
counts_test = get_agent_counts(stmts)
fname = '../step3_sample_training_test/training_pmid_stmts.pkl'
stmts = ac.load_statements(fname)
allu_train, anyu_train = get_ungrounded_stats(stmts)
counts_train = get_agent_counts(stmts)
return (allu_test, anyu_test, allu_train, anyu_train,
counts_train, counts_test)
def plot_ungrounded_stats(allu_test, anyu_test, allu_train, anyu_train):
"""Plot training vs test corpus any and all arguments ungrounded pcts."""
pf.set_fig_params()
plt.figure(figsize=(2, 2.2), dpi=300)
xticks = np.array([0, 1])
col_width = 0.3
btrain = plt.bar(xticks - 0.5*col_width, [allu_train, anyu_train],
col_width, align='center', color=pf.ORANGE)
btest = plt.bar(xticks + 0.5*col_width, [allu_test, anyu_test], col_width,
align='center', color=pf.GREEN)
plt.xticks(xticks, ('All args\nungrounded', 'Any args\nungrounded'))
plt.ylabel('Pct. Extracted Events')
plt.ylim((0, 35))
ax = plt.gca()
pf.format_axis(ax)
plt.subplots_adjust(left=0.17, bottom=0.14, top=0.94, right=0.93)
plt.legend((btrain, btest), (without_fplx_label, with_fplx_label),
loc='upper left', frameon=False, fontsize=pf.fontsize)
plt.savefig('ungrounded_stats.pdf')
def plot_ungrounded_frequencies(counts_list, labels, colors, plot_filename):
"""Plot the distribution of ungrounded strings in training vs test corpus.
"""
bin_interval = 1
fracs_total_list = []
bin_starts_list = []
for counts in counts_list:
freq_dist = []
bin_starts = list(range(0, len(counts), bin_interval))
bin_starts_list.append(bin_starts)
for bin_start_ix in bin_starts:
bin_end_ix = bin_start_ix + bin_interval
if bin_end_ix < len(counts):
freq_dist.append(np.sum(counts[bin_start_ix:bin_end_ix]))
else:
freq_dist.append(np.sum(counts[bin_start_ix:]))
freq_dist = np.array(freq_dist)
fracs_total = np.cumsum(freq_dist)
fracs_total_list.append(fracs_total)
fig = plt.figure(figsize=(2.3, 2.2), dpi=300)
plt.ion()
ax = fig.gca()
for i, (bin_starts, fracs_total) in \
enumerate(zip(bin_starts_list, fracs_total_list)):
xvals = np.array(bin_starts) / len(counts_list[i])
yvals = fracs_total / float(np.sum(counts_list[i]))
ax.plot(xvals, yvals, color=colors[i])
ax.plot(xvals, xvals, color='gray', linestyle='dotted')
labels = list(labels)
labels.append('Uniform distribution')
pf.format_axis(ax)
ax.legend(labels, loc='lower right', frameon=False, fontsize=pf.fontsize)
plt.xlim([0,1])
plt.ylim([0,1])
plt.subplots_adjust(left=0.18, bottom=0.15, right=0.96, top=0.92)
ax.set_xlabel('String rank (normalized)')
ax.set_ylabel('Rel. freq. of occurrences')
plt.savefig(plot_filename)
cats = (['P'], ['F', 'C', 'X'], ['S'], ['B'], ['U'], ['M'])
cat_names = ('Protein/gene', 'Family/complex', 'Small molecule',
'Biological process', 'Other/unknown', 'microRNA')
def grounding_stats(data, plot=False):
rows = []
num_agents = len(data)
if plot:
plt.figure(figsize=(2.2, 2), dpi=300)
for ix, cat in enumerate(cats):
cat_rows = data[data.EntityType.apply(lambda et: et in cat)]
cat_number = len(cat_rows)
cat_pct = (100 * cat_number / float(num_agents))
cat_pct_str = '%.1f' % cat_pct
correct_rows = cat_rows[cat_rows.Grounding == 1]
correct_number = len(correct_rows)
correct_pct = (100 * correct_number / float(cat_number)) if \
cat_number > 0 else 0
correct_pct_of_total = (100 * correct_number) / float(num_agents)
correct_pct_str = '%.1f' % correct_pct
def stderr(k, n):
return np.sqrt(((k/float(n)) * (1-(k/float(n)))) / float(n))
stderr_inc = 100 * stderr(cat_number - correct_number, cat_number)
stderr_corr = 100 * stderr(correct_number, cat_number)
rows.append((cat, cat_number, cat_pct, correct_number,
correct_pct, stderr_corr))
if plot:
inc_handle = plt.bar(ix, cat_pct, color=pf.ORANGE, align='center',
yerr=stderr_inc, linewidth=0.5)
corr_handle = plt.bar(ix, correct_pct_of_total, color=pf.GREEN,
align='center', yerr=stderr_corr,
linewidth=0.5)
if plot:
plt.xticks(range(len(cats)), cat_names, rotation=90)
plt.ylabel('Pct. Curated Entities')
plt.subplots_adjust(left=0.18, bottom=0.43, top=0.96)
ax = plt.gca()
pf.format_axis(ax)
plt.legend((corr_handle, inc_handle), ('Correct', 'Incorrect'),
loc='upper right', frameon=False, fontsize=pf.fontsize)
plt.show()
write_unicode_csv('agents_sample_stats.csv', rows)
return rows
def combined_graph(results):
prot_bef, prot_bef_err = results['training'][0][4:6]
fam_bef, fam_bef_err = results['training'][1][4:6]
prot_aft, prot_aft_err = results['test'][0][4:6]
fam_aft, fam_aft_err = results['test'][1][4:6]
plt.figure(figsize=(2.8, 2.2), dpi=300)
width = 0.3
bef_color = pf.ORANGE
aft_color = pf.GREEN
ax = plt.gca()
error_kw = dict(ecolor='black', lw=1, capsize=2, capthick=1)
befh = plt.bar(-0.5*width, prot_bef, width=width, yerr=prot_bef_err,
color=bef_color, error_kw=error_kw)
afth = plt.bar(0.5*width, prot_aft, width=width, yerr=prot_aft_err,
color=aft_color, error_kw=error_kw)
plt.bar(1 - 0.5*width, fam_bef, width=width, yerr=fam_bef_err,
color=bef_color, error_kw=error_kw)
plt.bar(1 + 0.5*width, fam_aft, width=width, yerr=fam_aft_err,
color=aft_color, error_kw=error_kw)
plt.xticks((0+(width/2.), 1+(width/2.)),
('Protein/\ngene', 'Family/\ncomplex'))
plt.ylabel('Grounding accuracy')
pf.format_axis(ax, tick_padding=3)
plt.legend((befh, afth), (without_fplx_label, with_fplx_label),
loc='upper right',
frameon=False, fontsize=pf.fontsize)
plt.subplots_adjust(left=0.22, bottom=0.15, top=0.94, right=0.94)
plt.savefig('combined_results.pdf')
plt.show()
def print_combined_table(results):
rows = []
header = ['\\#', 'Entity \\%', '\\# Cor
|
agents_ungrounded = []
for ag in stmt.agent_list():
if ag is not None and list(ag.db_refs.keys()) == ['TEXT']:
agents_ungrounded.append(True)
else:
agents_ungrounded.append(False)
if all(agents_ungrounded):
all_ungrounded += 1
if any(agents_ungrounded):
any_ungrounded += 1
|
conditional_block
|
|
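A small stand-alone illustration of the any/all ungrounded test in the conditional block above: an agent is treated as ungrounded when its db_refs contain only a TEXT entry. The FakeAgent class here is a stub assumed purely for the example, not part of the original code.

class FakeAgent:
    # Stub carrying only the db_refs attribute the check relies on.
    def __init__(self, db_refs):
        self.db_refs = db_refs

def ungrounded_flags(agents):
    flags = []
    for ag in agents:
        if ag is not None and list(ag.db_refs.keys()) == ['TEXT']:
            flags.append(True)
        else:
            flags.append(False)
    return flags

flags = ungrounded_flags([FakeAgent({'TEXT': 'foo'}),
                          FakeAgent({'TEXT': 'BRAF', 'HGNC': '1097'})])
print(all(flags), any(flags))  # False True -> counts toward any_ungrounded only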
agent_sample_stats.py
|
s)))
any_ungrounded_ratio = 100 * (any_ungrounded / float(len(stmts)))
return all_ungrounded_ratio, any_ungrounded_ratio
def get_agent_counts(stmts):
agents = gm.ungrounded_texts(stmts)
agent_counts = [t[1] for t in agents]
return agent_counts
fname = '../step3_sample_training_test/famplex_test_stmts_mapped.pkl'
stmts = ac.load_statements(fname)
allu_test, anyu_test = get_ungrounded_stats(stmts)
counts_test = get_agent_counts(stmts)
fname = '../step3_sample_training_test/training_pmid_stmts.pkl'
stmts = ac.load_statements(fname)
allu_train, anyu_train = get_ungrounded_stats(stmts)
counts_train = get_agent_counts(stmts)
return (allu_test, anyu_test, allu_train, anyu_train,
counts_train, counts_test)
def plot_ungrounded_stats(allu_test, anyu_test, allu_train, anyu_train):
"""Plot training vs test corpus any and all arguments ungrounded pcts."""
pf.set_fig_params()
plt.figure(figsize=(2, 2.2), dpi=300)
xticks = np.array([0, 1])
col_width = 0.3
btrain = plt.bar(xticks - 0.5*col_width, [allu_train, anyu_train],
col_width, align='center', color=pf.ORANGE)
btest = plt.bar(xticks + 0.5*col_width, [allu_test, anyu_test], col_width,
align='center', color=pf.GREEN)
plt.xticks(xticks, ('All args\nungrounded', 'Any args\nungrounded'))
plt.ylabel('Pct. Extracted Events')
plt.ylim((0, 35))
ax = plt.gca()
pf.format_axis(ax)
plt.subplots_adjust(left=0.17, bottom=0.14, top=0.94, right=0.93)
plt.legend((btrain, btest), (without_fplx_label, with_fplx_label),
loc='upper left', frameon=False, fontsize=pf.fontsize)
plt.savefig('ungrounded_stats.pdf')
def plot_ungrounded_frequencies(counts_list, labels, colors, plot_filename):
"""Plot the distribution of ungrounded strings in training vs test corpus.
"""
bin_interval = 1
fracs_total_list = []
bin_starts_list = []
for counts in counts_list:
freq_dist = []
bin_starts = list(range(0, len(counts), bin_interval))
bin_starts_list.append(bin_starts)
for bin_start_ix in bin_starts:
bin_end_ix = bin_start_ix + bin_interval
if bin_end_ix < len(counts):
freq_dist.append(np.sum(counts[bin_start_ix:bin_end_ix]))
else:
freq_dist.append(np.sum(counts[bin_start_ix:]))
freq_dist = np.array(freq_dist)
fracs_total = np.cumsum(freq_dist)
fracs_total_list.append(fracs_total)
fig = plt.figure(figsize=(2.3, 2.2), dpi=300)
plt.ion()
ax = fig.gca()
for i, (bin_starts, fracs_total) in \
enumerate(zip(bin_starts_list, fracs_total_list)):
xvals = np.array(bin_starts) / len(counts_list[i])
yvals = fracs_total / float(np.sum(counts_list[i]))
ax.plot(xvals, yvals, color=colors[i])
ax.plot(xvals, xvals, color='gray', linestyle='dotted')
labels = list(labels)
labels.append('Uniform distribution')
pf.format_axis(ax)
ax.legend(labels, loc='lower right', frameon=False, fontsize=pf.fontsize)
plt.xlim([0,1])
plt.ylim([0,1])
plt.subplots_adjust(left=0.18, bottom=0.15, right=0.96, top=0.92)
ax.set_xlabel('String rank (normalized)')
ax.set_ylabel('Rel. freq. of occurrences')
plt.savefig(plot_filename)
cats = (['P'], ['F', 'C', 'X'], ['S'], ['B'], ['U'], ['M'])
cat_names = ('Protein/gene', 'Family/complex', 'Small molecule',
'Biological process', 'Other/unknown', 'microRNA')
def grounding_stats(data, plot=False):
rows = []
num_agents = len(data)
if plot:
plt.figure(figsize=(2.2, 2), dpi=300)
for ix, cat in enumerate(cats):
cat_rows = data[data.EntityType.apply(lambda et: et in cat)]
cat_number = len(cat_rows)
cat_pct = (100 * cat_number / float(num_agents))
cat_pct_str = '%.1f' % cat_pct
correct_rows = cat_rows[cat_rows.Grounding == 1]
correct_number = len(correct_rows)
correct_pct = (100 * correct_number / float(cat_number)) if \
cat_number > 0 else 0
correct_pct_of_total = (100 * correct_number) / float(num_agents)
correct_pct_str = '%.1f' % correct_pct
def stderr(k, n):
return np.sqrt(((k/float(n)) * (1-(k/float(n)))) / float(n))
stderr_inc = 100 * stderr(cat_number - correct_number, cat_number)
stderr_corr = 100 * stderr(correct_number, cat_number)
rows.append((cat, cat_number, cat_pct, correct_number,
correct_pct, stderr_corr))
if plot:
inc_handle = plt.bar(ix, cat_pct, color=pf.ORANGE, align='center',
yerr=stderr_inc, linewidth=0.5)
corr_handle = plt.bar(ix, correct_pct_of_total, color=pf.GREEN,
align='center', yerr=stderr_corr,
linewidth=0.5)
if plot:
plt.xticks(range(len(cats)), cat_names, rotation=90)
plt.ylabel('Pct. Curated Entities')
plt.subplots_adjust(left=0.18, bottom=0.43, top=0.96)
ax = plt.gca()
pf.format_axis(ax)
plt.legend((corr_handle, inc_handle), ('Correct', 'Incorrect'),
loc='upper right', frameon=False, fontsize=pf.fontsize)
plt.show()
write_unicode_csv('agents_sample_stats.csv', rows)
return rows
def
|
(results):
prot_bef, prot_bef_err = results['training'][0][4:6]
fam_bef, fam_bef_err = results['training'][1][4:6]
prot_aft, prot_aft_err = results['test'][0][4:6]
fam_aft, fam_aft_err = results['test'][1][4:6]
plt.figure(figsize=(2.8, 2.2), dpi=300)
width = 0.3
bef_color = pf.ORANGE
aft_color = pf.GREEN
ax = plt.gca()
error_kw = dict(ecolor='black', lw=1, capsize=2, capthick=1)
befh = plt.bar(-0.5*width, prot_bef, width=width, yerr=prot_bef_err,
color=bef_color, error_kw=error_kw)
afth = plt.bar(0.5*width, prot_aft, width=width, yerr=prot_aft_err,
color=aft_color, error_kw=error_kw)
plt.bar(1 - 0.5*width, fam_bef, width=width, yerr=fam_bef_err,
color=bef_color, error_kw=error_kw)
plt.bar(1 + 0.5*width, fam_aft, width=width, yerr=fam_aft_err,
color=aft_color, error_kw=error_kw)
plt.xticks((0+(width/2.), 1+(width/2.)),
('Protein/\ngene', 'Family/\ncomplex'))
plt.ylabel('Grounding accuracy')
pf.format_axis(ax, tick_padding=3)
plt.legend((befh, afth), (without_fplx_label, with_fplx_label),
loc='upper right',
frameon=False, fontsize=pf.fontsize)
plt.subplots_adjust(left=0.22, bottom=0.15, top=0.94, right=0.94)
plt.savefig('combined_results.pdf')
plt.show()
def print_combined_table(results):
rows = []
header = ['\\#', 'Entity \\%', '\\# Corr.', '\\% Corr.',
'\\#', 'Entity \\%', '\\# Corr.', '\\% Corr.']
rows.append(header)
r_tr = results['training']
r_te = results['test']
def format(res):
return (res[1], '%.1f' % res[2], res[3],
r'%.1f $\pm$ %.1f' % (res[4], res[5]))
for row_ix in range(6):
row
|
combined_graph
|
identifier_name
|
nextbus_test.go
|
.77513" lon="-122.41946" secsSinceReport="4" predictable="true" heading="225" speedKmHr="0" leadingVehicleId="1112"/>
<vehicle id="2222" routeTag="2" dirTag="2_inbound" lat="37.74891" lon="-122.45848" secsSinceReport="5" predictable="true" heading="217" speedKmHr="0" leadingVehicleId="2223"/>
<lastTime time="1234567890123"/>
</body>
`,
makeURL("predictions", "a", "alpha", "stopId", "11123"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564618948" seconds="623" minutes="10" isDeparture="false" dirTag="7____O_F00" vehicle="6581" block="0712" tripTag="7447642"/>
<prediction epochTime="1490565376790" seconds="1381" minutes="23" isDeparture="false" affectedByLayover="true" dirTag="7____O_F00" vehicle="6720" block="0705" tripTag="7447643"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The Second" routeTag="2" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564681782" seconds="686" minutes="11" isDeparture="false" dirTag="6____O_F00" vehicle="8618" block="0609" tripTag="7447028"/>
<prediction epochTime="1490565307084" seconds="1311" minutes="21" isDeparture="false" dirTag="6____O_F00" vehicle="8807" block="0602" tripTag="7447029"/>
</direction>
</predictions>
</body>
`,
makeURL("predictionsForMultiStops", "a", "alpha", "stops", "1|1123", "stops", "1|1124"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1487277081162" seconds="181" minutes="3" isDeparture="false" dirTag="1____O_F00" vehicle="1111" vehiclesInConsist="2" block="9999" tripTag="7318265"/>
<prediction epochTime="1487277463429" seconds="563" minutes="9" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="2222" vehiclesInConsist="2" block="8888" tripTag="7318264"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Other Station Outbound" stopTag="1124">
<direction title="Outbound">
<prediction epochTime="1487278019915" seconds="1120" minutes="18" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="4444" vehiclesInConsist="2" block="6666" tripTag="7318264"/>
</direction>
<message text="No Elevator at Blah blah Station" priority="Normal"/>
</predictions>
</body>
`}
type fakeRoundTripper struct {
t *testing.T
}
func (f fakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if req.Body != nil {
req.Body.Close()
req.Body = nil
}
url := req.URL.String()
xml, ok := fakes[url]
if !ok {
valid := []string{}
for k := range fakes {
valid = append(valid, k)
}
f.t.Fatalf("Unexpected url %q. allowable urls are=%q", url, valid)
return nil, nil
}
res := http.Response{}
res.StatusCode = http.StatusOK
res.Body = ioutil.NopCloser(strings.NewReader(xml))
res.Request = req
return &res, nil
}
func testingClient(t *testing.T) *http.Client {
httpClient := http.Client{}
httpClient.Transport = fakeRoundTripper{t}
return &httpClient
}
func xmlName(s string) xml.Name {
return xml.Name{Space: "", Local: s}
}
func stopMarkers(tags ...string) []StopMarker {
var result []StopMarker
for _, t := range tags {
result = append(result, StopMarker{xmlName("stop"), t})
}
return result
}
func TestGetAgencyList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetAgencyList()
ok(t, err)
expected := []Agency{
Agency{xmlName("agency"), "alpha", "The First", "What a Transit Agency"},
Agency{xmlName("agency"), "beta", "The Second", "Never never land"},
}
equals(t, expected, found)
}
func TestGetRouteList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteList("alpha")
ok(t, err)
expected := []Route{
Route{xmlName("route"), "1", "1-first"},
Route{xmlName("route"), "2", "2-second"},
}
equals(t, expected, found)
}
func TestGetRouteConfig(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteConfig("alpha")
ok(t, err)
expected := []RouteConfig{
RouteConfig{
xmlName("route"),
[]Stop{
Stop{
xmlName("stop"),
"1123", "First stop", "12.3456789", "-123.45789", "98765",
},
Stop{
xmlName("stop"),
"1234", "Second stop", "23.4567890", "-456.78901", "87654",
},
},
"1",
"1-first",
"660000",
"ffffff",
"12.3456789",
"45.6789012",
"-123.4567890",
"-456.78901",
[]Direction{
Direction{
xmlName("direction"),
"1out", "Outbound to somewhere", "Outbound", "true",
stopMarkers("1123", "1234"),
},
Direction{
xmlName("direction"),
"1in", "Inbound to somewhere", "Inbound", "true",
stopMarkers("1234", "1123"),
},
},
nil,
},
}
equals(t, expected, found)
}
func TestGetVehicleLocations(t *testing.T)
|
},
VehicleLocation{
xmlName("vehicle"),
"2222",
"2",
"2_inbound",
"37.74891",
"-122.45848",
"5",
"true",
"217",
"0",
"2
|
{
nb := NewClient(testingClient(t))
found, err := nb.GetVehicleLocations("alpha")
ok(t, err)
expected := LocationResponse{
xmlName("body"),
[]VehicleLocation{
VehicleLocation{
xmlName("vehicle"),
"1111",
"1",
"1_outbound",
"37.77513",
"-122.41946",
"4",
"true",
"225",
"0",
"1112",
|
identifier_body
|
nextbus_test.go
|
.77513" lon="-122.41946" secsSinceReport="4" predictable="true" heading="225" speedKmHr="0" leadingVehicleId="1112"/>
<vehicle id="2222" routeTag="2" dirTag="2_inbound" lat="37.74891" lon="-122.45848" secsSinceReport="5" predictable="true" heading="217" speedKmHr="0" leadingVehicleId="2223"/>
<lastTime time="1234567890123"/>
</body>
`,
makeURL("predictions", "a", "alpha", "stopId", "11123"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564618948" seconds="623" minutes="10" isDeparture="false" dirTag="7____O_F00" vehicle="6581" block="0712" tripTag="7447642"/>
<prediction epochTime="1490565376790" seconds="1381" minutes="23" isDeparture="false" affectedByLayover="true" dirTag="7____O_F00" vehicle="6720" block="0705" tripTag="7447643"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The Second" routeTag="2" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564681782" seconds="686" minutes="11" isDeparture="false" dirTag="6____O_F00" vehicle="8618" block="0609" tripTag="7447028"/>
<prediction epochTime="1490565307084" seconds="1311" minutes="21" isDeparture="false" dirTag="6____O_F00" vehicle="8807" block="0602" tripTag="7447029"/>
</direction>
</predictions>
</body>
`,
makeURL("predictionsForMultiStops", "a", "alpha", "stops", "1|1123", "stops", "1|1124"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1487277081162" seconds="181" minutes="3" isDeparture="false" dirTag="1____O_F00" vehicle="1111" vehiclesInConsist="2" block="9999" tripTag="7318265"/>
<prediction epochTime="1487277463429" seconds="563" minutes="9" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="2222" vehiclesInConsist="2" block="8888" tripTag="7318264"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Other Station Outbound" stopTag="1124">
<direction title="Outbound">
<prediction epochTime="1487278019915" seconds="1120" minutes="18" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="4444" vehiclesInConsist="2" block="6666" tripTag="7318264"/>
</direction>
<message text="No Elevator at Blah blah Station" priority="Normal"/>
</predictions>
</body>
`}
type fakeRoundTripper struct {
t *testing.T
}
func (f fakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if req.Body != nil {
req.Body.Close()
req.Body = nil
}
url := req.URL.String()
xml, ok := fakes[url]
if !ok {
valid := []string{}
for k := range fakes {
valid = append(valid, k)
}
f.t.Fatalf("Unexpected url %q. allowable urls are=%q", url, valid)
return nil, nil
}
res := http.Response{}
res.StatusCode = http.StatusOK
res.Body = ioutil.NopCloser(strings.NewReader(xml))
res.Request = req
return &res, nil
}
func testingClient(t *testing.T) *http.Client {
httpClient := http.Client{}
httpClient.Transport = fakeRoundTripper{t}
return &httpClient
}
func xmlName(s string) xml.Name {
return xml.Name{Space: "", Local: s}
}
func stopMarkers(tags ...string) []StopMarker {
var result []StopMarker
for _, t := range tags {
result = append(result, StopMarker{xmlName("stop"), t})
}
return result
}
func
|
(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetAgencyList()
ok(t, err)
expected := []Agency{
Agency{xmlName("agency"), "alpha", "The First", "What a Transit Agency"},
Agency{xmlName("agency"), "beta", "The Second", "Never never land"},
}
equals(t, expected, found)
}
func TestGetRouteList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteList("alpha")
ok(t, err)
expected := []Route{
Route{xmlName("route"), "1", "1-first"},
Route{xmlName("route"), "2", "2-second"},
}
equals(t, expected, found)
}
func TestGetRouteConfig(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteConfig("alpha")
ok(t, err)
expected := []RouteConfig{
RouteConfig{
xmlName("route"),
[]Stop{
Stop{
xmlName("stop"),
"1123", "First stop", "12.3456789", "-123.45789", "98765",
},
Stop{
xmlName("stop"),
"1234", "Second stop", "23.4567890", "-456.78901", "87654",
},
},
"1",
"1-first",
"660000",
"ffffff",
"12.3456789",
"45.6789012",
"-123.4567890",
"-456.78901",
[]Direction{
Direction{
xmlName("direction"),
"1out", "Outbound to somewhere", "Outbound", "true",
stopMarkers("1123", "1234"),
},
Direction{
xmlName("direction"),
"1in", "Inbound to somewhere", "Inbound", "true",
stopMarkers("1234", "1123"),
},
},
nil,
},
}
equals(t, expected, found)
}
func TestGetVehicleLocations(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetVehicleLocations("alpha")
ok(t, err)
expected := LocationResponse{
xmlName("body"),
[]VehicleLocation{
VehicleLocation{
xmlName("vehicle"),
"1111",
"1",
"1_outbound",
"37.77513",
"-122.41946",
"4",
"true",
"225",
"0",
"1112",
},
VehicleLocation{
xmlName("vehicle"),
"2222",
"2",
"2_inbound",
"37.74891",
"-122.45848",
"5",
"true",
"217",
"0",
"2
|
TestGetAgencyList
|
identifier_name
|
nextbus_test.go
|
.77513" lon="-122.41946" secsSinceReport="4" predictable="true" heading="225" speedKmHr="0" leadingVehicleId="1112"/>
<vehicle id="2222" routeTag="2" dirTag="2_inbound" lat="37.74891" lon="-122.45848" secsSinceReport="5" predictable="true" heading="217" speedKmHr="0" leadingVehicleId="2223"/>
<lastTime time="1234567890123"/>
</body>
`,
makeURL("predictions", "a", "alpha", "stopId", "11123"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564618948" seconds="623" minutes="10" isDeparture="false" dirTag="7____O_F00" vehicle="6581" block="0712" tripTag="7447642"/>
<prediction epochTime="1490565376790" seconds="1381" minutes="23" isDeparture="false" affectedByLayover="true" dirTag="7____O_F00" vehicle="6720" block="0705" tripTag="7447643"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The Second" routeTag="2" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564681782" seconds="686" minutes="11" isDeparture="false" dirTag="6____O_F00" vehicle="8618" block="0609" tripTag="7447028"/>
<prediction epochTime="1490565307084" seconds="1311" minutes="21" isDeparture="false" dirTag="6____O_F00" vehicle="8807" block="0602" tripTag="7447029"/>
</direction>
</predictions>
</body>
`,
makeURL("predictionsForMultiStops", "a", "alpha", "stops", "1|1123", "stops", "1|1124"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1487277081162" seconds="181" minutes="3" isDeparture="false" dirTag="1____O_F00" vehicle="1111" vehiclesInConsist="2" block="9999" tripTag="7318265"/>
<prediction epochTime="1487277463429" seconds="563" minutes="9" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="2222" vehiclesInConsist="2" block="8888" tripTag="7318264"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Other Station Outbound" stopTag="1124">
<direction title="Outbound">
<prediction epochTime="1487278019915" seconds="1120" minutes="18" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="4444" vehiclesInConsist="2" block="6666" tripTag="7318264"/>
</direction>
<message text="No Elevator at Blah blah Station" priority="Normal"/>
</predictions>
</body>
`}
type fakeRoundTripper struct {
t *testing.T
}
func (f fakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if req.Body != nil {
req.Body.Close()
req.Body = nil
}
url := req.URL.String()
xml, ok := fakes[url]
if !ok {
valid := []string{}
for k := range fakes {
valid = append(valid, k)
}
f.t.Fatalf("Unexpected url %q. allowable urls are=%q", url, valid)
return nil, nil
}
res := http.Response{}
res.StatusCode = http.StatusOK
res.Body = ioutil.NopCloser(strings.NewReader(xml))
res.Request = req
return &res, nil
}
func testingClient(t *testing.T) *http.Client {
httpClient := http.Client{}
httpClient.Transport = fakeRoundTripper{t}
return &httpClient
}
func xmlName(s string) xml.Name {
return xml.Name{Space: "", Local: s}
}
func stopMarkers(tags ...string) []StopMarker {
var result []StopMarker
for _, t := range tags {
result = append(result, StopMarker{xmlName("stop"), t})
}
return result
}
func TestGetAgencyList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetAgencyList()
ok(t, err)
expected := []Agency{
Agency{xmlName("agency"), "alpha", "The First", "What a Transit Agency"},
Agency{xmlName("agency"), "beta", "The Second", "Never never land"},
}
equals(t, expected, found)
}
func TestGetRouteList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteList("alpha")
ok(t, err)
expected := []Route{
Route{xmlName("route"), "1", "1-first"},
Route{xmlName("route"), "2", "2-second"},
}
equals(t, expected, found)
}
func TestGetRouteConfig(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteConfig("alpha")
ok(t, err)
expected := []RouteConfig{
RouteConfig{
xmlName("route"),
[]Stop{
Stop{
xmlName("stop"),
"1123", "First stop", "12.3456789", "-123.45789", "98765",
},
Stop{
xmlName("stop"),
"1234", "Second stop", "23.4567890", "-456.78901", "87654",
},
},
"1",
|
"ffffff",
"12.3456789",
"45.6789012",
"-123.4567890",
"-456.78901",
[]Direction{
Direction{
xmlName("direction"),
"1out", "Outbound to somewhere", "Outbound", "true",
stopMarkers("1123", "1234"),
},
Direction{
xmlName("direction"),
"1in", "Inbound to somewhere", "Inbound", "true",
stopMarkers("1234", "1123"),
},
},
nil,
},
}
equals(t, expected, found)
}
func TestGetVehicleLocations(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetVehicleLocations("alpha")
ok(t, err)
expected := LocationResponse{
xmlName("body"),
[]VehicleLocation{
VehicleLocation{
xmlName("vehicle"),
"1111",
"1",
"1_outbound",
"37.77513",
"-122.41946",
"4",
"true",
"225",
"0",
"1112",
},
VehicleLocation{
xmlName("vehicle"),
"2222",
"2",
"2_inbound",
"37.74891",
"-122.45848",
"5",
"true",
"217",
"0",
"22
|
"1-first",
"660000",
|
random_line_split
|
nextbus_test.go
|
77513" lon="-122.41946" secsSinceReport="4" predictable="true" heading="225" speedKmHr="0" leadingVehicleId="1112"/>
<vehicle id="2222" routeTag="2" dirTag="2_inbound" lat="37.74891" lon="-122.45848" secsSinceReport="5" predictable="true" heading="217" speedKmHr="0" leadingVehicleId="2223"/>
<lastTime time="1234567890123"/>
</body>
`,
makeURL("predictions", "a", "alpha", "stopId", "11123"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564618948" seconds="623" minutes="10" isDeparture="false" dirTag="7____O_F00" vehicle="6581" block="0712" tripTag="7447642"/>
<prediction epochTime="1490565376790" seconds="1381" minutes="23" isDeparture="false" affectedByLayover="true" dirTag="7____O_F00" vehicle="6720" block="0705" tripTag="7447643"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The Second" routeTag="2" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1490564681782" seconds="686" minutes="11" isDeparture="false" dirTag="6____O_F00" vehicle="8618" block="0609" tripTag="7447028"/>
<prediction epochTime="1490565307084" seconds="1311" minutes="21" isDeparture="false" dirTag="6____O_F00" vehicle="8807" block="0602" tripTag="7447029"/>
</direction>
</predictions>
</body>
`,
makeURL("predictionsForMultiStops", "a", "alpha", "stops", "1|1123", "stops", "1|1124"): `
<body copyright="All data copyright some transit company.">
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Station Outbound" stopTag="1123">
<direction title="Outbound">
<prediction epochTime="1487277081162" seconds="181" minutes="3" isDeparture="false" dirTag="1____O_F00" vehicle="1111" vehiclesInConsist="2" block="9999" tripTag="7318265"/>
<prediction epochTime="1487277463429" seconds="563" minutes="9" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="2222" vehiclesInConsist="2" block="8888" tripTag="7318264"/>
</direction>
</predictions>
<predictions agencyTitle="some transit company" routeTitle="The First" routeTag="1" stopTitle="Some Other Station Outbound" stopTag="1124">
<direction title="Outbound">
<prediction epochTime="1487278019915" seconds="1120" minutes="18" isDeparture="false" affectedByLayover="true" dirTag="1____O_F00" vehicle="4444" vehiclesInConsist="2" block="6666" tripTag="7318264"/>
</direction>
<message text="No Elevator at Blah blah Station" priority="Normal"/>
</predictions>
</body>
`}
type fakeRoundTripper struct {
t *testing.T
}
func (f fakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if req.Body != nil
|
url := req.URL.String()
xml, ok := fakes[url]
if !ok {
valid := []string{}
for k := range fakes {
valid = append(valid, k)
}
f.t.Fatalf("Unexpected url %q. allowable urls are=%q", url, valid)
return nil, nil
}
res := http.Response{}
res.StatusCode = http.StatusOK
res.Body = ioutil.NopCloser(strings.NewReader(xml))
res.Request = req
return &res, nil
}
func testingClient(t *testing.T) *http.Client {
httpClient := http.Client{}
httpClient.Transport = fakeRoundTripper{t}
return &httpClient
}
func xmlName(s string) xml.Name {
return xml.Name{Space: "", Local: s}
}
func stopMarkers(tags ...string) []StopMarker {
var result []StopMarker
for _, t := range tags {
result = append(result, StopMarker{xmlName("stop"), t})
}
return result
}
func TestGetAgencyList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetAgencyList()
ok(t, err)
expected := []Agency{
Agency{xmlName("agency"), "alpha", "The First", "What a Transit Agency"},
Agency{xmlName("agency"), "beta", "The Second", "Never never land"},
}
equals(t, expected, found)
}
func TestGetRouteList(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteList("alpha")
ok(t, err)
expected := []Route{
Route{xmlName("route"), "1", "1-first"},
Route{xmlName("route"), "2", "2-second"},
}
equals(t, expected, found)
}
func TestGetRouteConfig(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetRouteConfig("alpha")
ok(t, err)
expected := []RouteConfig{
RouteConfig{
xmlName("route"),
[]Stop{
Stop{
xmlName("stop"),
"1123", "First stop", "12.3456789", "-123.45789", "98765",
},
Stop{
xmlName("stop"),
"1234", "Second stop", "23.4567890", "-456.78901", "87654",
},
},
"1",
"1-first",
"660000",
"ffffff",
"12.3456789",
"45.6789012",
"-123.4567890",
"-456.78901",
[]Direction{
Direction{
xmlName("direction"),
"1out", "Outbound to somewhere", "Outbound", "true",
stopMarkers("1123", "1234"),
},
Direction{
xmlName("direction"),
"1in", "Inbound to somewhere", "Inbound", "true",
stopMarkers("1234", "1123"),
},
},
nil,
},
}
equals(t, expected, found)
}
func TestGetVehicleLocations(t *testing.T) {
nb := NewClient(testingClient(t))
found, err := nb.GetVehicleLocations("alpha")
ok(t, err)
expected := LocationResponse{
xmlName("body"),
[]VehicleLocation{
VehicleLocation{
xmlName("vehicle"),
"1111",
"1",
"1_outbound",
"37.77513",
"-122.41946",
"4",
"true",
"225",
"0",
"1112",
},
VehicleLocation{
xmlName("vehicle"),
"2222",
"2",
"2_inbound",
"37.74891",
"-122.45848",
"5",
"true",
"217",
"0",
"2
|
{
req.Body.Close()
req.Body = nil
}
|
conditional_block
|
battle.py
|
'#':
other = unit_at(x, y, units)
if other is not None and other.race != self.race and not other.dead:
targets.append(other)
return targets
def find_in_range_tiles(self, arena, units):
# Find tiles in range to an enemy
in_range_tiles = set() #Set to avoid duplicates
for u in units:
if u.race == self.race or u.dead:
continue
in_range_tiles.update(u.find_open_tiles(arena, units))
return in_range_tiles
def perform_attack(self, arena, targets):
# Sort targets by hit points, and then position
target = sorted(targets, key=lambda t: (t.hp, t.x, t.y))[0]
# Reduce hit points and check if dead
target.hp -= self.attack
if target.hp <= 0:
target.dead = True
return {'target': target}
def perform_turn(self, arena, units):
"""
Returns a result, and a dictionary containing any extra required info about what happened
during the turn.
"""
# Verify that unit hasn't died
if self.dead:
return 'dead', {}
# Verify that enemies are still present
targets = [u for u in units if u.race == self.enemy_race() and not u.dead]
if len(targets) == 0:
return 'no-targets', {}
# Check for in-range targets
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'attack', data
# Find reachable tiles
in_range = self.find_in_range_tiles(arena, units)
target, paths = find_target_tile(self.x, self.y, in_range, arena, units)
if target is None:
return 'no-reachable', {}
# If multiple paths exist, pick the starting point using reading order
optimal_paths = find_optimal_paths((self.x, self.y), target, paths)
choices = sorted([op[0] for op in optimal_paths])
x, y = choices[0]
# Update position
self.x = x
self.y = y
# Check for in-range targets after moving
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'move-attack', data
else:
return 'moved', {'pos': (x, y)}
def __repr__(self):
return '{}{} {}: {}/{} at ({},{})'.format('Dead ' if self.dead else '',
self.race.title(), self.name, self.hp,
self.attack, self.x, self.y)
def read_arena():
arena = []
units = []
for x, line in enumerate(sys.stdin):
line = line.strip()
# Extract units from line
extracted = ''
for y, c in enumerate(line):
if c == 'G':
goblin = Unit('goblin', x, y, 200, 3)
units.append(goblin)
extracted += '.'
elif c == 'E':
elf = Unit('elf', x, y, 200, 3)
units.append(elf)
extracted += '.'
else:
extracted += c
arena.append(list(extracted))
return arena, units
def print_arena(arena, units):
arena_copy = copy.deepcopy(arena)
#Draw units
for unit in units:
if unit.dead:
continue
arena_copy[unit.x][unit.y] = unit
for row in arena_copy:
row_end = ''
for tile in row:
if isinstance(tile, Unit):
row_end += '{}({}), '.format(tile.name, tile.hp)
tile = tile.name
print(tile, end='')
print(' ', row_end)
def unit_at(x, y, units):
"""
Returns the unit present at x,y or None.
"""
for u in units:
if u.x == x and u.y == y:
return u
return None
def find_target_tile(src_x, src_y, tiles, arena, units):
arena_copy = copy.deepcopy(arena)
for u in units:
if u.dead:
continue
arena_copy[u.x][u.y] = '#'
arena_copy[src_x][src_y] = '.' #Set this back to open as it's our starting point
# Initialize Dijkstra's algorithm
unvisited = set()
dist = {}
prev = {}
for x, row in enumerate(arena_copy):
for y, tile in enumerate(row):
if arena_copy[x][y] == '.':
dist[(x, y)] = float('inf')
prev[(x, y)] = None
unvisited.add((x, y))
# Set source to 0
dist[(src_x, src_y)] = 0
# Iterate through set
while unvisited:
# Find min
min_value = float('inf')
selected = None
for node in unvisited:
if dist[node] < min_value:
min_value = dist[node]
selected = node
# End looping if no nodes are accessible
if selected is None:
break
unvisited.remove(selected)
node_x, node_y = selected
for x, y in [(node_x+1, node_y), (node_x, node_y+1), (node_x-1, node_y), (node_x, node_y-1)]:
if (x, y) in unvisited:
new_distance = dist[(node_x, node_y)] + 1
if new_distance < dist[(x, y)]:
dist[(x, y)] = new_distance
prev[(x, y)] = [selected]
elif new_distance == dist[(x, y)]:
prev[(x, y)].append(selected)
# Filter out unreachable and unconsidered values
distances = {k: v for k, v in dist.items() if k in tiles and v != float('inf')}
if len(distances) == 0:
return None, None
target = sorted([(v, k[0], k[1]) for k, v in distances.items()])[0]
target = (target[1], target[2]) #Extract x,y coords
return target, prev
def find_optimal_paths(source, target, graph):
# Because the graph gives the previous item, work backwards from target
def update_paths(source, current, path, graph, optimal_paths):
# If we've found the target, record the path
if source == current:
optimal_paths.append(path)
return
cur_x, cur_y = current
for x, y in graph[current]:
update_paths(source, (x, y), (current,) + path, graph, optimal_paths)
optimal_paths = []
update_paths(source, target, (), graph, optimal_paths)
return optimal_paths
def find_next_step(start, end, paths):
"""
Given initial and final (x,y) coordinates and a dictionary of partial paths, return the
next step towards reaching
"""
def find_paths(start, current, distance, paths, choices):
"""
Given the start point, and the current point, builds a dictionary indicating the first step
and the minimum distance to the end using that step. Distance indicates the distance from
current to end.
"""
# Find all paths resulting in the minimum distance
options = []
min_distance = min(paths[current].values())
for option, distance in paths[current].items():
if distance == min_distance:
# If we find the beginning, break out
if option == start:
if option not in choices or choices[current] < distance + min_distance:
choices[current] = distance + min_distance
return
# Add to list of options
options.append(option)
# For each path, recursively find minimal paths
for option in options:
find_paths(start, option, min_distance, paths, choices)
choices = {}
find_paths(start, end, 0, paths, choices)
choices = sorted(choices.keys())
return choices[0]
def perform_round(arena, units):
"""
Performs a round of moving and combat, returns True if the full round is executed.
"""
# Order units and split into goblins and elves
units = [u for u in sorted(units, key=lambda u: (u.x, u.y)) if not u.dead]
for unit in units:
result, data = unit.perform_turn(arena, units)
if result == 'no-targets':
return False #Nothing to attack, game over
return True
def battle():
|
start = time.time()
round_end = time.time()
arena, units = read_arena()
initial_arena = copy.deepcopy(arena)
initial_units = copy.deepcopy(units)
#Loop until no deaths
deaths = 1
power = 2
while deaths > 0:
#Update elf powers
power += 1
arena = copy.deepcopy(initial_arena)
units = copy.deepcopy(initial_units)
for u in units:
if u.race == 'elf':
u.attack = power
|
identifier_body
|
|
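Because find_target_tile above runs Dijkstra with every edge weight equal to 1, a plain breadth-first search yields the same distances on this kind of map. A compact sketch of that equivalence on a hypothetical walled grid (not the puzzle input):

from collections import deque

def grid_distances(arena, src):
    # BFS from src over '.' tiles; equivalent to Dijkstra when every step costs 1.
    dist = {src: 0}
    queue = deque([src])
    while queue:
        x, y = queue.popleft()
        for nx, ny in [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]:
            if arena[nx][ny] == '.' and (nx, ny) not in dist:
                dist[(nx, ny)] = dist[(x, y)] + 1
                queue.append((nx, ny))
    return dist

arena = ['#####',
         '#...#',
         '#.#.#',
         '#####']
print(grid_distances(arena, (1, 1)))
# {(1, 1): 0, (2, 1): 1, (1, 2): 1, (1, 3): 2, (2, 3): 3}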
battle.py
|
elf':
return 'goblin'
else:
raise ValueError('Invalid race')
def find_open_tiles(self, arena, units):
"""
Returns a list of all open tiles adjacent to the unit.
"""
tiles = []
for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:
if arena[x][y] == '.':
tiles.append((x, y))
return tiles
def find_adjacent_targets(self, arena, units):
"""
Returns a list of all adjacent targets in range.
"""
in_range = []
targets = []
for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:
if arena[x][y] != '#':
other = unit_at(x, y, units)
if other is not None and other.race != self.race and not other.dead:
targets.append(other)
return targets
def find_in_range_tiles(self, arena, units):
# Find tiles in range to an enemy
in_range_tiles = set() #Set to avoid duplicates
for u in units:
if u.race == self.race or u.dead:
continue
in_range_tiles.update(u.find_open_tiles(arena, units))
return in_range_tiles
def perform_attack(self, arena, targets):
# Sort targets by hit points, and then position
target = sorted(targets, key=lambda t: (t.hp, t.x, t.y))[0]
# Reduce hit points and check if dead
target.hp -= self.attack
if target.hp <= 0:
target.dead = True
return {'target': target}
def perform_turn(self, arena, units):
"""
Returns a result, and a dictionary containing any extra required info about what happened
during the turn.
"""
# Verify that unit hasn't died
if self.dead:
return 'dead', {}
# Verify that enemies are still present
targets = [u for u in units if u.race == self.enemy_race() and not u.dead]
if len(targets) == 0:
return 'no-targets', {}
# Check for in-range targets
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'attack', data
# Find reachable tiles
in_range = self.find_in_range_tiles(arena, units)
target, paths = find_target_tile(self.x, self.y, in_range, arena, units)
|
return 'no-reachable', {}
# If multiple paths exist, pick the starting point using reading order
optimal_paths = find_optimal_paths((self.x, self.y), target, paths)
choices = sorted([op[0] for op in optimal_paths])
x, y = choices[0]
# Update position
self.x = x
self.y = y
# Check for in-range targets after moving
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'move-attack', data
else:
return 'moved', {'pos': (x, y)}
def __repr__(self):
return '{}{} {}: {}/{} at ({},{})'.format('Dead ' if self.dead else '',
self.race.title(), self.name, self.hp,
self.attack, self.x, self.y)
def read_arena():
arena = []
units = []
for x, line in enumerate(sys.stdin):
line = line.strip()
# Extract units from line
extracted = ''
for y, c in enumerate(line):
if c == 'G':
goblin = Unit('goblin', x, y, 200, 3)
units.append(goblin)
extracted += '.'
elif c == 'E':
elf = Unit('elf', x, y, 200, 3)
units.append(elf)
extracted += '.'
else:
extracted += c
arena.append(list(extracted))
return arena, units
def print_arena(arena, units):
arena_copy = copy.deepcopy(arena)
#Draw units
for unit in units:
if unit.dead:
continue
arena_copy[unit.x][unit.y] = unit
for row in arena_copy:
row_end = ''
for tile in row:
if isinstance(tile, Unit):
row_end += '{}({}), '.format(tile.name, tile.hp)
tile = tile.name
print(tile, end='')
print(' ', row_end)
def unit_at(x, y, units):
"""
Returns the unit present at x,y or None.
"""
for u in units:
if u.x == x and u.y == y:
return u
return None
def find_target_tile(src_x, src_y, tiles, arena, units):
arena_copy = copy.deepcopy(arena)
for u in units:
if u.dead:
continue
arena_copy[u.x][u.y] = '#'
arena_copy[src_x][src_y] = '.' #Set this back to open as it's our starting point
# Initialize Dijkstra's algorithm
unvisited = set()
dist = {}
prev = {}
for x, row in enumerate(arena_copy):
for y, tile in enumerate(row):
if arena_copy[x][y] == '.':
dist[(x, y)] = float('inf')
prev[(x, y)] = None
unvisited.add((x, y))
# Set source to 0
dist[(src_x, src_y)] = 0
# Iterate through set
while unvisited:
# Find min
min_value = float('inf')
selected = None
for node in unvisited:
if dist[node] < min_value:
min_value = dist[node]
selected = node
        # Stop looping if no nodes are accessible
if selected is None:
break
unvisited.remove(selected)
node_x, node_y = selected
for x, y in [(node_x+1, node_y), (node_x, node_y+1), (node_x-1, node_y), (node_x, node_y-1)]:
if (x, y) in unvisited:
new_distance = dist[(node_x, node_y)] + 1
if new_distance < dist[(x, y)]:
dist[(x, y)] = new_distance
prev[(x, y)] = [selected]
elif new_distance == dist[(x, y)]:
prev[(x, y)].append(selected)
# Filter out unreachable and unconsidered values
distances = {k: v for k, v in dist.items() if k in tiles and v != float('inf')}
if len(distances) == 0:
return None, None
target = sorted([(v, k[0], k[1]) for k, v in distances.items()])[0]
target = (target[1], target[2]) #Extract x,y coords
return target, prev
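# Illustrative sketch with made-up distances: when several in-range tiles tie
# for the minimum distance, the (distance, x, y) sort in find_target_tile picks
# the tile earliest in reading order (top-to-bottom, then left-to-right).
def _demo_reading_order_tiebreak():
    distances = {(3, 5): 4, (2, 1): 4, (6, 0): 7}
    best = sorted([(v, k[0], k[1]) for k, v in distances.items()])[0]
    return (best[1], best[2])   # -> (2, 1)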
def find_optimal_paths(source, target, graph):
# Because the graph gives the previous item, work backwards from target
def update_paths(source, current, path, graph, optimal_paths):
# If we've found the target, record the path
if source == current:
optimal_paths.append(path)
return
        for x, y in graph[current]:
            # Build a fresh tuple per branch so sibling paths don't accumulate each other's steps
            new_path = (current,) + path
            update_paths(source, (x, y), new_path, graph, optimal_paths)
optimal_paths = []
update_paths(source, target, (), graph, optimal_paths)
return optimal_paths
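# Hedged sketch: find_optimal_paths walks the predecessor map produced by
# find_target_tile backwards from the target. The tiny single-chain graph
# below is hypothetical: (0, 0) -> (0, 1) -> (0, 2).
def _demo_path_reconstruction():
    graph = {(0, 2): [(0, 1)], (0, 1): [(0, 0)]}
    return find_optimal_paths((0, 0), (0, 2), graph)   # -> [((0, 1), (0, 2))]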
def find_next_step(start, end, paths):
"""
Given initial and final (x,y) coordinates and a dictionary of partial paths, return the
    next step towards reaching the end point.
"""
def find_paths(start, current, distance, paths, choices):
"""
Given the start point, and the current point, builds a dictionary indicating the first step
and the minimum distance to the end using that step. Distance indicates the distance from
current to end.
"""
# Find all paths resulting in the minimum distance
options = []
min_distance = min(paths[current].values())
for option, distance in paths[current].items():
if distance == min_distance:
# If we find the beginning, break out
if option == start:
if option not in choices or choices[current] < distance + min_distance:
choices[current] = distance + min_distance
return
# Add to list of options
options.append(option)
# For each path, recursively find minimal paths
for option in options:
find_paths(start, option, min_distance, paths, choices)
choices = {}
find_paths(start, end, 0, paths, choices)
choices = sorted(choices.keys())
return choices[0]
def perform_round(arena, units):
"""
Performs a round of moving and combat, returns True if the full round is executed.
|
if target is None:
|
random_line_split
|
battle.py
|
'elf':
return 'goblin'
else:
raise ValueError('Invalid race')
def find_open_tiles(self, arena, units):
"""
Returns a list of all open tiles adjacent to the unit.
"""
tiles = []
for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:
if arena[x][y] == '.':
tiles.append((x, y))
return tiles
def find_adjacent_targets(self, arena, units):
"""
Returns a list of all adjacent targets in range.
"""
        targets = []
for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:
if arena[x][y] != '#':
other = unit_at(x, y, units)
if other is not None and other.race != self.race and not other.dead:
targets.append(other)
return targets
def find_in_range_tiles(self, arena, units):
        # Find tiles in range of an enemy
in_range_tiles = set() #Set to avoid duplicates
for u in units:
if u.race == self.race or u.dead:
continue
in_range_tiles.update(u.find_open_tiles(arena, units))
return in_range_tiles
def perform_attack(self, arena, targets):
# Sort targets by hit points, and then position
target = sorted(targets, key=lambda t: (t.hp, t.x, t.y))[0]
# Reduce hit points and check if dead
target.hp -= self.attack
if target.hp <= 0:
target.dead = True
return {'target': target}
def perform_turn(self, arena, units):
"""
Returns a result, and a dictionary containing any extra required info about what happened
during the turn.
"""
# Verify that unit hasn't died
if self.dead:
return 'dead', {}
# Verify that enemies are still present
targets = [u for u in units if u.race == self.enemy_race() and not u.dead]
if len(targets) == 0:
return 'no-targets', {}
# Check for in-range targets
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'attack', data
# Find reachable tiles
in_range = self.find_in_range_tiles(arena, units)
target, paths = find_target_tile(self.x, self.y, in_range, arena, units)
if target is None:
return 'no-reachable', {}
# If multiple paths exist, pick the starting point using reading order
optimal_paths = find_optimal_paths((self.x, self.y), target, paths)
choices = sorted([op[0] for op in optimal_paths])
x, y = choices[0]
# Update position
self.x = x
self.y = y
# Check for in-range targets after moving
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'move-attack', data
else:
return 'moved', {'pos': (x, y)}
def __repr__(self):
return '{}{} {}: {}/{} at ({},{})'.format('Dead ' if self.dead else '',
self.race.title(), self.name, self.hp,
self.attack, self.x, self.y)
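# Hedged sketch of the tie-break in perform_attack above: lowest hit points
# first, then reading order. The (hp, x, y) triples below are invented
# stand-ins for Unit attributes.
def _demo_attack_tiebreak():
    candidates = [(2, 1, 4), (2, 1, 2), (5, 0, 0)]
    hp, x, y = sorted(candidates)[0]
    return (x, y)   # -> (1, 2): of the two 2-hp targets, the leftmost is hit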
def read_arena():
arena = []
units = []
for x, line in enumerate(sys.stdin):
line = line.strip()
# Extract units from line
extracted = ''
for y, c in enumerate(line):
if c == 'G':
goblin = Unit('goblin', x, y, 200, 3)
units.append(goblin)
extracted += '.'
elif c == 'E':
elf = Unit('elf', x, y, 200, 3)
units.append(elf)
extracted += '.'
else:
extracted += c
arena.append(list(extracted))
return arena, units
def print_arena(arena, units):
arena_copy = copy.deepcopy(arena)
#Draw units
for unit in units:
if unit.dead:
continue
arena_copy[unit.x][unit.y] = unit
for row in arena_copy:
row_end = ''
for tile in row:
if isinstance(tile, Unit):
row_end += '{}({}), '.format(tile.name, tile.hp)
tile = tile.name
print(tile, end='')
print(' ', row_end)
def unit_at(x, y, units):
"""
Returns the unit present at x,y or None.
"""
for u in units:
if u.x == x and u.y == y:
return u
return None
def find_target_tile(src_x, src_y, tiles, arena, units):
arena_copy = copy.deepcopy(arena)
for u in units:
if u.dead:
continue
arena_copy[u.x][u.y] = '#'
arena_copy[src_x][src_y] = '.' #Set this back to open as it's our starting point
    # Initialize Dijkstra's algorithm
unvisited = set()
dist = {}
prev = {}
for x, row in enumerate(arena_copy):
for y, tile in enumerate(row):
if arena_copy[x][y] == '.':
dist[(x, y)] = float('inf')
prev[(x, y)] = None
unvisited.add((x, y))
# Set source to 0
dist[(src_x, src_y)] = 0
# Iterate through set
while unvisited:
# Find min
min_value = float('inf')
selected = None
for node in unvisited:
if dist[node] < min_value:
min_value = dist[node]
selected = node
        # Stop looping if no nodes are accessible
if selected is None:
break
unvisited.remove(selected)
node_x, node_y = selected
for x, y in [(node_x+1, node_y), (node_x, node_y+1), (node_x-1, node_y), (node_x, node_y-1)]:
if (x, y) in unvisited:
new_distance = dist[(node_x, node_y)] + 1
if new_distance < dist[(x, y)]:
dist[(x, y)] = new_distance
prev[(x, y)] = [selected]
elif new_distance == dist[(x, y)]:
prev[(x, y)].append(selected)
# Filter out unreachable and unconsidered values
distances = {k: v for k, v in dist.items() if k in tiles and v != float('inf')}
if len(distances) == 0:
return None, None
target = sorted([(v, k[0], k[1]) for k, v in distances.items()])[0]
target = (target[1], target[2]) #Extract x,y coords
return target, prev
def find_optimal_paths(source, target, graph):
# Because the graph gives the previous item, work backwards from target
def update_paths(source, current, path, graph, optimal_paths):
# If we've found the target, record the path
if source == current:
optimal_paths.append(path)
return
cur_x, cur_y = current
for x, y in graph[current]:
|
optimal_paths = []
update_paths(source, target, (), graph, optimal_paths)
return optimal_paths
def find_next_step(start, end, paths):
"""
Given initial and final (x,y) coordinates and a dictionary of partial paths, return the
    next step towards reaching the end point.
"""
def find_paths(start, current, distance, paths, choices):
"""
Given the start point, and the current point, builds a dictionary indicating the first step
and the minimum distance to the end using that step. Distance indicates the distance from
current to end.
"""
# Find all paths resulting in the minimum distance
options = []
min_distance = min(paths[current].values())
for option, distance in paths[current].items():
if distance == min_distance:
# If we find the beginning, break out
if option == start:
if option not in choices or choices[current] < distance + min_distance:
choices[current] = distance + min_distance
return
# Add to list of options
options.append(option)
# For each path, recursively find minimal paths
for option in options:
find_paths(start, option, min_distance, paths, choices)
choices = {}
find_paths(start, end, 0, paths, choices)
choices = sorted(choices.keys())
return choices[0]
def perform_round(arena, units):
"""
Performs a round of moving and combat, returns True if the full round is executed.
|
path = (current,) + path
update_paths(source, (x, y), path, graph, optimal_paths)
|
conditional_block
|
battle.py
|
'elf':
return 'goblin'
else:
raise ValueError('Invalid race')
def find_open_tiles(self, arena, units):
"""
Returns a list of all open tiles adjacent to the unit.
"""
tiles = []
for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:
if arena[x][y] == '.':
tiles.append((x, y))
return tiles
def find_adjacent_targets(self, arena, units):
"""
Returns a list of all adjacent targets in range.
"""
        targets = []
for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:
if arena[x][y] != '#':
other = unit_at(x, y, units)
if other is not None and other.race != self.race and not other.dead:
targets.append(other)
return targets
def find_in_range_tiles(self, arena, units):
        # Find tiles in range of an enemy
in_range_tiles = set() #Set to avoid duplicates
for u in units:
if u.race == self.race or u.dead:
continue
in_range_tiles.update(u.find_open_tiles(arena, units))
return in_range_tiles
def perform_attack(self, arena, targets):
# Sort targets by hit points, and then position
target = sorted(targets, key=lambda t: (t.hp, t.x, t.y))[0]
# Reduce hit points and check if dead
target.hp -= self.attack
if target.hp <= 0:
target.dead = True
return {'target': target}
def perform_turn(self, arena, units):
"""
Returns a result, and a dictionary containing any extra required info about what happened
during the turn.
"""
# Verify that unit hasn't died
if self.dead:
return 'dead', {}
# Verify that enemies are still present
targets = [u for u in units if u.race == self.enemy_race() and not u.dead]
if len(targets) == 0:
return 'no-targets', {}
# Check for in-range targets
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'attack', data
# Find reachable tiles
in_range = self.find_in_range_tiles(arena, units)
target, paths = find_target_tile(self.x, self.y, in_range, arena, units)
if target is None:
return 'no-reachable', {}
# If multiple paths exist, pick the starting point using reading order
optimal_paths = find_optimal_paths((self.x, self.y), target, paths)
choices = sorted([op[0] for op in optimal_paths])
x, y = choices[0]
# Update position
self.x = x
self.y = y
# Check for in-range targets after moving
targets = self.find_adjacent_targets(arena, units)
if len(targets) > 0:
data = self.perform_attack(arena, targets)
return 'move-attack', data
else:
return 'moved', {'pos': (x, y)}
def __repr__(self):
return '{}{} {}: {}/{} at ({},{})'.format('Dead ' if self.dead else '',
self.race.title(), self.name, self.hp,
self.attack, self.x, self.y)
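# Hedged sketch: when several optimal paths exist, perform_turn sorts the first
# tile of each path and steps onto the one earliest in reading order. The paths
# below are invented.
def _demo_first_step_choice():
    optimal_paths = [((2, 3), (2, 4)), ((1, 4), (2, 4)), ((1, 4), (1, 5))]
    choices = sorted([op[0] for op in optimal_paths])
    return choices[0]   # -> (1, 4)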
def read_arena():
arena = []
units = []
for x, line in enumerate(sys.stdin):
line = line.strip()
# Extract units from line
extracted = ''
for y, c in enumerate(line):
if c == 'G':
goblin = Unit('goblin', x, y, 200, 3)
units.append(goblin)
extracted += '.'
elif c == 'E':
elf = Unit('elf', x, y, 200, 3)
units.append(elf)
extracted += '.'
else:
extracted += c
arena.append(list(extracted))
return arena, units
def print_arena(arena, units):
arena_copy = copy.deepcopy(arena)
#Draw units
for unit in units:
if unit.dead:
continue
arena_copy[unit.x][unit.y] = unit
for row in arena_copy:
row_end = ''
for tile in row:
if isinstance(tile, Unit):
row_end += '{}({}), '.format(tile.name, tile.hp)
tile = tile.name
print(tile, end='')
print(' ', row_end)
def unit_at(x, y, units):
"""
Returns the unit present at x,y or None.
"""
for u in units:
if u.x == x and u.y == y:
return u
return None
def find_target_tile(src_x, src_y, tiles, arena, units):
arena_copy = copy.deepcopy(arena)
for u in units:
if u.dead:
continue
arena_copy[u.x][u.y] = '#'
arena_copy[src_x][src_y] = '.' #Set this back to open as it's our starting point
    # Initialize Dijkstra's algorithm
unvisited = set()
dist = {}
prev = {}
for x, row in enumerate(arena_copy):
for y, tile in enumerate(row):
if arena_copy[x][y] == '.':
dist[(x, y)] = float('inf')
prev[(x, y)] = None
unvisited.add((x, y))
# Set source to 0
dist[(src_x, src_y)] = 0
# Iterate through set
while unvisited:
# Find min
min_value = float('inf')
selected = None
for node in unvisited:
if dist[node] < min_value:
min_value = dist[node]
selected = node
        # Stop looping if no nodes are accessible
if selected is None:
break
unvisited.remove(selected)
node_x, node_y = selected
for x, y in [(node_x+1, node_y), (node_x, node_y+1), (node_x-1, node_y), (node_x, node_y-1)]:
if (x, y) in unvisited:
new_distance = dist[(node_x, node_y)] + 1
if new_distance < dist[(x, y)]:
dist[(x, y)] = new_distance
prev[(x, y)] = [selected]
elif new_distance == dist[(x, y)]:
prev[(x, y)].append(selected)
# Filter out unreachable and unconsidered values
distances = {k: v for k, v in dist.items() if k in tiles and v != float('inf')}
if len(distances) == 0:
return None, None
target = sorted([(v, k[0], k[1]) for k, v in distances.items()])[0]
target = (target[1], target[2]) #Extract x,y coords
return target, prev
def find_optimal_paths(source, target, graph):
# Because the graph gives the previous item, work backwards from target
def update_paths(source, current, path, graph, optimal_paths):
# If we've found the target, record the path
if source == current:
optimal_paths.append(path)
return
        for x, y in graph[current]:
            # Build a fresh tuple per branch so sibling paths don't accumulate each other's steps
            new_path = (current,) + path
            update_paths(source, (x, y), new_path, graph, optimal_paths)
optimal_paths = []
update_paths(source, target, (), graph, optimal_paths)
return optimal_paths
def find_next_step(start, end, paths):
"""
Given initial and final (x,y) coordinates and a dictionary of partial paths, return the
    next step towards reaching the end point.
"""
def
|
(start, current, distance, paths, choices):
"""
Given the start point, and the current point, builds a dictionary indicating the first step
and the minimum distance to the end using that step. Distance indicates the distance from
current to end.
"""
# Find all paths resulting in the minimum distance
options = []
min_distance = min(paths[current].values())
for option, distance in paths[current].items():
if distance == min_distance:
# If we find the beginning, break out
if option == start:
if option not in choices or choices[current] < distance + min_distance:
choices[current] = distance + min_distance
return
# Add to list of options
options.append(option)
# For each path, recursively find minimal paths
for option in options:
find_paths(start, option, min_distance, paths, choices)
choices = {}
find_paths(start, end, 0, paths, choices)
choices = sorted(choices.keys())
return choices[0]
def perform_round(arena, units):
"""
Performs a round of moving and combat, returns True if the full round is executed.
|
find_paths
|
identifier_name
|
lib.rs
|
(&mut self) {
let p = self.data;
if p != 0 as *mut _ {
self.data = 0 as *mut _;
let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) };
}
}
}
impl Bitmap {
/// Create a new bitmap, returning None if the data can't be allocated or
/// if the width of each slice can't fit in a usize. entries * width must
/// not overflow usize.
pub fn new(entries: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entry and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries {
None
} else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
                // how many bits do we need to read from this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF
>>
(7 - in_byte_offset)
<<
(8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
    /// Length in number of bitslices contained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize {
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
}
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
    /// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be freed
    /// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
    /// operation should really be avoided. The destructor will call `Vec`'s destructor on the
    /// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
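// Hedged usage sketch (not part of the original crate): pack 3-bit values into
// a Bitmap and read one back, using only the API defined above.
#[allow(dead_code)]
fn demo_bitmap_roundtrip() -> Option<usize> {
    let mut bm = Bitmap::new(8, 3)?;   // eight 3-bit slices, zero-initialized
    assert!(bm.set(5, 0b101));         // store 5 in slot 5
    bm.get(5)                          // -> Some(0b101)
}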
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i
|
drop
|
identifier_name
|
|
lib.rs
|
: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entry and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries
|
else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
                // how many bits do we need to read from this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF
>>
(7 - in_byte_offset)
<<
(8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
    /// Length in number of bitslices contained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize {
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
}
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
    /// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be freed
    /// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
    /// operation should really be avoided. The destructor will call `Vec`'s destructor on the
    /// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
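// Hedged sketch: &Bitmap implements IntoIterator (above), so the slices can be
// summed with a plain for loop; `next` returns None once the index passes the
// number of entries, which ends the loop.
#[allow(dead_code)]
fn demo_sum_slices(bm: &Bitmap) -> usize {
    let mut total = 0;
    for slice in bm {
        total += slice;
    }
    total
}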
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), Some(0));
assert_eq!(bm.get(9), Some(0));
assert_eq!(bm.get(10), None);
}
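    // Added example (hedged, not in the original test suite): `set` must reject
    // values that don't fit in `width` bits as well as out-of-bounds indices.
    #[test]
    fn set_rejects_invalid_input() {
        let mut bm = Bitmap::new(4, 3).unwrap();
        assert!(!bm.set(0, 0b1000));      // 4 bits can't fit in a 3-bit slice
        assert_eq!(bm.get(0), Some(0));   // slot is untouched
        assert!(!bm.set(4, 0));           // index 4 is out of bounds
    }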
#[test]
fn get_n_bits() {
macro_rules! t {
( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => (
{
$(
assert_eq!(get_n_bits_at($e, $n, $s), $g);
)*
}
|
{
None
}
|
conditional_block
|