file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
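Each row below is one fill-in-the-middle (FIM) example: a source file is split into a `prefix`, a masked `middle`, and a `suffix`, and `fim_type` records what kind of span was masked (`identifier_name`, `conditional_block`, `identifier_body`, or `random_line_split`). A minimal sketch of how a row recomposes into the original source; the toy row here is illustrative, not an actual entry from the table:

```python
# Minimal sketch: recompose a FIM example into the original source text.
def recompose(example: dict) -> str:
    """Concatenate the three spans back together."""
    return example["prefix"] + example["middle"] + example["suffix"]

# Hypothetical row for illustration (not taken from the table below):
row = {
    "file_name": "example.go",
    "prefix": "func (u *SizeCacheFS) ",
    "middle": "OpenFile",           # the masked span
    "suffix": "(name string) {}",
    "fim_type": "identifier_name",  # kind of span that was masked
}
assert recompose(row) == "func (u *SizeCacheFS) OpenFile(name string) {}"
```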
size_cache_fs.go
|
()
// if the file is already tracked, subtract its old size first
node := u.files.GetByKey(info.Path)
if node != nil {
file := node.Value.(*cacheFile)
u.currSize -= file.Size
}
// while we can pop files and the cache is full..
for u.currSize > 0 && u.currSize+info.Size > u.cacheSize {
node := u.files.PopMin()
// node CAN'T be nil as currSize > 0
file := node.Value.(*cacheFile)
if err := u.cache.Remove(file.Path); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.currSize -= file.Size
path := filepath.Dir(file.Path)
for path != "" && path != "." && path != "/" {
f, err := u.cache.Open(path)
if err != nil {
// Open failed, so there is no handle to close
return fmt.Errorf("error opening parent directory: %v", err)
}
dirs, err := f.Readdir(-1)
if err != nil {
_ = f.Close()
return fmt.Errorf("error reading parent directory: %v", err)
}
_ = f.Close()
if len(dirs) == 0 {
if err := u.cache.Remove(path); err != nil {
return fmt.Errorf("error removing parent directory: %v", err)
}
path = filepath.Dir(path)
} else {
break
}
}
}
u.files.AddOrUpdate(info.Path, sortedset.SCORE(info.LastAccessTime), info)
u.currSize += info.Size
return nil
}
func (u *SizeCacheFS) removeFromCache(name string) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node != nil {
// If we remove file that is open, the file will re-add itself in
// the cache on close. This is expected behavior as a removed open file
// will re-appear on close ?
u.files.Remove(name)
info := node.Value.(*cacheFile)
u.currSize -= info.Size
}
}
/*
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO: this check may run even when it shouldn't?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
func (u *SizeCacheFS) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO: this check may run even when it shouldn't?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if os.IsNotExist(err) {
return nil, err
}
return nil, fmt.Errorf("error opening base file: %v", err)
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// If the cache was stale and the file is already in the sorted set, AddOrUpdate will simply refresh it.
// Build the cache-entry info for regular files
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS)
|
OpenFile
|
identifier_name
|
|
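The size_cache_fs.go row above maintains a size-bounded cache: entries live in a sorted set scored by last-access time, and addToCache evicts the least-recently-used files until the incoming file fits, pruning now-empty parent directories as it goes. A minimal Python sketch of that eviction policy, using an OrderedDict in place of the sorted set; the names here are illustrative, not the Go API:

```python
from collections import OrderedDict

class SizeCappedLRU:
    """Evict least-recently-used files until a new file fits the size budget."""

    def __init__(self, capacity_bytes: int):
        self.capacity = capacity_bytes
        self.curr_size = 0
        self.files = OrderedDict()  # path -> size, oldest access first

    def add(self, path: str, size: int) -> list:
        # If the file is already tracked, drop its old size first,
        # mirroring the GetByKey check in addToCache.
        if path in self.files:
            self.curr_size -= self.files.pop(path)
        evicted = []
        # Pop the least-recently-used entries while the new file would overflow.
        while self.files and self.curr_size + size > self.capacity:
            old_path, old_size = self.files.popitem(last=False)
            self.curr_size -= old_size
            evicted.append(old_path)  # the caller would also remove it from disk
        self.files[path] = size
        self.curr_size += size
        return evicted

cache = SizeCappedLRU(capacity_bytes=100)
cache.add("/a", 60)
cache.add("/b", 30)
print(cache.add("/c", 50))  # the oldest entry is evicted -> ['/a']
```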
ikey.js
|
8-4360
*/
/**
* Allows you to resort elements within a sortable container by using the keyboard. Requires
* the Draggables, Droppables and Sortables interface plugins. The container and each item inside
* the container must have an ID. Sortables are especially useful for lists.
*
* @see Plugins/Interface/Draggable
* @see Plugins/Interface/Droppable
* @see Plugins/Interface/Sortable
* @author Joshua Ryan
* @author Colin Clark
* @name Keyable
* @cat Plugins/Interface
* @option String accept The class name for items inside the container (mandatory)
* @option String activeclass The class for the container when one of its items has started to move
* @option String hoverclass The class for the container when an acceptable item is inside it
* @option String helperclass The helper is used to point to the place where the item will be
* moved. This is the class for the helper.
* @option Function onChange Callback that gets called when the sortable list changed. It takes
* an array of serialized elements
* @option String axis Use 'horizontally' or 'vertically' to constrain dragging to an axis
* @option DOMElement domNode The container of keyable items
* @option Function onStart Callback function triggered when the dragging starts
* @option Function onStop Callback function triggered when the dragging stops
* @example $('ul').Keyable(
* {
* accept : 'sortableitem',
* activeclass : 'sortableactive',
* hoverclass : 'sortablehover',
* helperclass : 'sorthelper',
* domNode : $('ul').get(0)
* }
* )
*/
jQuery.iKey = {
// The node focused on for incoming actions
focusedNode : null,
// Sets the mode of keying vs mousing
keying : false,
/**
* Process down arrow events
*/
handleDownAction : function (isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).next();
var wrap;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.firstElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl)
|
else if (!wrap) {
jQuery(target).after(jQuery.iKey.focusedNode);
}
else {
jQuery(target).before(jQuery.iKey.focusedNode);
}
},
/**
* Process up arrow events
*/
handleUpAction : function(isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).prev();
var wrap = false;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.lastElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) {
this.focusNode(target, event);
}
else if (!wrap) {
jQuery(target).before(jQuery.iKey.focusedNode);
}
else {
jQuery(target).after(jQuery.iKey.focusedNode);
}
},
/**
* 'Focus' on a node to be the focus of future actions
*/
focusNode : function(aNode, event) {
// deselect any previously focused node
jQuery.iKey.deselectFocusedNode(event);
jQuery.iKey.focusedNode = aNode;
jQuery(aNode).removeClass(event.data.accept);
jQuery(aNode).addClass(event.data.activeclass);
},
/**
* 'Select' the focused node, similar to a user 'clicking' on an item for drag and drop
*/
selectFocusedNode : function(event) {
//if we are not in keyboard sort mode, set things up
if (jQuery.iKey.focusedNode == null) {
jQuery.iKey.focusedNode = jQuery('.' + event.data.accept, event.data.domNode).get(0);
}
if (jQuery.iKey.keying == true) {
jQuery.iKey.focusNode(jQuery.iKey.focusedNode, event);
}
},
/**
* Deselect the current selected node, similar to releasing the mouse button
*/
deselectFocusedNode : function(event) {
if (jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.accept);
jQuery.iKey.focusedNode = null;
}
},
/**
* End keyboard mode, for use when users switches to using the mouse for DnD type activities
*/
endKeyboardMode : function(event) {
if (jQuery.iKey.keying) {
jQuery.iKey.deselectFocusedNode(event);
jQuery(document)
.unbind('mousemove', jQuery.iKey.endKeyboardMode)
.unbind('mousedown', jQuery.iKey.endKeyboardMode);
}
jQuery.iKey.keying = false;
},
/**
* Change state from that of selecting a node to being ready to actually move the current node
*/
handleKeyDown : function (event) {
if (event.ctrlKey && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.hoverclass);
}
},
/**
* Change state from that of being ready to move a node to that of selecting a node from the list
*/
handleKeyUp : function (event) {
var kCode = event.keyCode || event.which;
if (kCode == 17 && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.activeclass);
}
},
/**
* Handle arrow key presses, could be either moving through the list to select a node or moving a node
*/
handleArrowKeyPress : function (event) {
var kCode = event.keyCode || event.which;
// Pass any input other than arrow keys on to other event handlers
if (kCode < 37 || kCode > 40) {
return true;
}
// Listen for mouse actions to end keyboard mode
if (!jQuery.iKey.keying) {
jQuery.iKey.keying = true;
jQuery(document)
.bind('mousemove', event.data, jQuery.iKey.endKeyboardMode)
.bind('mousedown', event.data, jQuery.iKey.endKeyboardMode);
}
// Ensure a focused node
if (!jQuery.iKey.focusedNode) {
jQuery.iKey.selectFocusedNode(event);
}
// down arrow
if (kCode == 40 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// up arrow
else if (kCode == 38 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
// right arrow
else if (kCode == 39 && event.data.axis == 'horizontally') {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// left arrow
else if (kCode == 37 && event.data.axis == 'horizontally') {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
else {
return true;
}
return false;
},
/**
* Gets the first Element of a nodes child node list
*/
firstElement : function(node) {
var child = node.firstChild;
while (child && !this.isElement(child)) {
child = child.nextSibling;
}
return child;
},
/**
* Gets the last Element of a nodes child node list
*/
lastElement : function(node) {
var child = node.lastChild;
while (child && !this.isElement(child)) {
child = child.previousSibling;
}
return child;
},
/**
* tests if the passed in node is an Element
*/
isElement : function(node) {
return node && node.nodeType == 1;
},
/**
* Builds the Keyable with the set parameters and binds all needed events.
*
* Gets called whenever a Keyable is created.
*/
build : function(o) {
if (!o) {
o = {};
}
return this.each(
function() {
if (this.isKeyable || !jQuery.iUtil) {
return;
}
var el = this;
var dhe = jQuery(this);
if (jQuery.browser.msie) {
dhe.each(
function() {
this.unselectable = "on";
}
);
}
else {
dhe.css('-moz-user-select', 'none');
dhe.css('user-select', 'none');
dhe.css('-khtml-user-select', 'none');
|
{
this.focusNode(target, event);
}
|
conditional_block
|
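The ikey.js row above moves the focused list item with the arrow keys; with Ctrl held it relocates the item, wrapping to the opposite end of the list when it runs past the first or last element. A small Python sketch of that wrap-around move, stripped of the jQuery plumbing:

```python
def move_item(items: list, index: int, delta: int) -> int:
    """Move items[index] by delta positions, wrapping around the ends.

    Returns the item's new index; this mirrors handleDownAction and
    handleUpAction with Ctrl held (move) rather than plain arrows (focus).
    """
    item = items.pop(index)
    new_index = (index + delta) % (len(items) + 1)
    items.insert(new_index, item)
    return new_index

items = ["a", "b", "c"]
move_item(items, 2, +1)   # "c" runs past the end and wraps to the front
print(items)              # ['c', 'a', 'b']
```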
ikey.js
|
78-4360
*/
/**
* Allows you to resort elements within a sortable container by using the keyboard. Requires
* the Draggables, Droppables and Sortables interface plugins. The container and each item inside
* the container must have an ID. Sortables are especially useful for lists.
*
* @see Plugins/Interface/Draggable
* @see Plugins/Interface/Droppable
* @see Plugins/Interface/Sortable
* @author Joshua Ryan
* @author Colin Clark
* @name Keyable
* @cat Plugins/Interface
* @option String accept The class name for items inside the container (mandatory)
* @option String activeclass The class for the container when one of its items has started to move
* @option String hoverclass The class for the container when an acceptable item is inside it
* @option String helperclass The helper is used to point to the place where the item will be
* moved. This is the class for the helper.
* @option Function onChange Callback that gets called when the sortable list changed. It takes
* an array of serialized elements
* @option String axis Use 'horizontally' or 'vertically' to constrain dragging to an axis
* @option DOMElement domNode The container of keyable items
* @option Function onStart Callback function triggered when the dragging starts
* @option Function onStop Callback function triggered when the dragging stops
* @example $('ul').Keyable(
* {
* accept : 'sortableitem',
* activeclass : 'sortableactive',
* hoverclass : 'sortablehover',
* helperclass : 'sorthelper',
* domNode : $('ul').get(0)
* }
* )
*/
jQuery.iKey = {
// The node focused on for incoming actions
focusedNode : null,
// Sets the mode of keying vs mousing
keying : false,
/**
* Process down arrow events
*/
handleDownAction : function (isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).next();
var wrap;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.firstElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) {
this.focusNode(target, event);
}
else if (!wrap) {
jQuery(target).after(jQuery.iKey.focusedNode);
}
else {
jQuery(target).before(jQuery.iKey.focusedNode);
}
},
/**
* Process up arrow events
*/
handleUpAction : function(isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).prev();
var wrap = false;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.lastElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) {
this.focusNode(target, event);
}
else if (!wrap) {
jQuery(target).before(jQuery.iKey.focusedNode);
}
else {
jQuery(target).after(jQuery.iKey.focusedNode);
}
},
/**
* 'Focus' on a node to be the focus of future actions
*/
focusNode : function(aNode, event) {
// deselect any previously focused node
jQuery.iKey.deselectFocusedNode(event);
jQuery.iKey.focusedNode = aNode;
jQuery(aNode).removeClass(event.data.accept);
jQuery(aNode).addClass(event.data.activeclass);
},
/**
* 'Select' the focused node, similar to a user 'clicking' on an item for drag and drop
*/
selectFocusedNode : function(event) {
//if we are not in keyboard sort mode, set things up
if (jQuery.iKey.focusedNode == null) {
jQuery.iKey.focusedNode = jQuery('.' + event.data.accept, event.data.domNode).get(0);
}
if (jQuery.iKey.keying == true) {
jQuery.iKey.focusNode(jQuery.iKey.focusedNode, event);
}
},
/**
* Deselect the current selected node, similar to releasing the mouse button
*/
deselectFocusedNode : function(event) {
if (jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.accept);
jQuery.iKey.focusedNode = null;
}
},
/**
* End keyboard mode, for use when users switches to using the mouse for DnD type activities
*/
endKeyboardMode : function(event) {
if (jQuery.iKey.keying) {
jQuery.iKey.deselectFocusedNode(event);
jQuery(document)
.unbind('mousemove', jQuery.iKey.endKeyboardMode)
.unbind('mousedown', jQuery.iKey.endKeyboardMode);
}
jQuery.iKey.keying = false;
},
/**
* Change state from that of selecting a node to being ready to actually move the current node
*/
handleKeyDown : function (event) {
if (event.ctrlKey && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.hoverclass);
}
},
/**
* Change state from that of being ready to move a node to that of selecting a node from the list
*/
handleKeyUp : function (event) {
var kCode = event.keyCode || event.which;
if (kCode == 17 && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.activeclass);
}
},
/**
* Handle arrow key presses, could be either moving through the list to select a node or moving a node
*/
handleArrowKeyPress : function (event) {
var kCode = event.keyCode || event.which;
// Pass any input other than arrow keys on to other event handlers
if (kCode < 37 || kCode > 40) {
return true;
}
// Listen for mouse actions to end keyboard mode
if (!jQuery.iKey.keying) {
jQuery.iKey.keying = true;
jQuery(document)
.bind('mousemove', event.data, jQuery.iKey.endKeyboardMode)
.bind('mousedown', event.data, jQuery.iKey.endKeyboardMode);
}
// Ensure a focused node
if (!jQuery.iKey.focusedNode) {
jQuery.iKey.selectFocusedNode(event);
}
// down arrow
if (kCode == 40 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// up arrow
else if (kCode == 38 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
// right arrow
else if (kCode == 39 && event.data.axis == 'horizontally') {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// left arrow
else if (kCode == 37 && event.data.axis == 'horizontally') {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
else {
return true;
}
return false;
},
/**
* Gets the first Element of a nodes child node list
*/
firstElement : function(node) {
var child = node.firstChild;
while (child && !this.isElement(child)) {
child = child.nextSibling;
}
return child;
},
/**
* Gets the last Element of a nodes child node list
*/
lastElement : function(node) {
var child = node.lastChild;
while (child && !this.isElement(child)) {
child = child.previousSibling;
}
return child;
},
/**
* tests if the passed in node is an Element
*/
isElement : function(node) {
return node && node.nodeType == 1;
},
/**
* Builds the Keyable with the set parameters and binds all needed events.
*
* Gets called whenever a Keyable is created.
*/
build : function(o) {
if (!o) {
o = {};
}
return this.each(
function() {
if (this.isKeyable || !jQuery.iUtil) {
return;
}
var el = this;
var dhe = jQuery(this);
if (jQuery.browser.msie) {
dhe.each(
function() {
this.unselectable = "on";
}
);
|
dhe.css('-khtml-user-select', 'none');
|
}
else {
dhe.css('-moz-user-select', 'none');
dhe.css('user-select', 'none');
|
random_line_split
|
b_get_data.py
|
pd.set_option('display.width', 200)
def get_data(file):
'file = full path of the original file. Build the dataframes from the files.'
df = pd.read_csv(file, sep=';')
df = time_transform(df)
df = timestamp_round_down(df)
df = df.bfill()
return df
def logs_cols_uniform(df):
'From a list of dataframes, standardize the column names related to time and Turbine_ID'
df = df.rename(columns={'TimeDetected': 'Timestamp', 'UnitTitle':'Turbine_ID'})
return df
def timestamp_round_down(df, time_column='Timestamp'):
'Round the time intervals down to the previous 10 minutes'
df[time_column] = df.apply(lambda x: x[time_column] - datetime.timedelta(minutes=x[time_column].minute % 10,seconds=x[time_column].second, microseconds=x[time_column].microsecond),axis=1)
return df
def time_transform(df, time_column='Timestamp'):
'Convert the time-related columns to the datetime data type'
df[time_column] = pd.to_datetime(df[time_column]).dt.tz_convert(None)
# df[time_column] = df[time_column]
return df
def component(component, col):
pair_comp_col=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Return dataframes per component type
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Standardize the failure-log table and transform with get_dummies'''
# Columns to keep
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# get_dummies transformation
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'Merge with the failures table and return it already one-hot encoded'
# filter the component
fail_df = fail_df[fail_df['Component'] == component]
# apply get_dummies
fail_df = fail_dummies(fail_df)
# do the merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# fill the gaps with zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na_by_turbine(df, turbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Function to aggregate the data-frame by the desired time granularity: period _Dia_ (day) or _Hora_ (hour)'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Unsupported time granularity')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new
|
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss, roc_curve, precision_score, recall_score,confusion_matrix,f1_score,fbeta_score, make_scorer
|
random_line_split
|
|
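timestamp_round_down in the b_get_data.py row above floors every timestamp to the previous 10-minute boundary via a row-wise apply. pandas can do the same floor vectorised; a short sketch assuming a datetime64 `Timestamp` column:

```python
import pandas as pd

df = pd.DataFrame({
    "Timestamp": pd.to_datetime(["2020-01-01 00:07:42", "2020-01-01 00:19:59"]),
})

# Vectorised equivalent of timestamp_round_down: floor to 10-minute bins.
df["Timestamp"] = df["Timestamp"].dt.floor("10min")
print(df["Timestamp"].tolist())
# [Timestamp('2020-01-01 00:00:00'), Timestamp('2020-01-01 00:10:00')]
```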
b_get_data.py
|
=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Return dataframes per component type
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Standardize the failure-log table and transform with get_dummies'''
# Columns to keep
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# get_dummies transformation
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'Merge with the failures table and return it already one-hot encoded'
# filter the component
fail_df = fail_df[fail_df['Component'] == component]
# apply get_dummies
fail_df = fail_dummies(fail_df)
# do the merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# fill the gaps with zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na
|
rbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Function to aggregate the data-frame by the desired time granularity: period _Dia_ (day) or _Hora_ (hour)'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Unsupported time granularity')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two
|
_by_turbine(df, tu
|
identifier_name
|
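add_features in the row above loops over turbines and concatenates per-turbine rolling means and standard deviations of the sensor columns. The same features can be computed with a grouped rolling window; a sketch under the assumption of one numeric sensor column and a `Turbine_ID` key:

```python
import pandas as pd

df = pd.DataFrame({
    "Turbine_ID": ["T1", "T1", "T1", "T2", "T2"],
    "Gen_Temp":   [50.0, 52.0, 51.0, 60.0, 61.0],
})

ws = 15  # same fixed window as rolling_win_size
rolled = df.groupby("Turbine_ID")["Gen_Temp"].rolling(ws, min_periods=1)
# Align the grouped result back onto the original row index.
df["Gen_Temp_av"] = rolled.mean().reset_index(level=0, drop=True)
df["Gen_Temp_sd"] = rolled.std().reset_index(level=0, drop=True).fillna(0)
print(df)
```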
b_get_data.py
|
=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Return dataframes per component type
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Standardize the failure-log table and transform with get_dummies'''
# Columns to keep
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# get_dummies transformation
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'Merge with the failures table and return it already one-hot encoded'
# filter the component
fail_df = fail_df[fail_df['Component'] == component]
# apply get_dummies
fail_df = fail_dummies(fail_df)
# do the merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# fill the gaps with zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na_by_turbine(df, turbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Function to aggregate the data-frame by the desired time granularity: period _Dia_ (day) or _Hora_ (hour)'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Unsupported time granularity')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_col
|
or_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two
|
s.append(i)
sens
|
conditional_block
|
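Failure_Time_Horizon in the rows above flags records whose time-to-failure falls between 2 days and a chosen horizon, and aplic_var_target applies it to build the target column. A tiny usage sketch of that labeling step:

```python
import pandas as pd

def failure_time_horizon(days: float, period: int) -> int:
    """1 if the failure is between 2 days and `period` days away, else 0."""
    return 1 if 2 <= days <= period else 0

df = pd.DataFrame({"TTF": [0, 1, 2, 30, 60, 90]})
period = 60
# Same labeling as aplic_var_target: a '60_days' early-warning target column.
df[f"{period}_days"] = df["TTF"].apply(lambda d: failure_time_horizon(d, period))
print(df)
#    TTF  60_days
# 0    0        0
# 1    1        0
# 2    2        1
# 3   30        1
# 4   60        1
# 5   90        0
```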
b_get_data.py
|
_col=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Return dataframes per component type
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Standardize the failure-log table and transform with get_dummies'''
# Columns to keep
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# get_dummies transformation
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'Merge with the failures table and return it already one-hot encoded'
# filter the component
fail_df = fail_df[fail_df['Component'] == component]
# apply get_dummies
fail_df = fail_dummies(fail_df)
# do the merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# fill the gaps with zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na_by_turbine(df, turbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Function
|
features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two
|
to aggregate the data-frame by the desired time granularity: period _Dia_ (day) or _Hora_ (hour)'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Unsupported time granularity')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_
|
identifier_body
|
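group_por_frequency in the rows above reduces the 10-minute signals to daily or hourly granularity before modeling. A compact sketch of the daily-mean path (period='Dia', strategy='mean'), assuming a datetime `Timestamp` column:

```python
import pandas as pd

df = pd.DataFrame({
    "Turbine_ID": ["T1", "T1", "T2"],
    "Timestamp": pd.to_datetime(
        ["2020-01-01 00:00", "2020-01-01 12:00", "2020-01-01 06:00"]
    ),
    "Gen_Temp": [50.0, 54.0, 60.0],
})

# One row per turbine per calendar day, averaging the numeric columns.
df["Date"] = df["Timestamp"].dt.date
daily = df.groupby(["Turbine_ID", "Date"]).mean(numeric_only=True).reset_index()
daily["Date"] = pd.to_datetime(daily["Date"])
print(daily)
```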
main.go
|
.Values["user"]
fmt.Println("val",val)
user, ok := val.(User)
if !ok {
fmt.Println("did not find user session")
return User{Authenticated: false}
}
fmt.Println(val.(User))
fmt.Println("user.username",user.Username)
return user
}
//if basic auth headers exists, proceed to pass request to services
//if not, check if session user is authenticated
func AuthMiddleware(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, _ := r.BasicAuth()
fmt.Println(r.BasicAuth())
if username=="" || !checkUsernameAndPassword(username, password) {
//w.Header().Set("WWW-Authenticate", `Basic realm="Please enter your username and password for this site"`)
//w.WriteHeader(401)
//w.Write([]byte("Unauthorised.\n"))
//w.Write([]byte("checking session instead.\n"))
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println(user)
if auth := user.Authenticated; !auth {
session.AddFlash("You don't have access!")
err = session.Save(r, w)
if err != nil {
fmt.Printf("You don't have access!")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("You don't have access!")
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
fmt.Println("authenticated via user session")
handler(w, r)
return
}
fmt.Println("authenticated via basic auth")
handler(w, r)
}
}
func checkUsernameAndPassword(username, password string) bool {
fmt.Println("[checkUsernameAndPassword]")
correctPassword := retrieveUserPassword(username)
return password == correctPassword
}
func index(w http.ResponseWriter, r *http.Request)
|
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
if err != nil && err != sql.ErrNoRows {
// log the error
fmt.Fprintf(w, "" )
return
}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
if err != nil && err != sql.ErrNoRows {
// log the error
}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for
|
{
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
}
|
identifier_body
|
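AuthMiddleware in the main.go rows admits a request when valid basic-auth credentials are supplied, falls back to the session user otherwise, and redirects to /forbidden when neither check passes. A language-neutral Python sketch of that decision order; the request shape and the credential/session helpers are stand-ins, not the Go API:

```python
def auth_middleware(handler, check_credentials, get_session_user):
    """Wrap a handler: try basic auth first, then fall back to the session."""
    def wrapped(request):
        username, password = request.get("basic_auth", (None, None))
        if username and check_credentials(username, password):
            return handler(request)            # authenticated via basic auth
        user = get_session_user(request)
        if user is not None and user.get("authenticated"):
            return handler(request)            # authenticated via user session
        return ("302 Found", "/forbidden")     # no access: redirect
    return wrapped

# Toy usage with stand-in checks:
wrapped = auth_middleware(
    handler=lambda req: ("200 OK", "hello"),
    check_credentials=lambda u, p: (u, p) == ("alice", "secret"),
    get_session_user=lambda req: req.get("session_user"),
)
print(wrapped({"basic_auth": ("alice", "secret")}))        # ('200 OK', 'hello')
print(wrapped({"session_user": {"authenticated": True}}))  # ('200 OK', 'hello')
print(wrapped({}))                                         # ('302 Found', '/forbidden')
```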
main.go
|
//w.Header().Set("WWW-Authenticate", `Basic realm="Please enter your username and password for this site"`)
//w.WriteHeader(401)
//w.Write([]byte("Unauthorised.\n"))
//w.Write([]byte("checking session instead.\n"))
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println(user)
if auth := user.Authenticated; !auth {
session.AddFlash("You don't have access!")
err = session.Save(r, w)
if err != nil {
fmt.Printf("You don't have access!")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("You don't have access!")
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
fmt.Println("authenticated via user session")
handler(w, r)
return
}
fmt.Println("authenticated via basic auth")
handler(w, r)
}
}
func checkUsernameAndPassword(username, password string) bool {
fmt.Println("[checkUsernameAndPassword]")
correctPassword := retrieveUserPassword(username)
return password == correctPassword
}
func index(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
}
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
		if err != nil {
			fmt.Println(err)
			fmt.Fprintf(w, "tag not found")
			return
		}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
	if err != nil && err != sql.ErrNoRows {
		fmt.Println(err)
		http.Error(w, "could not list bookmarks", http.StatusInternalServerError)
		return
	}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
		if err != nil && err != sql.ErrNoRows {
			fmt.Println(err)
		}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for key := range bookmarkResults {
bookmarkj := bookmarkJson{Url: key, TagName: bookmarkResults[key]}
data=append(data, bookmarkj )
}
bJsondata, _ := json.Marshal(data)
jsonData := string(bJsondata)
fmt.Println(jsonData)
	fmt.Fprint(w, jsonData)
}
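// The commented-out filter above spliced the raw "tags" query parameter
// straight into the SQL text. A safer sketch (an illustration, not the
// original code) binds every tag as a placeholder and lets SQL filter instead
// of pruning the map afterwards; baseQuery is a hypothetical stand-in for the
// SELECT above with bookmarks.userid bound as ? rather than Sprintf'd in:
//
//	tagsList := strings.Split(tags, ",")
//	placeholders := strings.TrimRight(strings.Repeat("?,", len(tagsList)), ",")
//	args := []interface{}{userId}
//	for _, t := range tagsList {
//		args = append(args, t)
//	}
//	rows, err := database.Query(baseQuery+" AND tags.name IN ("+placeholders+")", args...)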
func retrieveUserPassword(username string) string {
var dbUser UserData
err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
Scan(&dbUser.Password,&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserPassword] user not found in DB",username)
panic(err)
|
}
return dbUser.Password
}
|
random_line_split
|
|
main.go
|
correctPassword
}
func index(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
}
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
		if err != nil {
			fmt.Println(err)
			fmt.Fprintf(w, "tag not found")
			return
		}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
	if err != nil && err != sql.ErrNoRows {
		fmt.Println(err)
		http.Error(w, "could not list bookmarks", http.StatusInternalServerError)
		return
	}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
		if err != nil && err != sql.ErrNoRows {
			fmt.Println(err)
		}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for key := range bookmarkResults {
bookmarkj := bookmarkJson{Url: key, TagName: bookmarkResults[key]}
data=append(data, bookmarkj )
}
bJsondata, _ := json.Marshal(data)
jsonData := string(bJsondata)
fmt.Println(jsonData)
	fmt.Fprint(w, jsonData)
}
func retrieveUserPassword(username string) string {
var dbUser UserData
err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
Scan(&dbUser.Password,&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserPassword] user not found in DB",username)
panic(err)
}
return dbUser.Password
}
func retrieveUserId(username string) int {
var dbUser UserData
err := database.QueryRow("SELECT id FROM users WHERE username=$1", username).
Scan(&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserId] user not found in DB",username)
panic(err)
}
return dbUser.Id
}
func retrieveBookmarkId(url string, userId int) int {
var bookmarkId int
err := database.QueryRow("SELECT id FROM bookmarks WHERE url=$1 AND userid=$2",url,userId).Scan(&bookmarkId)
	if err != nil {
		fmt.Println(err)
	}
return bookmarkId
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
func getUserName (w http.ResponseWriter,r *http.Request) string {
var username string
username, _, ok := r.BasicAuth()
if !ok {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return ""
}
fmt.Println("username retrieved from session")
user := getUser(session)
username = user.Username
return username
}
fmt.Println("username retrieved from basic auth")
return username
}
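// getUser is referenced above but defined outside this excerpt. A minimal
// sketch of what it plausibly does with gorilla/sessions (an assumption, not
// the original definition); note the User type must be gob-registered so it
// survives the round trip through the cookie store:
//
//	func getUser(s *sessions.Session) User {
//		val := s.Values["user"]
//		user, ok := val.(User)
//		if !ok {
//			return User{Authenticated: false}
//		}
//		return user
//	}
//
//	// and once at startup: gob.Register(User{})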
func
|
initDB
|
identifier_name
|
|
main.go
|
(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("You don't have access!")
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
fmt.Println("authenticated via user session")
handler(w, r)
return
}
fmt.Println("authenticated via basic auth")
handler(w, r)
}
}
func checkUsernameAndPassword(username, password string) bool {
fmt.Println("[checkUsernameAndPassword]")
correctPassword := retrieveUserPassword(username)
return password == correctPassword
}
func index(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
}
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
		if err != nil {
			fmt.Println(err)
			fmt.Fprintf(w, "tag not found")
			return
		}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
	if err != nil && err != sql.ErrNoRows {
		fmt.Println(err)
		http.Error(w, "could not list bookmarks", http.StatusInternalServerError)
		return
	}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
		if err != nil && err != sql.ErrNoRows {
			fmt.Println(err)
		}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for key := range bookmarkResults {
bookmarkj := bookmarkJson{Url: key, TagName: bookmarkResults[key]}
data=append(data, bookmarkj )
}
bJsondata, _ := json.Marshal(data)
jsonData := string(bJsondata)
fmt.Println(jsonData)
	fmt.Fprint(w, jsonData)
}
func retrieveUserPassword(username string) string {
var dbUser UserData
err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
Scan(&dbUser.Password,&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserPassword] user not found in DB",username)
panic(err)
}
return dbUser.Password
}
func retrieveUserId(username string) int {
var dbUser UserData
err := database.QueryRow("SELECT id FROM users WHERE username=$1", username).
Scan(&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserId] user not found in DB",username)
panic(err)
}
return dbUser.Id
}
func retrieveBookmarkId(url string, userId int) int {
var bookmarkId int
err := database.QueryRow("SELECT id FROM bookmarks WHERE url=$1 AND userid=$2",url,userId).Scan(&bookmarkId)
	if err != nil {
		fmt.Println(err)
	}
return bookmarkId
}
func stringInSlice(a string, list []string) bool {
for _, b := range list
|
{
if b == a {
return true
}
}
|
conditional_block
|
|
alpha_beta.py
|
pe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale): #never sow back into the starting cup
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            #check whether this move would starve the opponent
            #if not, capture the seeds as usual
            if (self.nourrirAdversaire(joueur,coupeFinale)):
                while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur]+=self.plateau[idCoupe]
                    self.plateau[idCoupe]=0
                    idCoupe = self.coupePrecedente(idCoupe)
            #if the move would starve the opponent:
            # no seeds are captured, so nothing is done
        self.tour=(self.tour+1)%2
    #count the number of seeds left on the board
    def grainesRestantes(self):
        return np.sum(self.plateau)
    #count the number of seeds left on the board in joueur's cups
    def grainesRestantesJoueur(self,joueur):
        if joueur==0:
            return np.sum(self.plateau[0:self.nCoupes])
        else:
            return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
    #determine whether, if joueur ends their move on cup coupeFinale,
    #their opponent will be starved or not
    #i.e. check that at least one seed will remain on the opponent's side
    def nourrirAdversaire(self,joueur,coupeFinale):
        adversaire = (joueur+1)%2
        #start the check at the opponent's cup farthest away (going clockwise)
        admissible = False
        idCoupe = (self.nCoupes*(adversaire+1))-1
        while (self.joueurCoupe(idCoupe)==adversaire):
            #if idCoupe comes after coupeFinale and still holds seeds, the move is admissible
            if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
                admissible=True
            #if joueur cannot capture cup idCoupe, the move is admissible
            elif (not self.coupePrenable(idCoupe)):
                admissible=True
            idCoupe=self.coupePrecedente(idCoupe)
        #True if the move is admissible under the "feeding" rule
        return admissible
    #cups that joueur can play in order to feed the opponent
    def coupesAdmissiblesNourrir(self,joueur):
        coupesAdmissibles = []
        #start with the cup closest to the opponent (counter-clockwise)
        idCoupe = (self.nCoupes*(joueur+1))-1
        distance = 1
        while (self.joueurCoupe(idCoupe)==joueur):
            #if idCoupe holds at least as many seeds as its distance to the opponent's cups,
            #the move is admissible: at least one seed will feed the opponent
            if self.plateau[idCoupe]>=distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance +=1
        return coupesAdmissibles
    def coupesAdmissibles(self,joueur):
        adversaire = (joueur+1)%2
        if self.grainesRestantesJoueur(adversaire) == 0:
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            #if no move can be played to feed the opponent
            if len(coupesAdmissibles)==0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes,dtype=int)
                self.finie = True
                #game over
        #otherwise every non-empty cup is admissible
        else :
            coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
        return coupesAdmissibles
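    # Quick illustration (not part of the original file): on a fresh board every
    # non-empty cup on your own row is playable, and the "feeding" rule above
    # only kicks in once the opponent's row is empty.
    #
    #   t = terrainDeJeu(6, profondeur=4)   # 6 cups per side, 4 seeds each
    #   t.coupesAdmissibles(0)              # -> [0, 1, 2, 3, 4, 5]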
    def tourDuJoueur(self):
        joueur = 0
        #if the opponent has no seeds left, feeding them is mandatory
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("Player 1's turn. Enter the number of the cup to play:")
        nCoupe = int(input())
        #print("coupesAdmissibles",coupesAdmissibles)
        while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
            #the cup does not exist, or the move is not admissible
            print("Invalid cup. Enter the number of the cup to play.")
            nCoupe = int(input())
        self.deplacer(joueur,nCoupe)
        self.jouer()
    def tourOrdi(self):
        joueur = 1
        self.profondeur = 0
        self.value = self.alphabeta(joueur,-np.inf,np.inf)
        for idCoupe in self.arbreFils.keys():
            print("coupe = ",idCoupe," : value = ",self.arbreFils[idCoupe].value)
        for idCoupe in self.arbreFils.keys():
            if self.value==self.arbreFils[idCoupe].value:
                self.deplacer(joueur,idCoupe)
                break
        self.jouer()
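    # self.alphabeta is called above but its body lies outside this excerpt.
    # For reference only, a generic alpha-beta skeleton (an assumption, NOT the
    # authors' elided implementation) would mirror the minimax below with two
    # extra bounds and a cutoff; the real version must also record its children
    # in self.arbreFils, since tourOrdi reads them back:
    #
    #   def alphabeta(self, joueurMaximisant, alpha, beta):
    #       if self.profondeur == self.profondeurMinimax or self.finie:
    #           return self.evaluation(joueurMaximisant)
    #       for idCoupe in self.coupesAdmissibles(self.tour):
    #           fils = self.clone()
    #           fils.profondeur = self.profondeur + 1
    #           fils.deplacer(fils.tour, idCoupe)
    #           v = fils.alphabeta(joueurMaximisant, alpha, beta)
    #           if self.tour == joueurMaximisant:
    #               alpha = max(alpha, v)
    #           else:
    #               beta = min(beta, v)
    #           if alpha >= beta:
    #               break                     # prune the remaining siblings
    #       return alpha if self.tour == joueurMaximisant else beta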
    def partieFinie(self):
        #True when the board holds no seeds, or a player has passed the winning score
        limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
        self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
        return self.finie
    def afficherPlateau(self):
        print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] reverses the list
    def afficherScores(self):
        print("score J1........."+str(self.scores[0]))
        print("score MinMax....."+str(self.scores[1]))
    def evaluation(self,joueur):
        adversaire = (joueur+1)%2
        return self.scores[joueur]-self.scores[adversaire]
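    # The evaluation is the bare score difference. A slightly richer heuristic
    # (an optional variation, not in the original) could also weigh the seeds
    # still sitting on each side of the board:
    #
    #   def evaluationPonderee(self, joueur):
    #       adversaire = (joueur + 1) % 2
    #       return (self.scores[joueur] - self.scores[adversaire]
    #               + 0.1 * (self.grainesRestantesJoueur(joueur)
    #                        - self.grainesRestantesJoueur(adversaire)))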
    #Main game function
    def jouer(self):
        if (not self.partieFinie()) :
            self.afficherPlateau()
            self.afficherScores()
            if (self.tour==0):
                self.tourDuJoueur()
            else:
                self.tourOrdi()
            print("\n")
        else:
            self.afficherPlateau()
            self.afficherScores()
            print("Game over!")
    #no longer really used, the minimax code was folded into the alphabeta function
    def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = the player whose score we want to maximize (0 or 1)
        #Here we recursively simulate hypothetical game positions (the A.I. in effect reads the future n=profondeur turns ahead)
        self.arbreFils = {}
        #compute the possible moves
|
        #if no move is possible, this call also ends the game
        coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie): #base case
            self.value = self.evaluation(joueurMaximisant)
            return self.value
        if self.tour==joueurMaximisant:
            fctComparaison = max
            self.value = - np.inf
        else:
            fctComparaison = min
            self.value = np.inf
        #iterate over all the possible moves
        for idCoupe in coupesPossibles:
            fils=self.clone()
            fils.profondeur=self.profondeur+1
            fils.deplacer(fils.tour,idCoupe)
            fils.value = fils.minimax(joueurMaximisant)
|
identifier_body
|
|
alpha_beta.py
|
/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code by Léo and Paul
#Problem: the game can loop forever at the end of a match (often when 2 seeds are left in symmetric positions)
# -> look into the "partieFinie" function, and maybe try to fold it into the recursive minimax algorithm..
#Problem: the tree structure is overly complicated: (*)
#the tree is built from a list using the following rule:
#the nCoupes children of the element at index k sit at indices k*nCoupes + l, with l ranging from 1 to nCoupes
#One can then check (with a quick drawing, for instance) that there is a natural bijection between the tree structure and the list (or array) of the required size
class terrainDeJeu:
    # [11] [10] [9] [8] [7] [6] // computer's row (player 1)
    # [0]  [1]  [2]  [3]  [4]  [5] // human player's row (player 0)
    def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructor
        self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
        self.nGrainesParCoupelleInit = nGrainesParCoupelle
        self.nCoupes = nCoupes
        self.scores = [0,0] # scores[0] = player 0's score...
        self.tour = 0
        self.finie = False
        self.profondeurMinimax = profondeur
        self.arbreFils = {}
    #clone the board so a move can be simulated later on
    def clone(self):
        clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
        clone.plateau= self.plateau.copy()
        clone.scores = self.scores.copy()
        clone.tour = self.tour
        clone.finie = self.finie
        return clone
    #return the id of the cup following idCoupe on the board (following = counter-clockwise)
    def coupeSuivante(self,idCoupe):
        return (idCoupe + 1)%(2*self.nCoupes)
    #return the id of the cup preceding idCoupe on the board (preceding = clockwise)
    def coupePrecedente(self,idCoupe):
        return (idCoupe - 1)%(2*self.nCoupes)
    #return the player (0 or 1) who owns cup idCoupe
    def joueurCoupe(self,idCoupe):
        return 0 if idCoupe < self.nCoupes else 1
    #return whether cup idCoupe can be captured (it holds 2 or 3 seeds)
    def coupePrenable(self,idCoupe):
        return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
    def deplacer(self,joueur,idCoupe):
        coupeInitiale = idCoupe #id of the chosen cup
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0): #sow the seeds taken from the starting cup
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale): #never sow back into the starting cup
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            #check whether this move would starve the opponent
            #if not, capture the seeds as usual
            if (self.nourrirAdversaire(joueur,coupeFinale)):
                while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur]+=self.plateau[idCoupe]
                    self.plateau[idCoupe]=0
                    idCoupe = self.coupePrecedente(idCoupe)
            #if the move would starve the opponent:
            # no seeds are captured, so nothing is done
        self.tour=(self.tour+1)%2
    #count the number of seeds left on the board
    def grainesRestantes(self):
        return np.sum(self.plateau)
    #count the number of seeds left on the board in joueur's cups
    def grainesRestantesJoueur(self,joueur):
        if joueur==0:
            return np.sum(self.plateau[0:self.nCoupes])
        else:
            return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
    #determine whether, if joueur ends their move on cup coupeFinale,
    #their opponent will be starved or not
    #i.e. check that at least one seed will remain on the opponent's side
    def nourrirAdversaire(self,joueur,coupeFinale):
        adversaire = (joueur+1)%2
        #start the check at the opponent's cup farthest away (going clockwise)
        admissible = False
        idCoupe = (self.nCoupes*(adversaire+1))-1
        while (self.joueurCoupe(idCoupe)==adversaire):
            #if idCoupe comes after coupeFinale and still holds seeds, the move is admissible
            if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
                admissible=True
            #if joueur cannot capture cup idCoupe, the move is admissible
            elif (not self.coupePrenable(idCoupe)):
                admissible=True
            idCoupe=self.coupePrecedente(idCoupe)
        #True if the move is admissible under the "feeding" rule
        return admissible
    #cups that joueur can play in order to feed the opponent
    def coupesAdmissiblesNourrir(self,joueur):
        coupesAdmissibles = []
        #start with the cup closest to the opponent (counter-clockwise)
        idCoupe = (self.nCoupes*(joueur+1))-1
        distance = 1
        while (self.joueurCoupe(idCoupe)==joueur):
            #if idCoupe holds at least as many seeds as its distance to the opponent's cups,
            #the move is admissible: at least one seed will feed the opponent
            if self.plateau[idCoupe]>=distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance +=1
        return coupesAdmissibles
    def coupesAdmissibles(self,joueur):
        adversaire = (joueur+1)%2
        if self.grainesRestantesJoueur(adversaire) == 0:
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            #if no move can be played to feed the opponent
            if len(coupesAdmissibles)==0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes,dtype=int)
                self.finie = True
                #game over
        #otherwise every non-empty cup is admissible
        else :
            coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
        return coupesAdmissibles
    def tourDuJoueur(self):
        joueur = 0
        #if the opponent has no seeds left, feeding them is mandatory
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("Player 1's turn. Enter the number of the cup to play:")
        nCoupe = int(input())
        #print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (n
|
coupesAdmissibles = []
|
random_line_split
|
alpha_beta.py
|
.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code by Léo and Paul
#Problem: the game can loop forever at the end of a match (often when 2 seeds are left in symmetric positions)
# -> look into the "partieFinie" function, and maybe try to fold it into the recursive minimax algorithm..
#Problem: the tree structure is overly complicated: (*)
#the tree is built from a list using the following rule:
#the nCoupes children of the element at index k sit at indices k*nCoupes + l, with l ranging from 1 to nCoupes
#One can then check (with a quick drawing, for instance) that there is a natural bijection between the tree structure and the list (or array) of the required size
class terrainDeJeu:
    # [11] [10] [9] [8] [7] [6] // computer's row (player 1)
    # [0]  [1]  [2]  [3]  [4]  [5] // human player's row (player 0)
    def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructor
        self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
        self.nGrainesParCoupelleInit = nGrainesParCoupelle
        self.nCoupes = nCoupes
        self.scores = [0,0] # scores[0] = player 0's score...
        self.tour = 0
        self.finie = False
        self.profondeurMinimax = profondeur
        self.arbreFils = {}
    #clone the board so a move can be simulated later on
    def clone(self):
        clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
        clone.plateau= self.plateau.copy()
        clone.scores = self.scores.copy()
        clone.tour = self.tour
        clone.finie = self.finie
        return clone
    #return the id of the cup following idCoupe on the board (following = counter-clockwise)
    def coupeSuivante(self,idCoupe):
        return (idCoupe + 1)%(2*self.nCoupes)
    #return the id of the cup preceding idCoupe on the board (preceding = clockwise)
    def coupePrecedente(self,idCoupe):
        return (idCoupe - 1)%(2*self.nCoupes)
    #return the player (0 or 1) who owns cup idCoupe
    def joueurCoupe(self,idCoupe):
        return 0 if idCoupe < self.nCoupes else 1
    #return whether cup idCoupe can be captured (it holds 2 or 3 seeds)
    def coupePrenable(self,idCoupe):
        return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
    def deplacer(self,joueur,idCoupe):
        coupeInitiale = idCoupe #id of the chosen cup
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0): #sow the seeds taken from the starting cup
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale): #never sow back into the starting cup
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            #check whether this move would starve the opponent
            #if not, capture the seeds as usual
            if (self.nourrirAdversaire(joueur,coupeFinale)):
                while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur]+=self.plateau[idCoupe]
                    self.plateau[idCoupe]=0
                    idCoupe = self.coupePrecedente(idCoupe)
            #if the move would starve the opponent:
            # no seeds are captured, so nothing is done
        self.tour=(self.tour+1)%2
    #count the number of seeds left on the board
    def grainesRestantes(self):
        return np.sum(self.plateau)
    #count the number of seeds left on the board in joueur's cups
    def grainesRestantesJoueur(self,joueur):
        if joueur==0:
            return np.sum(self.plateau[0:self.nCoupes])
        else:
            return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
    #determine whether, if joueur ends their move on cup coupeFinale,
    #their opponent will be starved or not
    #i.e. check that at least one seed will remain on the opponent's side
    def nourrirAdversaire(self,joueur,coupeFinale):
        adversaire = (joueur+1)%2
        #start the check at the opponent's cup farthest away (going clockwise)
        admissible = False
        idCoupe = (self.nCoupes*(adversaire+1))-1
        while (self.joueurCoupe(idCoupe)==adversaire):
            #if idCoupe comes after coupeFinale and still holds seeds, the move is admissible
            if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
                admissible=True
            #if joueur cannot capture cup idCoupe, the move is admissible
            elif (not self.coupePrenable(idCoupe)):
                admissible=True
            idCoupe=self.coupePrecedente(idCoupe)
        #True if the move is admissible under the "feeding" rule
        return admissible
    #cups that joueur can play in order to feed the opponent
    def coupesAdmissiblesNourrir(self,joueur):
        coupesAdmissibles = []
        #start with the cup closest to the opponent (counter-clockwise)
        idCoupe = (self.nCoupes*(joueur+1))-1
        distance = 1
        while (self.joueurCoupe(idCoupe)==joueur):
            #if idCoupe holds at least as many seeds as its distance to the opponent's cups,
            #the move is admissible: at least one seed will feed the opponent
            if self.plateau[idCoupe]>=distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance +=1
        return coupesAdmissibles
    def coupesAdmissibles(self,joueur):
        adversaire = (joueur+1)%2
|
.grainesRestantesJoueur(adversaire) == 0:
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            #if no move can be played to feed the opponent
            if len(coupesAdmissibles)==0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes,dtype=int)
                self.finie = True
                #game over
        #otherwise every non-empty cup is admissible
        else :
            coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
        return coupesAdmissibles
    def tourDuJoueur(self):
        joueur = 0
        #if the opponent has no seeds left, feeding them is mandatory
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("Player 1's turn. Enter the number of the cup to play:")
        nCoupe = int(input())
        #print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCou
|
if self
|
identifier_name
|
alpha_beta.py
|
/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code by Léo and Paul
#Problem: the game can loop forever at the end of a match (often when 2 seeds are left in symmetric positions)
# -> look into the "partieFinie" function, and maybe try to fold it into the recursive minimax algorithm..
#Problem: the tree structure is overly complicated: (*)
#the tree is built from a list using the following rule:
#the nCoupes children of the element at index k sit at indices k*nCoupes + l, with l ranging from 1 to nCoupes
#One can then check (with a quick drawing, for instance) that there is a natural bijection between the tree structure and the list (or array) of the required size
class terrainDeJeu:
    # [11] [10] [9] [8] [7] [6] // computer's row (player 1)
    # [0]  [1]  [2]  [3]  [4]  [5] // human player's row (player 0)
    def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructor
        self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
        self.nGrainesParCoupelleInit = nGrainesParCoupelle
        self.nCoupes = nCoupes
        self.scores = [0,0] # scores[0] = player 0's score...
        self.tour = 0
        self.finie = False
        self.profondeurMinimax = profondeur
        self.arbreFils = {}
    #clone the board so a move can be simulated later on
    def clone(self):
        clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
        clone.plateau= self.plateau.copy()
        clone.scores = self.scores.copy()
        clone.tour = self.tour
        clone.finie = self.finie
        return clone
    #return the id of the cup following idCoupe on the board (following = counter-clockwise)
    def coupeSuivante(self,idCoupe):
        return (idCoupe + 1)%(2*self.nCoupes)
    #return the id of the cup preceding idCoupe on the board (preceding = clockwise)
    def coupePrecedente(self,idCoupe):
        return (idCoupe - 1)%(2*self.nCoupes)
    #return the player (0 or 1) who owns cup idCoupe
    def joueurCoupe(self,idCoupe):
        return 0 if idCoupe < self.nCoupes else 1
    #return whether cup idCoupe can be captured (it holds 2 or 3 seeds)
    def coupePrenable(self,idCoupe):
        return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
    def deplacer(self,joueur,idCoupe):
        coupeInitiale = idCoupe #id of the chosen cup
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0): #sow the seeds taken from the starting cup
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale): #never sow back into the starting cup
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            #check whether this move would starve the opponent
            #if not, capture the seeds as usual
            if (self.nourrirAdversaire(joueur,coupeFinale)):
|
mpte le nombre de graines restantes sur le plateau
    def grainesRestantes(self):
        return np.sum(self.plateau)
    #count the number of seeds left on the board in joueur's cups
    def grainesRestantesJoueur(self,joueur):
        if joueur==0:
            return np.sum(self.plateau[0:self.nCoupes])
        else:
            return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
    #determine whether, if joueur ends their move on cup coupeFinale,
    #their opponent will be starved or not
    #i.e. check that at least one seed will remain on the opponent's side
    def nourrirAdversaire(self,joueur,coupeFinale):
        adversaire = (joueur+1)%2
        #start the check at the opponent's cup farthest away (going clockwise)
        admissible = False
        idCoupe = (self.nCoupes*(adversaire+1))-1
        while (self.joueurCoupe(idCoupe)==adversaire):
            #if idCoupe comes after coupeFinale and still holds seeds, the move is admissible
            if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
                admissible=True
            #if joueur cannot capture cup idCoupe, the move is admissible
            elif (not self.coupePrenable(idCoupe)):
                admissible=True
            idCoupe=self.coupePrecedente(idCoupe)
        #True if the move is admissible under the "feeding" rule
        return admissible
    #cups that joueur can play in order to feed the opponent
    def coupesAdmissiblesNourrir(self,joueur):
        coupesAdmissibles = []
        #start with the cup closest to the opponent (counter-clockwise)
        idCoupe = (self.nCoupes*(joueur+1))-1
        distance = 1
        while (self.joueurCoupe(idCoupe)==joueur):
            #if idCoupe holds at least as many seeds as its distance to the opponent's cups,
            #the move is admissible: at least one seed will feed the opponent
            if self.plateau[idCoupe]>=distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance +=1
        return coupesAdmissibles
    def coupesAdmissibles(self,joueur):
        adversaire = (joueur+1)%2
        if self.grainesRestantesJoueur(adversaire) == 0:
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            #if no move can be played to feed the opponent
            if len(coupesAdmissibles)==0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes,dtype=int)
                self.finie = True
                #game over
        #otherwise every non-empty cup is admissible
        else :
            coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
        return coupesAdmissibles
    def tourDuJoueur(self):
        joueur = 0
        #if the opponent has no seeds left, feeding them is mandatory
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("Player 1's turn. Enter the number of the cup to play:")
        nCoupe = int(input())
        #print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (
|
while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
self.scores[joueur]+=self.plateau[idCoupe]
self.plateau[idCoupe]=0
idCoupe = self.coupePrecedente(idCoupe)
            #if the move would starve the opponent:
            # no seeds are captured, so nothing is done
self.tour=(self.tour+1)%2
#On co
|
conditional_block
|
minilab.py
|
ed_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
        except OSError:  # the process may have vanished between glob and samefile
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
|
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
            os.mkdir(ssh_dir, 0o700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
self.copy_auth_keys()
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
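# For a host "h1" with lab_dir=/tmp/minilab and root_fs=/srv/rootfs, the calls
# above expand to roughly the following (illustrative paths, not from the
# original file):
#
#   mount -t overlay overlay \
#       -o lowerdir=/srv/rootfs,upperdir=/tmp/minilab/h1/upper,workdir=/tmp/minilab/h1/work \
#       /tmp/minilab/h1/merged
#   mount -t proc  proc  /tmp/minilab/h1/merged/proc
#   mount -t sysfs sysfs /tmp/minilab/h1/merged/sys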
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
    config = yaml.safe_load(cfg)
cfg.close()
return config
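# A config file consumed by load_config might look like this (a sketch inferred
# from the keys read elsewhere in this module; every path and name is made up):
#
#   ml_dir: /tmp/minilab
#   rootfs: /srv/minilab/rootfs
#   ssh:
#     tmpl_dir: templates
#     template: sshd_config.tmpl
#     authorized_keys: /root/.ssh/authorized_keys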
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
if not host['name'] in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
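# The sshd template loaded above is rendered by ManageableHost.create_ssh_config
# with pid_file and host_dir. A minimal template that would satisfy it could
# read as follows (a guess for illustration, not the project's shipped file):
#
#   PidFile {{ pid_file }}
#   HostKey {{ host_dir }}/etc/ssh/ssh_host_rsa_key
#   PermitRootLogin yes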
def setup_switches(net, topology):
switches = {}
info('*** Adding switches\n')
# first loop : create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches,
|
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
|
random_line_split
|
minilab.py
|
ed_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
        except OSError:  # the process may have vanished between glob and samefile
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
            os.mkdir(ssh_dir, 0o700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
self.copy_auth_keys()
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
    config = yaml.safe_load(cfg)
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
if not host['name'] in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def setup_switches(net, topology):
switches = {}
info('*** Adding switches\n')
# first loop : create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
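# Illustrative topology['switches'] entry (sketch; values hypothetical):
#
#   {'name': 's1',
#    'dpid': '0000000000000001',
#    'protocols': ['OpenFlow10', 'OpenFlow13'],
#    'links': ['s2']}
#
# 'protocols' is optional (defaults to OpenFlow10); inter-switch 'links' are
# added in a second pass so that both endpoints already exist.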
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
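# Illustrative topology['nat'] block (sketch; values hypothetical):
#
#   {'switch': {'name': 's1'},
#    'node': {'name': 'nat0', 'ip': '10.0.0.254/24'},
#    'ext_iface': 'eth0'}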
def fix_switch_protocols(topology):
|
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches,
|
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
|
identifier_body
|
minilab.py
|
ed_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
        except OSError:  # the process may exit between glob() and samefile()
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
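    # How the match works (sketch): each process exposes its network namespace
    # as /proc/<pid>/ns/net, and two processes share a namespace exactly when
    # those symlinks resolve to the same inode, which os.path.samefile checks.
    # The '[1-9]*' glob skips non-PID entries such as /proc/self and /proc/sys.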
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
            os.mkdir(ssh_dir, 0o700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
self.copy_auth_keys()
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
    config = yaml.safe_load(cfg)  # safe_load: the config is plain data, no arbitrary object construction
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
        if host['name'] not in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def
|
(net, topology):
switches = {}
info('*** Adding switches\n')
    # first loop: create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches,
|
setup_switches
|
identifier_name
|
minilab.py
|
ed_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
        except OSError:  # the process may exit between glob() and samefile()
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
            os.mkdir(ssh_dir, 0o700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
|
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
    config = yaml.safe_load(cfg)  # safe_load: the config is plain data, no arbitrary object construction
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
        if host['name'] not in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def setup_switches(net, topology):
switches = {}
info('*** Adding switches\n')
    # first loop: create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches,
|
self.copy_auth_keys()
|
conditional_block
|
get_samples.py
|
.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: retu
|
0: return m
    dp = [[0] * (n + 1) for _ in range(m + 1)]  # initialize the dp table and boundary conditions
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
    for i in range(1, m + 1):  # fill the dp table
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == word2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
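# Output format (sketch, assuming char_cut yields single characters): each
# line is every label, then the question's character tokens, e.g. a
# hypothetical sample tagged 数据分析 would become
#   __label__数据分析 如 何 做 数 据 分 析
# which is the multi-label input format the fastText CLI consumes.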
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compile(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
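# Reproducibility note (sketch): random.shuffle is unseeded, so every run
# yields a different 60/20/20 train/test/val split. If stable splits matter,
# one option is to seed first, e.g.:
#   random.seed(42)  # hypothetical fixed seed
#   random.shuffle(res)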
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营
|
rn n
if n ==
|
identifier_name
|
get_samples.py
|
.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: return n
if n == 0: return m
    dp = [[0] * (n + 1) for _ in range(m + 1)]  # initialize the dp table and boundary conditions
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
    for i in range(1, m + 1):  # fill the dp table
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == word2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
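# Worked example (sketch): min_edit_distance('求职', '求职应聘') fills a 3x5 dp
# table and returns dp[2][4] == 2, i.e. two insertions turn '求职' into
# '求职应聘'; when either word is empty the early returns give the other
# word's length.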
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
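# Worked example (sketch, assuming char_cut splits a string into single
# characters): for '数据分析' vs '数据挖掘' the intersection is {数, 据} and the
# union has 6 characters, so this variant returns 2 * 2 / 6 ~= 0.67. Note the
# denominator is |union|, not the classic Dice |A| + |B|; disjoint words
# return None so callers can skip them.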
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with
|
le(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营
|
open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compi
|
identifier_body
|
get_samples.py
|
.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: return n
if n == 0: return m
    dp = [[0] * (n + 1) for _ in range(m + 1)]  # initialize the dp table and boundary conditions
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
    for i in range(1, m + 1):  # fill the dp table
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == wor
|
))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compile(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营")),
|
d2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2
|
conditional_block
|
get_samples.py
|
id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compile(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营")), ('排版美工', re.compile(u"排版|美工")), ('SQL调优', re.compile(u"SQL|调优")),
('数值策划', re.compile(u"数值|策划")), ('求职应聘', re.compile(u"求职|应聘")), ('广告算法', re.compile(u"广告算法")),
('选题策划', re.compile(u"选题|策划")), ('游戏运营', re.compile(u"游戏运营")), ('需求分析', re.compile(u"需求分析")),
('文案编辑', re.compile(u"文案|编辑")), ('运营', re.compile(u"运营")), ('推荐算法', re.compile(u"推荐算法|推荐")),
('宣传推广', re.compile(u"宣传|推广")), ('电子商务', re.compile(u"电子|商务")), ('沟通能力', re.compile(u"沟通能力|沟通")),
('物料制作', re.compile(u"物料|制作")), ('交互设计', re.compile(u"交互|设计")), ('APP', re.compile(u"APP")),
('爬虫', re.compile(u"爬虫")), ('渠道增长', re.compile(u"渠道增长")), ('资源谈判', re.compile(u"资源谈判|谈判")),
('数据采集', re.compile(u"数据采集")), ('产品', re.compile(u"产品")), ('机器学习', re.compile(u"机器学习|深度学习|人工智能")),
('视频编解码', re.compile(u"视频|编解码")), ('游戏策划', re.compile(u"游戏策划")),]
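    # Matching note (sketch): patterns are tried in list order and the first
    # hit wins, so earlier entries take priority. For a hypothetical question
    # like '如何做数据挖掘项目', the ('项目管理', 项目|管理) entry precedes
    # ('数据挖掘', ...) in the list, so the sample is labeled 项目管理 rather
    # than 数据挖掘.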
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
'''
for e in labels:
if e[1] not in label2qes: label2qes[e[1]] = set()
label2qes[e[1]].add(question)
if e[1] not in label_cnt: label_cnt[e[1]] = 0
label_cnt[e[1]] += 1
'''
for e1, e2 in re_patten:
if e2.search(question):
res.append(e1 + '\t' + question + '\n'); break
aa=e2.search(question)
a=1
'''
sorted_label2qes = sorted(label2qes.items(), key=lambda d:len(d[1]), reverse=True)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for e in sorted_label2qes:
if len(e[1]) < 1000: continue
for e1 in e[1]: res.append(e[0] + '\t' + e1 + '\n')
'''
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
a=1
if __name__ == '__main__':
|
#min_edit_distance('求职', '求职应聘')
#get_sample('./data/q1.res', './data/sen_class_corp666.md')
|
random_line_split
|
|
foreign.rs
|
py to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// set current receive account name
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
	// Proof address will be the onion address (Dalek public key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
|
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
	// this means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
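	// Truncate any user-supplied message to USER_MESSAGE_MAX_LEN before it is
	// stored with the transaction.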
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
		// Slate can be 'compact' - that means some of the data may be gone
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message
|
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
|
identifier_body
|
foreign.rs
|
apply to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// set current receive account name
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
	// Proof address will be the onion address (Dalek public key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
|
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
	// this means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
		// Slate can be 'compact' - that means some of the data may be gone
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received
|
})
}
/// Build a coinbase transaction
|
random_line_split
|
foreign.rs
|
py to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// set current receive account name
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn
|
() -> Result<VersionInfo, Error> {
	// Proof address will be the onion address (Dalek public key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
	// this means it's not MQS, so we need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// The slate can be 'compact', meaning some of its data may be missing
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
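// Restore the initial secret key/nonce so repopulate_tx can rebuild the
// transaction data that a compact slate omits.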
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message
|
check_version
|
identifier_name
|
foreign.rs
|
py to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
/// Set the current receive account name
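/// For example, `set_receive_account("default".to_string())` would make the
/// "default" account the target for incoming funds (illustrative).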
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
	// Proof address will be the onion address (Dalek public key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// A message means the slate did not arrive via MQS, so print it here
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Make sure the same slate is not processed multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate
|
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive a tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// The slate can be 'compact', meaning some of its data may be missing
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap
|
{
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
|
conditional_block
|
inventory.pb.go
|
}
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32
|
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9,
|
{
if m != nil {
return m.Minor
}
return 0
}
|
identifier_body
|
inventory.pb.go
|
func (m *Inventory_KEYS) String() string { return proto.CompactTextString(m) }
func (*Inventory_KEYS) ProtoMessage() {}
func (*Inventory_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{0}
}
func (m *Inventory_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory_KEYS.Unmarshal(m, b)
}
func (m *Inventory_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory_KEYS.Marshal(b, m, deterministic)
}
func (m *Inventory_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory_KEYS.Merge(m, src)
}
func (m *Inventory_KEYS) XXX_Size() int {
return xxx_messageInfo_Inventory_KEYS.Size(m)
}
func (m *Inventory_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory_KEYS proto.InternalMessageInfo
func (m *Inventory_KEYS) GetNodeName() string {
if m != nil {
return m.NodeName
}
return ""
}
type PkgGroup struct {
DeviceName string `protobuf:"bytes,1,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgGroup) Reset() { *m = PkgGroup{} }
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
|
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory_KEYS) Reset() { *m = Inventory_KEYS{} }
|
random_line_split
|
|
inventory.pb.go
|
}
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil
|
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9,
|
{
return m.Version
}
|
conditional_block
|
inventory.pb.go
|
{} }
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup)
|
(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9,
|
XXX_Merge
|
identifier_name
|
Serigne.py
|
".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDARD
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy function log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
|
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidths=1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
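# Two recurring patterns below (illustrative):
#   all_data['col'].fillna(all_data['col'].mode()[0])                      -> fill with the most frequent value
#   all_data.groupby(key)['col'].transform(lambda x: x.fillna(x.median())) -> fill with the per-group median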
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the street frontage of a house is most likely similar to that of other houses in its neighborhood,
# we can fill in missing values with the median LotFrontage of the neighborhood.
# Group by neighborhood and fill each missing value with the median LotFrontage of that neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero, since these houses have no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA.
# Since the house with 'NoSeWa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value; as with Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again, both Exterior 1 & 2 have only one missing value. We will just substitute the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : NA most likely means no building
|
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
|
random_line_split
|
Serigne.py
|
warnings.warn = ignore_warn # ignore annoying warnings (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Intro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' column since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDARD
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy function log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidths=1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the street frontage of a house is most likely similar to that of other houses in its neighborhood,
# we can fill in missing values with the median LotFrontage of the neighborhood.
# Group by neighborhood and fill each missing value with the median LotFrontage of that neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero, since these houses have no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "No
|
pass
|
identifier_body
|
|
Serigne.py
|
(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDARD
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy function log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidths=1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the street frontage of a house is most likely similar to that of other houses in its neighborhood,
# we can fill in missing values with the median LotFrontage of the neighborhood.
# Group by neighborhood and fill each missing value with the median LotFrontage of that neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero, since these houses have no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
|
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA.
# Since the house with 'NoSeWa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value; as with Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again, both Exterior 1 & 2 have only one missing value. We will just substitute the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : NA most likely means no building
|
all_data[col] = all_data[col].fillna(0)
|
conditional_block
|
Serigne.py
|
(*args, **kwargs):
pass
warnings.warn = ignore_warn # ignore annoying warnings (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Intro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDARD
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy function log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the street area connected to a house is most likely similar to that of the other houses in its neighborhood,
# we can fill in missing values by the median LotFrontage of the neighborhood.
# Group by neighborhood and fill each missing value with the median LotFrontage of that neighborhood (a standalone sketch follows after this row)
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (since no garage means no cars in it)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values most likely mean zero, i.e. no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are
|
ignore_warn
|
identifier_name
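The LotFrontage imputation in the row above groups by neighborhood and fills each gap with that group's median. A standalone pandas sketch of the groupby/transform trick, using an invented toy frame:

import pandas as pd
import numpy as np

# Toy frame: LotFrontage is missing for one house in each neighborhood.
df = pd.DataFrame({
    'Neighborhood': ['A', 'A', 'A', 'B', 'B'],
    'LotFrontage': [60.0, 80.0, np.nan, 30.0, np.nan],
})

# transform() keeps the original index, so the per-group median can be
# assigned straight back; each NaN becomes its own neighborhood's median.
df['LotFrontage'] = (
    df.groupby('Neighborhood')['LotFrontage']
      .transform(lambda x: x.fillna(x.median()))
)

print(df)  # the 'A' gap becomes 70.0, the 'B' gap becomes 30.0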
|
|
main.py
|
# If we wanted to modify clazz[3] it wouldn't work, because a tuple can't be changed
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
return title['colspan']
def get_class_start(title):
    # title contains something like "* streda : 14-15 *" - we only need the first number,
    # because the lesson's length comes from another attribute (see get_lessons_of_class, colspan)
    # this returns a list like ['streda', '14-15']; we take index 1, i.e. the numbers 14-15 (see the parsing sketch after this row)
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
    # 1 or 15 is fine as it is
    # 1-2 and 9-10 -> in both cases we want only the first digit (lengths 3 and 4)
    # 12-13 -> here we want the first two digits (length 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
return zac_hod[0:2]
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
    # not exactly pretty, but so be it; index 0 holds the school name, index 1 the classes heading
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
    ciste_trka = ciste_trka[1:-1] # Drop the <tr> that wraps everything; we don't need it
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
            # all lessons within that day
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
            # bgcolor tells us whether this is a lesson or a free period
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
        # When two or more subjects share the same time slot on the same day, the second or third
        # one sits outside the main <tr> tag that also contains the day name, so we have to look
        # for further lessons outside that tr tag.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
    # Even if a classroom or a teacher's name is missing, all the lists end up the same length;
    # the "Nadpis" (heading), however, is always a single item, so we have to use "izip_longest",
    # which pads the shorter list with "None" to match the longer one. Without it, zip would
    # truncate every list to a single item and we would lose lessons...
    # On top of that we need to remove any subject whose name contains three stars, because it
    # carries data about additional lessons that cannot be parsed normally. If we kept it,
    # lessons could shift into wrong days and get wrong classrooms.
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Take all links and check whether each one contains "rozvrh_tr", so that only class timetables are processed
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
    # drop the first link, which is not a concrete class but a template for all classes
urls = urls[1:-1]
return urls
def make_folder():
    # create a folder for the timetables
try:
os.makedirs('rozvrhy')
except OSError:
        pass # if the folder already exists an error is raised; ignore it, we just want to write into it
def g
|
enerate_xmls(
|
identifier_name
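The masked identifier above is generate_xmls. Separately, the string surgery in get_class_start is easier to follow in isolation; a minimal sketch, assuming a title of the form '* streda : 14-15 *' as the comments describe:

title = "* streda : 14-15 *"

# Strip everything outside the two asterisks, drop spaces, and split on ':':
# "streda:14-15" -> ['streda', '14-15']; index 1 holds the hour range.
inner = title.partition('*')[-1].rpartition('*')[0]
zac_hod = inner.replace(" ", "").split(':')[1]

# Length-based heuristic from the original: '1-2'/'9-10' start with one digit,
# '12-13' starts with two, and a bare '1' or '15' is already just the hour.
if len(zac_hod) in (3, 4):
    start = zac_hod[0:1]
elif len(zac_hod) == 5:
    start = zac_hod[0:2]
else:
    start = zac_hod

print(start)  # -> '14'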
|
|
main.py
|
# If we wanted to modify clazz[3] it wouldn't work, because a tuple can't be changed
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
r
|
def get_class_start(title):
    # title contains something like "* streda : 14-15 *" - we only need the first number,
    # because the lesson's length comes from another attribute (see get_lessons_of_class, colspan)
    # this returns a list like ['streda', '14-15']; we take index 1, i.e. the numbers 14-15
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
    # 1 or 15 is fine as it is
    # 1-2 and 9-10 -> in both cases we want only the first digit (lengths 3 and 4)
    # 12-13 -> here we want the first two digits (length 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
return zac_hod[0:2]
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
    # not exactly pretty, but so be it; index 0 holds the school name, index 1 the classes heading
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
    ciste_trka = ciste_trka[1:-1] # Drop the <tr> that wraps everything; we don't need it
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
            # all lessons within that day
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
            # bgcolor tells us whether this is a lesson or a free period
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
        # When two or more subjects share the same time slot on the same day, the second or third
        # one sits outside the main <tr> tag that also contains the day name, so we have to look
        # for further lessons outside that tr tag.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
    # Even if a classroom or a teacher's name is missing, all the lists end up the same length;
    # the "Nadpis" (heading), however, is always a single item, so we have to use "izip_longest",
    # which pads the shorter list with "None" to match the longer one. Without it, zip would
    # truncate every list to a single item and we would lose lessons... (see the zip_longest sketch after this row)
    # On top of that we need to remove any subject whose name contains three stars, because it
    # carries data about additional lessons that cannot be parsed normally. If we kept it,
    # lessons could shift into wrong days and get wrong classrooms.
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Take all links and check whether each one contains "rozvrh_tr", so that only class timetables are processed
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
    # drop the first link, which is not a concrete class but a template for all classes
urls = urls[1:-1]
return urls
def make_folder():
    # create a folder for the timetables
try:
os.makedirs('rozvrhy')
except OSError:
        pass # if the folder already exists an error is raised; ignore it, we just want to write into it
|
eturn title['colspan']
|
identifier_body
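The izip_longest comment above is the crux of why a single-item heading survives zipping against full-length lists. A tiny sketch of the padding behaviour, shown with Python 3's zip_longest (the equivalent of the original's itertools.izip_longest):

from itertools import zip_longest  # izip_longest in the Python 2 original

predmety = ['math', 'physics', 'chemistry']
nadpis = ['Class 1.A']  # the heading is always a single item

# zip() would truncate everything to one tuple; zip_longest pads with None.
rows = list(zip_longest(predmety, nadpis))
print(rows)
# [('math', 'Class 1.A'), ('physics', None), ('chemistry', None)]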
|
main.py
|
# If we wanted to modify clazz[3] it wouldn't work, because a tuple can't be changed
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
return title['colspan']
def get_class_start(title):
|
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
    # 1 or 15 is fine as it is
    # 1-2 and 9-10 -> in both cases we want only the first digit (lengths 3 and 4)
    # 12-13 -> here we want the first two digits (length 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
return zac_hod[0:2]
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
    # not exactly pretty, but so be it; index 0 holds the school name, index 1 the classes heading
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
    ciste_trka = ciste_trka[1:-1] # Drop the <tr> that wraps everything; we don't need it
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
            # all lessons within that day
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
            # bgcolor tells us whether this is a lesson or a free period
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
        # When two or more subjects share the same time slot on the same day, the second or third
        # one sits outside the main <tr> tag that also contains the day name, so we have to look
        # for further lessons outside that tr tag.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
    # Even if a classroom or a teacher's name is missing, all the lists end up the same length;
    # the "Nadpis" (heading), however, is always a single item, so we have to use "izip_longest",
    # which pads the shorter list with "None" to match the longer one. Without it, zip would
    # truncate every list to a single item and we would lose lessons...
    # On top of that we need to remove any subject whose name contains three stars, because it
    # carries data about additional lessons that cannot be parsed normally. If we kept it,
    # lessons could shift into wrong days and get wrong classrooms.
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Take all links and check whether each one contains "rozvrh_tr", so that only class timetables are processed
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
    # drop the first link, which is not a concrete class but a template for all classes
urls = urls[1:-1]
return urls
def make_folder():
    # create a folder for the timetables
try:
os.makedirs('rozvrhy')
except OSError:
        pass # if the folder already exists an error is raised; ignore it, we just want to write into it
def
|
    # title contains something like "* streda : 14-15 *" - we only need the first number,
    # because the lesson's length comes from another attribute (see get_lessons_of_class, colspan)
    # this returns a list like ['streda', '14-15']; we take index 1, i.e. the numbers 14-15
|
random_line_split
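The repeated day.append(etree.Element(...)) blocks above build one child element per non-None tuple field. A minimal sketch of the same pattern using the standard library's xml.etree.ElementTree (the original uses lxml, whose etree API matches for these calls); the tag names and tuple below are illustrative:

import xml.etree.ElementTree as etree

root = etree.Element('rozvrh')
day = etree.SubElement(root, 'pondelok')

clazz = ('math', 'Smith', None)  # hypothetical (subject, teacher, room) tuple
for tag, value in zip(('hodina', 'vyucujuci', 'ucebna'), clazz):
    if value is not None:  # skip missing fields instead of writing empty tags
        child = etree.Element(tag)
        child.text = value
        day.append(child)

print(etree.tostring(root).decode())
# <rozvrh><pondelok><hodina>math</hodina><vyucujuci>Smith</vyucujuci></pondelok></rozvrh>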
|
main.py
|
# If we wanted to modify clazz[3] it wouldn't work, because a tuple can't be changed
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
return title['colspan']
def get_class_start(title):
    # title contains something like "* streda : 14-15 *" - we only need the first number,
    # because the lesson's length comes from another attribute (see get_lessons_of_class, colspan)
    # this returns a list like ['streda', '14-15']; we take index 1, i.e. the numbers 14-15
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
    # 1 or 15 is fine as it is
    # 1-2 and 9-10 -> in both cases we want only the first digit (lengths 3 and 4)
    # 12-13 -> here we want the first two digits (length 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
r
|
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
    # not exactly pretty, but so be it; index 0 holds the school name, index 1 the classes heading
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
    ciste_trka = ciste_trka[1:-1] # Drop the <tr> that wraps everything; we don't need it
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
            # all lessons within that day
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
            # bgcolor tells us whether this is a lesson or a free period
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
        # When two or more subjects share the same time slot on the same day, the second or third
        # one sits outside the main <tr> tag that also contains the day name, so we have to look
        # for further lessons outside that tr tag.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
    # Even if a classroom or a teacher's name is missing, all the lists end up the same length;
    # the "Nadpis" (heading), however, is always a single item, so we have to use "izip_longest",
    # which pads the shorter list with "None" to match the longer one. Without it, zip would
    # truncate every list to a single item and we would lose lessons...
    # On top of that we need to remove any subject whose name contains three stars, because it
    # carries data about additional lessons that cannot be parsed normally. If we kept it,
    # lessons could shift into wrong days and get wrong classrooms.
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Take all links and check whether each one contains "rozvrh_tr", so that only class timetables are processed
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
    # drop the first link, which is not a concrete class but a template for all classes
urls = urls[1:-1]
return urls
def make_folder():
    # create a folder for the timetables (see the makedirs sketch after this row)
try:
os.makedirs('rozvrhy')
except OSError:
        pass # if the folder already exists an error is raised; ignore it, we just want to write into it
|
eturn zac_hod[0:2]
|
conditional_block
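make_folder's try/except OSError is the classic pre-3.2 idiom for "create the directory unless it already exists". On Python 3 the same intent fits in one line, as this small sketch shows:

import os

# Equivalent of make_folder(): create the directory, silently
# accepting that it may already exist.
os.makedirs('rozvrhy', exist_ok=True)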
|
renderer.rs
|
Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces fft_input_size / 2 + 1 complex bins (see the numpy check after this row).
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
|
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
|
device,
queue,
|
random_line_split
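fft_out_size above encodes the standard real-FFT bin count, nsamp/2 + 1 (DC through Nyquist inclusive). A quick numpy check of that identity (Python is used for the demonstration, matching the other sketches in this document):

import numpy as np

for nsamp in (8, 1024, 4096):
    bins = np.fft.rfft(np.zeros(nsamp))
    # A real FFT of nsamp samples yields bins for DC..Nyquist inclusive.
    assert len(bins) == nsamp // 2 + 1
    print(nsamp, '->', len(bins), 'bins')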
|
renderer.rs
|
inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces fft_input_size / 2 + 1 complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice)
|
{
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
}
|
identifier_body
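update() above pushes the spectrum through fft_as_pod, reinterpreting complex samples as plain floats for the GPU upload. The same zero-copy reinterpretation can be sketched with numpy's view(), as an analogy to the bytemuck cast rather than the Rust API itself:

import numpy as np

spectrum = np.array([1 + 2j, 3 - 4j], dtype=np.complex64)

# view() reinterprets the same memory as interleaved (re, im) float pairs,
# which is the layout a GPU-side vec2 array expects.
flat = spectrum.view(np.float32)
print(flat)  # [ 1.  2.  3. -4.]
assert flat.base is spectrum  # no copy was made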
|
|
renderer.rs
|
Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces fft_input_size / 2 + 1 complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn
|
(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
|
resize
|
identifier_name
|
error.rs
|
trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn mismatched_types<DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
|
#[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active
|
#[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
|
random_line_split
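downcast_ref above deliberately panics on a type mismatch while try_downcast_ref returns an Option. A few lines of Python make that design pairing concrete (an illustrative analogy, not the crate's API):

def try_downcast_ref(err, ty):
    # The fallible variant: None on a type mismatch.
    return err if isinstance(err, ty) else None

def downcast_ref(err, ty):
    # The infallible variant: raise on a mismatch, for brevity when the
    # caller knows which database error type to expect.
    got = try_downcast_ref(err, ty)
    if got is None:
        raise TypeError(f"downcasting to wrong DatabaseError type; original error: {err!r}")
    return got

class PgError(Exception): pass

print(downcast_ref(PgError("duplicate key"), PgError))
print(try_downcast_ref(ValueError("nope"), PgError))  # -> None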
|
error.rs
|
Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
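// Illustration only (not in the original file): a generic helper showing the
// intended split of responsibilities. Use try_downcast_ref for fallible
// inspection; reserve the panicking downcast_ref for a statically known backend.
#[allow(dead_code)]
fn inspect_database_error<T: DatabaseError>(err: &dyn DatabaseError) -> Option<&T> {
    // Returns None instead of panicking when the concrete type differs.
    err.try_downcast_ref::<T>()
}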
/// Used by the `protocol_error!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// Hand-written Serialize impl: the error serializes as its Display string.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v.to_string())
}
}
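// Assumed continuation (the dump truncates here): the Deserialize impl that
// wires ErrorVisitor up, rebuilding an Error from its serialized string form.
impl<'de> Deserialize<'de> for Error {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // ErrorVisitor yields the raw String; From<String> re-wraps it as Error::E.
        let s = deserializer.deserialize_string(ErrorVisitor)?;
        Ok(Error::from(s))
    }
}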
error.rs
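// The head of this excerpt is truncated in the dump. The enum declaration
// below is reconstructed from the Display and From impls that follow: the
// variant list and payload types are grounded in that code, but the doc
// wording is assumed.
pub enum Error {
    /// Plain error message; see From<&str> and From<String> below.
    E(String),
    /// Error communicating with the database.
    Io(io::Error),
    /// The connection URL could not be parsed.
    UrlParse(url::ParseError),
    /// An error returned by the database itself.
    Database(Box<dyn DatabaseError>),
    /// No row was found when at least one was expected.
    RowNotFound,
    /// No column with the given name was found in the row.
    ColumnNotFound(String),
    /// The column index was out of bounds for the row.
    ColumnIndexOutOfBounds { index: usize, len: usize },
    /// Unexpected or invalid data was encountered in the wire protocol.
    Protocol(Box<str>),
    /// [`Pool::acquire`] timed out while waiting for an open connection, before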
    /// trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
    pub(crate) fn mismatched_types<DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
emitter.rs
pub trait Shape {
    /// Should generate code to draw the shape on a cocos2dx DrawNode
    fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>);
    /// Should generate code for edge segments, and an encompassing shape
    ///
    /// TODO probably also make those generated shapes have certain categories
    /// and the encompassing should be a sensor.
    fn emit_physics(&self, id: &str, physicsbody: &str);
}
pub struct Polygon {
    verts: Vec<[f64; 2]>,
    triangles: Vec<[usize; 3]>,
    color: Color
}
impl Shape for Polygon {
    fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
        println!("// Triangles for {}", id);
        for ref t in &self.triangles {
            println!(
                "{}->drawTriangle(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
                Emitter::varname(id, drawnode),
                self.verts[t[0]][0], self.verts[t[0]][1],
                self.verts[t[1]][0], self.verts[t[1]][1],
                self.verts[t[2]][0], self.verts[t[2]][1],
                color.unwrap_or(&self.color.emit())
            );
        }
    }
    fn emit_physics(&self, id: &str, physicsbody: &str) {
        println!("// Physics for {}", id);
        if self.verts.len() <= 1 {
            println!("// {} does not have enough vertices for a polygon", id);
            return;
        }
        let mut verts = self.verts.iter();
        let mut vert_a = verts.next();
        let mut vert_b = verts.next();
        let first_point = vert_a.unwrap();
        let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
            println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
while let (Some(a), Some(b)) = (vert_a, vert_b) {
emit_shape(&a, &b);
vert_a = vert_b;
vert_b = verts.next();
}
if self.verts.len() > 2 {
emit_shape(&vert_a.unwrap(), &first_point);
}
}
}
pub struct Circle {
cx: f64,
cy: f64,
r: f64,
color: Color
}
impl Shape for Circle {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Circle for {}", id);
// arguments: center, radius, angle, segments, color
println!(
"{}->drawSolidCircle(Vec2({:.10}f,{:.10}f), {:.10}f, 0.0f, 20, {});",
Emitter::varname(id, drawnode),
self.cx, self.cy, self.r,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, _id: &str, _physicsbody: &str) {
panic!("Can't do physics for circle yet (no use case)");
}
}
pub struct Rect {
x: f64,
y: f64,
w: f64,
h: f64,
color: Color
}
impl Shape for Rect {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Rect for {}", id);
// arguments: origin, destination, color
println!(
"{}->drawSolidRect(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.x, self.y,
self.x+self.w, self.y+self.h,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
// bottom-left to bottom-right
emit_shape(&[self.x, self.y], &[self.x+self.w, self.y]);
// bottom-right to top-right
emit_shape(&[self.x+self.w, self.y], &[self.x+self.w, self.y+self.h]);
// top-right to top-left
emit_shape(&[self.x+self.w, self.y+self.h], &[self.x, self.y+self.h]);
// top-left to bottom-left
emit_shape(&[self.x, self.y+self.h], &[self.x, self.y]);
}
}
/// The emitter itself. Holds onto shapes relative to their id.
pub struct Emitter {
origin: Option<[f64; 2]>,
shapes: BTreeMap<String, Box<Shape>>
}
impl Emitter {
pub fn new() -> Emitter {
Emitter {
origin: None,
shapes: BTreeMap::new()
}
}
///
/// Emit a single shape to stdout.
/// Returns true if a shape under `id` was found and emitted.
/// Returns false if there was no shape under `id`.
///
pub fn emit(&self, id: &str, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) -> bool {
match self.shapes.get(id) {
Some(shape) =>{
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
true
}
None => false
}
}
///
/// Emit all shapes to stdout.
///
pub fn emit_all(&self, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) {
for (ref id, ref shape) in &self.shapes {
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
}
}
///
/// How many shapes we've added.
///
pub fn len(&self) -> usize {
self.shapes.len()
}
///
/// Add a shape from xml attributes.
///
pub fn add_shape(&mut self, id: &str, tag: &str, attributes: &Vec<xml::attribute::OwnedAttribute>) {
let new_shape: Box<Shape> = match tag {
"path" => Box::new(self.parse_polygon(attributes)),
"circle" => Box::new(self.parse_circle(attributes)),
"rect" => Box::new(self.parse_rect(attributes)),
_ => return
};
self.shapes.insert(id.to_owned(), new_shape);
}
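    /// Illustration only (not in the original source): the typical driver
    /// loop. Collect shapes from parsed SVG elements, then emit C++ for a
    /// DrawNode and a PhysicsBody in one pass. The names are hypothetical.
    #[allow(dead_code)]
    pub fn example_usage(elements: &[(String, String, Vec<xml::attribute::OwnedAttribute>)]) {
        let mut emitter = Emitter::new();
        for (id, tag, attrs) in elements {
            emitter.add_shape(id, tag, attrs);
        }
        // None = keep each shape's own fill color from the SVG style.
        emitter.emit_all(Some("drawNode"), Some("physicsBody"), None);
    }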
///
/// Parse a polygon with vertex positions based on `self.origin`.
///
pub fn parse_polygon(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Polygon {
let ref d = attributes.iter().find(|ref a| a.name.local_name == "d")
.expect("Path lacking 'd' attribute")
.value;
// Fill a vec with all verts
let mut current_pos = [0.0, 0.0];
let mut verts = Vec::with_capacity(d.len() / 5);
{
let mut n = 0;
let mut exprs = d.split(' ');
while let Some(expr) = exprs.next() {
if expr == "m" {
if n == 0 {
// Starting point is origin
let expr = exprs.next().unwrap();
let mut xy = expr.split(',');
let sx = xy.next().expect("Invalid pair");
let sy = xy.next().expect("Invalid pair");
self.assign_position_from_origin(&mut current_pos, sx, sy);
verts.push(current_pos);
continue;
}
else { panic!("'m' showing up more than once???") }
}
else if expr == "z" {
break
}
else if !expr.contains(',') {
panic!("Unsupported expression: {}", expr);
}
let mut xy = expr.split(',');
let x = xy.next().expect("Invalid pair");
let y = xy.next().expect("Invalid pair");
current_pos[0] += parse_float!(x);
current_pos[1] -= parse_float!(y);
verts.push(current_pos);
n = n + 1;
}
}
let ref style = attributes.iter().find(|ref a| a.name.local_name == "style")
.expect("Path lacking 'style' attribute")
.value;
Polygon {
triangles: polyfill::triangle_indices(&verts, polyfill::TANGENTIAL),
verts: verts,
color: Emitter::parse_color_from_style(style).unwrap_or(Color::black())
}
}
///
/// Parse a circle with center point based on `self.origin`.
///
pub fn parse_circle(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Circle {
// cx cy r color
let mut params = (None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"cx" => params.0 = Some(attr.value.clone()),
"cy" => params.1 = Some(attr.value.clone()),
"r" => params.2 = Some(attr.value.clone()),
"style" => params.3 = Some(attr.value.clone()),
_ => {}
}
if let (Some(cx), Some(cy), Some(r), Some(style)) = params {
let mut cxy = [0.0, 0.0];
self.assign_position_from_origin(&mut cxy, &cx, &cy);
return Circle {
cx: cxy[0], cy: cxy[1], r: parse_float!(&r),
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
};
}
}
panic!("Invalid circle");
}
///
/// Parse a rect with origin at the bottom right (??)
///
pub fn parse_rect(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Rect {
// x, y, w, h, style
let mut params = (None, None, None, None, None);
|
suicidegirls.py
        print("Beginning dispatcher thread...")
        while not SuicideGirls.stop_dispatching or len(SuicideGirls.argument_lists) != 0:
if len(SuicideGirls.argument_lists) != 0:
print("Argument list found! Dispatching...")
argument_list = SuicideGirls.argument_lists.pop(0)
pool = multiprocessing.Pool(self.process_limit)
pool.map(self.download_image, argument_list)
# Girls: Riae (36), Fishball (28), Vandoll (7)
# Total photosets: 71
# Processes: 8
# map: 00:24:37
# map_async: 00:12:33
print("Exiting dispatcher thread...")
def startup(self):
SuicideGirls.driver = webdriver.Chrome(executable_path="dependencies/chromedriver.exe")
SuicideGirls.driver.maximize_window()
SuicideGirls.driver.implicitly_wait(5)
SuicideGirls.driver.get(self.root_url)
self.__login()
def shutdown(self):
SuicideGirls.driver.quit()
def __login(self):
login_button_xpath = "//a[@class='login button' or @class='button login']"
login_form_submit_xpath = "//button[@type='submit' and text()='Login']"
username_box_xpath = "//input[@name='username']"
password_box_xpath = "//input[@name='password']"
SuicideGirls.driver.find_element_by_xpath(login_button_xpath).click()
SuicideGirls.driver.find_element_by_xpath(username_box_xpath).send_keys(self.username)
SuicideGirls.driver.find_element_by_xpath(password_box_xpath).send_keys(self.password)
SuicideGirls.driver.find_element_by_xpath(login_form_submit_xpath).click()
time.sleep(5)
        flag = False
while True:
try:
image_select = SuicideGirls.driver.find_element_by_xpath("//iframe[@title='recaptcha challenge']")
if not flag:
print("Found a captcha!")
flag = True
except:
                break
print("No captcha found!")
def rip(self):
for url in self.urls:
SuicideGirls.driver.get(url)
if self.__type == "girl":
print("Single girl")
self.__rip_girl()
elif self.__type == "girls":
print("All Suicide Girls")
self.__rip_all_girls()
elif self.__type == "hopefuls":
print("All hopefuls")
self.__rip_all_hopefuls()
elif self.__type == "sotds":
print("All sets of the day")
self.__rip_all_sets_of_the_day()
elif self.__type == "set":
print("Single set")
self.__rip_set()
elif self.__type == "all":
print("All!")
self.__rip_all_photos()
SuicideGirls.stop_dispatching = True
SuicideGirls.dispatcher_thread.join()
print("Rip completed.")
print("Total girls/hopefuls ripped: " + str(self.girls_completed))
print("Total sets ripped: " + str(self.sets_completed))
def __rip_all_photos(self):
SuicideGirls.driver.get(self.urls[0])
self.__type = "hopefuls"
self.__rip_all_hopefuls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "girls"
self.__rip_all_girls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "sotds"
self.__rip_all_sets_of_the_day()
def __rip_all_girls(self):
suicide_girls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'SuicideGirls']"
self.__rip_all(suicide_girls_xpath)
def __rip_all_hopefuls(self):
hopefuls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Hopefuls']"
self.__rip_all(hopefuls_xpath)
def __rip_all_sets_of_the_day(self):
sotds_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Sets Of The Day']"
self.__rip_all(sotds_xpath)
def __rip_all(self, type_xpath):
time_period_xpath = "//li[@class='dropdown'][3]//ul/li/a[text() = '" + self.time_period + "']"
girl_name_xpath = "//article/header//h2/a"
load_more_xpath = "//a[@id='load-more']"
choice = SuicideGirls.driver.find_element_by_xpath(type_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
choice = SuicideGirls.driver.find_element_by_xpath(time_period_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
girls = []
iteration = 0
while True:
iteration += 1
names = SuicideGirls.driver.find_elements_by_xpath(girl_name_xpath)
for name in names:
girls.append(name.text)
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<24;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
girls = list(set(girls))
for girl in sorted(girls):
url = self.__build_url(girl)
SuicideGirls.driver.get(url)
self.__rip_girl()
def __rip_girl(self):
load_more_xpath = "//a[@id='load-more']"
photos_xpath = "//div[@id='content-container']//a[text()='Photos']"
photosets_xpath = "//div[@id='content-container']//a[text()='Photosets']"
set_title_xpath = "//article/header//h2/a"
url = SuicideGirls.driver.find_element_by_xpath(photos_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
url = SuicideGirls.driver.find_element_by_xpath(photosets_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
set_links = []
iteration = 0
while True:
iteration += 1
titles = SuicideGirls.driver.find_elements_by_xpath(set_title_xpath)
for title in titles:
set_links.append(title.get_attribute("href"))
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<9;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
set_links = list(set(set_links))
for link in set_links:
SuicideGirls.driver.get(link)
self.__rip_set()
self.girls_completed += 1
def __rip_set(self):
girl_xpath = "//h1/a"
title_xpath = "//header[@class='header']/div[@class='top-bar']/h2[@class='title']"
full_image_button_xpath = "//a[@id='button-view_full_size']"
full_image_url_xpath = "//div[@data-image_url]"
girl = SuicideGirls.driver.find_element_by_xpath(girl_xpath).text
title = SuicideGirls.driver.find_element_by_xpath(title_xpath).text
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
check = False
if os.path.exists(dir_name):
check = True
SuicideGirls.driver.find_element_by_xpath(full_image_button_xpath).click()
time.sleep(5)
images = SuicideGirls.driver.find_elements_by_xpath(full_image_url_xpath)
image_urls = []
for i in range(0, len(images)):
url = images[i].get_attribute("data-image_url")
ext = url[url.rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if not os.path.exists(os.path.join(dir_name, file_name)):
image_urls.append(url)
else:
                print(girl.title() + "/" + title.title() + " Img" + str(i + 1).zfill(3) + " already exists, skipping...")
self.__download_and_save_set(image_urls, girl, title)
self.sets_completed += 1
def __download_and_save_set(self, urls, girl, title):
aria_path = os.path.join(self.exec_dir, "dependencies", "aria2", "aria2c.exe")
error_strings = []
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
with multiprocessing.Pool(8) as pool:
args = []
for i in range (0, len(urls)):
command = [aria_path, "-d", dir_name, "-o"]
ext = urls[i][urls[i].rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
                if os.path.exists(os.path.join(dir_name, file_name)):
continue
command.append(file_name)
command.append(urls[i])
args.append((error_strings, command, str(i + 1), urls[i], girl, title))
SuicideGirls.argument_lists.append(args)
if len(error_strings) > 0:
            f = open(os.path.join(dir_name, "errors.txt"), "w")
f.write("\n".join(sorted(error_strings)))
f.close()
def __build_url(self, name):
if self.__type in ["girl", "girls", "sotds"]:
return "https://www.suicidegirls.com/girls/" + name
elif self.__type in ["hopeful", "hopefuls"]:
return "https://www.suicidegirls.com/members/" + name
def download_image(self, args):
process = subprocess.run(args[1])
if process.returncode != 0:
args[0].append("\tImage " + args[2] + " failed; URL: " + args[3])
print(args[4].title() + "/" + args[5].title() + " #" + args[2] + " complete")
def start_processes(async_result):
async_result.get()
def print_warning():
print("T
|
his file is meant to be imported by other Python files, not run directly. Exiting now.")
if __n
app.go
|
/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
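// Parse a single "bytes=head-tail" range spec; anything malformed is answered with 416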
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 || head > tail {
r.Status(416)
return
}
if tail >= len(data) {
tail = len(data) - 1 // clamp an over-long tail so the slice below cannot panic (RFC 7233)
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
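// Take an exclusive flock so concurrent appends to the click log do not interleave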
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
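// Wipe every isu4:* key and the on-disk click logs; the master then tells each slave to do the same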
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
}
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777
// curl -XPOST --data-binary "hoge" -v http://127.0.0.1:8080/fs/foo
func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
dir := filepath.Dir(path)
log.Println(dir)
err := os.MkdirAll(dir, FSDirPermission)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
file, err := os.Create(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close()
written, err := io.Copy(file, r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println(written)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeDeleteFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
info, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
http.Error(w, "", http.StatusNotFound)
return
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if info.IsDir() {
err = os.RemoveAll(path)
if err != nil
|
{
http.Error(w, err.Error(), http.StatusInternalServerError)
}
|
conditional_block
|
|
app.go
|
"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
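// Parse a single "bytes=head-tail" range spec; anything malformed is answered with 416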
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 || head > tail {
r.Status(416)
return
}
if tail >= len(data) {
tail = len(data) - 1 // clamp an over-long tail so the slice below cannot panic (RFC 7233)
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
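// Take an exclusive flock so concurrent appends to the click log do not interleave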
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
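// Wipe every isu4:* key and the on-disk click logs; the master then tells each slave to do the same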
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
}
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777
// curl -XPOST --data-binary "hoge" -v http://127.0.0.1:8080/fs/foo
func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
dir := filepath.Dir(path)
log.Println(dir)
err := os.MkdirAll(dir, FSDirPermission)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
file, err := os.Create(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close()
written, err := io.Copy(file, r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println(written)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeDeleteFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
info, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
http.Error(w, "", http.StatusNotFound)
return
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if info.IsDir() {
err = os.RemoveAll(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
}
err = os.Remove(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
return
}
// curl -XGET -v http://127.0.0.1:8080/fs/foo
func routeGetFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params)
|
{
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
http.ServeFile(w, r, path)
return
}
|
identifier_body
|
|
app.go
|
["title"]; a != nil {
title = a[0]
}
destination := ""
if a := req.Form["destination"]; a != nil {
destination = a[0]
}
rd.HMSet(key,
"slot", slot,
"id", id,
"title", title,
"type", content_type,
"advertiser", advrId,
"destination", destination,
"impressions", "0",
)
f, _ := asset.Open()
defer f.Close()
buf := bytes.NewBuffer(nil)
io.Copy(buf, f)
fname := assetFile(slot, id)
err := os.MkdirAll(filepath.Dir(fname), os.ModePerm)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(fname, buf.Bytes(), os.ModePerm)
if err != nil {
panic(err)
}
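// The master pushes the uploaded asset to every slave through its /fs endpoint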
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/fs"+assetFile(slot, id), content_type, bytes.NewReader(buf.Bytes()))
if err != nil {
panic(err)
}
}
}
rd.RPush(slotKey(slot), id)
rd.SAdd(advertiserKey(advrId), key)
r.JSON(200, getAd(req, slot, id))
}
func routeGetAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
ad := nextAd(req, slot)
if ad != nil {
r.Redirect("/slots/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
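// Parse a single "bytes=head-tail" range spec; anything malformed is answered with 416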
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 || head > tail {
r.Status(416)
return
}
if tail >= len(data) {
tail = len(data) - 1 // clamp an over-long tail so the slice below cannot panic (RFC 7233)
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
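// Take an exclusive flock so concurrent appends to the click log do not interleave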
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
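// Wipe every isu4:* key and the on-disk click logs; the master then tells each slave to do the same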
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
}
|
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777
|
random_line_split
|
|
app.go
|
(path string, id string) string {
i, _ := strconv.Atoi(id)
host := internalIP[i%3]
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func fetch(hash map[string]string, key string, defaultValue string) string {
if hash[key] == "" {
return defaultValue
} else {
return hash[key]
}
}
func incr_map(dict *map[string]int, key string) {
_, exists := (*dict)[key]
if !exists {
(*dict)[key] = 0
}
(*dict)[key]++
}
func advertiserId(req *http.Request) string {
return req.Header.Get("X-Advertiser-Id")
}
func adKey(slot string, id string) string {
return "isu4:ad:" + slot + "-" + id
}
func assetKey(slot string, id string) string {
return "isu4:asset:" + slot + "-" + id
}
const assetBaseDir = "/var/tmp/isu4"
func initAssetBaseDir() {
err := os.RemoveAll(assetBaseDir)
if err != nil {
panic(err)
}
err = os.MkdirAll(assetBaseDir, os.ModePerm)
if err != nil {
panic(err)
}
}
func assetFile(slot string, id string) string {
return assetBaseDir + "/slots/" + slot + "/ads/" + id + "/asset"
}
func advertiserKey(id string) string {
return "isu4:advertiser:" + id
}
func slotKey(slot string) string {
return "isu4:slot:" + slot
}
func nextAdId() string {
id, _ := rd.Incr("isu4:ad-next").Result()
return strconv.FormatInt(id, 10)
}
func nextAd(req *http.Request, slot string) *AdWithEndpoints {
key := slotKey(slot)
id, _ := rd.RPopLPush(key, key).Result()
if id == "" {
return nil
}
ad := getAd(req, slot, id)
if ad != nil {
return ad
} else {
rd.LRem(key, 0, id).Result()
return nextAd(req, slot)
}
}
func getAd(req *http.Request, slot string, id string) *AdWithEndpoints {
key := adKey(slot, id)
m, _ := rd.HGetAllMap(key).Result()
if m == nil {
return nil
}
if _, exists := m["id"]; !exists {
return nil
}
imp, _ := strconv.Atoi(m["impressions"])
path_base := "/slots/" + slot + "/ads/" + id
var ad *AdWithEndpoints
ad = &AdWithEndpoints{
Ad{
m["slot"],
m["id"],
m["title"],
m["type"],
m["advertiser"],
m["destination"],
imp,
},
urlFor2(path_base+"/asset", id),
urlFor(req, path_base+"/redirect"),
urlFor(req, path_base+"/count"),
}
return ad
}
func decodeUserKey(id string) (string, int) {
if id == "" {
return "unknown", -1
}
splitted := strings.Split(id, "/")
gender := "male"
if splitted[0] == "0" {
gender = "female"
}
age, _ := strconv.Atoi(splitted[1])
return gender, age
}
func getLogPath(advrId string) string {
dir := getDir("log")
splitted := strings.Split(advrId, "/")
return dir + "/" + splitted[len(splitted)-1]
}
func getLog(id string) map[string][]ClickLog {
path := getLogPath(id)
result := map[string][]ClickLog{}
if _, err := os.Stat(path); os.IsNotExist(err) {
return result
}
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)
if err != nil {
panic(err)
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimRight(line, "\n")
sp := strings.Split(line, "\t")
ad_id := sp[0]
user := sp[1]
agent := sp[2]
if agent == "" {
agent = "unknown"
}
gender, age := decodeUserKey(sp[1])
if result[ad_id] == nil {
result[ad_id] = []ClickLog{}
}
data := ClickLog{ad_id, user, agent, gender, age}
result[ad_id] = append(result[ad_id], data)
}
return result
}
func routePostAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
advrId := advertiserId(req)
if advrId == "" {
r.Status(404)
return
}
req.ParseMultipartForm(100000)
asset := req.MultipartForm.File["asset"][0]
id := nextAdId()
key := adKey(slot, id)
content_type := ""
if len(req.Form["type"]) > 0 {
content_type = req.Form["type"][0]
}
if content_type == "" && len(asset.Header["Content-Type"]) > 0 {
content_type = asset.Header["Content-Type"][0]
}
if content_type == "" {
content_type = "video/mp4"
}
title := ""
if a := req.Form["title"]; a != nil {
title = a[0]
}
destination := ""
if a := req.Form["destination"]; a != nil {
destination = a[0]
}
rd.HMSet(key,
"slot", slot,
"id", id,
"title", title,
"type", content_type,
"advertiser", advrId,
"destination", destination,
"impressions", "0",
)
f, _ := asset.Open()
defer f.Close()
buf := bytes.NewBuffer(nil)
io.Copy(buf, f)
fname := assetFile(slot, id)
err := os.MkdirAll(filepath.Dir(fname), os.ModePerm)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(fname, buf.Bytes(), os.ModePerm)
if err != nil {
panic(err)
}
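// The master pushes the uploaded asset to every slave through its /fs endpoint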
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/fs"+assetFile(slot, id), content_type, bytes.NewReader(buf.Bytes()))
if err != nil {
panic(err)
}
}
}
rd.RPush(slotKey(slot), id)
rd.SAdd(advertiserKey(advrId), key)
r.JSON(200, getAd(req, slot, id))
}
func routeGetAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
ad := nextAd(req, slot)
if ad != nil {
r.Redirect("/slots/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
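// Parse a single "bytes=head-tail" range spec; anything malformed is answered with 416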
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 || head > tail {
r.Status(416)
return
}
if tail >= len(data) {
tail = len(data) - 1 // clamp an over-long tail so the slice below cannot panic (RFC 7233)
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
|
urlFor2
|
identifier_name
|
|
tk.py
|
(self):
# Setup number pad screen
self.number_pad = Toplevel(root)
self.keypad_entery = Entry(self.number_pad,width=5,font=("Helvetica", 55))
self.keypad_entery.grid(row=0, column=0, columnspan=3, ipady=5)
self.number_pad.attributes('-fullscreen',True)
# The key layout to loop through
self.keys = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
['Clear', '0', 'Exit'], ]
# Loop through the keys and create each button with a lambda command
for self.y, self.row in enumerate(self.keys, 1):
for self.x, self.key in enumerate(self.row):
self.b = Button(self.number_pad, text=self.key, command=lambda val=self.key:__numb_enter(val))
self.b.grid(row=self.y, column=self.x, ipadx=108, ipady=30)
self.exit = Button(
self.number_pad,
text="Exit",
command=self.number_pad.destroy).grid(
row=self.y, column=self.x, ipadx=100, ipady=30)
# Set the exit button at the end of the loop
def __numb_enter(arg):
# Globals required for updating the timer labels and entry values
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
global timer_error_string
if not hasattr(self, 'pin'):
    self.pin = ''  # Initialise the pin buffer once so digits accumulate across key presses
if arg == 'Clear':
# remove last number from `pin`
self.pin = self.pin[:-1]
self.keypad_entery.delete('0', 'end')
self.keypad_entery.insert('end', self.pin)
elif arg == 'Exit':
self.number_pad.destroy()
# Exit the keypad window
else:
# add number to pin
self.pin += arg
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Calculate the minutes and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entry box for run time
daily_timer_input_value.set("") # Set the entry to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesn't get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# The Set Timer button writes the timer data out to CSV
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop through the hours in the day; z provides the hour of day, passed via lambda to the __timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rates
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop through the recurrence options; r provides the number of days
# between runs, passed via lambda to the __recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
|
__init__
|
identifier_name
|
|
tk.py
|
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
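# Daily timers fire at the chosen hour; multi-day timers step the day-of-month with */N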
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both entries back to empty
daily_timer_input_value.set("")
timer_input_value.set("")
call(["sudo", "systemctl", "restart", "pumptimer.service"])
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def timer(): # Simple timer function.
try: # Any errors (usually from missing input) fall through to the except below.
run_time = timer_input_value.get()
root_status_string.set(str("Pump Running"))
timer_input_value.set("")
if GPIO.input(23) == 1:
GPIO.output(24, 1)
for i in range(1, run_time + 1, +1):
m, s = divmod(i, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
time.sleep(1)
GPIO.output(24, 0)
root_status_string.set(str("The pump run has finished"))
except:
GPIO.output(24, 0) # Turn the pump off.
print("failed")
pass
manual_timer = 0
def man_start(force=True):
global running
global manual_timer
try:
if force:
running = True
if running:
if GPIO.input(23) == 1:
root_status_string.set(str("Pump Running"))
GPIO.output(24, 1)
manual_timer += 1
m, s = divmod(manual_timer, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
root.after(1000, man_start, False)
if GPIO.input(23) == 0:
root_status_string.set(str("The pump will not run when the water level is low."))
except:
GPIO.output(24, 0) # Stop the pump.
def man_stop():
global running
global manual_timer
GPIO.output(24, 0)
running = False
manual_timer = 0
root_status_string.set(str("The Pump has been manually stopped."))
def img_updater(): # Auto image updater for home screen.
# Open image
try:
global counter
timer_set_time, time_until_run = csv_read()
if GPIO.input(23) == 0:
water_level_label.config(fg="Red")
water_level.set(str("The water level is LOW."))
if GPIO.input(23) == 1:
water_level_label.config(fg="Green")
water_level.set(str("The water level is OK."))
# Every 10 seconds change the timer_status_1 string which is the label on the front page.
counter += 1
if counter >= 1:
timer_status_1.set(str(timer_set_time))
plant_stat_img = ImageTk.PhotoImage(Image.open(plot_img))
plant_stat_panel.config(image=plant_stat_img)
plant_stat_panel.image = plant_stat_img
if counter >= 11:
timer_status_1.set(str(time_until_run))
speed_img = ImageTk.PhotoImage(Image.open(speed_image)) # /home/pi/html/
plant_stat_panel.config(image=speed_img)
plant_stat_panel.image = speed_img
if counter >= 21:
counter = 0
# Re load page every 10 seconds
root.after(1000, img_updater)
except:
timer_status_1.set(str('Please enter a timer, there is currently no timer set.'))
root.after(1000, img_updater)
pass
def back_light():
# Start the perl script which turns off the screen back light when the screensaver is active.
# The perl script calls back light.py which turns the back light on and off.
proc = Popen(
[screen_off], shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def csv_read():
# Consider changing the times of day to a dict to use AM PM times inline with the loop.
try:
with open(timer_data) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
days = int(row[1])
runtime = int(row[2])
time_of_day = int(row[0])
csvfile.close()
# The number pad stores plain hour numbers, so convert them to a readable time.
# The following reads the stored timer and builds the label from it.
if int(int(row[0])) <= 9:
run_hour = "0{}:00".format(str(int(row[0])))
if int(int(row[0])) >= 10:
run_hour = "{}:00".format(str(int(row[0])))
days = int(row[1])
m, s = divmod(int(row[2]), 60)
h, m = divmod(m, 60)
run_time = (str("{} Minutes and {} Seconds".format(m, s)))
current_runtime = "The timer is set to run for {} every {} day(s) at {}".format(run_time, days, run_hour)
# The following reads the stored timer and works out how much time is left before the next run.
now = datetime.now()
seconds_since_last_run = (now - now.replace(hour=time_of_day, minute=0, second=0, microsecond=0)).total_seconds()
if days == 1:
|
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if countdown <= 1:
total_seconds = days * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
|
conditional_block
|
|
tk.py
|
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Calculate the minutes and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entry box for run time
daily_timer_input_value.set("") # Set the entry to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesn't get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# The Set Timer button writes the timer data out to CSV
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop through the hours in the day; z provides the hour of day, passed via lambda to the __timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
|
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rates
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop through the recurrence options; r provides the number of days
# between runs, passed via lambda to the __recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
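# Daily timers fire at the chosen hour; multi-day timers step the day-of-month with */N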
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both entries back to empty
daily_timer_input_value.set("")
|
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
|
random_line_split
|
tk.py
|
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Calculate the minutes and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
|
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entry box for run time
daily_timer_input_value.set("") # Set the entry to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesn't get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# The Set Timer button writes the timer data out to CSV
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop through the hours in the day; z provides the hour of day, passed via lambda to the __timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rates
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop through the recurrence options; r provides the number of days
# between runs, passed via lambda to the __recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
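# Daily timers fire at the chosen hour; multi-day timers step the day-of-month with */N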
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both entries back to empty
daily_timer_input_value.set("")
|
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
|
identifier_body
|
graphql-client.js
|
Cursor: []}
}
if (limit) {
if (cursor) {
if (cursor.filter) {
if (!filterResource || deepEqual(filterResource, cursor.filter))
cursor = {endCursor: []}
}
}
cursor.endCursor = cursor.endCursor || []
cursor.modelName = modelName
cursor.filter = filterResource || null
let endCursor
let len = cursor.endCursor.length
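// endCursor acts as a stack of page cursors: paging down uses the newest entry, paging up pops two to step back a page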
if (len) {
if (direction === 'down')
endCursor = cursor.endCursor[len - 1]
else {
if (len > 2) {
cursor.endCursor.splice(len - 2, 1)
cursor.endCursor.splice(len - 1, 1)
len -= 2
}
else
cursor.endCursor = []
endCursor = (len - 1) ? cursor.endCursor[len - 2] : null
}
}
else
endCursor = null
if (endCursor)
query += `after: "${endCursor}"\n`
query += `first: ${limit}\n`
}
}
if (hasFilter)
query += `filter: { ${qq} },\n`
if (sortProperty) {
let sortBy
let ref = props[sortProperty].ref
if (ref) {
if (ref === MONEY)
sortBy = sortProperty + '__value'
else
sortBy = sortProperty + '__title'
}
else
sortBy = sortProperty
query += `\norderBy: {
property: ${sortBy},
desc: ${!asc}
}`
}
else
query += `\norderBy: {
property: _time,
desc: true
}`
// if (limit)
// query += `, limit: ${limit}`
query += ')'
query += `\n{\n`
query += `pageInfo {\n endCursor\n}\n`
query += `edges {\n node {\n`
let arr = this.getAllPropertiesForServerSearch(model)
query += `${arr.join(' \n')}`
query += `\n}` // close 'node'
query += `\n}` // close 'edges'
query += `\n}` // close properties block
query += `\n}` // close query
try {
let data = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
})
let result = data.data[table]
if (!noCursorChange) {
let endCursor = result.pageInfo.endCursor
if (endCursor) {
// if (!params.direction || params.direction === 'down') {
let hasThisCursor = cursor.endCursor.some((c) => c === endCursor)
if (!hasThisCursor)
cursor.endCursor.push(endCursor)
// }
}
}
if (!result.edges.length) {
// this.trigger({action: 'list', resource: filterResource, isSearch: true, direction: direction, first: first})
return
}
// // if (result.edges.length < limit)
// // cursor.endCursor = null
// let to = this.getRepresentative(utils.getId(me.organization))
// let toId = utils.getId(to)
// let list = result.edges.map((r) => this.convertToResource(r.node))
// if (!noTrigger)
// this.trigger({action: 'list', list: list, resource: filterResource, direction: direction, first: first})
return result.edges
} catch(error) {
// debugger
console.error(error)
}
function prettify (obj) {
return JSON.stringify(obj, null, 2)
}
function addEqualsOrGreaterOrLesserNumber(val, op, prop) {
let isMoney = prop.ref === MONEY
let p = prop.name
if (isMoney)
p += '__value'
let ch = val.toString().charAt(0)
switch (ch) {
case '>':
if (val.charAt(1) === '=')
op.GTE += `\n ${p}: ${val.substring(2)},`
else
op.GT += `\n ${p}: ${val.substring(1)},`
break
case '<':
if (val.charAt(1) === '=')
op.LTE += `\n ${p}: ${val.substring(2)},`
else
op.LT += `\n ${p}: ${val.substring(1)},`
break
default:
op.EQ += `\n ${p}: ${val},`
}
}
},
// # _author: "3c67687a96fe59d8f98b1c90cc46f943b938d54cda852b12fb1d43396e28978a"
// # _inbound: false
// # _recipient: ${hash}
async getChat(params) {
let { author, recipient, client, context } = params
let table = `rl_${MESSAGE.replace(/\./g, '_')}`
let inbound = true
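// A chat is the union of two queries: inbound messages authored by the peer and outbound messages addressed to them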
let query =
`query {
rl_tradle_Message(
first:20,
filter:{
EQ: {
_inbound: true
context: "${context}"
_author: "${author}"
}
},
orderBy:{
property: time
desc:true
}
) {
edges {
node {
_author
_recipient
object
}
}
}
}`
let promisses = []
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
}))
let queryOutbound = query.replace('_inbound: true', '_inbound: false').replace('_author', '_recipient')
// `query {
// rl_tradle_Message(
// first:20,
// filter:{
// EQ: {
// _inbound: false
// context: "${context}"
// _recipient: "${author}"
// }
// },
// orderBy:{
// property: time
// desc:true
// }
// ) {
// edges {
// node {
// _author
// _recipient
// object
// }
// }
// }
// }`
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${queryOutbound}`),
}))
try {
let all = await Promise.all(promisses)
let result = []
all.forEach((data) => {
let list = data.data[table]
if (list.edges && list.edges.length)
list.edges.forEach(r => result.push(r.node))
})
// result.sort((a, b) => a.time - b.time)
return result
} catch (err) {
debugger
}
},
getAllPropertiesForServerSearch(model, inlined) {
let props = model.properties
let arr
if (model.inlined)
arr = []
else {
arr = ['_permalink', '_link', '_time', '_author', '_authorTitle', '_virtual', 'time']
if (model.id !== PUB_KEY && !inlined) {
let newarr = arr.concat(TYPE, SIG)
arr = newarr
}
}
for (let p in props) {
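// Skip internal/underscore and grouping properties; refs and inlined objects are expanded into sub-selections below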
if (p.charAt(0) === '_')
continue
if (p === 'from' || p === 'to' || p === 'time' || p.indexOf('_group') !== -1)
continue
let prop = props[p]
if (prop.displayAs)
continue
let ptype = prop.type
if (ptype === 'array') {
// HACK
if (p === 'verifications')
continue
let iref = prop.items.ref
if (iref) {
if (iref === model.id) {
arr.push(
`${p} {
id
}`
)
}
else if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(
`${p} {
id
title
}`
)
}
continue
}
if (ptype !== 'object') {
arr.push(p)
continue
}
let ref = prop.ref
if (!ref) {
if (prop.range === 'json')
arr.push(p)
continue
}
if (ref === ORGANIZATION)
continue
if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(this.addRef(prop))
}
return arr
},
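// Illustrative only: for a hypothetical non-inlined model with properties
// {name: {type: 'string'}, owner: {type: 'object', ref: 'some.Party'}},
// getAllPropertiesForServerSearch returns roughly
//   ['_permalink', '_link', '_time', '_author', '_authorTitle', '_virtual',
//    'time', <TYPE>, <SIG>, 'name', 'owner { id title }']
// which the caller joins into the edges { node { ... } } selection set.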
addRef(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
if (ref === MONEY) {
return (
`${p} {
value
currency
}`
)
}
if (ref === COUNTRY) {// || ref === CURRENCY)
return (
`${p} {
|
id
title
}`
|
random_line_split
|
|
graphql-client.js
|
] || p.charAt(0) === '_')
// continue
let val = filterResource[p]
// if (p === TYPE) {
// if (!Array.isArray(val))
// continue
// else {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `"${r}"`
// })
// s += ']'
// inClause.push(s)
// }
// }
// if (p.charAt(0) === '_')
// debugger
if (!props[p]) {
// guard: without this check, an unknown prop with a falsy value would
// fall through to props[p].type below and throw
if (val) {
if (p.charAt(0) === '_') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
}
else
op.EQ += `\n ${p}: "${val}",`
}
}
continue
}
else if (props[p].type === 'string') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
continue
}
else if (!val || !val.trim().length)
continue
}
if (props[p].type === 'string') {
let len = val.length
if (val.indexOf('*') === -1)
op.EQ += `\n ${p}: "${val}",`
else if (len > 1) {
// '*abc*' matches anywhere; 'abc*' is a prefix; a leading '*' alone
// falls back to CONTAINS (no ends-with operator is built here)
if (val.charAt(0) === '*' && val.charAt(len - 1) === '*')
op.CONTAINS = `\n ${p}: "${val.substring(1, len - 1)}",`
else if (val.charAt(len - 1) === '*')
op.STARTS_WITH = `\n ${p}: "${val.substring(0, len - 1)}",`
else
op.CONTAINS = `\n ${p}: "${val.substring(1)}",`
}
}
else if (props[p].type === 'boolean') {
if (val)
op.EQ += `\n ${p}: ${val},`
else
op.NEQ += `\n ${p}: true,`
}
else if (props[p].type === 'number')
self.addEqualsOrGreaterOrLesserNumber(val, op, props[p])
else if (props[p].type === 'object') {
// if (Array.isArray(val)) {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `{id: "${utils.getId(r)}", title: "${utils.getDisplayName(r)}"}`
// })
// s += ']'
// inClause.push(s)
// }
if (Array.isArray(val)) {
if (!val.length)
continue
let s = `${p}__id: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${utils.getId(r)}"`
})
s += ']'
inClause.push(s)
}
else {
if (props[p].ref === MONEY) {
let {value, currency} = val
op.EQ += `\n ${p}__currency: "${currency}",`
if (val.value)
addEqualsOrGreaterOrLesserNumber(value, op, props[p])
}
else {
op.EQ += `\n ${p}__id: "${val.id}",`
}
}
}
}
}
op.IN = inClause.length ? inClause.join(',') : ''
let qq = ''
for (let o in op) {
let q = op[o]
if (q.length) {
qq +=
`\n ${o}: {
${op[o]}\n},`
}
}
query += '('
let hasFilter = qq.length
if (!noCursorChange) {
if (first || cursor.modelName !== modelName) {
cursor = {endCursor: []}
}
if (limit) {
if (cursor) {
if (cursor.filter) {
if (!filterResource || !deepEqual(filterResource, cursor.filter))
cursor = {endCursor: []}
}
}
cursor.endCursor = cursor.endCursor || []
cursor.modelName = modelName
cursor.filter = filterResource || null
let endCursor
let len = cursor.endCursor.length
if (len) {
if (direction === 'down')
endCursor = cursor.endCursor[len - 1]
else {
if (len > 2) {
// remove the last two cursors with one splice; two single-element
// splices would shift indices after the first removal
cursor.endCursor.splice(len - 2, 2)
len -= 2
}
}
else
cursor.endCursor = []
endCursor = (len - 1) ? cursor.endCursor[len - 2] : null
}
}
else
endCursor = null
if (endCursor)
query += `after: "${endCursor}"\n`
query += `first: ${limit}\n`
}
}
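// Paging sketch (illustrative only): cursor.endCursor acts as a stack of page
// boundaries. Scrolling 'down' pushes the endCursor of each fetched page:
//   [] -> [c1] -> [c1, c2] -> [c1, c2, c3]
// Scrolling 'up' drops the last two entries so the previous page is refetched
// starting after the cursor that preceded it (or from the top once the stack
// empties).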
if (hasFilter)
query += `filter: { ${qq} },\n`
if (sortProperty) {
let sortBy
let ref = props[sortProperty].ref
if (ref) {
if (ref === MONEY)
sortBy = sortProperty + '__value'
else
sortBy = sortProperty + '__title'
}
else
sortBy = sortProperty
query += `\norderBy: {
property: ${sortBy},
desc: ${!asc}
}`
}
else
query += `\norderBy: {
property: _time,
desc: true
}`
// if (limit)
// query += `, limit: ${limit}`
query += ')'
query += `\n{\n`
query += `pageInfo {\n endCursor\n}\n`
query += `edges {\n node {\n`
let arr = this.getAllPropertiesForServerSearch(model)
query += `${arr.join(' \n')}`
query += `\n}` // close 'node'
query += `\n}` // close 'edges'
query += `\n}` // close properties block
query += `\n}` // close query
try {
let data = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
})
let result = data.data[table]
if (!noCursorChange) {
let endCursor = result.pageInfo.endCursor
if (endCursor) {
// if (!params.direction || params.direction === 'down') {
let hasThisCursor = cursor.endCursor.some((c) => c === endCursor)
if (!hasThisCursor)
cursor.endCursor.push(endCursor)
// }
}
}
if (!result.edges.length) {
// this.trigger({action: 'list', resource: filterResource, isSearch: true, direction: direction, first: first})
return
}
// // if (result.edges.length < limit)
// // cursor.endCursor = null
// let to = this.getRepresentative(utils.getId(me.organization))
// let toId = utils.getId(to)
// let list = result.edges.map((r) => this.convertToResource(r.node))
// if (!noTrigger)
// this.trigger({action: 'list', list: list, resource: filterResource, direction: direction, first: first})
return result.edges
} catch(error) {
// debugger
console.error(error)
}
function prettify (obj) {
return JSON.stringify(obj, null, 2)
}
function addEqualsOrGreaterOrLesserNumber(val, op, prop) {
let isMoney = prop.ref === MONEY
let p = prop.name
if (isMoney)
p += '__value'
let s = val.toString()
let ch = s.charAt(0)
switch (ch) {
case '>':
if (s.charAt(1) === '=')
op.GTE += `\n ${p}: ${s.substring(2)},`
else
op.GT += `\n ${p}: ${s.substring(1)},`
break
case '<':
if (s.charAt(1) === '=')
op.LTE += `\n ${p}: ${s.substring(2)},`
else
op.LT += `\n ${p}: ${s.substring(1)},`
break
default:
op.EQ += `\n ${p}: ${s},`
}
}
},
// # _author: "3c67687a96fe59d8f98b1c90cc46f943b938d54cda852b12fb1d43396e28978a"
// # _inbound: false
// # _recipient: ${hash}
async
|
getChat
|
identifier_name
|
|
graphql-client.js
|
s += ']'
// inClause.push(s)
// }
// }
// if (p.charAt(0) === '_')
// debugger
if (!props[p]) {
// guard: without this check, an unknown prop with a falsy value would
// fall through to props[p].type below and throw
if (val) {
if (p.charAt(0) === '_') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
}
else
op.EQ += `\n ${p}: "${val}",`
}
}
continue
}
else if (props[p].type === 'string') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
continue
}
else if (!val || !val.trim().length)
continue
}
if (props[p].type === 'string') {
let len = val.length
if (val.indexOf('*') === -1)
op.EQ += `\n ${p}: "${val}",`
else if (len > 1) {
// '*abc*' matches anywhere; 'abc*' is a prefix; a leading '*' alone
// falls back to CONTAINS (no ends-with operator is built here)
if (val.charAt(0) === '*' && val.charAt(len - 1) === '*')
op.CONTAINS = `\n ${p}: "${val.substring(1, len - 1)}",`
else if (val.charAt(len - 1) === '*')
op.STARTS_WITH = `\n ${p}: "${val.substring(0, len - 1)}",`
else
op.CONTAINS = `\n ${p}: "${val.substring(1)}",`
}
}
else if (props[p].type === 'boolean') {
if (val)
op.EQ += `\n ${p}: ${val},`
else
op.NEQ += `\n ${p}: true,`
}
else if (props[p].type === 'number')
self.addEqualsOrGreaterOrLesserNumber(val, op, props[p])
else if (props[p].type === 'object') {
// if (Array.isArray(val)) {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `{id: "${utils.getId(r)}", title: "${utils.getDisplayName(r)}"}`
// })
// s += ']'
// inClause.push(s)
// }
if (Array.isArray(val)) {
if (!val.length)
continue
let s = `${p}__id: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${utils.getId(r)}"`
})
s += ']'
inClause.push(s)
}
else {
if (props[p].ref === MONEY) {
let {value, currency} = val
op.EQ += `\n ${p}__currency: "${currency}",`
if (val.value)
addEqualsOrGreaterOrLesserNumber(value, op, props[p])
}
else {
op.EQ += `\n ${p}__id: "${val.id}",`
}
}
}
}
}
op.IN = inClause.length ? inClause.join(',') : ''
let qq = ''
for (let o in op) {
let q = op[o]
if (q.length) {
qq +=
`\n ${o}: {
${op[o]}\n},`
}
}
query += '('
let hasFilter = qq.length
if (!noCursorChange) {
if (first || cursor.modelName !== modelName) {
cursor = {endCursor: []}
}
if (limit) {
if (cursor) {
if (cursor.filter) {
if (!filterResource || !deepEqual(filterResource, cursor.filter))
cursor = {endCursor: []}
}
}
cursor.endCursor = cursor.endCursor || []
cursor.modelName = modelName
cursor.filter = filterResource || null
let endCursor
let len = cursor.endCursor.length
if (len) {
if (direction === 'down')
endCursor = cursor.endCursor[len - 1]
else {
if (len > 2) {
// remove the last two cursors with one splice; two single-element
// splices would shift indices after the first removal
cursor.endCursor.splice(len - 2, 2)
len -= 2
}
}
else
cursor.endCursor = []
endCursor = (len - 1) ? cursor.endCursor[len - 2] : null
}
}
else
endCursor = null
if (endCursor)
query += `after: "${endCursor}"\n`
query += `first: ${limit}\n`
}
}
if (hasFilter)
query += `filter: { ${qq} },\n`
if (sortProperty) {
let sortBy
let ref = props[sortProperty].ref
if (ref) {
if (ref === MONEY)
sortBy = sortProperty + '__value'
else
sortBy = sortProperty + '__title'
}
else
sortBy = sortProperty
query += `\norderBy: {
property: ${sortBy},
desc: ${!asc}
}`
}
else
query += `\norderBy: {
property: _time,
desc: true
}`
// if (limit)
// query += `, limit: ${limit}`
query += ')'
query += `\n{\n`
query += `pageInfo {\n endCursor\n}\n`
query += `edges {\n node {\n`
let arr = this.getAllPropertiesForServerSearch(model)
query += `${arr.join(' \n')}`
query += `\n}` // close 'node'
query += `\n}` // close 'edges'
query += `\n}` // close properties block
query += `\n}` // close query
try {
let data = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
})
let result = data.data[table]
if (!noCursorChange) {
let endCursor = result.pageInfo.endCursor
if (endCursor) {
// if (!params.direction || params.direction === 'down') {
let hasThisCursor = cursor.endCursor.some((c) => c === endCursor)
if (!hasThisCursor)
cursor.endCursor.push(endCursor)
// }
}
}
if (!result.edges.length) {
// this.trigger({action: 'list', resource: filterResource, isSearch: true, direction: direction, first: first})
return
}
// // if (result.edges.length < limit)
// // cursor.endCursor = null
// let to = this.getRepresentative(utils.getId(me.organization))
// let toId = utils.getId(to)
// let list = result.edges.map((r) => this.convertToResource(r.node))
// if (!noTrigger)
// this.trigger({action: 'list', list: list, resource: filterResource, direction: direction, first: first})
return result.edges
} catch(error) {
// debugger
console.error(error)
}
function prettify (obj) {
return JSON.stringify(obj, null, 2)
}
function addEqualsOrGreaterOrLesserNumber(val, op, prop) {
let isMoney = prop.ref === MONEY
let p = prop.name
if (isMoney)
p += '__value'
let s = val.toString()
let ch = s.charAt(0)
switch (ch) {
case '>':
if (s.charAt(1) === '=')
op.GTE += `\n ${p}: ${s.substring(2)},`
else
op.GT += `\n ${p}: ${s.substring(1)},`
break
case '<':
if (s.charAt(1) === '=')
op.LTE += `\n ${p}: ${s.substring(2)},`
else
op.LT += `\n ${p}: ${s.substring(1)},`
break
default:
op.EQ += `\n ${p}: ${s},`
}
}
},
// # _author: "3c67687a96fe59d8f98b1c90cc46f943b938d54cda852b12fb1d43396e28978a"
// # _inbound: false
// # _recipient: ${hash}
async getChat(params)
|
{
let { author, recipient, client, context } = params
let table = `rl_${MESSAGE.replace(/\./g, '_')}`
let query =
`query {
rl_tradle_Message(
first:20,
filter:{
EQ: {
_inbound: true
context: "${context}"
_author: "${author}"
}
},
orderBy:{
property: time
desc:true
}
) {
|
identifier_body
|
|
greader.go
|
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string) {
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
}
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
err = bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if err == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders {
folderMap[folder.ID] = folder.Name
}
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so leave it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.Form.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client; it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
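// Illustrative request for handleStreamItemIds above, assuming the
// conventional GReader stream-id constants (the real values are defined
// elsewhere in this package):
//
//	GET /greader/reader/api/0/stream/items/ids?n=100
//	    &s=user/-/state/com.google/reading-list
//	    &xt=user/-/state/com.google/read
//
// The response carries decimal item ids plus the feed/folder stream ids of
// each article.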
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
|
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
|
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
|
random_line_split
|
greader.go
|
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string) {
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
}
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
err = bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if err == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders
|
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so leave it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.Form.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client; it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
|
{
folderMap[folder.ID] = folder.Name
}
|
conditional_block
|
greader.go
|
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string)
|
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
err = bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if err == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders {
folderMap[folder.ID] = folder.Name
}
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so leave it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.Form.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client; it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
|
{
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
}
|
identifier_body
|
greader.go
|
}
err = bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if err == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders {
folderMap[folder.ID] = folder.Name
}
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so leave it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.Form.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client; it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
articleIds = append(articleIds, id)
}
var status string
// Only support updating one tag
switch r.Form.Get("a") {
case readStreamId:
status = "read"
case unreadStreamId:
status = "unread"
case starredStreamId, broadcastStreamId:
// TODO: Support starring items
a.returnError(w, http.StatusNotImplemented)
return
}
for _, articleId := range articleIds {
err = a.d.MarkArticleForUser(user, articleId, status)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
}
_, _ = w.Write([]byte("OK"))
a.returnSuccess(w, nil)
}
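// Illustrative edit-tag form body for handleEditTag above (tag values assumed
// from the GReader convention; the real constants live elsewhere in this
// package):
//
//	T=<post token>&a=user/-/state/com.google/read&i=<hex item id>&i=<hex item id>
//
// Each 'i' value is parsed as hexadecimal and the matching article is marked
// with the requested status.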
func (a GReader) withAuth(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, models.User)) {
// Header should be in format:
// Authorization: GoogleLogin auth=<token>
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
a.returnError(w, http.StatusUnauthorized)
return
}
authFields := strings.Fields(authHeader)
if len(authFields) != 2 || !strings.EqualFold(authFields[0], "GoogleLogin") {
a.returnError(w, http.StatusBadRequest)
return
}
authStr, tokenStr, found := strings.Cut(authFields[1], "=")
if !found {
a.returnError(w, http.StatusBadRequest)
return
}
if !strings.EqualFold(authStr, "auth") {
a.returnError(w, http.StatusBadRequest)
return
}
username, token, err := extractAuthToken(tokenStr)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
user, err := a.d.GetUserByUsername(username)
if err != nil {
a.returnError(w, http.StatusUnauthorized)
return
}
if validateAuthToken(token, username, user.HashPass) {
handler(w, r, user)
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func greaderArticleId(articleId int64) string {
return fmt.Sprintf("tag:google.com,2005:reader/item/%x", articleId)
}
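// Round-trip sketch (illustrative only): article id 5100 renders here as
// "tag:google.com,2005:reader/item/13ec", while clients post back just the hex
// part, which handleStreamItemsContents and handleEditTag decode with
// strconv.ParseInt(s, 16, 64).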
func
|
greaderFeedId
|
identifier_name
|
|
locale.js
|
линарен сайт с множество избрани рецепти, любопитни факти и полезни приложения.",
"IT PRESENTS IN THE MOST":"Представя максимално достъпно най-добрите рецепти като акцентът е върху здравословното хранене и начин на живот. Аудиорията е предимно от жени, от добрата кухня и здраве.",
"KIDAMOM IS A FUN":"<strong>Kidamom.com</strong> забавна интерактивна среда за вашето дете с много анимационни и образователни филми.",
"XENIUM MEDIA COMPANY TOGETHER WITH":"Xenium Media Company с удоволствие ви представя онлайн проекта Kidamom.com, съвместно с Кидамом ООД и Eleven. Kidamom.com предоставя интерактивна среда за деца до 12 години. В специализирания сайт подрастващите обогатяват своите уменията и знанията по забавен и достъпен начин.",
"KIDAMOM IS A PLACE":"Kidamom.com е място, в което децата се въвеждат в съвременния дигитален свят, развивайки своите личностни качества и индивидуални навици. Платформата предлага подбрано качествено съдържание и възможност за родителски контрол.",
"SHVARGALO IS AN ART COMPANY":"<strong>Shvargalo.com</strong> е арт компания, която се занимава с <strong> продуциране и произвеждане на филми, книги, спектакли, музикално-танцови и сценични произведения.</strong>",
"KAMEN DONEV AND ELENA BOZOVA":"В проекта взимат участие <strong> Камен Донев (режисьор и актьор) Елена Бозова (актьор)</strong> в компанията на именити български актьори.",
"THE AUDIENCE CONSISTS OF":"Аудиторията се състои <strong> мъже и жени на възраст между 20 и 40 години,</strong> жители на големи и средни градове, икономически активни потребители, интересуващи се от <strong> култура, изкуство и развлечения</strong>.",
"NONSTANDARD FORMATS":"НЕСТАРДАНТНИ ФОРМАТИ",
"CLIENT":"Клиент: ",
"NESCAFE: THE GAME ATTRACTED":"Играта се проведе при голям интерес и за периода на конкурса получихме над 70 авторски рецепти. Страницата на конкурса беше широко промотирана в сайтовете на Xenium Media и за периода на кампанията беше посетена от над 50 000 потребители и събра над 20 000 харесвания на рецептите във Facebook.",
"NESCAFE: THE CONTEST WEBPAGE":" ",
"TEFAL: THE GAME":"Играта се проведе при голям интерес от потребителите. Всеки един от тях представи своето любимо модно съчетание, като покани приятелите си да гласуват за неговата снимка. Играта се проведе в рамките на петнадесет дни, като беше промотирана чрез брандиране на Bulevard.bg.",
"TEFAL: THE GAME TOOK PLACE":" ",
"ESCADA: USERS CHOSE THE AROMA":"Потребителите избираха аромата, който най-много им допада и избора им се отразяваше и на стените им във Facebook.",
"ESCADA: THIS CREATED A VIRAL EFFECT":"Това създаде вирусен ефект и интерес сред приятелите им. Само за 20 дни кампанията събра близо 1000 участника и над 150 коментара. Постигнахме добра интеракция с марката и накарахме потребителите да се асоцират с различните аромати.",
"BRUNO: A GAME URGING MALE AUDIENCE":"Игра подтикваща мъжката аудитория да разглежда и избира сексапилни девойки и да пише под формата на коментар реплики за свалки.",
"BRUNO: MORE THAN 230 USERS":"За периода на играта са регистрирани над 230 потребители и над 230 постинга, които се състезават за наградата. Събрани бяха над 9000 гласа. Кампанията беше много добре отразена с PR в цялата мрежа от сайтове на Xenium Media.",
"PLESIO: THE VIDEO":"Видео клипът „От любов към технологиите“ е създаден като концепция и реализиран от екипа на Ксениум Медия. ",
"PLESIO: POPULARIZED":"Популяризирането му бе извършено чрез различните социални канали – Facebook, You Tube, Vimeo и др. и сайтовете на Ксениум Медия.",
"AMSTEL: BEER CEREMONY":"„Бирена церемония“ e нестандартен рекламен видео формат (дълъг е 02:51 минути) и носи всички характеристики на късометражен игрален филм.",
"AMSTEL: CONCEPT":"Концепцията и реализацията на видео клипа са на Ксениум Медия.",
"MAIN GRAPH":"<img class='graph' src='img/graph1.jpg' />",
"RANGE IMG":'<img src="img/range.png" height="522"/>',
"SVEJO DATA IMG":'<img src="img/media-data.png" />',
"BULEVARD DATA IMG":'<img src="img/bulevard-data.png" />',
"KULINARIA DATA IMG":'<img src="img/kulinaria-data.png" />',
"TERMO DATA IMG":'<img src="img/termo-data.png" />',
"HARDWARE DATA IMG":'<img src="img/hardware-data.png" />',
"MOBILITY DATA IMG":'<img src="img/mobility-data.png" />',
"MOBILE DATA IMG":'<img src="img/mobile-data.png" />',
"MEGANEWS DATA IMG":'<img src="img/meganews-data.png" />',
"JENITE DATA IMG":'<img src="img/jenite-data.png" />',
"MEGALIFE DATA IMG":'<img src="img/megalife-data.png" />',
"FOODS DATA IMG":'<img src="img/foods-data.png" />',
"THANK YOU":"Благодаря Ви!",
"CS: THE VIDEO":"Гурме храна, гурме вино, гурме ресторант... Многоуважаваният проф. Донев дегустира различни кулинарни специалитети, вина и съчетанията между тях, отговаряйки на въпроса 'Има ли гурме култура в България?'"
}
var dictEN = {
"PORTFOLIO": "Portfolio",
"RANGE":"Reach",
"AUDIENCE":"Audience",
"BRAND": "Brand",
"CONTACT":"Contact",
"LEADING MEDIA COMPANY":"Xenium is a leading Bulgarian media company",
"MEDIA KIT":"MEDIA KIT",
"AUDIENCE REACH":"AUDIENCE REACH",
"AUDIENCE PROFILE":"Audience Profile",
"FEMALE":"female",
"MALE":"male",
"ON A DAILY BASIS":"use internet <br />on a daily<br /> basis",
"MIDDLE AND HIGH INCOME":"middle and <br />high <br />income",
"MLNS":"M",
"SECONDARY AND HIGHER EDUCATION":"secondary and higher education",
|
random_line_split
|
||
__init__.py
|
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait forever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
|
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# The error arrives as a traceback string; keep only its last
# non-empty line (the actual exception), since the earlier lines
# point at non-existent lines in the compiled source
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
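# Illustrative only: _convert_args flattens a call's positional args into the
# source text sent over the channel, e.g. (with a hypothetical remote
# function 'zap'):
#   ('/dev/sda',)      -> "'/dev/sda'"        # channel sends "zap('/dev/sda')"
#   ('/dev/sda', True) -> "'/dev/sda', True"  # channel sends "zap('/dev/sda', True)"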
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
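# Illustrative connection strings produced by _make_connection_string above:
#   local host, no sudo:          'popen//python=python3'
#   remote host with sudo:        'ssh=host01//python=sudo python3'
#   remote host with ssh options: 'ssh=-F custom_conf host01//python=python3'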
def needs_ssh(hostname, _socket=None):
"""
Determine whether ``hostname`` refers to a remote machine (and thus
needs an SSH connection) by comparing it against the local FQDN and
hostname.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.get
|
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
|
conditional_block
|
__init__.py
|
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait forever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# The error arrives as a traceback string; keep only its last
# non-empty line (the actual exception), since the earlier lines
# point at non-existent lines in the compiled source
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
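    # Worked examples of the conversion above (derived from str() of a tuple):
    #   ('a',)     -> "'a'"
    #   ('a', 'b') -> "'a', 'b'"
    #   ()         -> ""
    # so that channel.send("%s(%s)" % (name, arguments)) builds a valid call.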
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
    This remote execution class allows shipping Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
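    # The wire format round-trip handled above, one line per case (values are
    # illustrative):
    #   remote stdout on success: {"return": "node1", "exception": null}
    #   remote stdout on failure: {"return": null, "exception": "Traceback..."}
    # json.loads(out[0]) turns the first stdout line back into a dict.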
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
    _socket = _socket or socket
    fqdn = _socket.getfqdn()
    if hostname == fqdn:
        return False
    local_hostname = _socket.gethostname()
    local_short_hostname = local_hostname.split('.')[0]
    if local_hostname == hostname or local_short_hostname == hostname:
        return False
    return True
def get_python_executable(conn):
    """
    Try to determine the remote Python version so that it can be used
    when executing. Avoids the problem of different Python versions, or distros
    that do not use ``python`` but do ``python3``
    """
    # executables in order of preference:
    executables = ['python3', 'python', 'python2.7']
    for executable in executables:
        conn.logger.debug('trying to determine remote python executable with %s' % executable)
        out, err, code = check(conn, ['which', executable])
        if code:
            conn.logger.warning('skipping %s, was not found in path' % executable)
        else:
            try:
                return out[0].strip()
            except IndexError:
                conn.logger.warning('could not parse stdout: %s' % out)
    # if all fails, we just return whatever the main connection had
    conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
    return conn.interpreter
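# Sketch of the fallback chain above (the path is illustrative):
#   check(conn, ['which', 'python3']) -> (['/usr/bin/python3'], [], 0)
#   -> returns '/usr/bin/python3'
# A non-zero exit code or empty stdout moves on to 'python', then 'python2.7',
# and only then falls back to conn.interpreter.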
user_controller.py
    nWorkout = ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
            return g.user.to_json()
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
                return user.to_json()
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
                return form.to_json()
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
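# Example exchange for the endpoint above (illustrative values; only one of
# 'username' or 'email' is consulted per request):
#   POST /users/getUsers  with form field  username=jo
#   -> {"Result": "OK", "Records": [{"username": "jo", ...}]}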
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
                login_user(user)
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
        if user is None:
            return render_template("users/user.html", user=user, posts=posts)  # user not found
        if g.user.is_authenticated() and g.user.id == user.id:
            if not user.firstname or not user.lastname or not user.email:
                flash("We can't display your profile until you have filled out the form")
                return render_template("users/update_info.html")
            posts = g.user.followed_posts().paginate(page, 10, False)
    else:
        if user is None:
            return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
    user = g.user
    posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
    return render_template("users/followers.html", user=user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
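# Note: building JSON by string concatenation (above) breaks as soon as a post
# body contains a quote or backslash. A safer sketch with the same response
# shape, using json.dumps:
#
#   feed_items = [{"username": p.userName,
#                  "body": p.body,
#                  "time": str(p.timestamp),
#                  "avatar": p.get_feed_avatar(p.userName, 40)}
#                 for p in posts.items]
#   return jsonify(result=json.dumps({"feed": feed_items}))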
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST
|
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
|
identifier_body
|
user_controller.py
|
= ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
return g.user.to_json();
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
return user.to_json();
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
return form.to_json();
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
|
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST
|
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
|
conditional_block
|
user_controller.py
|
", methods=["POST"])
def get_users():
"""
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST"])
def get_followers():
user = g.user
fw = user.user_is_following()
users = []
for foll in fw:
users.append(foll.to_hash())
res ={"Result":"OK", "Records": users}
return jsonify(res)
@app.route("/followers/get_following_count", methods=["POST"])
def get_following_count():
count = g.user.followed.count()
return jsonify(following = count)
@app.route("/followers/get_top_dog")
def get_top_dog():
user = g.user
output = ""
fw = user.top_user()
return jsonify(topUser = fw)
@app.route("/submit_workout_history", methods = ['POST'])
def submit_workout_history():
wName = request.form['wName']
date = request.form['date']
desc = request.form['desc']
user = User.find_by_username(request.form['user'])
workout = Workout.find_single_workout_by_name_(wName)
if(workout == None):
return jsonify(result="errorName", content=" The workout name you have entered may not exist. Please double check the spelling of the workout name. Thank you")
if(date == ""):
return jsonify(result="errorDate", content=" Please enter the date and time of the completed workout")
wh = WorkoutHistory(user.id, workout.id, datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p"), desc, True)
WorkoutHistory.save_to_db(wh)
feed = "comleted "+wName+" on "+date+" - "+desc;
user.add_newsfeed(feed);
return jsonify(result="success");
@app.route("/user/<username>/workouthistory", methods=['GET'])
def display_user_workout_history (username):
user = User.find_by_username(username)
return render_template("users/workout_history.html", user=user)
############################
##WORKOUT SEARCH
############################
#workout search
@app.route('/workout_search')
def workout_search():
return render_template('workout_search.html')
@app.route("/search", methods=['GET'])
def
|
search_for_key
|
identifier_name
|
|
user_controller.py
|
= ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
return g.user.to_json();
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
return user.to_json();
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
return form.to_json();
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
|
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST
|
random_line_split
|
|
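# Date parsing sketch for submit_workout_history (format string from above;
# the sample date is illustrative):
#   datetime.strptime("09/14/2014 07:30:00 PM", "%m/%d/%Y %I:%M:%S %p")
#   -> datetime(2014, 9, 14, 19, 30)
# Any other format raises ValueError, which this endpoint does not yet catch.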
block.rs
::execute;
const UPPER_HALF_BLOCK: &str = "\u{2580}";
const LOWER_HALF_BLOCK: &str = "\u{2584}";
const CHECKERBOARD_BACKGROUND_LIGHT: (u8, u8, u8) = (153, 153, 153);
const CHECKERBOARD_BACKGROUND_DARK: (u8, u8, u8) = (102, 102, 102);
pub struct BlockPrinter {}
impl Printer for BlockPrinter {
fn print(&self, img: &DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two types of buffers in this function:
// - stdout: Buffer, which is from termcolor crate. Used to buffer all writing
// required to print a single image or frame. Flushed on every line
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
        // They are both flushed on every terminal line (i.e. 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
//Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force scrolldown
// observed when config.y > 0 and cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on the position in order
            // to mimic the checkerboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// upgrade an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print last row if length is odd, and reset cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2 ?
Ok((width, curr_row_px / 2))
}
}
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
// Translates the row_buffer, containing colors, into the out_buffer which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If a flush is needed it means that only one row with UPPER_HALF_BLOCK must be printed
// because it is the last row, hence it contains only 1 pixel
if is_last_row {
new_color = ColorSpec::new();
if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
//imitate the transparent chess board pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else
    {
        Color::Ansi256(ansi256_from_rgb(rgb))
    }
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
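// A worked example of the checkerboard helper above: equal (row % 2, col % 2)
// parity selects the dark shade, so with truecolor enabled one would expect:
//
//   get_transparency_color(0, 0, true) == Color::Rgb(102, 102, 102)
//   get_transparency_color(0, 1, true) == Color::Rgb(153, 153, 153)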
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
block.rs
::execute;
const UPPER_HALF_BLOCK: &str = "\u{2580}";
const LOWER_HALF_BLOCK: &str = "\u{2584}";
const CHECKERBOARD_BACKGROUND_LIGHT: (u8, u8, u8) = (153, 153, 153);
const CHECKERBOARD_BACKGROUND_DARK: (u8, u8, u8) = (102, 102, 102);
pub struct BlockPrinter {}
impl Printer for BlockPrinter {
fn print(&self, img: &DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two types of buffers in this function:
// - stdout: Buffer, which is from termcolor crate. Used to buffer all writing
// required to print a single image or frame. Flushed on every line
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
        // They are both flushed on every terminal line (i.e. 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
//Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force a scroll-down, which is
// needed when config.y > 0 and the cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on its position
// to mimic the checkerboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// update an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full, start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print the last row if the height is odd, and reset the cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2 ?
Ok((width, curr_row_px / 2))
}
}
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
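// Editor's sketch (not from the original file; the function name and message are
// illustrative): the same BrokenPipe-swallowing pattern as print_buffer above,
// reduced to std-only code.
fn demo_ignore_broken_pipe() -> std::io::Result<()> {
    use std::io::Write;
    match writeln!(std::io::stdout(), "hello") {
        Ok(()) => Ok(()),
        // Piping into `head` can close stdout early; treat that as success
        // instead of panicking.
        Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => Ok(()),
        Err(e) => Err(e),
    }
}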
// Translates the row_buffer, containing colors, into the out_buffer, which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If this is the last row, only a single row of UPPER_HALF_BLOCKs must be printed,
// because each cell in it holds only one pixel
if is_last_row {
|
if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
// imitate the transparent checkerboard pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
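// Editor's sketch (hypothetical test, not in the original file): the parity rule
// above produces the checkerboard, since cells where row and column share parity
// get the dark color and the rest get the light one.
#[test]
fn checkerboard_parity_sketch() {
    for (row, col, dark) in [(0u32, 0u32, true), (0, 1, false), (1, 0, false), (1, 1, true)] {
        assert_eq!(row % 2 == col % 2, dark);
    }
}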
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
let img
|
new_color = ColorSpec::new();
|
random_line_split
|
block.rs
|
DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two types of buffers in this function:
// - out_buffer: Buffer, from the termcolor crate. Used to buffer all the writing
// required to print a single image or frame
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
// Both are flushed on every terminal line (i.e. 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
// Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force a scroll-down, which is
// needed when config.y > 0 and the cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on its position
// to mimic the checkerboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// update an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full, start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print the last row if the height is odd, and reset the cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2 ?
Ok((width, curr_row_px / 2))
}
}
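// Editor's sketch (hypothetical helper, not part of this impl): the Top/Bottom
// alternation above, reduced to its core. Every `width` pixels the mode flips,
// and one terminal line is flushed per completed Bottom pass, i.e. per two
// pixel rows.
fn demo_mode_alternation(width: usize, pixels: usize) -> usize {
    assert!(width > 0);
    let mut flushes = 0;
    let mut top = true;
    for i in 1..=pixels {
        if i % width == 0 {
            if top {
                top = false; // first pixel row of the pair is done
            } else {
                top = true; // second row done: flush one terminal line
                flushes += 1;
            }
        }
    }
    flushes
}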
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
// Translates the row_buffer, containing colors, into the out_buffer, which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If this is the last row, only a single row of UPPER_HALF_BLOCKs must be printed,
// because each cell in it holds only one pixel
if is_last_row {
new_color = ColorSpec::new();
if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
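// Editor's sketch (assumes the `termcolor` crate used throughout this file; the
// function name and colors are illustrative): a single terminal cell encodes two
// pixels by using the background for the top pixel, the foreground for the
// bottom pixel, and printing a lower half block.
fn demo_two_pixels_in_one_cell() -> std::io::Result<()> {
    use std::io::Write;
    use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
    let mut out = StandardStream::stdout(ColorChoice::Always);
    let mut spec = ColorSpec::new();
    spec.set_bg(Some(Color::Rgb(255, 0, 0))) // top pixel: red
        .set_fg(Some(Color::Rgb(0, 0, 255))); // bottom pixel: blue
    out.set_color(&spec)?;
    write!(out, "{}", LOWER_HALF_BLOCK)?;
    out.reset()?;
    writeln!(out)
}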
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
// imitate the transparent checkerboard pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(20, 6));
let config = Config {
width: Some(40),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 20);
assert_eq!(h, 3);
}
// TODO: failing on Windows. Why?
#[test]
fn
|
test_block_printer_large
|
identifier_name
|
|
immutable.rs
|
<U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (which must already be byte-aligned);
/// this `Buffer` will free the memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (which must already be byte-aligned);
/// this `Buffer` **does not** free the memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
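    // Editor's sketch (hypothetical associated helper, placed here only to sit
    // next to the method it illustrates): `slice` shares the allocation and
    // merely advances the logical offset.
    fn demo_slice_shares_memory() {
        let buf = Buffer::from(&[1u8, 2, 3, 4]);
        let tail = buf.slice(2);
        assert_eq!(tail.as_slice(), &[3, 4]);
        assert_eq!(unsafe { buf.as_ptr().add(2) }, tail.as_ptr());
    }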
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
}
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
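    // Editor's sketch (hypothetical associated helper): a byte-aligned
    // `bit_slice` is a cheap shallow clone, while a misaligned offset copies
    // the shifted bits into a fresh buffer. Arrow bitmaps are LSB-first.
    fn demo_bit_slice() {
        let buf = Buffer::from(&[0b0000_0010u8, 0b0000_0001u8]);
        // Aligned: same memory, offset by one byte.
        assert_eq!(buf.bit_slice(8, 8).as_slice(), &[0b0000_0001]);
        // Misaligned: bits 1..9 are shifted down into a new allocation.
        assert_eq!(buf.bit_slice(1, 8).as_slice(), &[0b1000_0001]);
    }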
/// Returns a `BitChunks` instance which can be used to iterate over this buffer's bits
/// in larger chunks, starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
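// Editor's sketch (hypothetical free function using the methods above): both
// `offset` and `len` of count_set_bits_offset are measured in bits.
fn demo_count_set_bits() {
    let buf = Buffer::from(&[0b1111_0000u8]);
    assert_eq!(buf.count_set_bits(), 4);
    // Only the low nibble (bits 0..4, LSB-first): nothing set there.
    assert_eq!(buf.count_set_bits_offset(0, 4), 0);
}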
/// Creating a `Buffer` instance by copying the memory from an `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
/// if any of the items of the iterator is an error.
/// Prefer this to `collect` whenever possible
|
from_slice_ref
|
identifier_name
|
|
immutable.rs
|
of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T]
|
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffer's bits
/// in larger chunks, starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from an `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
/// if any of the items of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// consume the first element to size the allocation; this will likely reserve
// sufficient space for the whole buffer
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
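// Editor's sketch (hypothetical free function exercising the FromIterator impl
// above): each u32 occupies four bytes in the resulting Buffer.
fn demo_collect_buffer() {
    let buf: Buffer = (0u32..4).collect();
    assert_eq!(buf.len(), 16); // 4 elements * 4 bytes each
}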
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne
|
{
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
}
|
identifier_body
|
immutable.rs
|
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from an `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
/// if any of the items of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
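// Editor's sketch (hypothetical free function; the item types are illustrative):
// the fallible variant propagates the first error instead of building a buffer.
fn demo_try_from_trusted_len_iter() {
    let items: Vec<Result<u32, ()>> = vec![Ok(1), Ok(2)];
    let buf = unsafe { Buffer::try_from_trusted_len_iter(items.into_iter()) };
    assert_eq!(buf.unwrap().len(), 8); // two u32 values, four bytes each
}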
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// consume the first element to size the allocation; this will likely reserve
// sufficient space for the whole buffer
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok());
|
assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
|
random_line_split
|
|
ZH.js
|
summary_footer_tip: '飞呀辅助订购为你提供快捷预订通道,无需跳转到第三方平台。但出票和退改签服务仍由第三方机票服务平台提供。',
summary_no_result: '暂无平台报价',
summary_research: '重新搜索',
summary_timeout_tips1: '航班价格可能发生变化,将为你刷新',
summary_timeout_tips2: '以获取最新价格',
summary_quick_login: '动态码登录',
summary_quick_login_tip: '下单前先花10秒登录账号哦',
summary_other_login: '其他登录方式',
summary_setting_password: '设置密码',
summary_setting_password_tip: '首次登录设置个密码吧',
summary_register_tip: '点击完成表示阅读并同意',
summary_third_booking: '跳转第三方订购',
rule_tip_one: '行李/ 退改签规则>',
rule_tip_two: '航司直营 公正退改',
ota_redirect_tip: '以下供应商需跳转至官网预订',
member_quick_login: '验证码登录',
member_login: '密码登录',
phone: '手机号码',
phone_email: '手机号码/电子邮箱',
password: '密码',
forget_password: '忘记密码',
validation_code: '验证码',
validation_code_phone: '短信验证码',
get_validation_code: '获取验证码',
resend_validation_code: '重新发送',
other_ways: '第三方登录',
login: '登录',
go_login: '去登录',
register: '注册',
sign_up_tips: '没有账号?注册一个',
sign_in_tips: '已有账号?直接登录',
sign_up_have_account: '该账号已注册,去登录 >',
sign_in_account_lock: '帐号异常,请联系客服',
sign_in_account_wrong: '该帐号无效,请重新输入',
sign_in_not_phone: '该号码无效,请重新输入',
invalid_phone_err: '该号码已被绑定',
sign_in_not_account: '该账号还未注册,去注册 >',
sign_in_not_password: '您还未设置密码',
sign_in_code_wrong: '验证码错误',
sign_in_password_wrong: '密码错误',
account_manage_auth_reach_limit: '当天发送次数已达上限,请明天再试',
member_timeout: '请求超时,请稍候再试!',
server_error: '服务错误',
repeat_error: '请求过于频繁',
set_pwd_placeholder: '请设置8-16位密码',
pwd_warning: '密码为8-16个字符(不包含空格)',
pwd_same_warning: '新密码不能与旧密码相同哦!',
pwd_error: '密码不正确',
set_new_pwd_error: '您已设置密码',
no_pwd_tip: '该账号还没设置密码',
pwd_invalid_warning: '密码不符合规范',
right_pre_tips: '点击注册即同意',
right_flya: '《注册协议》',
term_service: '服务条款',
reset_pwd_not_account: '账号错误或未注册',
reset_pwd_success: '新密码设置成功',
step_next: '下一步',
set_new_pwd: '设置新密码',
set_new_pwd_placeholder: '请设置8-16位新密码',
old_pwd_check: '密码验证',
old_pwd_placeholder: '请输入旧密码',
submit: '提交',
member_relogin_tip: '您的账号信息已过期,请重新登录',
member_signin_tip: '登录后可查看订单和优惠券信息',
more_title: '更多',
version: '当前版本',
about_igola: '关于我们',
feedback: '意见反馈',
contact_customer_service: '联系客服',
customer_service_choose_tip: '请选择客服联系方式',
edit_nickName: '编辑昵称',
nickName_err: '长度为1-16个字符,且不支持特殊符号或空格!',
coupons: '优惠券',
flya_zh: '飞呀',
flya_en: 'Flya',
flya_design: 'Designed by iGola',
copyright: 'Copyright © 2014-2017',
igola_company_name: '广州市骑鹅游信息技术咨询服务有限公司',
tab_all_order: '全部订单',
tab_pay_order: '待支付',
tab_unused_order: '待出行',
tab_refund_order: '退改签',
order_not_paid: '待支付',
order_paid: '支付成功',
order_expired: '订单过期',
order_booking: '出票中',
order_pending: '出票中',
order_success: '出票成功',
order_failed: '出票失败',
order_cancelled: '已取消',
order_refunding: '退款中',
order_refunded: '已退款',
btn_search_flights: '搜索机票',
btn_before_order: '一年前订单',
order_details_title: '订单详情',
order_details_to_pay1: '订单已生成,请在',
order_details_to_pay2: '内支付',
order_details_not_paid: '未支付',
order_details_booking: '支付成功,等待出票',
order_details_success: '出票成功',
order_details_failed: '出票失败',
order_details_expired: '订单过期',
order_details_pay_desc1: '由供应商/航司直接出票;',
order_details_pay_desc2: '支付后最快2小时内出票,最终预订成功需以出票成功为准;',
order_details_pay_desc3: '若没成功出票,预付款项将全额退还。',
order_details_booking_desc1: '您的订单最快在2小时内出票,出票成功后,您将会收到短信和邮件通知,请留意。',
order_details_booking_desc2: '若您预订的供应商出票失败,igola会自动匹配其他优质供应商为您出票,可能存在一定差价。',
order_details_booking_desc3: '若仍无法出票成功,客服人员会联系您,及时处理退款事宜。',
order_details_failed_desc: '客服将在24小时内与您联系并处理退款,如没有收到客服电话,请主动联系客服。',
order_details_orderId: '订单号:',
order_details_igola_Id: 'iGola订单号:',
order_details_orderTime: '下单时间:',
order_details_seat: '订座记录:',
order_details_price: '票价',
order_details_tax: '税费',
order_details_baggage: '行李费',
order_details_insurances: '保险费',
order_details_discount: '优惠',
order_details_total_price: '总价',
order_details_total: '总额:',
order_deatils_pay_btn: '去支付',
order_deatils_again_btn: '再订一张',
order_deatils_change_btn: '辅助改签',
order_deatils_refund_btn: '辅助退票',
order_detail_change_ticket: '请联系客服辅助改签',
order_detail_refund_ticket: '请联系客服辅助退票',
order_detail_online_service: '在线客服',
order_detail_phone_service: '热线电话',
order_detail_change_ticket_record: '改签记录',
order_passengers_title: '乘机人',
order_passengerInfo_title: '乘机人信息',
order_passengers_ticketNo: '票号:',
order_contact_title: '联系人',
order_suppliers_title: '供应商',
passengerDetails_cardType: '证件类型',
passengerDetails_cardNum: '证件号码',
passengerDetails_cardExpired: '证件有效期',
passengerDetails_issueAt: '签发地',
passenger_lastName: '姓',
passenger_firstName: '名',
passenger_birthday: '出生日期',
passenger_nationality: '国籍',
passenger_gender: '性别',
passenger_firstName_placeholder_ZH: '请填写中文名字',
passenger_lastName_placeholder_ZH: '请填写中文姓氏',
passenger_firstName_placeholder_EN: '如张岩应填 YAN',
passenger_lastName_placeholder_EN: '如张岩应填 ZHANG',
flights_detail_tip: '*航班起降均为当地时间',
profile_title: '个人资料',
avatar_title: '头像',
nickname_title: '昵称',
nickname_tip: '起个名字吧',
phone_title: '手机号码',
phone_bind: '绑定手机',
phone_change: '更换手机',
email_title: '邮箱',
pwd_title: '密码',
pwd_change: '修改密码',
pwd_change_success: '密码修改成功',
pwd_set: '设置密码',
pwd_set_success: '密码设置成功',
other_account_title: '第三方账号',
other_account_tip: '绑定后,下次可用第三方账号快速登录',
wechat: '微信',
qq: 'QQ',
bind_tip: '去绑定',
logout_title: '退出登录',
logout_confirm: '确定退出登录?',
take_picture: '照相',
pick_picture: '从相册选取',
login_success: '登录成功',
login_cancel: '登录已取消',
login_failed: '登录失败',
not_available_wechat: '无法正常使用微信(微信未安装)',
not_available_qq: '无法正常使用QQ(QQ未安装)',
update_success: '修改成功',
update_failed: '修改失败
|
summary_price: '各大平台实时价格',
|
random_line_split
|
|
surface.rs
|
Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use a 24-bit
// XVisual, which cannot represent an alpha channel in Pixmap form,
// so we look for a configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
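// Editor's sketch (pure-logic analogue, no X11 involved; the name is
// illustrative): the scan above reduces to picking the first candidate whose
// reported depth is 32, since some vendors list 24-bit RGBA configs first.
fn pick_32_bit_config(depths: &[i32]) -> Option<usize> {
    depths.iter().position(|&depth| depth == 32)
}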
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24-bit depth. But creating a 32-bit pixmap does work,
// so hard-code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn
|
(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception
|
bind_to_texture
|
identifier_name
|
surface.rs
|
Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use a 24-bit
// XVisual, which cannot represent an alpha channel in Pixmap form,
// so we look for a configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
|
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24-bit depth. But creating a 32-bit pixmap does work,
// so hard-code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety
|
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
|
random_line_split
|
surface.rs
|
(mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use a 24-bit
// XVisual, which cannot represent an alpha channel in Pixmap form,
// so we look for a configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self)
|
{
self.will_leak = true;
}
|
identifier_body
|
|
surface.rs
|
Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak
|
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for
|
{
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
|
conditional_block
|
mountfs.go
|
mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
objutil, ok = mpathUtils[objmpath]
rr, _ = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
util = objutil
r = rr
)
fqn = objfqn
if !ok {
cmn.DassertMsg(false, objmpath, pkgName)
return
}
for copyfqn, copympi := range copies {
var (
u int64
c, rrcnt int32
)
if u, ok = mpathUtils[copympi.Path]; !ok {
continue
}
if r, ok = mpathRRs[copympi.Path]; !ok {
if u < util {
fqn, util, rr = copyfqn, u, r
}
continue
}
c = r.Load()
if rr != nil {
rrcnt = rr.Load()
}
if u < util && c <= rrcnt { // the obvious choice
fqn, util, rr = copyfqn, u, r
continue
}
if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
fqn, util, rr = copyfqn, u, r
}
}
// NOTE: the counter could've been already inc-ed
// could keep track of the second best and use CAS to reserve-inc and compare
// can wait though
if rr != nil {
rr.Inc()
}
return
}
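For reference, the tie-breaker above can be read as: prefer a copy outright when it is both less utilized and less recently picked; otherwise weigh each pending round-robin pick as uQuantum units of utilization. A minimal self-contained Go sketch of just that comparison; the uQuantum value (10) is assumed purely for illustration, the real constant lives elsewhere in the package:

package main

import "fmt"

const uQuantum = 10 // assumed illustrative value

// betterCopy reports whether a copy with utilization u and round-robin
// count c should be preferred over the current best (util, rrcnt).
func betterCopy(u, util int64, c, rrcnt int32) bool {
	if u < util && c <= rrcnt { // the obvious choice
		return true
	}
	// heuristic: weigh each pending round-robin pick as uQuantum units of utilization
	return u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum
}

func main() {
	fmt.Println(betterCopy(40, 55, 3, 1)) // false: 40+30 >= 55+10, this copy was picked too often
	fmt.Println(betterCopy(40, 55, 1, 1)) // true: less utilized at an equal round-robin count
}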
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths {
if err := mfs.Add(path); err != nil {
return err
}
}
return nil
}
// Add adds new mountpath to the target's mountpaths.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
if err := Access(cleanMpath); err != nil {
return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
}
statfs := syscall.Statfs_t{}
if err := syscall.Statfs(cleanMpath, &statfs); err != nil {
return fmt.Errorf("cannot statfs fspath %q, err: %w", mpath, err)
}
fs, err := fqn2fsAtStartup(cleanMpath)
if err != nil {
return fmt.Errorf("cannot get filesystem: %v", err)
}
mp := newMountpath(cleanMpath, mpath, statfs.Fsid, fs)
mfs.mu.Lock()
defer mfs.mu.Unlock()
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, exists := availablePaths[mp.Path]; exists {
return fmt.Errorf("tried to add already registered mountpath: %v", mp.Path)
}
if existingPath, exists := mfs.fsIDs[mp.Fsid]; exists && mfs.checkFsID {
return fmt.Errorf("tried to add path %v but same fsid (%v) was already registered by %v", mpath, mp.Fsid, existingPath)
}
mfs.ios.AddMpath(mp.Path, mp.FileSystem)
availablePaths[mp.Path] = mp
mfs.fsIDs[mp.Fsid] = cleanMpath
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
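A compact sketch of the Fsid-based duplicate detection that Add performs when checkFsID is on: two mountpaths that statfs to the same filesystem share an Fsid, and the second one is rejected. Types are simplified and the snippet is Linux-specific (syscall.Statfs):

package main

import (
	"fmt"
	"syscall"
)

func fsID(path string) (syscall.Fsid, error) {
	var st syscall.Statfs_t
	if err := syscall.Statfs(path, &st); err != nil {
		return syscall.Fsid{}, fmt.Errorf("cannot statfs %q: %w", path, err)
	}
	return st.Fsid, nil
}

func main() {
	seen := make(map[syscall.Fsid]string)
	for _, p := range []string{"/tmp", "/tmp"} {
		id, err := fsID(p)
		if err != nil {
			fmt.Println(err)
			continue
		}
		if prev, ok := seen[id]; ok {
			fmt.Printf("%s shares a filesystem with %s\n", p, prev)
			continue
		}
		seen[id] = p
	}
}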
// Remove removes mountpaths from the target's mountpaths. It searches
// for the mountpath in available and disabled (if the mountpath is not found
// in available).
func (mfs *MountedFS) Remove(mpath string) error {
var (
mp *MountpathInfo
exists bool
)
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if mp, exists = availablePaths[cleanMpath]; !exists {
if mp, exists = disabledPaths[cleanMpath]; !exists {
return fmt.Errorf("tried to remove non-existing mountpath: %v", mpath)
}
delete(disabledPaths, cleanMpath)
delete(mfs.fsIDs, mp.Fsid)
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
delete(availablePaths, cleanMpath)
mfs.ios.RemoveMpath(cleanMpath)
delete(mfs.fsIDs, mp.Fsid)
go mp.evictLomCache()
if l := len(availablePaths); l == 0 {
glog.Errorf("removed the last available mountpath %s", mp)
} else {
glog.Infof("removed mountpath %s (%d remain(s) active)", mp, l)
}
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
// Enable enables a previously disabled mountpath. enabled is set to
// true if the mountpath was moved from disabled to available;
// if no such mountpath exists, an error is returned.
func (mfs *MountedFS) Enable(mpath string) (enabled bool, err error) {
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return false, err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, ok := availablePaths[cleanMpath]; ok {
return false, nil
}
if mp, ok := disabledPaths[cleanMpath]; ok {
availablePaths[cleanMpath] = mp
mfs.ios.AddMpath(cleanMpath, mp.FileSystem)
delete(disabledPaths, cleanMpath)
mfs.updatePaths(availablePaths, disabledPaths)
return true, nil
}
return false, cmn.NewNoMountpathError(mpath)
}
// Disable disables an available mountpath. disabled is set to true if
// the mountpath was moved from available to disabled;
// if no such mountpath exists, an error is returned.
func (mfs *MountedFS) Disable(mpath string) (disabled bool, err error) {
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return false, err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if mpathInfo, ok := availablePaths[cleanMpath]; ok {
disabledPaths[cleanMpath] = mpathInfo
mfs.ios.RemoveMpath(cleanMpath)
delete(availablePaths, cleanMpath)
mfs.updatePaths(availablePaths, disabledPaths)
if l := len(availablePaths); l == 0 {
glog.Errorf("disabled the last available mountpath %s", mpathInfo)
} else {
glog.Infof("disabled mountpath %s (%d remain(s) active)", mpathInfo, l)
}
go mpathInfo.evictLomCache()
return true, nil
}
if _, ok := disabledPaths[cleanMpath]; ok {
return false, nil
}
return false, cmn.NewNoMountpathError(mpath)
}
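Enable and Disable are really one state machine over two disjoint maps: a mountpath lives in exactly one of {available, disabled}, and the returned boolean reports whether a move actually happened. A self-contained sketch of just that semantics, with simplified types and no locking or ios bookkeeping:

package main

import (
	"errors"
	"fmt"
)

type mpathSet map[string]struct{}

// enable moves mpath from disabled to available. It returns (false, nil)
// when the mountpath is already available, and an error when it is unknown.
func enable(available, disabled mpathSet, mpath string) (bool, error) {
	if _, ok := available[mpath]; ok {
		return false, nil
	}
	if _, ok := disabled[mpath]; ok {
		available[mpath] = struct{}{}
		delete(disabled, mpath)
		return true, nil
	}
	return false, errors.New("no such mountpath: " + mpath)
}

func main() {
	available, disabled := mpathSet{}, mpathSet{"/ais/disk1": {}}
	ok, err := enable(available, disabled, "/ais/disk1")
	fmt.Println(ok, err) // true <nil>
	ok, err = enable(available, disabled, "/ais/disk1")
	fmt.Println(ok, err) // false <nil>: already enabled, the call is a no-op
}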
// NumAvail returns the number of available mountpaths.
func (mfs *MountedFS) NumAvail() int {
availablePaths := (*MPI)(mfs.available.Load())
return len(*availablePaths)
}
// Get returns both available and disabled mountpaths.
func (mfs *MountedFS) Get() (MPI, MPI)
|
{
var (
availablePaths = (*MPI)(mfs.available.Load())
disabledPaths = (*MPI)(mfs.disabled.Load())
)
if availablePaths == nil {
tmp := make(MPI, 10)
availablePaths = &tmp
}
if disabledPaths == nil {
tmp := make(MPI, 10)
disabledPaths = &tmp
}
return *availablePaths, *disabledPaths
}
|
identifier_body
|
|
mountfs.go
|
) {
return nil
}
if err := cmn.CreateDir(filepath.Dir(tmpDir)); err != nil {
return err
}
if err := os.Rename(dir, tmpDir); err != nil {
if os.IsExist(err) {
// Slow path - `tmpDir` (or rather `nonExistingDir`) for some reason already exists...
//
// Even though `nonExistingDir` should not exist we cannot fully be sure.
|
// the counter will catch up with the counter of the previous mountpath.
// If the directories from the previous mountpath were not yet removed
// (slow disk or filesystem) we can end up with the same name.
// For now we try to fight this with randomizing the initial counter.
// In the background, remove the leftover directory.
go func() {
glog.Errorf("%s already exists, removing...", tmpDir)
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("removing leftover %s failed, err: %v", tmpDir, err)
}
}()
// This time generate fully unique name...
tmpDir, err = ioutil.TempDir(mi.Path, nonExistingDir)
if err != nil {
return err
}
// Retry renaming - hopefully it should succeed now.
err = os.Rename(dir, tmpDir)
}
// Someone removed dir before os.Rename, nothing more to do.
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
}
// Schedule removing temporary directory which is our old `dir`
go func() {
// TODO: in the future, the actual operation must be delegated to LRU
// that'd take care of it while pacing itself with regard to the
// current disk utilization and space availability.
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
}
}()
return nil
}
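The function above is an instance of the rename-then-background-remove pattern: atomically move the directory out of its visible path, return immediately, and reclaim the space asynchronously. A minimal standalone sketch, with all paths assumed for illustration:

package main

import (
	"log"
	"os"
	"path/filepath"
)

// fastRemove makes dir disappear immediately by renaming it into trashRoot,
// then deletes the renamed copy off the caller's critical path.
func fastRemove(dir, trashRoot string) error {
	if err := os.MkdirAll(trashRoot, 0755); err != nil {
		return err
	}
	tmpDir := filepath.Join(trashRoot, filepath.Base(dir)+".trash")
	if err := os.Rename(dir, tmpDir); err != nil {
		if os.IsNotExist(err) {
			return nil // someone removed dir first; nothing more to do
		}
		return err
	}
	go func() {
		// A real implementation would pace this with disk utilization
		// (see the TODO above) and coordinate it with shutdown.
		if err := os.RemoveAll(tmpDir); err != nil {
			log.Printf("RemoveAll(%q) failed: %v", tmpDir, err)
		}
	}()
	return nil
}

func main() {
	_ = fastRemove("/tmp/ais/bucket1", "/tmp/ais/.trash")
}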
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
if config == nil {
config = cmn.GCO.Get()
}
curr := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
return curr >= 0 && curr < config.Disk.DiskUtilLowWM
}
func (mi *MountpathInfo) String() string {
return fmt.Sprintf("mp[%s, fs=%s]", mi.Path, mi.FileSystem)
}
///////////////
// make-path //
///////////////
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
var (
nsLen, bckNameLen, ctLen int
provLen = 1 + 1 + len(bck.Provider)
)
if !bck.Ns.IsGlobal() {
nsLen = 1
if bck.Ns.IsRemote() {
nsLen += 1 + len(bck.Ns.UUID)
}
nsLen += 1 + len(bck.Ns.Name)
}
if bck.Name != "" {
bckNameLen = 1 + len(bck.Name)
}
if contentType != "" {
cmn.Assert(bckNameLen > 0)
cmn.Assert(len(contentType) == contentTypeLen)
ctLen = 1 + 1 + contentTypeLen
}
buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
buf = append(buf, mi.Path...)
buf = append(buf, filepath.Separator, prefProvider)
buf = append(buf, bck.Provider...)
if nsLen > 0 {
buf = append(buf, filepath.Separator)
if bck.Ns.IsRemote() {
buf = append(buf, prefNsUUID)
buf = append(buf, bck.Ns.UUID...)
}
buf = append(buf, prefNsName)
buf = append(buf, bck.Ns.Name...)
}
if bckNameLen > 0 {
buf = append(buf, filepath.Separator)
buf = append(buf, bck.Name...)
}
if ctLen > 0 {
buf = append(buf, filepath.Separator, prefCT)
buf = append(buf, contentType...)
}
return
}
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
buf := mi.makePathBuf(bck, "", 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "")
buf := mi.makePathBuf(bck, contentType, 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "" && objName != "")
buf := mi.makePathBuf(bck, contentType, 1+len(objName))
buf = append(buf, filepath.Separator)
buf = append(buf, objName...)
return *(*string)(unsafe.Pointer(&buf))
}
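makePathBuf's trick is to pre-compute the exact byte length of the FQN and build it with a single allocation; the MakePath* wrappers then reinterpret the buffer as a string via unsafe.Pointer to skip the final copy. A simplified sketch of the same approach; the separator bytes '@' and '%' are placeholder assumptions standing in for the package's prefProvider/prefCT constants, whose actual values are not shown here:

package main

import (
	"fmt"
	"path/filepath"
)

// makeFQN builds <mpath>/@<provider>/<bucket>/%<ct>/<obj> in one allocation.
// '@' and '%' are placeholder prefixes, not the real package constants.
func makeFQN(mpath, provider, bucket, ct, obj string) string {
	n := len(mpath) + 2 + len(provider) + 1 + len(bucket) + 2 + len(ct) + 1 + len(obj)
	buf := make([]byte, 0, n)
	buf = append(buf, mpath...)
	buf = append(buf, filepath.Separator, '@')
	buf = append(buf, provider...)
	buf = append(buf, filepath.Separator)
	buf = append(buf, bucket...)
	buf = append(buf, filepath.Separator, '%')
	buf = append(buf, ct...)
	buf = append(buf, filepath.Separator)
	buf = append(buf, obj...)
	return string(buf) // the original avoids this copy with an unsafe.Pointer cast
}

func main() {
	fmt.Println(makeFQN("/ais/disk1", "aws", "mybucket", "ob", "img/1.jpg"))
	// Output: /ais/disk1/@aws/mybucket/%ob/img/1.jpg
}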
//
// MountedFS
//
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
var (
mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
objutil, ok = mpathUtils[objmpath]
rr, _ = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
util = objutil
r = rr
)
fqn = objfqn
if !ok {
cmn.DassertMsg(false, objmpath, pkgName)
return
}
for copyfqn, copympi := range copies {
var (
u int64
c, rrcnt int32
)
if u, ok = mpathUtils[copympi.Path]; !ok {
continue
}
if r, ok = mpathRRs[copympi.Path]; !ok {
if u < util {
fqn, util, rr = copyfqn, u, r
}
continue
}
c = r.Load()
if rr != nil {
rrcnt = rr.Load()
}
if u < util && c <= rrcnt { // the obvious choice
fqn, util, rr = copyfqn, u, r
continue
}
if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
fqn, util, rr = copyfqn, u, r
}
}
// NOTE: the counter could've been already inc-ed
// could keep track of the second best and use CAS to reserve-inc and compare
// can wait though
if rr != nil {
rr.Inc()
}
return
}
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths {
if err := mfs.Add(path); err != nil {
return err
}
}
return nil
}
// Add adds new mountpath to the target's mountpaths.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
if err := Access(cleanMpath); err != nil {
return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
}
statfs
|
// There are at least two cases when this might not be true:
// 1. `nonExistingDir` is leftover after target crash.
// 2. Mountpath was removed and then added again. The counter
// will be reset, and if we remove dirs quickly enough
|
random_line_split
|
mountfs.go
|
{
return nil
}
if err := cmn.CreateDir(filepath.Dir(tmpDir)); err != nil {
return err
}
if err := os.Rename(dir, tmpDir); err != nil {
if os.IsExist(err) {
// Slow path - `tmpDir` (or rather `nonExistingDir`) for some reason already exists...
//
// Even though `nonExistingDir` should not exist we cannot fully be sure.
// There are at least two cases when this might not be true:
// 1. `nonExistingDir` is leftover after target crash.
// 2. Mountpath was removed and then added again. The counter
// will be reset, and if we remove dirs quickly enough
// the counter will catch up with the counter of the previous mountpath.
// If the directories from the previous mountpath were not yet removed
// (slow disk or filesystem) we can end up with the same name.
// For now we try to fight this with randomizing the initial counter.
// In the background, remove the leftover directory.
go func() {
glog.Errorf("%s already exists, removing...", tmpDir)
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("removing leftover %s failed, err: %v", tmpDir, err)
}
}()
// This time generate fully unique name...
tmpDir, err = ioutil.TempDir(mi.Path, nonExistingDir)
if err != nil {
return err
}
// Retry renaming - hopefully it should succeed now.
err = os.Rename(dir, tmpDir)
}
// Someone removed dir before os.Rename, nothing more to do.
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
}
// Schedule removing temporary directory which is our old `dir`
go func() {
// TODO: in the future, the actual operation must be delegated to LRU
// that'd take care of it while pacing itself with regard to the
// current disk utilization and space availability.
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
}
}()
return nil
}
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
if config == nil {
config = cmn.GCO.Get()
}
curr := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
return curr >= 0 && curr < config.Disk.DiskUtilLowWM
}
func (mi *MountpathInfo) String() string {
return fmt.Sprintf("mp[%s, fs=%s]", mi.Path, mi.FileSystem)
}
///////////////
// make-path //
///////////////
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
var (
nsLen, bckNameLen, ctLen int
provLen = 1 + 1 + len(bck.Provider)
)
if !bck.Ns.IsGlobal() {
nsLen = 1
if bck.Ns.IsRemote() {
nsLen += 1 + len(bck.Ns.UUID)
}
nsLen += 1 + len(bck.Ns.Name)
}
if bck.Name != "" {
bckNameLen = 1 + len(bck.Name)
}
if contentType != "" {
cmn.Assert(bckNameLen > 0)
cmn.Assert(len(contentType) == contentTypeLen)
ctLen = 1 + 1 + contentTypeLen
}
buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
buf = append(buf, mi.Path...)
buf = append(buf, filepath.Separator, prefProvider)
buf = append(buf, bck.Provider...)
if nsLen > 0 {
buf = append(buf, filepath.Separator)
if bck.Ns.IsRemote() {
buf = append(buf, prefNsUUID)
buf = append(buf, bck.Ns.UUID...)
}
buf = append(buf, prefNsName)
buf = append(buf, bck.Ns.Name...)
}
if bckNameLen > 0 {
buf = append(buf, filepath.Separator)
buf = append(buf, bck.Name...)
}
if ctLen > 0 {
buf = append(buf, filepath.Separator, prefCT)
buf = append(buf, contentType...)
}
return
}
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
buf := mi.makePathBuf(bck, "", 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "")
buf := mi.makePathBuf(bck, contentType, 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "" && objName != "")
buf := mi.makePathBuf(bck, contentType, 1+len(objName))
buf = append(buf, filepath.Separator)
buf = append(buf, objName...)
return *(*string)(unsafe.Pointer(&buf))
}
//
// MountedFS
//
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
var (
mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
objutil, ok = mpathUtils[objmpath]
rr, _ = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
util = objutil
r = rr
)
fqn = objfqn
if !ok {
cmn.DassertMsg(false, objmpath, pkgName)
return
}
for copyfqn, copympi := range copies {
var (
u int64
c, rrcnt int32
)
if u, ok = mpathUtils[copympi.Path]; !ok {
continue
}
if r, ok = mpathRRs[copympi.Path]; !ok {
if u < util {
fqn, util, rr = copyfqn, u, r
}
continue
}
c = r.Load()
if rr != nil {
rrcnt = rr.Load()
}
if u < util && c <= rrcnt { // the obvious choice
fqn, util, rr = copyfqn, u, r
continue
}
if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
fqn, util, rr = copyfqn, u, r
}
}
// NOTE: the counter could've been already inc-ed
// could keep track of the second best and use CAS to reserve-inc and compare
// can wait though
if rr != nil {
rr.Inc()
}
return
}
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths
|
return nil
}
// Add adds new mountpath to the target's mountpaths.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
if err := Access(cleanMpath); err != nil {
return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
}
|
{
if err := mfs.Add(path); err != nil {
return err
}
}
|
conditional_block
|
mountfs.go
|
= ioutil.TempDir(mi.Path, nonExistingDir)
if err != nil {
return err
}
// Retry renaming - hopefully it should succeed now.
err = os.Rename(dir, tmpDir)
}
// Someone removed dir before os.Rename, nothing more to do.
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
}
// Schedule removing temporary directory which is our old `dir`
go func() {
// TODO: in the future, the actual operation must be delegated to LRU
// that'd take care of it while pacing itself with regard to the
// current disk utilization and space availability.
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
}
}()
return nil
}
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
if config == nil {
config = cmn.GCO.Get()
}
curr := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
return curr >= 0 && curr < config.Disk.DiskUtilLowWM
}
func (mi *MountpathInfo) String() string {
return fmt.Sprintf("mp[%s, fs=%s]", mi.Path, mi.FileSystem)
}
///////////////
// make-path //
///////////////
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
var (
nsLen, bckNameLen, ctLen int
provLen = 1 + 1 + len(bck.Provider)
)
if !bck.Ns.IsGlobal() {
nsLen = 1
if bck.Ns.IsRemote() {
nsLen += 1 + len(bck.Ns.UUID)
}
nsLen += 1 + len(bck.Ns.Name)
}
if bck.Name != "" {
bckNameLen = 1 + len(bck.Name)
}
if contentType != "" {
cmn.Assert(bckNameLen > 0)
cmn.Assert(len(contentType) == contentTypeLen)
ctLen = 1 + 1 + contentTypeLen
}
buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
buf = append(buf, mi.Path...)
buf = append(buf, filepath.Separator, prefProvider)
buf = append(buf, bck.Provider...)
if nsLen > 0 {
buf = append(buf, filepath.Separator)
if bck.Ns.IsRemote() {
buf = append(buf, prefNsUUID)
buf = append(buf, bck.Ns.UUID...)
}
buf = append(buf, prefNsName)
buf = append(buf, bck.Ns.Name...)
}
if bckNameLen > 0 {
buf = append(buf, filepath.Separator)
buf = append(buf, bck.Name...)
}
if ctLen > 0 {
buf = append(buf, filepath.Separator, prefCT)
buf = append(buf, contentType...)
}
return
}
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
buf := mi.makePathBuf(bck, "", 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "")
buf := mi.makePathBuf(bck, contentType, 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "" && objName != "")
buf := mi.makePathBuf(bck, contentType, 1+len(objName))
buf = append(buf, filepath.Separator)
buf = append(buf, objName...)
return *(*string)(unsafe.Pointer(&buf))
}
//
// MountedFS
//
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
var (
mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
objutil, ok = mpathUtils[objmpath]
rr, _ = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
util = objutil
r = rr
)
fqn = objfqn
if !ok {
cmn.DassertMsg(false, objmpath, pkgName)
return
}
for copyfqn, copympi := range copies {
var (
u int64
c, rrcnt int32
)
if u, ok = mpathUtils[copympi.Path]; !ok {
continue
}
if r, ok = mpathRRs[copympi.Path]; !ok {
if u < util {
fqn, util, rr = copyfqn, u, r
}
continue
}
c = r.Load()
if rr != nil {
rrcnt = rr.Load()
}
if u < util && c <= rrcnt { // the obvious choice
fqn, util, rr = copyfqn, u, r
continue
}
if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
fqn, util, rr = copyfqn, u, r
}
}
// NOTE: the counter could've been already inc-ed
// could keep track of the second best and use CAS to reserve-inc and compare
// can wait though
if rr != nil {
rr.Inc()
}
return
}
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths {
if err := mfs.Add(path); err != nil {
return err
}
}
return nil
}
// Add adds new mountpath to the target's mountpaths.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
if err := Access(cleanMpath); err != nil {
return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
}
statfs := syscall.Statfs_t{}
if err := syscall.Statfs(cleanMpath, &statfs); err != nil {
return fmt.Errorf("cannot statfs fspath %q, err: %w", mpath, err)
}
fs, err := fqn2fsAtStartup(cleanMpath)
if err != nil {
return fmt.Errorf("cannot get filesystem: %v", err)
}
mp := newMountpath(cleanMpath, mpath, statfs.Fsid, fs)
mfs.mu.Lock()
defer mfs.mu.Unlock()
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, exists := availablePaths[mp.Path]; exists {
return fmt.Errorf("tried to add already registered mountpath: %v", mp.Path)
}
if existingPath, exists := mfs.fsIDs[mp.Fsid]; exists && mfs.checkFsID {
return fmt.Errorf("tried to add path %v but same fsid (%v) was already registered by %v", mpath, mp.Fsid, existingPath)
}
mfs.ios.AddMpath(mp.Path, mp.FileSystem)
availablePaths[mp.Path] = mp
mfs.fsIDs[mp.Fsid] = cleanMpath
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
// Remove removes mountpaths from the target's mountpaths. It searches
// for the mountpath in available and disabled (if the mountpath is not found
// in available).
func (mfs *MountedFS)
|
Remove
|
identifier_name
|
|
webmux.py
|
_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org').text
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill every tunnel that has ever come in, so rather
than relying upon our list, we ask `lsof` for all processes listening on the
first 100 ports above our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
|
return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
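The port allocation above is simply "one past the highest webmux_port handed out so far, never below port_base". A tiny Go sketch of the same rule; the portBase value (2222) is an assumption for illustration:

package main

import "fmt"

const portBase = 2222 // assumed illustrative value

// nextPort returns one past the highest assigned port, but never below portBase.
func nextPort(assigned map[string]int) int {
	next := portBase - 1
	for _, p := range assigned {
		if p > next {
			next = p
		}
	}
	return next + 1
}

func main() {
	fmt.Println(nextPort(map[string]int{}))                     // 2222: first registration
	fmt.Println(nextPort(map[string]int{"a": 2222, "b": 2223})) // 2224
}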
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == int(port_number):
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if
|
subprocess.call(["kill", p])
|
conditional_block
|
webmux.py
|
server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org').text
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill every tunnel that has ever come in, so rather
than relying upon our list, we ask `lsof` for all processes listening on the
first 100 ports above our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
subprocess.call(["kill", p])
return ssh_procs
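For comparison, the same lsof probe expressed in Go: list everything listening on the 100 ports above port_base, then dedupe the PID column. The portBase value (2222) is assumed for illustration; the lsof flags match the invocation above:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

const portBase = 2222 // assumed illustrative value

// tunnelPIDs returns the unique PIDs listening on ports portBase..portBase+100.
func tunnelPIDs() []string {
	out, err := exec.Command("lsof",
		fmt.Sprintf("-i:%d-%d", portBase, portBase+100), "-P", "-n").Output()
	if err != nil {
		return nil // lsof exits non-zero when nothing matches
	}
	seen := make(map[string]bool)
	var pids []string
	for _, line := range strings.Split(string(out), "\n")[1:] { // skip header
		fields := strings.Fields(line)
		if len(fields) > 1 && !seen[fields[1]] { // column 2 is the PID
			seen[fields[1]] = true
			pids = append(pids, fields[1])
		}
	}
	return pids
}

func main() {
	fmt.Println(tunnelPIDs())
}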
class WebmuxTermManager(terminado.NamedTermManager):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
|
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == int(port_number):
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if wireguard_up
|
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
|
random_line_split
|
webmux.py
|
():
global server_list
while server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org').text
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill every tunnel that has ever come in, so rather
than relying upon our list, we ask `lsof` for all processes listening on the
first 100 ports above our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
subprocess.call(["kill", p])
return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == int(port_number):
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands +=
|
get_global_ip
|
identifier_name
|
|
webmux.py
|
server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org').text
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except Exception:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
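# Note: connecting a UDP socket sends no packets; it just asks the kernel to
# pick the outbound interface that routes toward 1.1.1.1, and getsockname()
# reports that interface's address.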
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill all the tunnels that have come in ever, so we
don't rely upon our list, we instead ask `lsof` to look for all processes
that are listening on the first 100 ports of our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except Exception:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
subprocess.call(["kill", p])
return ssh_procs
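# Illustrative lsof output for the parsing above (the header row is skipped
# and the PID is the second whitespace-separated column):
#   COMMAND   PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
#   sshd    12345 root    9u  IPv4 ...         0t0 TCP  *:9001 (LISTEN)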
class WebmuxTermManager(terminado.NamedTermManager):
|
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except Exception:
logging.warning("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-IP')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
"""Render the /shell/[\\d]+ pages"""
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == int(port_number):
return hostname
return "host on port " + port_number
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if
|
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(max_terminals=max_terminals, **kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# A port number is required to look up the target server
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
|
identifier_body
|
server.py
|
% 8 == 0:
smaller_key.append("8")
counter = 1
else:
|
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
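# This is the DES Feistel half-round f(R, K) = P(S(E(R) XOR K)); the return
# value L XOR f(R, K) becomes the next round's right half.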
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 1
|
smaller_key.append(my_key_list[i])
counter += 1
|
conditional_block
|
server.py
|
(K_List):
new_list = []
# Convert to bits
for i in range(len(K_List)):
bits = format(ord(K_List[i]), '07b')
new_list.append(bits)
if bits.count("1") % 2 == 0:
new_list.append("0")
else:
new_list.append("1")
return "".join(new_list)
def apply_IP(M):
rev_M = list(M[len(M)::-1])
my_mat = [["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"]]
counter = 0
# Create "A" Matrix
counting_list = [6, 4, 2, 0, 7, 5, 3, 1]
for i in range(len(counting_list)):
for j in range(8):
my_mat[counting_list[i]][j] = rev_M[counter]
counter += 1
# Apply IP(M)
final_list = []
for i in range(len(counting_list)):
for j in range(len(counting_list)):
final_list.append(my_mat[counting_list[j]][counting_list[i]])
return "".join(final_list)
def apply_IP_C(C):
final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list)
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
smaller_key.append(my_key_list[i])
counter += 1
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14,
|
convert_K
|
identifier_name
|
|
server.py
|
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
smaller_key.append(my_key_list[i])
counter += 1
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
[[2, 12, 4, 1, 7,
|
final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list)
|
identifier_body
|
|
server.py
|
return "".join(new_list)
def apply_IP(M):
rev_M = list(M[len(M)::-1])
my_mat = [["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"]]
counter = 0
# Create "A" Matrix
counting_list = [6, 4, 2, 0, 7, 5, 3, 1]
for i in range(len(counting_list)):
for j in range(8):
my_mat[counting_list[i]][j] = rev_M[counter]
counter += 1
# Apply IP(M)
final_list = []
for i in range(len(counting_list)):
for j in range(len(counting_list)):
final_list.append(my_mat[counting_list[j]][counting_list[i]])
return "".join(final_list)
def apply_IP_C(C):
final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list)
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
smaller_key.append(my_key_list[i])
counter += 1
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2
|
new_list.append("1")
|
random_line_split
|
|
read.go
|
) {
s.Nodes[0].Data = "p"
}
}
})
// Loop through all paragraphs, and assign a score to them based on how content-y they look.
// Then add their score to their parent node.
// A score is determined by things like number of commas and class names;
// link density is folded in later when picking the top candidate.
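// For instance, a 250-character paragraph containing 3 commas contributes
// 1 (base) + 3 (commas) + min(floor(250/100), 3) = 6 points, divided among
// its ancestors by the score dividers below.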
r.candidates = make(map[string]candidateItem)
doc.Find("p").Each(func(i int, s *goquery.Selection) {
// If this paragraph is less than 25 characters, don't even count it.
innerText := normalizeText(s.Text())
if strLen(innerText) < 25 {
return
}
// Exclude nodes with no ancestor.
ancestors := r.getNodeAncestors(s, 3)
if len(ancestors) == 0 {
return
}
// Calculate content score
// Add a point for the paragraph itself as a base.
contentScore := 1.0
// Add points for any commas within this paragraph.
contentScore += float64(strings.Count(innerText, ","))
// For every 100 characters in this paragraph, add another point. Up to 3 points.
contentScore += math.Min(math.Floor(float64(strLen(innerText)/100)), 3)
// Initialize and score ancestors.
for level, ancestor := range ancestors {
// Node score divider:
// - parent: 1 (no division)
// - grandparent: 2
// - great grandparent+: ancestor level * 3
scoreDivider := 0
if level == 0 {
scoreDivider = 1
} else if level == 1 {
scoreDivider = 2
} else {
scoreDivider = level * 3
}
ancestorHash := hashStr(ancestor)
if _, ok := r.candidates[ancestorHash]; !ok {
r.candidates[ancestorHash] = r.initializeNodeScore(ancestor)
}
candidate := r.candidates[ancestorHash]
candidate.score += contentScore / float64(scoreDivider)
r.candidates[ancestorHash] = candidate
}
})
// After we've calculated scores, loop through all of the possible
// candidate nodes we found and find the one with the highest score.
var topCandidate *candidateItem
for hash, candidate := range r.candidates {
candidate.score = candidate.score * (1 - r.getLinkDensity(candidate.node))
r.candidates[hash] = candidate
if topCandidate == nil || candidate.score > topCandidate.score {
if topCandidate == nil {
topCandidate = new(candidateItem)
}
topCandidate.score = candidate.score
topCandidate.node = candidate.node
}
}
// If top candidate not found, stop
if topCandidate == nil {
return nil
}
r.prepArticle(topCandidate.node)
return topCandidate.node
}
// Check if a node is empty
func (r *readability) isElementEmpty(s *goquery.Selection) bool {
html, _ := s.Html()
html = strings.TrimSpace(html)
return html == ""
}
// Get tag name from a node
func (r *readability) getTagName(s *goquery.Selection) string {
if s == nil || len(s.Nodes) == 0 {
return ""
}
return s.Nodes[0].Data
}
func (r *readability) getNodeAncestors(node *goquery.Selection, maxDepth int) []*goquery.Selection {
ancestors := []*goquery.Selection{}
parent := *node
for i := 0; i < maxDepth; i++ {
parent = *parent.Parent()
if len(parent.Nodes) == 0 {
return ancestors
}
ancestors = append(ancestors, &parent)
}
return ancestors
}
// Check if a given node has one of its ancestor tag name matching the provided one.
func (r *readability) hasAncestorTag(node *goquery.Selection, tag string) bool {
for parent := *node; len(parent.Nodes) > 0; parent = *parent.Parent() {
if parent.Nodes[0].Data == tag {
return true
}
}
return false
}
// Initialize a node and checks the className/id for special names
// to add to its score.
func (r *readability) initializeNodeScore(node *goquery.Selection) candidateItem {
contentScore := 0.0
switch r.getTagName(node) {
case "article":
contentScore += 10
case "section":
contentScore += 8
case "div":
contentScore += 5
case "pre", "blockquote", "td":
contentScore += 3
case "form", "ol", "ul", "dl", "dd", "dt", "li", "address":
contentScore -= 3
case "th", "h1", "h2", "h3", "h4", "h5", "h6":
contentScore -= 5
}
contentScore += r.getClassWeight(node)
return candidateItem{contentScore, node}
}
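// Illustrative example (assuming the positive/negative regexps match common
// readability class names): a <div class="article-body"> would start at
// 5 (div) + 25 (positive class) = 30, while a <ul class="sidebar"> would
// start at -3 (ul) - 25 (negative class) = -28.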
// Get an elements class/id weight. Uses regular expressions to tell if this
// element looks good or bad.
func (r *readability) getClassWeight(node *goquery.Selection) float64 {
weight := 0.0
if str, b := node.Attr("class"); b {
if negative.MatchString(str) {
weight -= 25
}
if positive.MatchString(str) {
weight += 25
}
}
if str, b := node.Attr("id"); b {
if negative.MatchString(str) {
weight -= 25
}
if positive.MatchString(str) {
weight += 25
}
}
return weight
}
// Get the density of links as a percentage of the content
// This is the amount of text that is inside a link divided by the total text in the node.
func (r *readability) getLinkDensity(node *goquery.Selection) float64 {
if node == nil {
return 0
}
textLength := strLen(normalizeText(node.Text()))
if textLength == 0 {
return 0
}
linkLength := 0
node.Find("a").Each(func(_ int, link *goquery.Selection) {
linkLength += strLen(link.Text())
})
return float64(linkLength) / float64(textLength)
}
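// e.g. a node with 200 characters of text, 50 of them inside <a> tags, has a
// link density of 0.25, which scales its candidate score by 0.75 in the
// candidate loop above.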
// Prepare the article node for display. Clean out any inline styles,
// iframes, forms, strip extraneous <p> tags, etc.
func (r *readability) prepArticle(content *goquery.Selection) {
if content == nil {
return
}
// Remove styling attribute
r.cleanStyle(content)
// Clean out junk from the article content
r.cleanConditionally(content, "form")
r.cleanConditionally(content, "fieldset")
r.clean(content, "h1")
r.clean(content, "object")
r.clean(content, "embed")
r.clean(content, "footer")
r.clean(content, "link")
// If there is only one h2 or h3 and its text content substantially equals article title,
// they are probably using it as a header and not a subheader,
// so remove it since we already extract the title separately.
if content.Find("h2").Length() == 1 {
r.clean(content, "h2")
}
if content.Find("h3").Length() == 1 {
r.clean(content, "h3")
}
r.clean(content, "iframe")
r.clean(content, "input")
r.clean(content, "textarea")
r.clean(content, "select")
r.clean(content, "button")
r.cleanHeaders(content)
// Do these last as the previous stuff may have removed junk
// that will affect these
r.cleanConditionally(content, "table")
r.cleanConditionally(content, "ul")
r.cleanConditionally(content, "div")
// Fix all relative URL
r.fixRelativeURIs(content)
// Last time, clean all empty tags and remove class name
content.Find("*").Each(func(_ int, s *goquery.Selection) {
if r.isElementEmpty(s) {
s.Remove()
}
s.RemoveAttr("class")
s.RemoveAttr("id")
})
}
// Remove inline style and other presentational attributes from the selection and everything under it.
func (r *readability) cleanStyle(s *goquery.Selection) {
s.Find("*").Each(func(i int, s1 *goquery.Selection) {
tagName := s1.Nodes[0].Data
if tagName == "svg" {
return
}
s1.RemoveAttr("align")
s1.RemoveAttr("background")
s1.RemoveAttr("bgcolor")
s1.RemoveAttr("border")
s1.RemoveAttr("cellpadding")
s1.RemoveAttr("cellspacing")
s1.RemoveAttr("frame")
s1.RemoveAttr("hspace")
s1.RemoveAttr("rules")
s1.RemoveAttr("style")
s1.RemoveAttr("valign")
s1.RemoveAttr("vspace")
s1.RemoveAttr("onclick")
s1.RemoveAttr("onmouseover")
s1.RemoveAttr("border")
s1.RemoveAttr("style")
|
random_line_split
|