| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
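The columns above describe a fill-in-the-middle (FIM) code dataset: each row below carries a source file name, a prefix, a suffix, the held-out middle, and a fim_type label for how the split was made (random_line_split, identifier_body, identifier_name, conditional_block). As a rough illustration only, here is a minimal Go sketch of assembling one such row into a prefix-suffix-middle training string; the Sample struct and the <PRE>/<SUF>/<MID> sentinel strings are stand-ins of mine, not part of the dataset.

```go
package main

import "fmt"

// Sample mirrors the columns of one dataset row.
type Sample struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string
	FimType  string
}

// ToPSM lays the row out in prefix-suffix-middle order with placeholder
// sentinel strings; real tokenizers use their own special tokens.
func ToPSM(s Sample) string {
	return "<PRE>" + s.Prefix + "<SUF>" + s.Suffix + "<MID>" + s.Middle
}

func main() {
	row := Sample{
		FileName: "lib.rs",
		Prefix:   "let mut value: usize = 0;\n",
		Suffix:   "Some(value)\n",
		Middle:   "let mut bits_left = self.width;\n",
		FimType:  "random_line_split",
	}
	fmt.Println(ToPSM(row))
}
```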
lib.rs
|
: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entries and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries {
None
} else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
// how many bits can we get from this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
|
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF >> (7 - in_byte_offset) << (8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
/// Length, in number of bitslices contained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize {
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
}
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
/// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be freed
/// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
/// operation should really be avoided. The destructor will call `Vec`'s destructor on the
/// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), Some(0));
assert_eq!(bm.get(9), Some(0));
assert_eq!(bm.get(10), None);
}
#[test]
fn get_n_bits() {
macro_rules! t {
( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => (
{
$(
assert_eq!(get_n_bits_at($e, $n, $s), $g);
)*
}
|
let mut bits_left = self.width;
|
random_line_split
|
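The lib.rs row above stores fixed-width bit-slices packed into bytes; get() locates slice i by computing a bit offset, the offset inside the starting byte, and the byte index, then reads across byte boundaries until width bits are collected. Below is a standalone Go sketch of just that offset arithmetic, using the same formulas as the Rust code; the function and variable names are mine.

```go
package main

import "fmt"

// sliceSpan reports, for slice index i of the given width (in bits), which
// bytes the slice touches and how many bits come from each, using the same
// arithmetic as the Rust get(): bitOffset = i*width, inByteOffset = bitOffset%8,
// byteOffset = (bitOffset-inByteOffset)/8.
func sliceSpan(i, width int) {
	bitOffset := i * width
	inByteOffset := bitOffset % 8
	byteOffset := (bitOffset - inByteOffset) / 8
	bitsLeft := width
	for bitsLeft > 0 {
		canGet := 8 - inByteOffset
		if bitsLeft < canGet {
			canGet = bitsLeft
		}
		fmt.Printf("byte %d: %d bit(s) starting at in-byte offset %d\n",
			byteOffset, canGet, inByteOffset)
		bitOffset += canGet
		inByteOffset = bitOffset % 8
		byteOffset = (bitOffset - inByteOffset) / 8
		bitsLeft -= canGet
	}
}

func main() {
	// For a 3-bit-wide bitmap, slice 2 occupies bits 6..8 and so spans bytes 0 and 1,
	// matching the layout used in the row's get() test data.
	sliceSpan(2, 3)
}
```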
lib.rs
|
: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entries and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries {
None
} else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
// how many bits can we get from this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF >> (7 - in_byte_offset) << (8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
/// Length, in number of bitslices contained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize
|
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
/// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be freed
/// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
/// operation should really be avoided. The destructor will call `Vec`'s destructor on the
/// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), Some(0));
assert_eq!(bm.get(9), Some(0));
assert_eq!(bm.get(10), None);
}
#[test]
fn get_n_bits() {
macro_rules! t {
( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => (
{
$(
assert_eq!(get_n_bits_at($e, $n, $s), $g);
)*
|
{
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
}
|
identifier_body
|
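The identifier_body row above recovers the body of byte_len(). For comparison, the conventional bit-to-byte conversion is plain ceiling division; the sketch below is mine and is not how the crate rounds: new() uses (bits + (8 - bits%8)) / 8 and byte_len() uses (w + w%8) / 8, each of which can differ from the ceiling by one byte for some inputs.

```go
package main

import "fmt"

// bytesForBits is the conventional ceiling division for packing a bit count
// into whole bytes. The lib.rs row rounds differently (see the lead-in above),
// so this is a reference point rather than a reimplementation of the crate.
func bytesForBits(bits int) int {
	return (bits + 7) / 8
}

func main() {
	for _, bits := range []int{24, 25, 30} {
		fmt.Printf("%d bits -> %d bytes\n", bits, bytesForBits(bits))
	}
}
```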
message.go
|
interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil
|
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run its lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received; however, this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) execute(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
|
{
log.Print(err)
return
}
|
conditional_block
|
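The message.go row above drives a Lobby through a Command interface: event values are sent over a channel and a single loop calls execute on each, so only one goroutine touches the lobby state. Here is a pared-down Go sketch of that dispatch pattern; the Lobby fields and the AddUser command are simplified stand-ins, not the real types.

```go
package main

import "fmt"

// Lobby holds only what this sketch needs; the real Lobby carries sockets,
// users, trackers of state, and more.
type Lobby struct {
	users   []string
	command chan Command
}

// Command mirrors the interface in the row above: anything the lobby
// goroutine should run against its own state.
type Command interface {
	execute(l *Lobby)
}

// Update stands in for the row's Update{} command.
type Update struct{}

func (u Update) execute(l *Lobby) {
	fmt.Printf("lobby now has %d user(s)\n", len(l.users))
}

// AddUser is a hypothetical command used only for this sketch.
type AddUser struct{ Name string }

func (a AddUser) execute(l *Lobby) {
	l.users = append(l.users, a.Name)
}

func main() {
	l := &Lobby{command: make(chan Command)}
	done := make(chan struct{})

	// The single goroutine that owns the lobby state, as in the real code.
	go func() {
		for cmd := range l.command {
			cmd.execute(l)
		}
		close(done)
	}()

	l.command <- AddUser{Name: "host"}
	l.command <- Update{}
	close(l.command)
	<-done
}
```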
message.go
|
new interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run its lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
|
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received; however, this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) execute(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
l
|
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
|
random_line_split
|
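The ServerMessage comment in the rows above explains why omitempty is not used for Player: 0 is both the zero value and a legitimate index (the lobby host). A small Go sketch of that ambiguity with encoding/json follows, plus the common pointer-field workaround; the workaround is my addition, not something the row does.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// WithValue mirrors the row's ServerMessage: a plain float64 cannot
// distinguish "player": 0 from an absent "player" field.
type WithValue struct {
	Event  string  `json:"event"`
	Player float64 `json:"player"`
}

// WithPointer is a common workaround: nil means absent, while *Player == 0
// means the host was addressed explicitly.
type WithPointer struct {
	Event  string   `json:"event"`
	Player *float64 `json:"player,omitempty"`
}

func main() {
	absent := []byte(`{"event":"msgplayer"}`)
	host := []byte(`{"event":"msgplayer","player":0}`)

	var a, b WithValue
	json.Unmarshal(absent, &a)
	json.Unmarshal(host, &b)
	fmt.Println(a.Player == b.Player) // true: the two cases collapse

	var c, d WithPointer
	json.Unmarshal(absent, &c)
	json.Unmarshal(host, &d)
	fmt.Println(c.Player == nil, d.Player != nil && *d.Player == 0) // true true
}
```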
message.go
|
new interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run it's lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received; however, this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby)
|
(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
|
execute
|
identifier_name
|
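Every MessageServer implementation in the rows above follows the same tcp path: json.Marshal the event struct, log and return on error, then write the bytes to the connection. Below is a generic Go sketch of that shape against any io.Writer; the writeJSON helper and the example event value are mine.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
)

// writeJSON captures the shape shared by the tcp methods in the rows above:
// marshal, log-and-return on failure, otherwise write the raw JSON bytes.
func writeJSON(w io.Writer, v interface{}) {
	jsonMsg, err := json.Marshal(v)
	if err != nil {
		log.Print(err)
		return
	}
	if _, err := w.Write(jsonMsg); err != nil {
		log.Print(err)
	}
}

func main() {
	// The field names mirror the row's NewSession struct; the values are
	// illustrative only.
	msg := struct {
		Event      string  `json:"event"`
		Players    float64 `json:"players"`
		MaxPlayers float64 `json:"maxplayers"`
	}{Event: "example", Players: 1, MaxPlayers: 8}

	writeJSON(os.Stdout, msg)
	fmt.Println()
}
```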
message.go
|
interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run its lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received; however, this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) execute(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby)
|
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
|
{
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
|
identifier_body
|
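Several comments in the rows above call out that Launch implements both MessageServer and Command, so the same value can be executed by the lobby and also forwarded to the application server. A minimal Go sketch of one type satisfying two interfaces; the interfaces here are trimmed to no-argument methods for brevity.

```go
package main

import "fmt"

type Command interface {
	execute()
}

type MessageServer interface {
	send()
}

// Launch plays the same double role as in the rows above: the lobby can
// execute it as a command and also forward it to the application server.
type Launch struct{ Event string }

func (la Launch) execute() { fmt.Println("execute:", la.Event) }
func (la Launch) send()    { fmt.Println("send:", la.Event) }

func main() {
	la := Launch{Event: "launch"}
	var c Command = la
	var m MessageServer = la
	c.execute()
	m.send()
}
```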
ja-jp.ts
|
PlaylistOverviewDesignMessage: "使用中、この領域には、表示されている再生リスト内の他の資産が表示されます。",
PlaylistOverviewHeading: "再生リストのすべての手順",
BurgerButton: "バーガー",
LinkButton: "リンク",
SearchButton: "検索",
AdministerPlaylist: "再生リストの管理",
NoSearchResults: "検索結果がクエリと一致しません。",
DefaultCDNLabel: "学習ソースを選択する",
DefaultCDNDescription: "学習コンテンツの代替ソースを選択します。",
DescriptionFieldLabel: "説明フィールド",
ImageSelectorLabel: "画像を選択する",
ImageSelectorButton: "画像を選択する",
DetailEditTitle: "タイトル",
DetailEditDescription: "説明",
DetailEditUrl: "Url",
DetailEditCategory: "カテゴリ",
DetailEditCategoryNoData: "使用できるカテゴリはありません",
DetailEditTechnology: "テクノロジ",
DetailEditLevel: "レベル",
DetailEditAudience: "対象ユーザー",
AssetSearchPlaceHolderLabel: "既存の資産を検索する",
CategoryHeading: "新しい下位カテゴリを追加する",
CategoryHeadingLabel: "下位カテゴリ",
CategoryPlaylistSavedMessage: "再生リストが保存されました。",
CategoryPlaylistSaveFailedMessage: "再生リストを保存できませんでした。",
CategoryNewPlayListMessage: "新しい再生リストを変更しました。変更を破棄して続行しますか?",
CategoryEditedPlayListMessage: "この再生リストを変更しました。変更を破棄して続行しますか?",
PlaylistEditAssetSavedMessage: "資産が保存されました。",
PlaylistEditAssetSaveFailedMessage: "資産を保存できませんでした。",
PlaylistEditCreatePlaylistHeader: "新しい再生リストを作成する",
PlaylistEditPlaylistDetailsHeader: "再生リストの詳細:",
PlaylistEditPlaylistAssetsHeader: "再生リストの資産",
PlaylistEditEditLabel: "詳細を編集する",
PlaylistEditSaveLabel: "詳細を保存する",
PlaylistEditCancelLabel: "詳細をキャンセルする",
PlaylistEditCloseLabel: "再生リストを閉じる",
AssetDetailsCreateHeader: "新しい資産を作成する",
AssetDetailsManageHeader: "カスタム資産の管理:",
AssetDetailsDetailsHeader: "資産の詳細:",
AssetDetailsSaveLabel: "資産を保存する",
AssetDetailsCancelLabel: "資産をキャンセルする",
AssetDetailsCloseLabel: "資産を閉じる",
AssetDetailsOpenPage: "ページを開く",
TechnologyHeadingLabel: "テクノロジ",
SubcategoryHeadingLabel: "下位カテゴリの見出し",
PlaylistItemPlaylistHeadingLabel: "再生リスト",
PlaylistItemPlaylistDelete: "再生リストを削除する",
PlaylistEditAssetNewLabel: "新しい資産",
PlaylistRemove: "再生リストから削除する",
ImageSelectorImageAlt: "カスタム再生リスト",
ImageSelectorUrlPlaceholder: "イメージ Url",
CategoryHeadingAddPlaylistToSubcategory: "新しい再生リストを下位カテゴリに追加する",
AdminMenuTechnologyLabel: "テクノロジ",
AdminMenuCategoryLabel: "カテゴリ",
Show: "表示",
Hide: "非表示",
Add: "追加",
Edit: "編集",
CloseButton: "閉じる",
EditButton: "編集",
StepButton: "手順",
MoveUpButton: "上に移動",
MoveDownButton: "下に移動",
SaveButton: "保存",
CancelButton: "キャンセル",
UpdateButton: "更新する",
DeleteButton: "削除",
DetailEditNewPageMessage: "サイト ページ ライブラリの新しい資産ページ。",
DetailEditExistingPageMessage: "既存ページの URL を入力します。",
DetailEditNewPageButton: "資産ページを作成する",
DetailEditExistingPageButton: "Url を入力する",
AdminAddCdnLabel: "コンテンツ パックを追加する",
AdminEditCdnLabel: "コンテンツ パックを編集する",
AdminDeleteCdnLabel: "コンテンツ パックを削除する",
AdminCdnIdLabel: "コンテンツ パック ID/パートナー ID",
AdminCdnDisplayName: "表示名",
AdminCdnBaseUrl: "ベース URL",
AdminCustomCdnTitle: "カスタム コンテンツ パック",
AdminCustomCdnDescription: "カスタム コンテンツ パックは、高度な機能です。Web コンテンツの管理経験のある管理者だけが使用してください。信頼できないコンテンツ ソースにより、サイトに安全でないコンテンツが取り込まれる場合があります。信頼できるソースのみを追加する必要があります。詳細については、「学習経路の文書化のパートナー ガイダンス」を参照してください。",
AdminConfirmContentPack: "新しいコンテンツ パックをプロビジョニングできる新しいブラウザー タブが開始されました。正常に完了したら、下の [完了] をクリックして代替コンテンツ リストを更新します。エラーがある場合、またはコンテンツ パックをインストールしない場合は、[キャンセル] をクリックしてください。いつでも戻ることができます。",
AdminCdnCompleteButton: "完了",
AdminCdnCancelButton: "キャンセル",
AdminCdnSaveButton: "保存",
AdminCdnUpdateButton: "更新する",
AdminRemoveCdn: "選択されている CDN を削除しますか?",
AdminAbout: "Web パーツについて",
DocumentationLinkLabel: "M365 の学習経路の文書を開く",
CategoryCopyPlaylistFail: "再生リストをコピーできませんでした。詳細については、管理者にお問い合わせるか、ブラウザー コンソールを参照してください。",
PlaylistEditCopyLabel: "再生リストをコピーする",
PlaylistNext: "次へ",
PlaylistPrevious: "前へ",
PlaylistFullScreen: "全画面表示モードを切り替える",
FilterNotSet: "すべて",
AdminVersionUpdateNotice: "インストールされている Microsoft 365 の学習経路 Web パーツは最新ではありません。現在、バージョン %0% を実行しており、最新バージョンは %1% です。",
AdminVersionUpdateInstructions: "「ソリューションの更新」の手順を参照してください。",
AdminSecurityMessage: "Microsoft 365の学習経路を管理する権限がありません。所有者またはメンバー グループへの参加について管理者に相談してください。",
AdminConfigIssueMessage: "Microsoft 365 の学習経路には構成の問題があります。管理者に問い合わせてください。[管理者:詳細なログについては、ブラウザー コンソールをご覧ください。技術サポートについては、https://github.com/pnp/custom-learning-office-365/issues の問題の一覧をご確認ください。]",
AdminAwaitingUrlPrompt: "Url:タイトルを待っています...",
CreatingPage: "ページを作成する",
TryAgain: "もう一度お試しください",
AddLanguagePlaceholder: "言語の追加",
NotApplicable: "該当なし",
DataUpgradeTitle: "データのアップグレード",
DataUpgradeIntro: "Microsoft 365 の学習経路をメジャーな新しいリリースに更新しました。アップグレード プロセスを開始するには、[開始] を押します。",
DataUpgradeStart: "開始",
DataUpgradeClose: "閉じる",
DataUpgradeIssue: "アップデートの実行中に問題が発生した場合は、Microsoft 365 の学習経路 GitHub の問題リストに ",
DataUpgradeIssueLink: "問題を送信してサポートを依頼できます。",
DataUpgradeLog: "アップグレード ログ",
DataUpgradeComplete: "アップグレードが完了しました。",
DataUpgradeErrors: "エラー",
LogLevel: "レベル",
LogMessage: "メッセージ",
AboutGroupHeader: "M365 の学習経路",
AboutGroupTitle: "現在の構成*",
AboutGroupTitle2: "(* は現在のコンテンツ パック固有のものです)",
AboutLearningSiteUrl: "学習サイトの Url:",
AboutBaseCDNPath: "ベース CDN パス:",
AboutTelemetryStatus: "テレメトリの状態:",
AboutCurrentWPVersion: "現在の Web パーツのバージョン:",
AboutMultilingualEnabled: "多言語ページの有効化:",
AboutMultilingualLanguages: "多言語ページの翻訳言語:",
AboutConfiguredLanguages: "構成された言語:",
AboutSupportedLanguages: "コンテンツパックでサポートされる言語:",
AboutDefaultSiteLanguage: "既定のサイトの言語:",
|
LinkPanelCopyLabel: "コピー",
HeaderPlaylistPanelCurrentPlaylistLabel: "現在の再生リスト:",
HeaderPlaylistPanelAdminHeader: "管理",
|
random_line_split
|
|
torrent.go
|
!= 0 {
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
}
length := v["length"].(int64)
file, err := os.OpenFile(fullPath, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, begin, length, k)
file.Close()
}
torrent.files = append(torrent.files, File{fullPath, begin, length})
begin += length
}
} else {
// Single file
fileName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, fileName); err != nil {
return err
}
length := info["length"].(int64)
torrent.totalSize = length
file, err := os.OpenFile(fileName, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, 0, length, 0)
file.Close()
}
torrent.files = []File{{fileName, 0, length}}
}
return nil
}
func (torrent *Torrent) findCompletedPieces(file *os.File, begin, length int64, fileIndex int) {
fi, err := file.Stat()
if err != nil {
return
}
size := fi.Size()
if size == 0 {
return
} else if size > length {
file.Truncate(0)
return
}
buf := make([]byte, torrent.pieceLength)
var pieceIndex uint32
if begin != 0 {
pieceIndex = uint32(begin / torrent.pieceLength)
}
fileEnd := begin + length
pos := int64(pieceIndex) * torrent.pieceLength
pieceLength := torrent.getPieceLength(pieceIndex)
if pos+pieceLength > fileEnd {
return
}
if pos < begin {
bufPos := begin - pos
if _, err := file.Read(buf[bufPos:]); err != nil {
return
}
for bufPos != 0 {
fileIndex--
f := torrent.files[fileIndex]
handle, err := os.OpenFile(f.path, os.O_RDONLY, 0600)
if err != nil {
return
}
defer handle.Close()
if bufPos > f.length {
if n, err := handle.Read(buf[bufPos-f.length : bufPos]); err != nil || int64(n) != f.length {
return
}
bufPos -= f.length
} else {
if n, err := handle.ReadAt(buf[:bufPos], f.length-bufPos); err != nil || int64(n) != bufPos {
return
}
break
}
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += pieceLength
pieceIndex++
}
if _, err := file.Seek(pos-begin, os.SEEK_SET); err != nil {
return
}
reader := bufio.NewReaderSize(file, int(pieceLength))
for pos+torrent.pieceLength <= fileEnd {
if n, err := reader.Read(buf); err != nil || n != len(buf) {
return
}
if torrent.checkPieceHash(buf, pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += torrent.pieceLength
pieceIndex++
}
if int(pieceIndex) == len(torrent.pieces)-1 {
pieceLength = torrent.getLastPieceLength()
if n, err := reader.Read(buf[:pieceLength]); err != nil || int64(n) != pieceLength {
return
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
}
}
func (torrent *Torrent) getTrackerRequestData(event uint32) *TrackerRequestData {
downloaded := torrent.getDownloadedSize()
return &TrackerRequestData{
event: event,
downloaded: uint64(downloaded),
uploaded: torrent.uploaded,
remaining: uint64(torrent.totalSize - downloaded),
}
}
func (torrent *Torrent) startTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStarted)
for _, tracker := range torrent.trackers {
go tracker.start(data)
}
}
func (torrent *Torrent) stopTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStopped)
for _, trackerID := range torrent.activeTrackers {
go func(announceChannel chan *TrackerRequestData, stopChannel chan struct{}) {
announceChannel <- data
stopChannel <- struct{}{}
}(torrent.trackers[trackerID].announceChannel, torrent.trackers[trackerID].stopChannel)
}
}
func (torrent *Torrent) announceToTrackers(event uint32) {
data := torrent.getTrackerRequestData(event)
for _, trackerID := range torrent.activeTrackers {
go func(channel chan *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel)
}
}
func (torrent *Torrent) download() {
if torrent.completedPieces == len(torrent.pieces) {
return
}
torrent.activeTrackerChannel = make(chan int)
torrent.stoppedTrackerChannel = make(chan int)
torrent.requestAnnounceDataChannel = make(chan int)
torrent.peersChannel = make(chan interface{})
torrent.startTrackers()
torrent.pieceChannel = make(chan *PieceMessage)
torrent.bitfieldChannel = make(chan *BitfieldMessage)
torrent.havePieceChannel = make(chan *HavePieceMessage)
torrent.addPeerChannel = make(chan *Peer)
torrent.removePeerChannel = make(chan *Peer)
torrent.blockRequestChannel = make(chan *BlockRequestMessage)
torrent.fileWriteDone = make(chan struct{})
torrent.decrementPeerCount = make(chan struct{})
torrent.knownPeers = make(map[string]struct{})
for torrent.completedPieces != len(torrent.pieces) || torrent.totalPeerCount != 0 {
select {
case havePieceMessage := <-torrent.havePieceChannel:
torrent.handleHaveMessage(havePieceMessage)
case bitfieldMessage := <-torrent.bitfieldChannel:
torrent.handleBitfieldMessage(bitfieldMessage)
case pieceMessage := <-torrent.pieceChannel:
torrent.handlePieceMessage(pieceMessage)
case blockRequestMessage := <-torrent.blockRequestChannel:
torrent.handleBlockRequestMessage(blockRequestMessage)
case peer := <-torrent.addPeerChannel:
torrent.handleAddPeer(peer)
case peer := <-torrent.removePeerChannel:
torrent.handleRemovePeer(peer)
case <-torrent.fileWriteDone:
torrent.pendingFileWrites--
case <-torrent.decrementPeerCount:
torrent.totalPeerCount--
case peers := <-torrent.peersChannel:
torrent.connectToPeers(peers)
case trackerID := <-torrent.activeTrackerChannel:
torrent.activeTrackers = append(torrent.activeTrackers, trackerID)
fmt.Printf("[%s] %d active trackers\n", torrent.name, len(torrent.activeTrackers))
case trackerID := <-torrent.requestAnnounceDataChannel:
go func(channel chan *TrackerRequestData, data *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel, torrent.getTrackerRequestData(TrackerEventNone))
}
}
torrent.stopTrackers()
if torrent.pendingFileWrites != 0 {
fmt.Printf("[%s] Waiting for %d pending file writes...\n", torrent.name, torrent.pendingFileWrites)
for torrent.pendingFileWrites != 0 {
<-torrent.fileWriteDone
torrent.pendingFileWrites--
}
}
if len(torrent.activeTrackers) != 0 {
fmt.Printf("[%s] Waiting for %d trackers to stop...\n", torrent.name, len(torrent.activeTrackers))
for len(torrent.activeTrackers) != 0 {
select {
case trackerID := <-torrent.stoppedTrackerChannel:
for k, v := range torrent.activeTrackers {
if v == trackerID {
torrent.activeTrackers = append(torrent.activeTrackers[:k], torrent.activeTrackers[k+1:]...)
break
}
}
// Handle other messages that a Tracker may send
case <-torrent.activeTrackerChannel:
case <-torrent.peersChannel:
case <-torrent.requestAnnounceDataChannel:
}
}
}
}
func (torrent *Torrent) checkPieceHash(data []byte, pieceIndex uint32) bool {
dataHash := sha1.Sum(data)
return bytes.Equal(dataHash[:], []byte(torrent.pieces[pieceIndex].hash))
}
func (torrent *Torrent) getPieceLength(pieceIndex uint32) int64 {
if pieceIndex == uint32(len(torrent.pieces))-1 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
}
return torrent.pieceLength
}
func (torrent *Torrent) getLastPieceLength() int64
|
{
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
return torrent.pieceLength
}
|
identifier_body
|
|
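checkPieceHash in the torrent.go row above verifies a downloaded piece by SHA-1 hashing it and comparing against the 20-byte digest taken from the metainfo's pieces string. Here is a self-contained Go sketch of that check; the example data is invented.

```go
package main

import (
	"bytes"
	"crypto/sha1"
	"fmt"
)

// checkPieceHash mirrors the row above: hash the piece data and compare it
// byte-for-byte with the expected 20-byte SHA-1 digest.
func checkPieceHash(data []byte, expected []byte) bool {
	dataHash := sha1.Sum(data)
	return bytes.Equal(dataHash[:], expected)
}

func main() {
	piece := []byte("example piece data")
	want := sha1.Sum(piece) // in a real torrent this comes from the metainfo
	fmt.Println(checkPieceHash(piece, want[:]))               // true
	fmt.Println(checkPieceHash([]byte("corrupted"), want[:])) // false
}
```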
torrent.go
|
announce"].(string))
}
if comment, ok := data["comment"]; ok {
torrent.comment = comment.(string)
}
info := data["info"].(map[string]interface{})
torrent.name = info["name"].(string)
torrent.pieceLength = info["piece length"].(int64)
infoHash := sha1.Sum(bencode.Encode(info))
// Set handshake
var buffer bytes.Buffer
buffer.WriteByte(19) // length of the string "BitTorrent Protocol"
buffer.WriteString("BitTorrent protocol")
buffer.WriteString("\x00\x00\x00\x00\x00\x00\x00\x00") // reserved
buffer.Write(infoHash[:])
buffer.Write(client.peerID)
torrent.handshake = buffer.Bytes()
// Set pieces
pieces := info["pieces"].(string)
for i := 0; i < len(pieces); i += 20 {
torrent.pieces = append(torrent.pieces, TorrentPiece{
hash: pieces[i : i+20],
})
}
if err := os.Mkdir("Downloads", 0700); err != nil && !os.IsExist(err) {
return err
}
cwd, err := os.Getwd()
if err != nil {
return err
}
base := filepath.Join(cwd, "Downloads")
// Set files
if files, exists := info["files"]; exists {
dirName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, dirName); err != nil {
return err
}
base := filepath.Join(cwd, dirName)
for _, v := range files.([]interface{}) {
v := v.(map[string]interface{})
torrent.totalSize += v["length"].(int64)
}
// Multiple files
var begin int64
for k, v := range files.([]interface{}) {
v := v.(map[string]interface{})
// Set up directory structure
pathList := v["path"].([]interface{})
pathElements := []string{dirName}
for i := 0; i < len(pathList)-1; i++ {
pathElements = append(pathElements, pathList[i].(string))
}
path := filepath.Join(pathElements...)
fullPath := filepath.Join(path, pathList[len(pathList)-1].(string))
if err := torrent.validatePath(base, fullPath); err != nil {
return err
}
if len(path) != 0 {
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
}
length := v["length"].(int64)
file, err := os.OpenFile(fullPath, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, begin, length, k)
file.Close()
}
torrent.files = append(torrent.files, File{fullPath, begin, length})
begin += length
}
} else {
// Single file
fileName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, fileName); err != nil {
return err
}
length := info["length"].(int64)
torrent.totalSize = length
file, err := os.OpenFile(fileName, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, 0, length, 0)
file.Close()
}
torrent.files = []File{{fileName, 0, length}}
}
return nil
}
func (torrent *Torrent) findCompletedPieces(file *os.File, begin, length int64, fileIndex int) {
fi, err := file.Stat()
if err != nil {
return
}
size := fi.Size()
if size == 0 {
return
} else if size > length {
file.Truncate(0)
return
}
buf := make([]byte, torrent.pieceLength)
var pieceIndex uint32
if begin != 0 {
pieceIndex = uint32(begin / torrent.pieceLength)
}
fileEnd := begin + length
pos := int64(pieceIndex) * torrent.pieceLength
pieceLength := torrent.getPieceLength(pieceIndex)
if pos+pieceLength > fileEnd {
return
}
if pos < begin {
bufPos := begin - pos
if _, err := file.Read(buf[bufPos:]); err != nil {
return
}
for bufPos != 0 {
fileIndex--
f := torrent.files[fileIndex]
handle, err := os.OpenFile(f.path, os.O_RDONLY, 0600)
if err != nil {
return
}
defer handle.Close()
if bufPos > f.length {
if n, err := handle.Read(buf[bufPos-f.length : bufPos]); err != nil || int64(n) != f.length {
return
|
} else {
if n, err := handle.ReadAt(buf[:bufPos], f.length-bufPos); err != nil || int64(n) != bufPos {
return
}
break
}
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += pieceLength
pieceIndex++
}
if _, err := file.Seek(pos-begin, os.SEEK_SET); err != nil {
return
}
reader := bufio.NewReaderSize(file, int(pieceLength))
for pos+torrent.pieceLength <= fileEnd {
if n, err := reader.Read(buf); err != nil || n != len(buf) {
return
}
if torrent.checkPieceHash(buf, pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += torrent.pieceLength
pieceIndex++
}
if int(pieceIndex) == len(torrent.pieces)-1 {
pieceLength = torrent.getLastPieceLength()
if n, err := reader.Read(buf[:pieceLength]); err != nil || int64(n) != pieceLength {
return
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
}
}
func (torrent *Torrent) getTrackerRequestData(event uint32) *TrackerRequestData {
downloaded := torrent.getDownloadedSize()
return &TrackerRequestData{
event: event,
downloaded: uint64(downloaded),
uploaded: torrent.uploaded,
remaining: uint64(torrent.totalSize - downloaded),
}
}
func (torrent *Torrent) startTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStarted)
for _, tracker := range torrent.trackers {
go tracker.start(data)
}
}
func (torrent *Torrent) stopTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStopped)
for _, trackerID := range torrent.activeTrackers {
go func(announceChannel chan *TrackerRequestData, stopChannel chan struct{}) {
announceChannel <- data
stopChannel <- struct{}{}
}(torrent.trackers[trackerID].announceChannel, torrent.trackers[trackerID].stopChannel)
}
}
func (torrent *Torrent) announceToTrackers(event uint32) {
data := torrent.getTrackerRequestData(event)
for _, trackerID := range torrent.activeTrackers {
go func(channel chan *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel)
}
}
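// Design note: stopTrackers and announceToTrackers both hand the send off to a
// short-lived goroutine instead of writing to announceChannel directly — presumably
// so the main download loop can never block against a tracker goroutine that is
// itself waiting to send back on one of the torrent's channels (inferred, not
// stated in the source).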
func (torrent *Torrent) download() {
if torrent.completedPieces == len(torrent.pieces) {
return
}
torrent.activeTrackerChannel = make(chan int)
torrent.stoppedTrackerChannel = make(chan int)
torrent.requestAnnounceDataChannel = make(chan int)
torrent.peersChannel = make(chan interface{})
torrent.startTrackers()
torrent.pieceChannel = make(chan *PieceMessage)
torrent.bitfieldChannel = make(chan *BitfieldMessage)
torrent.havePieceChannel = make(chan *HavePieceMessage)
torrent.addPeerChannel = make(chan *Peer)
torrent.removePeerChannel = make(chan *Peer)
torrent.blockRequestChannel = make(chan *BlockRequestMessage)
torrent.fileWriteDone = make(chan struct{})
torrent.decrementPeerCount = make(chan struct{})
torrent.knownPeers = make(map[string]struct{})
for torrent.completedPieces != len(torrent.pieces) || torrent.totalPeerCount != 0 {
select {
case havePieceMessage := <-torrent.havePieceChannel:
torrent.handleHaveMessage(havePieceMessage)
case bitfieldMessage := <-torrent.bitfieldChannel:
torrent.handleBitfieldMessage(bitfieldMessage)
case pieceMessage := <-torrent.pieceChannel:
torrent.handlePieceMessage(pieceMessage)
case blockRequestMessage := <-torrent.blockRequestChannel:
torrent.handleBlockRequestMessage(blockRequestMessage)
case peer := <-torrent.addPeerChannel:
torrent.handleAddPeer(peer)
case peer := <-torrent.removePeerChannel:
torrent.handleRemovePeer(peer)
case <-torrent.fileWriteDone:
torrent.pendingFileWrites--
case <-torrent.decrementPeerCount:
torrent.totalPeerCount--
case peers := <-torrent.peersChannel:
torrent.connectToPeers(peers)
case trackerID := <-torrent.activeTrackerChannel:
torrent.activeTrackers = append(torrent.activeTrackers, trackerID)
fmt.Printf("[%s] %d active trackers\n", torrent.name, len(torrent.activeTrackers))
case trackerID := <-torrent.requestAnnounceDataChannel:
go func(channel chan *TrackerRequestData, data *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel, torrent.getTrackerRequestData(TrackerEventNone))
}
}
torrent.stopTrackers()
if torrent.pendingFileWrites != 0 {
fmt.Printf("[%s] Waiting for %d pending file writes...\n", torrent.name, torrent.pendingFileWrites)
for torrent.pendingFileWrites != 0 {
<-torrent.fileWriteDone
torrent.pendingFileWrites--
}
}
if len(torrent.activeTrackers) != 0 {
fmt.Printf("[%s] Waiting for %d trackers to stop...\n", torrent.name, len(torrent.activeTrackers))
for len(torrent.activeTrackers) != 0 {
select {
case trackerID := <-torrent.stoppedTrackerChannel:
for k, v := range torrent.activeTrackers {
if v == trackerID {
torrent.activeTrackers = append(torrent.activeTrackers[:k], torrent.activeTrackers[k+1:]...)
break
}
}
// Handle other messages that a Tracker may send
case <-torrent.activeTrackerChannel:
case <-torrent.peersChannel:
case <-torrent.requestAnnounceDataChannel:
}
}
}
}
func (torrent *Torrent) checkPieceHash(data []byte, pieceIndex uint32) bool {
dataHash := sha1.Sum(data)
return bytes.Equal(dataHash[:], []byte(torrent.pieces[pieceIndex].hash))
}
func (torrent *Torrent) getPieceLength(pieceIndex uint32) int64 {
if pieceIndex == uint32(len(torrent.pieces))-1 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
}
return torrent.pieceLength
}
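// Worked example: with totalSize = 1,000,000 bytes and pieceLength = 262,144
// (256 KiB) there are 4 pieces; indices 0-2 return 262,144 and the last piece
// (index 3) returns 1,000,000 % 262,144 = 213,568.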
// Assumed implementation: the length of the final piece, delegating to getPieceLength.
func (torrent *Torrent) getLastPieceLength() int64 {
return torrent.getPieceLength(uint32(len(torrent.pieces)) - 1)
}
project_config.py
def modifyFLAG(self):
# f=open('./TOOLS/Compile/automake_config.py','r',encoding='utf-8')
CCFLAGNOW = self.GCCFLAGName.toPlainText()
# CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAGNOW = self.LINKFLAGName.toPlainText()
# LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
HighTecDirNOW = self.HithTecDir.text()
DebugNameNOW = self.ProjectName.text()
inn = self.includeList.count()
inpathNOW = []
exn = self.excludeList.count()
expathNOW = []
Ln = self.Llist.count()
LnNOW = []
ln = self.llist.count()
lnNOW = []
try:
for i in range(inn):
inpathNOW.append(self.includeList.item(i).text())
for i in range(exn):
expathNOW.append(self.excludeList.item(i).text())
f = open('./py.pyconfig', 'w', encoding='utf-8')
# lines=f.readlines()
tLink = re.split(' ',LINKFLAGNOW)
Linkchange=''
for iii in tLink:
if '-L' not in iii and '-l:' not in iii:
Linkchange+=iii+' '
for i in range(Ln):
p = re.split('{workspace}/',self.Llist.item(i).text())
#print(p)
if len(p)==1:
Linkchange+='''-L"'''+os.path.abspath(p[0])+'''" '''
else:
Linkchange += '''-L"''' + os.path.abspath(p[1]) + '''" '''
LnNOW.append(self.Llist.item(i).text())
for i in range(ln):
Linkchange+='-l'+self.llist.item(i).text()+' '
lnNOW.append(self.llist.item(i).text())
f.write('CCFLAG=' + CCFLAGNOW + "\n")
f.write('LINKFLAG=' + Linkchange + "\n")
f.write('HighTecDir=' + HighTecDirNOW + "\n")
f.write('DebugName=' + DebugNameNOW + "\n")
aa = "includepath="
for a in inpathNOW:
if a != "":
aa += a + ','
f.write(aa + '\n')
bb = "excludefiles="
for b in expathNOW:
if b != "":
bb += b + ','
f.write(bb + '\n')
cc = "LibraryPath="
for c in LnNOW:
if c != "":
cc += c + ','
dd = "libraties="
for d in lnNOW:
if d != "":
dd += d + ','
f.write(cc + '\n')
f.write(dd + '\n')
f.close()
self.LINKFLAGName.setText('')
self.LINKFLAGName.setText(Linkchange)
except:
f.close()
def CleanProject(self):
print('Cleanning project...... ')
if os.path.exists('./Default'):
shutil.rmtree('./Default')
if os.path.exists('./delivery'):
shutil.rmtree('./delivery')
QMessageBox.about(self, "消息", "Clean has finished!")
#tkinter.messagebox.showinfo('提示','Clean has finished!')
print('Clean has finished!')
def testaa(self):
print("1")
def CloseTools(self):
print(1)
def delPath(self,id):
if id==1:
self.includeList.clear()
if id == 2:
self.excludeList.clear()
def ShowDialog(self,id):
#self.di=QDialog()
#fileselect1 = fileselect.Ui_Dialog()
#fileselect1.setupUi(self.di)
self.idPath=id
self.di.exec()
# for path,dir,files in os.walk(os.getcwd()):
# for file in files:
# i=i+1
# if file.endswith('.h') and "TOOLS" not in path:
# if "TOOLS" not in path:
# a='child'+str(i)
# a=QTreeWidgetItem(child0)
def adds(self,paths, root):
if os.path.isdir(paths):
list = os.listdir(paths)
for i in list:
# j=0
# for path1 ,dirs,files in os.walk(os.path.join(paths,i)):
# for file in files:
# if file.endswith('.h') or file.endswith('.c'):
# j=1
if 'Default' not in i and '.' not in i and '_pycache_' not in os.path.join(paths,i) and os.path.join(
paths, i) in self.AllPath:
# self.adds(os.path.join(paths, i),root)
if os.path.isdir(os.path.join(paths, i)):
childs = QTreeWidgetItem(root)
childs.setText(0, i)
childs.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.path.join(paths, i), childs)
# Note: show() must be called on the QDialog object, not on the generated Ui_Dialog object. Before self.di was stored the popup always flashed and vanished; keeping it as an instance attribute fixed that.
#print(QFileDialog.getExistingDirectory(None, "请选择要添加的文件", os.getcwd()))
def GetPath(self):
if self.index==3:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempinclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0)!=self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempinclude and tp!="":
tempinclude.append(tp)
pathss.setSelected(False)
self.includeList.addItems(sorted(tempinclude))
elif self.idPath==2:
pathlist = self.fileselect.treeWidget.selectedItems()
#pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
#print(pathlist.value().childCount())
tempexclude=[]
for pathss in pathlist:
tpathss=pathss
tp=""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0)+tp
if tpathss.parent():
tpathss=tpathss.parent()
tp='/'+tp
else:
break
if tp not in tempexclude and tp!="":
tempexclude.append(tp)
self.excludeList.addItems(sorted(tempexclude))
elif self.index==2:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempexclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempexclude and tp != "":
tempexclude.append("{workspace}"+tp)
pathss.setSelected(False)
self.Llist.addItems(tempexclude)
self.LWin.close()# if the path was picked from the workspace tree, close the selection window directly
self.di.close()
'''for selectedPath in pathlist:
print(selectedPath.text(0))
print(pathlist)'''
#if pathlist.value().checkState(0) == Qt.Checked:
#n=self.fileselect.treeWidget.topLevelItemCount()
'''while pathlist.value():
if pathlist.value().checkState(0)==Qt.Checked:
print(pathlist.value.text(0))
break'''
def Cleartree(self):
pathlist = self.fileselect.treeWidget.selectedItems()
for pathss in pathlist:
pathss.setSelected(False)
self.di.close()
def AddExpath(self):
dir1,file1 = QFileDialog.getOpenFileNames (self,'选择过滤文件',os.getcwd(),"C FILES(*.c)")
#print(dir1,file1)
for ii in dir1:
if ii!='' :
dir2 = re.split(os.getcwd().replace('\\','/'),ii)[1]
self.excludeList.addItem(dir2)
# Handlers for the Library page
def AddLibraryPath(self):
txt=self.LWUI.LibraryP.text()
if txt:
self.Llist.addItem(txt)
self.LWin.close()
def AddLibraries(self):
txt = self.lWUI.libraries.text()
if txt:
self.llist.addItem(txt)
self.lWin.close()
def DelLibraryPath(self):
items1 = self.Llist.selectedIndexes()
if items1:
for jj in items1:
self.Llist.removeItemWidget(self.Llist.takeItem(jj.row()))
def DelLibraries(self):
items1 = self.llist.selectedIndexes()
if items1:
for jj in items1:
self.llist.removeItemWidget(self.llist.takeItem(jj.row()))
if __name__ == '__main__':
cmd1 = ""
NUM=0
VAL=0
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('./Compile/mainwindowIcon.png'))
a=basePage()
a.ChooseProDir()
a.show()
# Enter the application's main loop; sys.exit ensures a clean shutdown when it returns
sys.exit(app.exec_())
class BackendTread(QThread):
setvalue = pyqtSignal(int)
def __init__(self, parent=None):
super(BackendTread, self).__init__(parent)
self.working=True
def stopSig(self):
self.working=False
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
while VAL<NUM and self.working:
num=0
for path,dir,files in os.walk(os.getcwd()):
for file in files:
if file.endswith('.o'):
num=num+1
self.setvalue.emit(num)
# Thread that runs the compilation
class BackendTread1(QThread):
startcompile1 = pyqtSignal(str)
endSig = pyqtSignal()
def __init__(self, parent=None):
super(BackendTread1, self).__init__(parent)
def startCom(self):
self.process = subprocess.Popen(cmd1)
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
f=open('conerr.err','w+')
self.process = subprocess.Popen(cmd1,stdout=subprocess.PIPE,stderr=f,bufsize=1)
'''self.bt=BackendTread()
self.bt.startcompile.connect(self.PrintConsole)
self.bt.start()'''
self.sleep(3)
while self.process.poll() is None:
#print(1)
r = self.process.stdout.readline().decode('gbk')
if r:
self.startcompile1.emit(r)
if 'tool>pause'in r:
break
os.system(r"taskkill /f /t /im make.exe")#因为在做post-build的时候,al2的工具需要按回车键才能结束进程,因为在这里强制性的使其结束
self.endSig.emit()
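# Design note (inferred from the two thread classes above): BackendTread1 owns the
# actual make process and streams its stdout back to the GUI line by line, while
# BackendTread only polls the working tree for freshly produced .o files and feeds
# the count to the progress bar, so progress keeps moving even while make is quiet.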
class basePage(QMainWindow,Ui_MainWindow):
def __init__(self):
super(basePage, self).__init__()
self.setupUi(self)
self.startpath=os.getcwd()
self.actionbuild.triggered.connect(self.checkFLAG)
#self.menuclean.triggered.connect(self.CleanProject)
self.actionclean.triggered.connect(self.CleanProject)
self.actionopen_project.triggered.connect(self.ChooseProDir)
self.actionsave_project.triggered.connect(self.modifyFLAG)
#self.quitApp.triggered.connect(QCoreApplication.instance().quit) # first way to close the program
self.actionexit.triggered.connect(qApp.quit)# second way to close the program
# Add a toolbar: stop and exit
self.tb1=self.addToolBar('tool')
actionopen1=QAction(QIcon('./Compile/file.png'),"打开工程",self)
self.tb1.addAction(actionopen1)
actionopen1.triggered.connect(self.ChooseProDir)
self.tb1.addSeparator()
actionstop=QAction(QIcon('./Compile/stop.png'),"停止",self)
self.tb1.addAction(actionstop)
actionstop.triggered.connect(self.KillProcess)
self.tb1.addSeparator()
actionExit=QAction(QIcon('./Compile/exit.png'),"退出",self)
self.tb1.addAction(actionExit)
actionExit.triggered.connect(qApp.quit)
## Create the right-click context menu
#self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
#self.includeList.customContextMenuRequested.connect(self.showRightMenu)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove)
# Clicking a single item
#self.f=""
#self.includeList.clicked.connect(self.check)
self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.excludeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.contextMenu=QMenu(self)
self.actionA=self.contextMenu.addAction("删除")
self.actionA.triggered.connect(self.remove)
self.includeList.customContextMenuRequested.connect(lambda :self.showContextMenu(1))
#self.contextMenu.triggered[QAction].connect(self.remove)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove1)# the [] names the argument type that Qt passes in automatically
self.excludeList.customContextMenuRequested.connect(lambda :self.showContextMenu(2))
#self.excludeList.customContextMenuRequested[QPoint].connect(self.remove2) # the [] names the argument type that Qt passes in automatically
self.delPath1.clicked.connect(self.includeList.clear)
self.delPath2.clicked.connect(self.excludeList.clear)
self.addPath1.clicked.connect(lambda :self.ShowDialog(1))
self.addPath2.clicked.connect(self.AddExpath)
self.fileselect = fileselect.Ui_Dialog()
# Initialize the stacked pages
self.listWidget.currentRowChanged.connect(self.display)
# Library page initialization
self.initLibraryWindow()
self.Llist.setSelectionMode(3)
self.llist.setSelectionMode(3)
#self.add2.clidken.connect(self.ShowLWindow)
# Status bar widgets
self.barlabel = QLabel('barlabel')
#self.initDialog()
#self.fileselect.buttonBox
#print(self.fileselect.treeWidget.currentItem().text(0))
def initUI(self):
self.includeList.clear()
self.excludeList.clear()
self.Llist.clear()
self.llist.clear()
self.ProjectName.setText(self.DebugName)
self.HithTecDir.setText(self.HighTecDir)
self.GCCFLAGName.setText(self.CCFLAG)
self.LINKFLAGName.setText(self.LINKFLAG)
self.ProjectName_2.setText(self.PROJECTDIR)
self.ProjectName_2.setEnabled(False)
self.barlabel.setText('准备中')
self.statusBar.addPermanentWidget(self.barlabel)
self.Result.clear()
if self.includepath:
#a=1
self.includeList.addItems(self.includepath)
if self.excludefiles:
#a=1
self.excludeList.addItems(self.excludefiles)
if self.LibraryPath:
#a=1
self.Llist.addItems(self.LibraryPath)
if self.libraties:
#a=1
self.llist.addItems(self.libraties)
def display(self,index):
self.index=index
self.stackedWidget.setCurrentIndex(index)
def initLibraryWindow(self):
self.LWUI=AddLibraryPath.Ui_LSelect()
self.LWin=QWidget()
self.LWin.setWindowModality(Qt.ApplicationModal)# make this a modal dialog
self.LWUI.setupUi(self.LWin)
self.LWUI.LibraryP.setText("")
self.add1.clicked.connect(self.LWin.show)
self.LWUI.L_Cancel.clicked.connect(self.LWin.close)
self.LWUI.L_Workspace.clicked.connect(lambda: self.ShowDialog(1))
self.LWUI.L_OK.clicked.connect(self.AddLibraryPath)
self.del1.clicked.connect(self.DelLibraryPath)
self.lWUI = Enterlibraries.Ui_LSelect()
self.lWin = QWidget()
self.lWin.setWindowModality(Qt.ApplicationModal)
self.lWUI.setupUi(self.lWin)
self.LWUI.LibraryP.setText("")
self.add2.clicked.connect(self.lWin.show)
self.lWUI.l_OK.clicked.connect(self.AddLibraries)
self.lWUI.l_Cancel.clicked.connect(self.lWin.close)
self.del2.clicked.connect(self.DelLibraries)
def KillProcess(self):
#self.process.kill()
#self.process.pid
os.system(r"taskkill /f /t /im make.exe")
self.Result.append('用户终止执行')
def ChooseProDir(self):
dir=QFileDialog.getExistingDirectory()
dir=dir.replace('/','\\')
self.ProjectName_2.setText(dir)
if dir!='':
os.chdir(dir)
import automake_config as ac
(DebugName, HighTecDir, CCFLAG, LINKFLAG, includepath, excludefiles, g_except_dir_list,
g_except_file_list,LibraryPath,libraties) = ac.maininit()
self.includepath=includepath
self.excludefiles=excludefiles
self.DebugName=DebugName
self.CCFLAG=CCFLAG
self.LINKFLAG=LINKFLAG
self.HighTecDir=HighTecDir
self.PROJECTDIR=dir
self.LibraryPath=LibraryPath
self.libraties=libraties
#print(os.getcwd())
self.AllPath=ac.FindAllPath(dir)
#print(self.AllPath)
self.initDialog()
# Configure the Dialog's buttons
self.fileselect.buttonBox.accepted.connect(self.GetPath)
self.fileselect.treeWidget.setSelectionMode(3)
self.fileselect.buttonBox.rejected.connect(self.Cleartree)
#self.adds(dir,self.child0)
a.initUI()
def initDialog(self):
self.di = QDialog()
fileselect1 = self.fileselect
fileselect1.setupUi(self.di)
# self.di.show()
child0 = QTreeWidgetItem(fileselect1.treeWidget)
child0.setText(0, self.DebugName)
child0.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.getcwd(), child0)
child1 = QTreeWidgetItem(child0)
child1.setText(0, 'TOOLS')
child1.setIcon(0, QIcon('./Compile/01.png'))
# Expand all tree nodes
fileselect1.treeWidget.expandAll()
def showContextMenu(self,id):
# If any item is selected, show the context menu
#if id==1:
items1 = self.includeList.selectedIndexes()
#self.idRm=id
#print(items)
#elif id==2:
items2 = self.excludeList.selectedIndexes()
#self.idRm = id
if items1 or items2:
self.contextMenu.show()
#self.f=QPoint
self.contextMenu.exec_(QCursor.pos()) # show the menu at the mouse position
def remove(self):
items1 = self.includeList.selectedIndexes()
items2 = self.excludeList.selectedIndexes()
if self.index==3:
if items1:
for jj in items1:
self.includeList.removeItemWidget(self.includeList.takeItem(jj.row()))
if self.index == 4:
if items2:
for jj in items2:
self.excludeList.removeItemWidget(self.excludeList.takeItem(jj.row()))
def EndResult(self):
print(os.getcwd())
f=open('./conerr.err','r')
lines=f.readlines()
j=0
for ii in lines:
if "error:"in ii:
self.Result.append("<font color=\"#FF0000\">%s</font> "%ii)
j=1
if j!=1:
self.Result.append("<font color=\"#FF0000\">finished!!!!!!!!</font> ")
self.barlabel.setText('已完成')
f.close()
os.remove('./conerr.err')
self.backend.working=False
self.statusBar.removeWidget(self.progressBar)
self.barlabel.setText('准备中')
os.chdir(self.ProjectName_2.text())
def initBar(self):
global NUM
self.progressBar = QProgressBar()
self.Result.clear()
self.barlabel.setText('正在编译:')
self.statusBar.addPermanentWidget(self.progressBar, stretch=2)
f = open('./Default/Default.objectlist','r')
lines = f.readlines()
f.close()
NUM=len(lines)
#self.progressBar.setGeometry(0,0,100,5)
self.progressBar.setRange(0,len(lines))
global VAL
VAL=0
def SetProgressBarVal(self,val):
#global VAL
n=VAL+val
self.progressBar.setValue(n)
def StartCompile(self,Hdir):
global cmd1
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
cmd1 = r'''%s\bin\make -j8 all''' % Hdir
#cmd1 = self.startpath+'\Compile\compile.bat '+Hdir
# cmd1='cd ..'
# print(includepath)
# self.process =subprocess.Popen(self.startpath+ '\Compile\compile.bat ' + cmd1)
os.chdir(self.ProjectName_2.text() + '/Default')
#f=open('ccccc.txt','w')
#self.process = subprocess.Popen(cmd1)
self.backend1 = BackendTread1()
self.backend1.startcompile1.connect(self.PrintConsole)
self.backend1.endSig.connect(self.EndResult)
#time.sleep(3)
self.backend1.start()
self.backend = BackendTread()
self.backend.setvalue.connect(self.SetProgressBarVal)
#self.backend.endSig.connect(self.EndResult)
# time.sleep(3)
self.backend.start()
'''self.process = subprocess.call(cmd1)
self.process.wait()
f= open('console.log','r')
lines =f.readlines()
for ii in lines:
if 'error:'in ii:
self.Result.insertText(ii+'\n')'''
#os.chdir(self.ProjectName_2.text())
def PrintConsole(self,r):
#print(2222)
# None means the process is still running
#r = self.process.stdout.readline()
#self.Result.append(r)
self.Result.append("<font color=\"#000000\">%s</font> "%r)
#self.backend.stopSig()
# the output destination could be changed, e.g. console or a file
#print(self.process.poll())
# redirect the error output
def checkFLAG(self):
CCFLAG1 = self.GCCFLAGName.toPlainText()
#CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAG1 = self.LINKFLAGName.toPlainText()
#LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
Hdir = self.HithTecDir.text()
DebugName1 = self.ProjectName.text()
inn=self.includeList.count()
inpath=[]
exn = self.excludeList.count()
expath = []
for i in range(inn):
inpath.append(self.includeList.item(i).text())
for i in range(exn):
expath.append(self.excludeList.item(i).text())
#print(CCFLAG1)
# POSTBUILD1 = pb.get()
# Hdir = Hdir[0:len(Hdir) - 1]
#if CCFLAG1 != self.CCFLAG or self.LINKFLAG != LINKFLAG1 or Hdir != self.HighTecDir or DebugName1 != self.DebugName or expath != self.excludefiles or inpath != self.includepath:
self.modifyFLAG()
'''for i in range(0,len(CCFALG)):
if CCFALG1[i]!=CCFALG[i]:
print(i)'''
cmd=self.startpath+'\Compile\python '+self.startpath+"\Compile/automake.py "+self.startpath
a=subprocess.call(cmd)
self.initBar()
#a.wait()
#cmd1 = Hdir + r'\bin\make'
#self.backend.update_date.connect(self.handleDisplay)
try:
self.StartCompile(Hdir)
except BaseException as e:
print(333333)
f=open('cons.log','w')
f.write(e.args)
f.close()
#def
feature.pb.go
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
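// Minimal usage sketch (illustrative, not generated code): exactly one kind is
// stored in the oneof, and the accessors for the other kinds return nil.
//
//	f := &Feature{Kind: &Feature_Int64List{Int64List: &Int64List{}}}
//	f.GetInt64List() // the *Int64List stored above
//	f.GetBytesList() // nil, because the oneof holds an int64_list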
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
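// The varint written before each embedded message is the usual protobuf key,
// field_number<<3 | wire_type: with proto.WireBytes == 2 this is 1<<3|2 = 0x0A for
// bytes_list, 2<<3|2 = 0x12 for float_list and 3<<3|2 = 0x1A for int64_list.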
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
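// Illustrative composition of the map-backed containers (not generated code):
//
//	feats := &Features{Feature: map[string]*Feature{
//		"label": {Kind: &Feature_Int64List{Int64List: &Int64List{}}},
//	}}
//	feats.GetFeature()["label"].GetInt64List() // non-nil only for the kind that was set
//
//	fls := &FeatureLists{FeatureList: map[string]*FeatureList{
//		"frames": {Feature: []*Feature{{}, {}}},
//	}}
//	fls.GetFeatureList()["frames"] // a named, ordered list of Features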
func init() {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa
|
String
|
identifier_name
|
feature.pb.go
|
) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init()
|
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa
|
{
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
|
identifier_body
|
feature.pb.go
|
) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
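// Usage sketch (hypothetical variable f *Feature): the typed getters below
// return nil unless the oneof currently holds that branch, e.g.
//   if bl := f.GetBytesList(); bl != nil { _ = bl.Value }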
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes
|
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
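// Each branch below is sized as tag varint + length varint + embedded message
// bytes, i.e. the proto wire format for a length-delimited (WireBytes) field.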
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init() {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa
|
{
return true, proto.ErrInternalBadWireType
}
|
conditional_block
|
feature.pb.go
|
type Int64List struct {
Value []int64 `protobuf:"varint,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *Int64List) Reset() { *m = Int64List{} }
func (m *Int64List) String() string { return proto.CompactTextString(m) }
func (*Int64List) ProtoMessage() {}
func (*Int64List) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Int64List) GetValue() []int64 {
if m != nil {
return m.Value
}
return nil
}
// Containers for non-sequential data.
type Feature struct {
// Each feature can be exactly one kind.
//
// Types that are valid to be assigned to Kind:
// *Feature_BytesList
// *Feature_FloatList
// *Feature_Int64List
Kind isFeature_Kind `protobuf_oneof:"kind"`
}
func (m *Feature) Reset() { *m = Feature{} }
func (m *Feature) String() string { return proto.CompactTextString(m) }
func (*Feature) ProtoMessage() {}
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init() {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor
|
return nil
}
|
random_line_split
|
|
GridSave.py
|
.
dpix = (referencevalue - crval2)/cdelt2
crpix2 = crpix2 + dpix
# change x axis
header['CRVAL2'] = referencevalue
header['CRPIX2'] = crpix2
header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
header['BMIN'] = 18.1 # Beam minor axis in degrees
header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
header['DATE-OBS'] = dateobs
header['DATE'] = mydate
header['OBSERVER'] = 'Science Aficionado'
header['OBJECT'] = 'Milky Way'
header['TELESCOP'] = 'Aficionado Horn'
header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
header['HISTORY'] = "Observations in March + April 2020"
# while len(header) < (36 * 4 - 1):
# header.append() # Adds a blank card to the end
# header.delval("EXTEND")
header.update()
# hdu = fits.PrimaryHDU(header=header, data=imageData)
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
# Check whether the output file already exists before deleting it
outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
if os.path.exists(outname):
os.remove(outname)
hdu.writeto(outname)
# create a second file with new projection
fixImageCoordinates( outname, projection)
return
def gridratio( grid1, grid2):
"""
gridratio computes the ratio of two grids when the values in both grids are non-zero
This function is used to compute gain ratios
The average and rms of the ratios are provided along with the grid of ratios
"""
nx1 = grid1.img_width
ny1 = grid1.img_height
nx2 = grid2.img_width
ny2 = grid2.img_height
ratio = 0.
rms = 0.
if nx1 != nx2:
print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
return ratio, rms
if ny1 != ny2:
print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
return ratio, rms
count = 0
nonzero = np.zeros(nx1*ny1)
# copy to ratio array
gridratio = copy.deepcopy( grid1)
for iii in range(nx1):
for jjj in range(ny1):
# put in zero as default
gridratio.image[jjj,iii] = 0.
if grid1.image[jjj,iii] > EPSILON:
if grid2.image[jjj,iii] > EPSILON:
nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]
count = count + 1
if count < 2:
print ("No overlap in non-zero samples")
return ratio, rms, gridratio
nonzero = nonzero[0:count]
asum = np.sum( nonzero)
ratio = asum/float(count)
rms = np.std( nonzero)
print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
# return the ratio grid
return ratio, rms, gridratio
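# Minimal usage sketch (hypothetical grids g1 and g2, assumed to be filled
# GridClass.Grid objects with matching dimensions):
#   ratio, rms, rgrid = gridratio(g1, g2)
#   if ratio > 0.:
#       print("Gain ratio %.3f +/- %.3f" % (ratio, rms))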
def main():
"""
Main executable for gridding astronomical data
"""
dpi = 1
dpi = 2
width = int(360)
height = int(130)
mywidth = int(width*dpi)
myheight = int(height*dpi)
FWHM = 7.5 # degrees
FWHM = 10.0 # degrees
FWHM = 5.0 # degrees
FWHM = 3.0 # degrees
FWHM = 1.0 # degrees
weight = 1.
nargs = len(sys.argv)
if nargs < 2:
print('GR: GRid Observations of integrated intensity produced by the T Command')
print('GR produces fits images for each of the horns used for the observations.')
print('For observations at the same coordinates, the ratios of intensities are also produced.')
print('The FITS format files require header information, which is copied from the')
print('Cold Load File provided by the user')
print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')
print("")
print('Glen Langston, National Science Foundation -- 20 May 12')
exit()
gridtype = sys.argv[1]
gridtype = gridtype.upper()
print('Grid Type: ', gridtype)
# enable having ra going from 24 to 0 hours == 360 to 0 degrees
xsign = 1.
xoffset = 0.
if gridtype == 'RA':
xmin = 0.
xmax = 360.
ymin = -40.
ymax = 90.
maptype = 'RA'
elif gridtype == '-RA':
xmin = 0.
xmax = 360.
ymin = -40.
ymax = 90.
xsign = -1.
xoffset = 360. # when x = 360. should be at zero.
maptype = 'RA'
elif gridtype == '-EL':
xmin = 0.
xmax = 360.
ymin = 0.
ymax = 90.
xsign = -1.
xoffset = 360. # when x = 360. should be at zero.
maptype = 'AZEL'
elif gridtype == 'RA0':
xmin = 0.
xmax = 360.
ymin = -41.
ymax = 89.
xsign = -1.
xoffset = 180. # when x = 360. should be at zero.
gridtype = 'RA'
elif gridtype == 'GAL':
xmin = -180.
xmax = 180.
ymin = -90.
ymax = 90.
maptype = 'GAL'
if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != "RA0":
print('Error parsing grid type: ', gridtype)
print('1st argument should be either RA, -RA or GAL')
exit()
rs = radioastronomy.Spectrum()
if doRatio:
#create the grid with map parameters
grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
# put each telescope in a different grid
grids = [grid1, grid2, grid3, grid4]
gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
projection = "-AIT"
# coldfile
coldfile = sys.argv[2]
# get telescope geographic location etc
|
print("Reading Observing parameters from: %s" % (coldfile))
|
random_line_split
|
|
GridSave.py
|
2 dimensional
array of image data and writes a FITS image
This program produces two images. It expects a grid that is in Cartesian format.
The second uses the projection described by the input: projection
"""
# print("Image: ", imageData)
imageData = grid.image
size = imageData.shape
imageCopy = copy.deepcopy( imageData)
nx = size[1]
ny = size[0]
# now flip the Y axis of the image to match the FITS Convention
iy = ny - 1
for iii in range(ny):
imageCopy[iii][:] = imageData[iy][:]
iy = iy - 1
pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
gridtype = grid.gridtype.upper()
print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
# gridtype = "RA"
if gridtype.find('RA') > -1:
maptype = 'RA'
XTYPE = 'RA--'
YTYPE = 'DEC-'
else:
maptype = 'GAL'
XTYPE = 'GLON'
YTYPE = 'GLAT'
xstart = 360.
ystart = 90.
# select the projection here:
# projection = "-CYP"
# projection = "-CAR"
crval1 = grid.crval1
crval2 = grid.crval2
crpix1 = grid.crpix1
crpix2 = grid.crpix2
cdelt1 = grid.cdelt1
cdelt2 = grid.cdelt2
print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
hdu = fits.PrimaryHDU()
header = hdu.header
dateobs = "%s" % (rs.utc)
dateobs = dateobs.replace(" ","T")
mydate = datetime.datetime.now()
mydate = "%s" % (mydate)
mydate = mydate[2:10]
mydate.replace('-','/')
header['NAXIS1'] = int(nx)
header['NAXIS2'] = int(ny)
header['BUNIT'] = 'K-km/s/BEAM'
maptype = "RA"
if maptype[0:2] == "RA":
maptype = "RA"
header['CTYPE1'] = 'RA---CAR'
else:
maptype = "GAL"
header['CTYPE1'] = 'GLON-CAR'
# create a Cartesian x-centered image
header['CRPIX1'] = nx/2.
header['CRVAL1'] = 180.
grid.crval1 = header['CRVAL1']
header['CDELT1'] = cdelt1
header['CUNIT1'] = 'deg'
header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
grid.crval2 = header['CRVAL2']
header['CRPIX2'] = ny/2.
header['CDELT2'] = cdelt2
header['CUNIT2'] = 'deg'
grid.gridtype = maptype
if maptype[0:2] == "RA":
print("RA: writeFits: %s" % (maptype))
header['CTYPE2'] = 'DEC--CAR'
else:
print("GAL: writeFits: %s" % (maptype))
header['CTYPE2'] = 'GLAT-CAR'
header['WCAXES'] = 2
header['RADESYS'] ='FK5'
# temporarily replace the reference coordinate with zero
crval2 = header['CRVAL2']
crpix2 = header['CRPIX2']
# redefine the reference for the best Cartesian format
referencevalue = 0.
dpix = (referencevalue - crval2)/cdelt2
crpix2 = crpix2 + dpix
# change x axis
header['CRVAL2'] = referencevalue
header['CRPIX2'] = crpix2
header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
header['BMIN'] = 18.1 # Beam minor axis in degrees
header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
header['DATE-OBS'] = dateobs
header['DATE'] = mydate
header['OBSERVER'] = 'Science Aficionado'
header['OBJECT'] = 'Milky Way'
header['TELESCOP'] = 'Aficionado Horn'
header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
header['HISTORY'] = "Observations in March + April 2020"
# while len(header) < (36 * 4 - 1):
# header.append() # Adds a blank card to the end
# header.delval("EXTEND")
header.update()
# hdu = fits.PrimaryHDU(header=header, data=imageData)
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
# Check whether the output file already exists before deleting it
outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
if os.path.exists(outname):
os.remove(outname)
hdu.writeto(outname)
# create a second file with new projection
fixImageCoordinates( outname, projection)
return
def
|
( grid1, grid2):
"""
gridratio computes the ratio of two grids when the values in both grids are non-zero
This function is used to compute gain ratios
The average and rms of the ratios are provided along with the grid of ratios
"""
nx1 = grid1.img_width
ny1 = grid1.img_height
nx2 = grid2.img_width
ny2 = grid2.img_height
ratio = 0.
rms = 0.
if nx1 != nx2:
print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
return ratio, rms
if ny1 != ny2:
print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
return ratio, rms
count = 0
nonzero = np.zeros(nx1*ny1)
# copy to ratio array
gridratio = copy.deepcopy( grid1)
for iii in range(nx1):
for jjj in range(ny1):
# put in zero as default
gridratio.image[jjj,iii] = 0.
if grid1.image[jjj,iii] > EPSILON:
if grid2.image[jjj,iii] > EPSILON:
nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]
count = count + 1
if count < 2:
print ("No overlap in non-zero samples")
return ratio, rms, gridratio
nonzero = nonzero[0:count]
asum = np.sum( nonzero)
ratio = asum/float(count)
rms = np.std( nonzero)
print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
# return the ratio grid
return ratio, rms, gridratio
def main():
"""
Main executable for gridding astronomical data
"""
dpi = 1
dpi = 2
width = int(360)
height = int(130)
mywidth = int(width*dpi)
myheight = int(height*dpi)
FWHM = 7.5 # degrees
FWHM = 10.0 # degrees
FWHM = 5.0 # degrees
FWHM = 3.0 # degrees
FWHM = 1.0 # degrees
weight = 1.
nargs = len(sys.argv)
if nargs < 2:
print('GR: GRid Observations of integrated intensity produced by the T Command')
print('GR produces fits images for each of the
|
gridratio
|
identifier_name
|
GridSave.py
|
array of image data and writes a FITS image
This program produces two images. It expects a grid that is in Cartesian format.
The second uses the projection described by the input: projection
"""
# print("Image: ", imageData)
imageData = grid.image
size = imageData.shape
imageCopy = copy.deepcopy( imageData)
nx = size[1]
ny = size[0]
# now flip the Y axis of the image to match the FITS Convention
iy = ny - 1
for iii in range(ny):
imageCopy[iii][:] = imageData[iy][:]
iy = iy - 1
pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
gridtype = grid.gridtype.upper()
print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
# gridtype = "RA"
if gridtype.find('RA') > -1:
maptype = 'RA'
XTYPE = 'RA--'
YTYPE = 'DEC-'
else:
maptype = 'GAL'
XTYPE = 'GLON'
YTYPE = 'GLAT'
xstart = 360.
ystart = 90.
# select the projection here:
# projection = "-CYP"
# projection = "-CAR"
crval1 = grid.crval1
crval2 = grid.crval2
crpix1 = grid.crpix1
crpix2 = grid.crpix2
cdelt1 = grid.cdelt1
cdelt2 = grid.cdelt2
print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
hdu = fits.PrimaryHDU()
header = hdu.header
dateobs = "%s" % (rs.utc)
dateobs = dateobs.replace(" ","T")
mydate = datetime.datetime.now()
mydate = "%s" % (mydate)
mydate = mydate[2:10]
mydate.replace('-','/')
header['NAXIS1'] = int(nx)
header['NAXIS2'] = int(ny)
header['BUNIT'] = 'K-km/s/BEAM'
maptype = "RA"
if maptype[0:2] == "RA":
maptype = "RA"
header['CTYPE1'] = 'RA---CAR'
else:
maptype = "GAL"
header['CTYPE1'] = 'GLON-CAR'
# create a Cartesian x-centered image
header['CRPIX1'] = nx/2.
header['CRVAL1'] = 180.
grid.crval1 = header['CRVAL1']
header['CDELT1'] = cdelt1
header['CUNIT1'] = 'deg'
header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
grid.crval2 = header['CRVAL2']
header['CRPIX2'] = ny/2.
header['CDELT2'] = cdelt2
header['CUNIT2'] = 'deg'
grid.gridtype = maptype
if maptype[0:2] == "RA":
print("RA: writeFits: %s" % (maptype))
header['CTYPE2'] = 'DEC--CAR'
else:
print("GAL: writeFits: %s" % (maptype))
header['CTYPE2'] = 'GLAT-CAR'
header['WCAXES'] = 2
header['RADESYS'] ='FK5'
# temporarily replace the reference coordinate with zero
crval2 = header['CRVAL2']
crpix2 = header['CRPIX2']
# redefine the reference for the best Cartesian format
referencevalue = 0.
dpix = (referencevalue - crval2)/cdelt2
crpix2 = crpix2 + dpix
# change x axis
header['CRVAL2'] = referencevalue
header['CRPIX2'] = crpix2
header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
header['BMIN'] = 18.1 # Beam minor axis in degrees
header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
header['DATE-OBS'] = dateobs
header['DATE'] = mydate
header['OBSERVER'] = 'Science Aficionado'
header['OBJECT'] = 'Milky Way'
header['TELESCOP'] = 'Aficionado Horn'
header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
header['HISTORY'] = "Observations in March + April 2020"
# while len(header) < (36 * 4 - 1):
# header.append() # Adds a blank card to the end
# header.delval("EXTEND")
header.update()
# hdu = fits.PrimaryHDU(header=header, data=imageData)
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
# Check whether the output file already exists before deleting it
outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
if os.path.exists(outname):
os.remove(outname)
hdu.writeto(outname)
# create a second file with new projection
fixImageCoordinates( outname, projection)
return
def gridratio( grid1, grid2):
"""
gridratio computes the ratio of two grids when the values in both grids are non-zero
This function is used to compute gain ratios
The average and rms of the ratios are provided along with the grid of ratios
"""
nx1 = grid1.img_width
ny1 = grid1.img_height
nx2 = grid2.img_width
ny2 = grid2.img_height
ratio = 0.
rms = 0.
if nx1 != nx2:
print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
return ratio, rms
if ny1 != ny2:
print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
return ratio, rms
count = 0
nonzero = np.zeros(nx1*ny1)
# copy to ratio array
gridratio = copy.deepcopy( grid1)
for iii in range(nx1):
for jjj in range(ny1):
# put in zero as default
gridratio.image[jjj,iii] = 0.
if grid1.image[jjj,iii] > EPSILON:
if grid2.image[jjj,iii] > EPSILON:
nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]
count = count + 1
if count < 2:
print ("No overlap in non-zero samples")
return ratio, rms, gridratio
nonzero = nonzero[0:count]
asum = np.sum( nonzero)
ratio = asum/float(count)
rms = np.std( nonzero)
print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
# return the ratio grid
return ratio, rms, gridratio
def main():
|
"""
Main executable for gridding astronomical data
"""
dpi = 1
dpi = 2
width = int(360)
height = int(130)
mywidth = int(width*dpi)
myheight = int(height*dpi)
FWHM = 7.5 # degrees
FWHM = 10.0 # degrees
FWHM = 5.0 # degrees
FWHM = 3.0 # degrees
FWHM = 1.0 # degrees
weight = 1.
nargs = len(sys.argv)
if nargs < 2:
print('GR: GRid Observations of integrated intensity produced by the T Command')
print('GR produces fits images for each of the horns used for the observations.')
|
identifier_body
|
|
GridSave.py
|
# now for output image check all pixel values
for jjj in range (ny):
for iii in range (nx):
# if this image pixel has no value
pixout[0] = (iii,jjj)
oworld = wout.wcs_pix2world(pixout, 0)
xy = oworld[0]
if np.isnan(xy[0]):
continue
# print("pixout: %d,%d : world %.f,%.2f" % (iii,jjj,xy[0],xy[1]))
pixin[0] = oworld[0]
ipixels = win.wcs_world2pix(pixin, 0)
# get input pixels for coordinate
ixy = ipixels[0]
# if outside of current image skip this pixel
if np.isnan( ixy[0]):
continue
ix = int(ixy[0])
iy = int(ixy[1])
ix = max( min( nx-1, ix), 0)
iy = max( min( ny-1, iy), 0)
ix = int(ix)
iy = int(iy)
# print("pixin : %d,%d : world %.f,%.2f" % (ix,iy,xy[0],xy[1]))
# print("OX,OY:%d,%d <= IX,IY:%d,%d" %( ox,oy, ix,iy))
imageCopy[jjj][iii] = imageData[iy][ix]
print("Preparing to write new coordiante transform: %s" % (outname))
if os.path.exists(outname):
os.remove(outname)
newhdu = fits.PrimaryHDU(header=header, data=imageCopy)
newhdu.writeto(outname)
print("Wrote new")
return
def writeFitsImage( rs, cpuIndex, grid, projection):
"""
writeFitsImage() takes a spectrum for describing the observation and a 2 dimensional
array of image data and writes a FITS image
This program produces two images. It expects a grid that is in Cartesian format.
The second uses the projection described by the input: projection
"""
# print("Image: ", imageData)
imageData = grid.image
size = imageData.shape
imageCopy = copy.deepcopy( imageData)
nx = size[1]
ny = size[0]
# now flip the Y axis of the image to match the FITS Convention
iy = ny - 1
for iii in range(ny):
imageCopy[iii][:] = imageData[iy][:]
iy = iy - 1
pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
gridtype = grid.gridtype.upper()
print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
# gridtype = "RA"
if gridtype.find('RA') > -1:
maptype = 'RA'
XTYPE = 'RA--'
YTYPE = 'DEC-'
else:
maptype = 'GAL'
XTYPE = 'GLON'
YTYPE = 'GLAT'
xstart = 360.
ystart = 90.
# select the projection here:
# projection = "-CYP"
# projection = "-CAR"
crval1 = grid.crval1
crval2 = grid.crval2
crpix1 = grid.crpix1
crpix2 = grid.crpix2
cdelt1 = grid.cdelt1
cdelt2 = grid.cdelt2
print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
hdu = fits.PrimaryHDU()
header = hdu.header
dateobs = "%s" % (rs.utc)
dateobs = dateobs.replace(" ","T")
mydate = datetime.datetime.now()
mydate = "%s" % (mydate)
mydate = mydate[2:10]
mydate.replace('-','/')
header['NAXIS1'] = int(nx)
header['NAXIS2'] = int(ny)
header['BUNIT'] = 'K-km/s/BEAM'
maptype = "RA"
if maptype[0:2] == "RA":
maptype = "RA"
header['CTYPE1'] = 'RA---CAR'
else:
maptype = "GAL"
header['CTYPE1'] = 'GLON-CAR'
# create a Cartesian x-centered image
header['CRPIX1'] = nx/2.
header['CRVAL1'] = 180.
grid.crval1 = header['CRVAL1']
header['CDELT1'] = cdelt1
header['CUNIT1'] = 'deg'
header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
grid.crval2 = header['CRVAL2']
header['CRPIX2'] = ny/2.
header['CDELT2'] = cdelt2
header['CUNIT2'] = 'deg'
grid.gridtype = maptype
if maptype[0:2] == "RA":
print("RA: writeFits: %s" % (maptype))
header['CTYPE2'] = 'DEC--CAR'
else:
print("GAL: writeFits: %s" % (maptype))
header['CTYPE2'] = 'GLAT-CAR'
header['WCAXES'] = 2
header['RADESYS'] ='FK5'
# temporarily replace the reference coordinate with zero
crval2 = header['CRVAL2']
crpix2 = header['CRPIX2']
# redefine the reference for the best Cartesian format
referencevalue = 0.
dpix = (referencevalue - crval2)/cdelt2
crpix2 = crpix2 + dpix
# change x axis
header['CRVAL2'] = referencevalue
header['CRPIX2'] = crpix2
header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
header['BMIN'] = 18.1 # Beam minor axis in degrees
header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
header['DATE-OBS'] = dateobs
header['DATE'] = mydate
header['OBSERVER'] = 'Science Aficionado'
header['OBJECT'] = 'Milky Way'
header['TELESCOP'] = 'Aficionado Horn'
header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
header['HISTORY'] = "Observations in March + April 2020"
# while len(header) < (36 * 4 - 1):
# header.append() # Adds a blank card to the end
# header.delval("EXTEND")
header.update()
# hdu = fits.PrimaryHDU(header=header, data=imageData)
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
# Check whether the output file already exists before deleting it
outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
if os.path.exists(outname):
os.remove(outname)
hdu.writeto(outname)
# create a second file with new projection
fixImageCoordinates( outname, projection)
return
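# Minimal usage sketch (hypothetical values; rs is a radioastronomy.Spectrum and
# grid a filled GridClass.Grid); writes the Cartesian image for telescope 3 plus
# a re-projected copy:
#   writeFitsImage(rs, 3, grid, "-AIT")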
def gridratio( grid1, grid2):
"""
gridratio computes the ratio of two grids when the values in both grids are non-zero
This function is used to compute gain ratios
The average and rms of the ratios are provided along with the grid of ratios
"""
nx1 = grid1.img_width
ny1 = grid1.img_height
nx2 = grid2.img_width
ny2 = grid2.img_height
ratio = 0.
rms = 0.
if nx1 != nx2:
print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
return ratio, rms
if ny1 != ny
|
for iii in range (nx):
imageCopy[jjj][iii] = nan
|
conditional_block
|
|
prec_climber.rs
|
(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = prec_climber![
/// L plus | minus,
/// L times | divide,
/// R power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
(
$( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
) => {{
prec_climber!(
@precedences { 1u32 }
$( [ $rule $( $rules )* ] )*
);
$crate::prec_climber::PrecClimber::new_const(
prec_climber!(
@array
$( $assoc $rule $(, $assoc $rules )* ),*
)
)
}};
( @assoc L ) => { $crate::prec_climber::Assoc::Left };
( @assoc R ) => { $crate::prec_climber::Assoc::Right };
(
@array
$(
$assoc:ident $rule:ident
),*
) => {
&[
$(
(
Rule::$rule,
$rule,
prec_climber!( @assoc $assoc ),
)
),*
]
};
(
@precedences { $precedence:expr }
) => {};
(
@precedences { $precedence:expr }
[ $( $rule:ident )* ]
$( [ $( $rules:ident )* ] )*
) => {
$(
#[allow(non_upper_case_globals)]
const $rule: u32 = $precedence;
)*
prec_climber!(
@precedences { 1u32 + $precedence }
$( [ $( $rules )* ] )*
);
};
}
/// Associativity of an [`Operator`].
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
/// Left `Operator` associativity
Left,
/// Right `Operator` associativity
Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
|
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self {
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with the same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
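// Flatten each `|`-chained Operator group into (rule, precedence, assoc)
// tuples; the group at (0-based) index i gets precedence i + 1.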
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
///
|
random_line_split
|
|
prec_climber.rs
|
(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = prec_climber![
/// L plus | minus,
/// L times | divide,
/// R power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
(
$( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
) => {{
prec_climber!(
@precedences { 1u32 }
$( [ $rule $( $rules )* ] )*
);
$crate::prec_climber::PrecClimber::new_const(
prec_climber!(
@array
$( $assoc $rule $(, $assoc $rules )* ),*
)
)
}};
( @assoc L ) => { $crate::prec_climber::Assoc::Left };
( @assoc R ) => { $crate::prec_climber::Assoc::Right };
(
@array
$(
$assoc:ident $rule:ident
),*
) => {
&[
$(
(
Rule::$rule,
$rule,
prec_climber!( @assoc $assoc ),
)
),*
]
};
(
@precedences { $precedence:expr }
) => {};
(
@precedences { $precedence:expr }
[ $( $rule:ident )* ]
$( [ $( $rules:ident )* ] )*
) => {
$(
#[allow(non_upper_case_globals)]
const $rule: u32 = $precedence;
)*
prec_climber!(
@precedences { 1u32 + $precedence }
$( [ $( $rules )* ] )*
);
};
}
/// Associativity of an [`Operator`].
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
/// Left `Operator` associativity
Left,
/// Right `Operator` associativity
Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self {
|
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with the same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
///
|
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
|
identifier_body
|
prec_climber.rs
|
Rule> = prec_climber![
/// L plus | minus,
/// L times | divide,
/// R power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
(
$( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
) => {{
prec_climber!(
@precedences { 1u32 }
$( [ $rule $( $rules )* ] )*
);
$crate::prec_climber::PrecClimber::new_const(
prec_climber!(
@array
$( $assoc $rule $(, $assoc $rules )* ),*
)
)
}};
( @assoc L ) => { $crate::prec_climber::Assoc::Left };
( @assoc R ) => { $crate::prec_climber::Assoc::Right };
(
@array
$(
$assoc:ident $rule:ident
),*
) => {
&[
$(
(
Rule::$rule,
$rule,
prec_climber!( @assoc $assoc ),
)
),*
]
};
(
@precedences { $precedence:expr }
) => {};
(
@precedences { $precedence:expr }
[ $( $rule:ident )* ]
$( [ $( $rules:ident )* ] )*
) => {
$(
#[allow(non_upper_case_globals)]
const $rule: u32 = $precedence;
)*
prec_climber!(
@precedences { 1u32 + $precedence }
$( [ $( $rules )* ] )*
);
};
}
/// Associativity of an [`Operator`].
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
/// Left `Operator` associativity
Left,
/// Right `Operator` associativity
Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self {
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with the same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
/// Rule::times => lhs * rhs,
/// Rule::divide => lhs / rhs,
/// Rule::power => lhs.pow(rhs as u32),
/// _ => unreachable!()
/// }
/// };
///
/// let result = climber.climb(pairs, primary, infix);
/// ```
pub fn c
|
limb<
|
identifier_name
|
|
prec_climber.rs
|
,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self {
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with the same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.as_rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
/// Rule::times => lhs * rhs,
/// Rule::divide => lhs / rhs,
/// Rule::power => lhs.pow(rhs as u32),
/// _ => unreachable!()
/// }
/// };
///
/// let result = climber.climb(pairs, primary, infix);
/// ```
pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
let lhs = primary(
pairs
.next()
.expect("precedence climbing requires a non-empty Pairs"),
);
self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix)
}
fn climb_rec<'i, P, F, G, T>(
&self,
mut lhs: T,
min_prec: u32,
pairs: &mut Peekable<P>,
primary: &mut F,
infix: &mut G,
) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((prec, _)) = self.get(&rule) {
if prec >= min_prec {
let op = pairs.next().unwrap();
let mut rhs = primary(pairs.next().expect(
"infix operator must be followed by \
a primary expression",
));
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((new_prec, assoc)) = self.get(&rule) {
if new_prec > prec || assoc == Assoc::Right && new_prec == prec {
rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix);
} else {
break;
}
} else {
break;
}
}
lhs = infix(lhs, op, rhs);
} else {
|
break;
}
|
conditional_block
|
|
subscriber.rs
|
(Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
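// A minimal sketch (an assumption for illustration, not this crate's actual
// `encode`) of the TCPROS-style wire format the handshake above relies on:
// each field is written as a little-endian u32 length followed by "key=value",
// and the whole header is prefixed with its own little-endian u32 length.
#[allow(dead_code)]
fn encode_fields_sketch(fields: &[(&str, &str)]) -> Vec<u8> {
    let mut body = Vec::new();
    for (key, value) in fields {
        // Length-prefixed "key=value" entry.
        let entry = format!("{}={}", key, value);
        body.extend_from_slice(&(entry.len() as u32).to_le_bytes());
        body.extend_from_slice(entry.as_bytes());
    }
    // Prefix the whole header with its total length.
    let mut out = Vec::with_capacity(4 + body.len());
    out.extend_from_slice(&(body.len() as u32).to_le_bytes());
    out.extend_from_slice(&body);
    out
}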
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>>
where
U: std::io::Write + std::io::Read,
{
write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
read_response::<U>(stream, md5sum, msg_type)
}
#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
let length = stream.read_u32::<LittleEndian>()?;
let u32_size = std::mem::size_of::<u32>();
let num_bytes = length as usize + u32_size;
// Allocate memory of the proper size for the incoming message. We
// do not initialize the memory to zero here (as would be safe)
// because it is expensive and ultimately unnecessary. We know the
// length of the message and if the length is incorrect, the
// stream reading functions will bail with an Error rather than
// leaving memory uninitialized.
let mut out = Vec::<u8>::with_capacity(num_bytes);
let out_ptr = out.as_mut_ptr();
// Read length from stream.
std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
.write_u32::<LittleEndian>(length)?;
// Read data from stream.
let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
stream.read_exact(&mut read_buf[u32_size..])?;
// Don't drop the original Vec which has size==0 and instead use
// its memory to initialize a new Vec with size == capacity == num_bytes.
std::mem::forget(out);
// Return the new, now full and "safely" initialized Vec.
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}
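// A fully safe alternative sketch (not the function above): zero-initialize
// the buffer up front, which the comment above notes would be safe, and let
// safe slice APIs do the framing. It trades one extra memset for no unsafe code.
#[allow(dead_code)]
fn package_to_vector_safe<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
    use byteorder::{LittleEndian, ReadBytesExt};
    let length = stream.read_u32::<LittleEndian>()?;
    let u32_size = std::mem::size_of::<u32>();
    let mut out = vec![0u8; length as usize + u32_size];
    // Re-encode the length prefix, then read the payload directly after it.
    out[..u32_size].copy_from_slice(&length.to_le_bytes());
    stream.read_exact(&mut out[u32_size..])?;
    Ok(out)
}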
#[derive(Clone)]
struct MessageInfo {
caller_id: Arc<String>,
data: Vec<u8>,
}
impl MessageInfo {
fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
Self { caller_id, data }
}
}
#[cfg(test)]
mod tests {
use super::*;
static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";
#[test]
fn package_to_vector_creates_right_buffer_from_reader() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_respects_provided_length() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
}
#[test]
fn package_to_vector_fails_leaves_cursor_at_end_of_reading()
|
{
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
let mut cursor = std::io::Cursor::new(input);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]);
}
|
identifier_body
|
|
subscriber.rs
|
<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>>
where
U: std::io::Write + std::io::Read,
{
write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
read_response::<U>(stream, md5sum, msg_type)
}
#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
let length = stream.read_u32::<LittleEndian>()?;
let u32_size = std::mem::size_of::<u32>();
let num_bytes = length as usize + u32_size;
// Allocate memory of the proper size for the incoming message. We
// do not initialize the memory to zero here (as would be safe)
// because it is expensive and ultimately unnecessary. We know the
// length of the message and if the length is incorrect, the
// stream reading functions will bail with an Error rather than
// leaving memory uninitialized.
let mut out = Vec::<u8>::with_capacity(num_bytes);
let out_ptr = out.as_mut_ptr();
// Read length from stream.
std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
.write_u32::<LittleEndian>(length)?;
// Read data from stream.
let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
stream.read_exact(&mut read_buf[u32_size..])?;
// Don't drop the original Vec which has size==0 and instead use
// its memory to initialize a new Vec with size == capacity == num_bytes.
std::mem::forget(out);
// Return the new, now full and "safely" initialized Vec.
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}
#[derive(Clone)]
struct MessageInfo {
caller_id: Arc<String>,
data: Vec<u8>,
}
impl MessageInfo {
fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
Self { caller_id, data }
}
}
#[cfg(test)]
mod tests {
use super::*;
static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";
#[test]
fn package_to_vector_creates_right_buffer_from_reader() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_respects_provided_length() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
}
#[test]
fn package_to_vector_fails_leaves_cursor_at_end_of_reading() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
let mut cursor = std::io::Cursor::new(input);
|
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
|
random_line_split
|
|
subscriber.rs
|
SubscriberRosConnection {
next_data_stream_id: 1,
data_stream_tx,
publishers_stream: pub_tx,
topic,
connected_ids: BTreeSet::new(),
connected_publishers: BTreeSet::new(),
}
}
// TODO: allow synchronous handling for subscribers
// This creates a new thread to call on_message. Next API change should
// allow subscribing with either callback or inline handler of the queue.
// The queue is lossy, so it wouldn't be blocking.
pub fn add_subscriber<T, F, G>(
&mut self,
queue_size: usize,
on_message: F,
on_connect: G,
) -> usize
where
T: Message,
F: Fn(T, &str) + Send + 'static,
G: Fn(HashMap<String, String>) + Send + 'static,
{
let data_stream_id = self.next_data_stream_id;
self.connected_ids.insert(data_stream_id);
self.next_data_stream_id += 1;
let (data_tx, data_rx) = lossy_channel(queue_size);
let (connection_tx, connection_rx) = bounded(8);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Connect(
data_stream_id,
data_tx,
connection_tx,
))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to connect to data stream");
}
thread::spawn(move || {
handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
});
data_stream_id
}
pub fn remove_subscriber(&mut self, id: usize) {
self.connected_ids.remove(&id);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Disconnect(id))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to disconnect from data stream");
}
}
pub fn has_subscribers(&self) -> bool {
!self.connected_ids.is_empty()
}
#[inline]
pub fn publisher_count(&self) -> usize {
self.connected_publishers.len()
}
#[inline]
pub fn
|
(&self) -> Vec<String> {
self.connected_publishers.iter().cloned().collect()
}
#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
&mut self,
publisher: &str,
addresses: U,
) -> std::io::Result<()> {
for address in addresses.to_socket_addrs()? {
// This should never fail, so it's safe to unwrap
// Failure could only be caused by the join_connections
// thread not running, which only happens after
// Subscriber has been destroyed
self.publishers_stream
.send(address)
.expect("Connected thread died");
}
self.connected_publishers.insert(publisher.to_owned());
Ok(())
}
pub fn is_connected_to(&self, publisher: &str) -> bool {
self.connected_publishers.contains(publisher)
}
pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
let difference: Vec<String> = self
.connected_publishers
.difference(publishers)
.cloned()
.collect();
for item in difference {
self.connected_publishers.remove(&item);
}
}
pub fn get_topic(&self) -> &Topic {
&self.topic
}
}
fn handle_data<T, F, G>(
data: LossyReceiver<MessageInfo>,
connections: Receiver<HashMap<String, String>>,
on_message: F,
on_connect: G,
) where
T: Message,
F: Fn(T, &str),
G: Fn(HashMap<String, String>) + Send + 'static,
{
loop {
select! {
recv(data.kill_rx.kill_rx) -> _ => break,
recv(data.data_rx) -> msg => match msg {
Err(_) => break,
Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
Ok(value) => on_message(value, &buffer.caller_id),
Err(err) => error!("Failed to decode message: {}", err),
},
},
recv(connections) -> msg => match msg {
Err(_) => break,
Ok(conn) => on_connect(conn),
},
}
}
}
fn join_connections(
subscribers: Receiver<DataStreamConnectionChange>,
publishers: Receiver<SocketAddr>,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) {
type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &
|
publisher_uris
|
identifier_name
|
subscriber.rs
|
SubscriberRosConnection {
next_data_stream_id: 1,
data_stream_tx,
publishers_stream: pub_tx,
topic,
connected_ids: BTreeSet::new(),
connected_publishers: BTreeSet::new(),
}
}
// TODO: allow synchronous handling for subscribers
// This creates a new thread to call on_message. Next API change should
// allow subscribing with either callback or inline handler of the queue.
// The queue is lossy, so it wouldn't be blocking.
pub fn add_subscriber<T, F, G>(
&mut self,
queue_size: usize,
on_message: F,
on_connect: G,
) -> usize
where
T: Message,
F: Fn(T, &str) + Send + 'static,
G: Fn(HashMap<String, String>) + Send + 'static,
{
let data_stream_id = self.next_data_stream_id;
self.connected_ids.insert(data_stream_id);
self.next_data_stream_id += 1;
let (data_tx, data_rx) = lossy_channel(queue_size);
let (connection_tx, connection_rx) = bounded(8);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Connect(
data_stream_id,
data_tx,
connection_tx,
))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to connect to data stream");
}
thread::spawn(move || {
handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
});
data_stream_id
}
pub fn remove_subscriber(&mut self, id: usize) {
self.connected_ids.remove(&id);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Disconnect(id))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to disconnect from data stream");
}
}
pub fn has_subscribers(&self) -> bool {
!self.connected_ids.is_empty()
}
#[inline]
pub fn publisher_count(&self) -> usize {
self.connected_publishers.len()
}
#[inline]
pub fn publisher_uris(&self) -> Vec<String> {
self.connected_publishers.iter().cloned().collect()
}
#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
&mut self,
publisher: &str,
addresses: U,
) -> std::io::Result<()> {
for address in addresses.to_socket_addrs()? {
// This should never fail, so it's safe to unwrap
// Failure could only be caused by the join_connections
// thread not running, which only happens after
// Subscriber has been destroyed
self.publishers_stream
.send(address)
.expect("Connected thread died");
}
self.connected_publishers.insert(publisher.to_owned());
Ok(())
}
pub fn is_connected_to(&self, publisher: &str) -> bool {
self.connected_publishers.contains(publisher)
}
pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
let difference: Vec<String> = self
.connected_publishers
.difference(publishers)
.cloned()
.collect();
for item in difference {
self.connected_publishers.remove(&item);
}
}
pub fn get_topic(&self) -> &Topic {
&self.topic
}
}
fn handle_data<T, F, G>(
data: LossyReceiver<MessageInfo>,
connections: Receiver<HashMap<String, String>>,
on_message: F,
on_connect: G,
) where
T: Message,
F: Fn(T, &str),
G: Fn(HashMap<String, String>) + Send + 'static,
{
loop {
select! {
recv(data.kill_rx.kill_rx) -> _ => break,
recv(data.data_rx) -> msg => match msg {
Err(_) => break,
Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
Ok(value) => on_message(value, &buffer.caller_id),
Err(err) => error!("Failed to decode message: {}", err),
},
},
recv(connections) -> msg => match msg {
Err(_) => break,
Ok(conn) => on_connect(conn),
},
}
}
}
fn join_connections(
subscribers: Receiver<DataStreamConnectionChange>,
publishers: Receiver<SocketAddr>,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) {
type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
|
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &
|
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
|
conditional_block
|
snva.py
|
_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
sleep(sleep_duration)
# Once all are complete, if still idle we have no work left to do - we just wait for a new message
response = await conn.recv()
response = json.loads(response)
if response['action'] == 'STATUS_REQUEST':
logging.info('control node requested status request')
pass
elif response['action'] == 'CEASE_REQUESTS':
logging.info('control node has no more videos to process')
isIdle = True
pass
elif response['action'] == 'RESUME_REQUESTS':
logging.info('control node has instructed to resume requests')
isIdle = False
pass
elif response['action'] == 'SHUTDOWN':
logging.info('control node requested shutdown')
breakLoop = True
break
elif response['action'] == 'PROCESS':
# TODO Prepend input path
video_file_path = os.path.join(args.inputpath, response['path'])
request_received = json.dumps({'action': 'REQUEST_RECEIVED', 'video': response['path']})
await conn.send(request_received)
try:
start_video_processor(video_file_path)
except Exception as e:
logging.error('an unknown error has occurred while processing {}'.format(video_file_path))
logging.error(e)
else:
raise ConnectionError(
'control node replied with unexpected response: {}'.format(response))
logging.debug('{} child processes remain enqueued'.format(len(return_code_queue_map)))
while len(return_code_queue_map) > 0:
#logging.debug('waiting for the final {} child processes to '
# 'terminate'.format(len(return_code_queue_map)))
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
#logging.debug('sleeping for {} seconds'.format(sleep_duration))
sleep(sleep_duration)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'snva {} processed a total of {} videos and {} frames in:'.format(
snva_version_string, total_num_processed_videos,
total_num_processed_frames))
logging.info(processing_duration)
logging.info('Video analysis alone spanned a cumulative {:.02f} '
'seconds'.format(total_analysis_duration))
logging.info('exiting snva {} main process'.format(snva_version_string))
breakLoop = True
|
except socket.gaierror:
# log something
logging.info('gaierror')
|
random_line_split
|
|
snva.py
|
# Logger thread: listens for updates to log queue and writes them as they arrive
# Terminates after we add None to the queue
def child_logger_fn(main_log_queue, child_log_queue):
while True:
try:
message = child_log_queue.get()
if message is None:
break
main_log_queue.put(message)
except Exception as e:
logging.error(e)
break
def stringify_command(arg_list):
command_string = arg_list[0]
for elem in arg_list[1:]:
command_string += ' ' + elem
return 'command string: {}'.format(command_string)
# TODO: accommodate unbounded number of valid process counts
def get_valid_num_processes_per_device(device_type):
# valid_n_procs = {1, 2}
# if device_type == 'cpu':
# n_cpus = os.cpu_count()
# n_procs = 4
# while n_procs <= n_cpus:
# k = (n_cpus - n_procs) / n_procs
# if k == int(k):
# valid_n_procs.add(n_procs)
# n_procs += 2
# return valid_n_procs
return list(range(1, os.cpu_count() + 1))
async def main():
logging.info('entering snva {} main process'.format(snva_version_string))
# total_num_video_to_process = None
def interrupt_handler(signal_number, _):
logging.warning('Main process received interrupt signal '
'{}.'.format(signal_number))
main_interrupt_queue.put_nowait('_')
# if total_num_video_to_process is None \
# or total_num_video_to_process == len(video_file_paths):
# Signal the logging thread to finish up
logging.debug('signaling logger thread to end service.')
log_queue.put_nowait(None)
logger_thread.join()
logging.shutdown()
signal.signal(signal.SIGINT, interrupt_handler)
try:
ffmpeg_path = os.environ['FFMPEG_HOME']
except KeyError:
logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
'to use default ffmpeg binary location.')
if platform.system() == 'Windows':
ffmpeg_path = 'ffmpeg.exe'
else:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))
try:
ffprobe_path = os.environ['FFPROBE_HOME']
except KeyError:
logging.warning('Environment variable FFPROBE_HOME not set. '
'Attempting to use default ffprobe binary location.')
if platform.system() == 'Windows':
ffprobe_path = 'ffprobe.exe'
else:
ffprobe_path = '/usr/local/bin/ffprobe'
if not path.exists(ffprobe_path):
ffprobe_path = '/usr/bin/ffprobe'
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if outputpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def start_video_processor(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
|
while True:
try:
message = log_queue.get()
if message is None:
break
logger = logging.getLogger(__name__)
logger.handle(message)
except Exception as e:
logging.error(e)
break
|
identifier_body
|
|
snva.py
|
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if outputpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def start_video_processor(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
|
ffprobe_path = '/usr/bin/ffprobe'
|
conditional_block
|
|
snva.py
|
.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if outputpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def
|
(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion
|
start_video_processor
|
identifier_name
|
selectionmenu.js
|
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
	// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelectedText (selection) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
	// EN: Access the target element of the event
	// DE: Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
	// EN: Is the target element the menu itself or contained within it?
	// DE: Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instance = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
		// DE: Registriere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
		// EN: Prevent IE from selecting the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
		// DE: Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
		// EN: Abort if the mouse event occurred at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
		// EN: Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Inject the span element into the new Range
|
// DE: Füge das span-Element in die neue Range ein
newRange.insertNode(span);
// EN: Adjust the selection by removing and adding the range.
// EN: This prevents the selection of the menu text.
|
random_line_split
|
|
selectionmenu.js
|
: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
	// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelectedText (selection) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
	// EN: Access the target element of the event
	// DE: Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
	// EN: Is the target element the menu itself or contained within it?
	// DE: Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instance = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
		// DE: Registriere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
		// EN: Prevent IE from selecting the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
		// DE: Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
		// EN: Abort if the mouse event occurred at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
		// EN: Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Inject the span element into the new Range
// DE: Füge das span-Element in die neue Range ein
newRange.insertNode(span);
// EN: Adjust the selection by removing and adding the range.
// EN: This prevents the selection of the menu text.
// DE: Korrigiere Auswahl, verhindere das Markieren des Menüs
if (selection.removeRange) {
selection.removeRange(range);
|
} else {
selection.removeAllRanges()
|
conditional_block
|
|
selectionmenu.js
|
fn) {
	// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
if (obj.addEventListener) {
obj.addEventListener(type, fn, false);
} else if (obj.attachEvent) {
obj.attachEvent('on' + type, function () {
return fn.call(obj, window.event);
});
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
	// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelectedText (selection) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
	// EN: Access the target element of the event
	// DE: Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
	// EN: Is the target element the menu itself or contained within it?
	// DE: Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instanc
|
.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
		// DE: Registriere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
		// EN: Prevent IE from selecting the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
		// DE: Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
		// EN: Abort if the mouse event occurred at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
		// EN: Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
//
|
e = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu
|
identifier_body
|
selectionmenu.js
|
, fn) {
	// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
if (obj.addEventListener) {
obj.addEventListener(type, fn, false);
} else if (obj.attachEvent) {
obj.attachEvent('on' + type, function () {
return fn.call(obj, window.event);
});
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
	// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelected
|
) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
	// EN: Access the target element of the event
	// DE: Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
	// EN: Is the target element the menu itself or contained within it?
	// DE: Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instance = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
		// DE: Registriere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
		// EN: Prevent IE from selecting the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
		// DE: Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
		// EN: Abort if the mouse event occurred at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
		// EN: Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE
|
Text (selection
|
identifier_name
|
lib.rs
|
map>,
}
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
    /// Create an IoUring instance
    ///
    /// The `entries` value sets the size of the queue,
    /// and it should be a power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 {
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
} else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
|
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
    /// This provides a raw interface, so the developer must ensure that the parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
    /// Get the submitter, the submission queue, and the completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
    /// Build an [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
|
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
|
random_line_split
|
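The Builder above chains its setup_* methods by returning &mut Self and finishes with build(entries). A minimal usage sketch, assuming this lib.rs is the root of a crate imported as `io_uring` (the crate name and the chosen entry counts are assumptions, not part of the source):

use io_uring::{Builder, IoUring};

// Configure a ring with 256 submission entries and a 1024-entry completion queue,
// clamping out-of-range values instead of failing.
fn make_ring() -> std::io::Result<IoUring> {
    Builder::default()
        .setup_cqsize(1024)
        .setup_clamp()
        .build(256)
}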
lib.rs
|
map, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
    /// This provides a raw interface, so the developer must ensure that the parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
    /// Get the submitter, the submission queue, and the completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
    /// Build an [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
pub fn is_feature_single_mmap(&self) -> bool {
self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0
}
/// If this flag is set, io_uring supports never dropping completion events. If a completion
/// event occurs and the CQ ring is full, the kernel stores the event internally until such a
/// time that the CQ ring has room for more entries.
pub fn is_feature_nodrop(&self) -> bool {
self.0.features & sys::IORING_FEAT_NODROP != 0
}
/// If this flag is set, applications can be certain that any data for async offload has been consumed
/// when the kernel has consumed the SQE
pub fn is_feature_submit_stable(&self) -> bool {
self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0
}
/// If this flag is set, applications can specify offset == -1 with
/// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}`
/// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1.
/// It’ll use (and update) the current file position.
///
/// This obviously comes with the caveat that if the application has multiple reads or writes in flight,
/// then the end result will not be as expected.
/// This is similar to threads sharing a file descriptor and doing IO using the current file position.
pub fn is_feature_rw_cur_pos(&self) -> bool {
self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0
}
/// If this flag is set, then io_uring guarantees that both sync and async execution of
/// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests.
/// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring.
/// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same.
    /// Note that this is the default behavior;
/// tasks can still register different personalities through
/// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe.
pub fn is_feature_cur_personality(&self) -> bool {
self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_fast_poll(&self) -> bool {
|
self.0.features & sys::IORING_FEAT_FAST_POLL != 0
}
|
identifier_body
|
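Because params() returns the Parameters wrapper shown above, kernel feature support can be checked once the ring exists. A short sketch under the same crate-name assumption; the two feature accessors are the ones defined in this file:

// Inspect a couple of the feature flags reported by the kernel at setup time.
fn report_features(ring: &io_uring::IoUring) {
    let p = ring.params();
    if p.is_feature_nodrop() {
        // Completions are buffered by the kernel instead of dropped when the CQ ring is full.
        println!("IORING_FEAT_NODROP is available");
    }
    if p.is_feature_rw_cur_pos() {
        // offset == -1 reads and writes use (and update) the current file position.
        println!("IORING_FEAT_RW_CUR_POS is available");
    }
}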
|
lib.rs
|
params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
    /// Create an IoUring instance
    ///
    /// The `entries` value sets the size of the queue,
    /// and it should be a power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 {
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
} else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
    /// This provides a raw interface, so the developer must ensure that the parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
    /// Get the submitter, the submission queue, and the completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
    /// Build an [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
pub fn
|
is_feature_single_mmap
|
identifier_name
|
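split() above lends out the submitter together with mutable borrows of both queues, so submission and completion can be driven from one scope. A hedged sketch that exercises only the methods shown here; it assumes SQEs were already queued elsewhere, since the SQE-building API is not part of this excerpt:

// Submit whatever is pending and block until at least one completion arrives.
fn submit_pending(ring: &mut io_uring::IoUring) -> std::io::Result<usize> {
    let (submitter, _sq, _cq) = ring.split();
    submitter.submit_and_wait(1)
}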
|
lib.rs
|
map>,
}
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
    /// Create an IoUring instance
    ///
    /// The `entries` value sets the size of the queue,
    /// and it should be a power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0
|
else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
    /// This provides a raw interface, so the developer must ensure that the parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
    /// Get the submitter, the submission queue, and the completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
    /// Build an [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
|
{
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
}
|
conditional_block
|
seq2seq.py
|
.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def forward_pass(self, h_tm1, x_t): # Function intended to be used by tf.scan
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
|
# Perform the scan operation (admittedly hacky)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Initial values. They must be reshaped to rank-2 tensors so they can be concatenated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concatenate the predicted word to the sentence (instead of list.append(), because tf.while_loop() doesn't support
# non-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that returns the predicted sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
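# A small NumPy sketch (not part of the original model) of the same GRU update that
# GRU.forward_pass implements above; useful for sanity-checking the gate equations with
# concrete arrays. The weight names mirror the tf.Variables defined in the class.
import numpy as np

def numpy_gru_step(x_t, h_tm1, Wr, Wz, Wh, Ur, Uz, Uh, br, bz, bh):
    """One GRU step. x_t has shape (1, input_dim); h_tm1 has shape (1, hidden_size)."""
    sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
    z_t = sigmoid(x_t @ Wz + h_tm1 @ Uz + bz)                 # update gate
    r_t = sigmoid(x_t @ Wr + h_tm1 @ Ur + br)                 # reset gate
    h_proposal = np.tanh(x_t @ Wh + (r_t * h_tm1) @ Uh + bh)  # candidate state
    return (1.0 - z_t) * h_tm1 + z_t * h_proposal             # next hidden state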
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Corpus properties: \n')
print('\tDictionary with %d words' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word_to
|
self.h_0 = h_0
|
conditional_block
|
seq2seq.py
|
.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def forward_pass(self, h_tm1, x_t): # Function intended to be used by tf.scan
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
|
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
self.h_0 = h_0
# Perform the scan operation (admittedly hacky)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Initial values. They must be reshaped to rank-2 tensors so they can be concatenated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concatenate the predicted word to the sentence (instead of list.append(), because tf.while_loop() doesn't support
# non-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that returns the predicted sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Corpus properties: \n')
print('\tDictionary with %d words' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word_to
|
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
|
random_line_split
|
seq2seq.py
|
.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def forward_pass(self, h_tm1, x_t): # Function intended to be used by tf.scan
|
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
self.h_0 = h_0
# Perform the scan operation (admittedly hacky)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Initial values. They must be reshaped to rank-2 tensors so they can be concatenated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concatenate the predicted word to the sentence (instead of list.append(), because tf.while_loop() doesn't support
# non-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that returns the predicted sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Corpus properties: \n')
print('\tDictionary with %d words' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word
|
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
|
identifier_body
|
seq2seq.py
|
.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def
|
(self, h_tm1, x_t): # Function intended to be used by tf.scan
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
self.h_0 = h_0
# Perform the scan operation (admittedly hacky)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Initial values. They must be reshaped to rank-2 tensors so they can be concatenated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concatenate the predicted word to the sentence (instead of list.append(), because tf.while_loop() doesn't support
# non-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that returns the predicted sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Corpus properties: \n')
print('\tDictionary with %d words' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word
|
forward_pass
|
identifier_name
|
error.rs
|
pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status {
let ptr = if let Some(message) = message {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. We construct a valid, null-terminated
// `CString` from the message, which must be a valid C-style
// string to pass as the message. This function always returns a
// new, heap allocated pointer to an `AStatus` object, so we
// know the returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr())
}
} else {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. This function always returns a new,
// heap allocated pointer to an `AStatus` object, so we know the
// returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificError(err)
}
};
Self(ptr)
}
/// Create a status object from an exception code
pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status {
if let Some(message) = message {
let ptr = unsafe {
sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr())
};
Self(ptr)
} else {
exception.into()
}
}
/// Create a status object from a raw `AStatus` pointer.
///
/// # Safety
///
/// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`.
pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self {
Self(ptr)
}
/// Returns `true` if this status represents a successful transaction.
pub fn is_ok(&self) -> bool {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_isOk` here.
sys::AStatus_isOk(self.as_native())
}
}
/// Returns a description of the status.
pub fn get_description(&self) -> String {
let description_ptr = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getDescription`
// here.
//
// `AStatus_getDescription` always returns a valid pointer to a null
// terminated C string. Rust is responsible for freeing this pointer
// via `AStatus_deleteDescription`.
sys::AStatus_getDescription(self.as_native())
};
let description = unsafe {
// Safety: `AStatus_getDescription` always returns a valid C string,
// which can be safely converted to a `CStr`.
CStr::from_ptr(description_ptr)
};
let description = description.to_string_lossy().to_string();
unsafe {
// Safety: `description_ptr` was returned from
// `AStatus_getDescription` above, and must be freed via
// `AStatus_deleteDescription`. We must not access the pointer after
// this call, so we copy it into an owned string above and return
// that string.
sys::AStatus_deleteDescription(description_ptr);
}
description
}
/// Returns the exception code of the status.
pub fn exception_code(&self) -> ExceptionCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getExceptionCode`
// here.
sys::AStatus_getExceptionCode(self.as_native())
};
parse_exception_code(code)
}
/// Return a status code representing a transaction failure, or
/// `StatusCode::OK` if there was no transaction failure.
///
/// If this method returns `OK`, the status may still represent a different
/// exception or a service specific error. To find out if this transaction
/// as a whole is okay, use [`is_ok`](Self::is_ok) instead.
pub fn transaction_error(&self) -> StatusCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getStatus` here.
sys::AStatus_getStatus(self.as_native())
};
parse_status_code(code)
}
/// Return a service specific error if this status represents one.
///
/// This function will only ever return a non-zero result if
/// [`exception_code`](Self::exception_code) returns
/// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the
/// status object may still represent a different exception or status. To
/// find out if this transaction as a whole is okay, use
/// [`is_ok`](Self::is_ok) instead.
pub fn service_specific_error(&self) -> i32 {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to
// `AStatus_getServiceSpecificError` here.
sys::AStatus_getServiceSpecificError(self.as_native())
}
}
/// Calls `op` if the status was ok, otherwise returns an `Err` value of
/// `self`.
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status>
where
F: FnOnce() -> result::Result<T, Status>,
{
<result::Result<(), Status>>::from(self)?;
op()
}
}
impl error::Error for Status {}
impl Display for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl Debug for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl PartialEq for Status {
fn eq(&self, other: &Status) -> bool {
let self_code = self.exception_code();
let other_code = other.exception_code();
match (self_code, other_code) {
(ExceptionCode::NONE, ExceptionCode::NONE) => true,
(ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => {
self.transaction_error() == other.transaction_error()
&& self.get_description() == other.get_description()
}
(ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => {
self.service_specific_error() == other.service_specific_error()
&& self.get_description() == other.get_description()
}
(e1, e2) => e1 == e2 && self.get_description() == other.get_description(),
}
}
}
impl Eq for Status {}
impl From<StatusCode> for Status {
fn from(status: StatusCode) -> Status {
(status as status_t).into()
}
}
impl From<status_t> for Status {
fn from(status: status_t) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromStatus` expects any `status_t` integer, so
// this is a safe FFI call. Unknown values will be coerced into
// UNKNOWN_ERROR.
sys::AStatus_fromStatus(status)
};
Self(ptr)
}
}
impl From<ExceptionCode> for Status {
fn from(code: ExceptionCode) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromExceptionCode` expects any
// `binder_exception_t` (i32) integer, so this is a safe FFI call.
// Unknown values will be coerced into EX_TRANSACTION_FAILED.
sys::AStatus_fromExceptionCode(code as i32)
};
Self(ptr)
}
}
// TODO: impl Try for Status when try_trait is stabilized
// https://github.com/rust-lang/rust/issues/42327
impl From<Status> for result::Result<(), Status> {
fn from(status: Status) -> result::Result<(), Status> {
if status.is_ok() {
Ok(())
} else {
Err(status)
}
}
}
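// A minimal sketch (not part of the original source) showing how these conversions
// compose: a successful `Status` becomes `Ok(())`, and any other status becomes
// `Err(status)`, which lets callers propagate it with `?`.
#[allow(dead_code)]
fn status_round_trip() -> result::Result<(), Status> {
    // A service-specific error is not "ok", so it converts into `Err(..)`.
    let err = Status::new_service_specific_error(42, None);
    assert_eq!(err.service_specific_error(), 42);
    assert!(!err.is_ok());
    // The `From<Status>` impl above performs the conversion used by `?`.
    <result::Result<(), Status>>::from(err)
}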
impl From<Status> for status_t {
fn from(status: Status) -> status_t {
status.transaction_error() as status_t
}
}
impl Drop for Status {
fn drop(&mut self) {
unsafe {
// Safety: `Status` manages the lifetime of its inner `AStatus`
// pointee, so we need to delete it here. We know that the pointer
// will be valid here since `Status` always contains a valid pointer
// while it is alive.
sys::AStatus_delete(self.0);
}
}
}
/// # Safety
///
/// `Status` always contains a valid pointer to an `AStatus` object, so we can
/// trivially convert it to a correctly-typed raw pointer.
///
/// Care must be taken that the returned pointer is only dereferenced while the
/// `Status` object is still alive.
unsafe impl AsNative<sys::AStatus> for Status {
fn as_native(&self) -> *const sys::AStatus
|
{
self.0
}
|
identifier_body
|
|
error.rs
|
as i32 => StatusCode::DEAD_OBJECT,
e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION,
e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX,
e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA,
e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK,
e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT,
e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION,
e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED,
e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL,
_ => StatusCode::UNKNOWN_ERROR,
}
}
pub use sys::android_c_interface_ExceptionCode as ExceptionCode;
fn parse_exception_code(code: i32) -> ExceptionCode {
match code {
e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE,
e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY,
e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE,
e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT,
e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER,
e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE,
e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD,
e if e == ExceptionCode::UNSUPPORTED_OPERATION as i32 => {
ExceptionCode::UNSUPPORTED_OPERATION
}
e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC,
_ => ExceptionCode::TRANSACTION_FAILED,
}
}
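// A tiny illustration (not part of the original source): any raw value that matches no
// known exception code falls back to `TRANSACTION_FAILED`.
#[allow(dead_code)]
fn unknown_exception_code_example() -> ExceptionCode {
    parse_exception_code(-12345) // yields ExceptionCode::TRANSACTION_FAILED
}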
// Safety: `Status` always contains an owning pointer to a valid `AStatus`. The
// lifetime of the contained pointer is the same as the `Status` object.
/// High-level binder status object that encapsulates a standard way to keep
/// track of and chain binder errors along with service specific errors.
///
/// Used in AIDL transactions to represent failed transactions.
pub struct Status(*mut sys::AStatus);
// Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the
// duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status`
// in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not
// be mutating them underneath us.
unsafe impl Sync for Status {}
// Safety: `Status` always contains an owning pointer to a global, immutable, interned `AStatus`.
// A thread-local `AStatus` would not be valid.
unsafe impl Send for Status {}
impl Status {
/// Create a status object representing a successful transaction.
pub fn ok() -> Self {
let ptr = unsafe {
// Safety: `AStatus_newOk` always returns a new, heap allocated
// pointer to an `AStatus` object, so we know this pointer will be
// valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_newOk()
};
Self(ptr)
}
/// Create a status object from a service specific error
pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status {
let ptr = if let Some(message) = message {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. We construct a valid, null-terminated
// `CString` from the message, which must be a valid C-style
// string to pass as the message. This function always returns a
// new, heap allocated pointer to an `AStatus` object, so we
// know the returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr())
}
} else {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. This function always returns a new,
// heap allocated pointer to an `AStatus` object, so we know the
// returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificError(err)
}
};
Self(ptr)
}
/// Create a status object from an exception code
pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status {
if let Some(message) = message {
let ptr = unsafe {
sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr())
};
Self(ptr)
} else {
exception.into()
}
}
/// Create a status object from a raw `AStatus` pointer.
///
/// # Safety
///
/// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`.
pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self {
Self(ptr)
}
/// Returns `true` if this status represents a successful transaction.
pub fn is_ok(&self) -> bool {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_isOk` here.
sys::AStatus_isOk(self.as_native())
}
}
/// Returns a description of the status.
pub fn get_description(&self) -> String {
let description_ptr = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getDescription`
// here.
//
// `AStatus_getDescription` always returns a valid pointer to a null
// terminated C string. Rust is responsible for freeing this pointer
// via `AStatus_deleteDescription`.
sys::AStatus_getDescription(self.as_native())
};
let description = unsafe {
// Safety: `AStatus_getDescription` always returns a valid C string,
// which can be safely converted to a `CStr`.
CStr::from_ptr(description_ptr)
};
let description = description.to_string_lossy().to_string();
unsafe {
// Safety: `description_ptr` was returned from
// `AStatus_getDescription` above, and must be freed via
// `AStatus_deleteDescription`. We must not access the pointer after
// this call, so we copy it into an owned string above and return
// that string.
sys::AStatus_deleteDescription(description_ptr);
}
description
}
/// Returns the exception code of the status.
pub fn exception_code(&self) -> ExceptionCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getExceptionCode`
// here.
sys::AStatus_getExceptionCode(self.as_native())
};
parse_exception_code(code)
}
/// Return a status code representing a transaction failure, or
/// `StatusCode::OK` if there was no transaction failure.
///
/// If this method returns `OK`, the status may still represent a different
/// exception or a service specific error. To find out if this transaction
/// as a whole is okay, use [`is_ok`](Self::is_ok) instead.
pub fn transaction_error(&self) -> StatusCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getStatus` here.
sys::AStatus_getStatus(self.as_native())
};
parse_status_code(code)
}
/// Return a service specific error if this status represents one.
///
/// This function will only ever return a non-zero result if
/// [`exception_code`](Self::exception_code) returns
/// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the
/// status object may still represent a different exception or status. To
/// find out if this transaction as a whole is okay, use
/// [`is_ok`](Self::is_ok) instead.
pub fn service_specific_error(&self) -> i32 {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to
// `AStatus_getServiceSpecificError` here.
sys::AStatus_getServiceSpecificError(self.as_native())
}
}
/// Calls `op` if the status was ok, otherwise returns an `Err` value of
/// `self`.
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status>
where
F: FnOnce() -> result::Result<T, Status>,
{
<result::Result<(), Status>>::from(self)?;
op()
}
}
impl error::Error for Status {}
impl Display for Status {
fn
|
fmt
|
identifier_name
|
|
error.rs
|
32 => StatusCode::FAILED_TRANSACTION,
e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX,
e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA,
e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK,
e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT,
e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION,
e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED,
e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL,
_ => StatusCode::UNKNOWN_ERROR,
}
}
pub use sys::android_c_interface_ExceptionCode as ExceptionCode;
fn parse_exception_code(code: i32) -> ExceptionCode {
match code {
e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE,
e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY,
e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE,
e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT,
e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER,
e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE,
e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD,
e if e == ExceptionCode::UNSUPPORTED_OPERATION as i32 => {
ExceptionCode::UNSUPPORTED_OPERATION
}
e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC,
_ => ExceptionCode::TRANSACTION_FAILED,
}
}
// Safety: `Status` always contains an owning pointer to a valid `AStatus`. The
// lifetime of the contained pointer is the same as the `Status` object.
/// High-level binder status object that encapsulates a standard way to keep
/// track of and chain binder errors along with service specific errors.
///
/// Used in AIDL transactions to represent failed transactions.
pub struct Status(*mut sys::AStatus);
// Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the
// duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status`
// in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not
// be mutating them underneath us.
unsafe impl Sync for Status {}
// Safety: `Status` always contains an owning pointer to a global, immutable, interned `AStatus`.
// A thread-local `AStatus` would not be valid.
unsafe impl Send for Status {}
impl Status {
/// Create a status object representing a successful transaction.
pub fn ok() -> Self {
let ptr = unsafe {
// Safety: `AStatus_newOk` always returns a new, heap allocated
// pointer to an `AStatus` object, so we know this pointer will be
// valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_newOk()
};
Self(ptr)
}
/// Create a status object from a service specific error
pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status {
let ptr = if let Some(message) = message {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. We construct a valid, null-terminated
// `CString` from the message, which must be a valid C-style
// string to pass as the message. This function always returns a
// new, heap allocated pointer to an `AStatus` object, so we
// know the returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr())
}
} else {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. This function always returns a new,
// heap allocated pointer to an `AStatus` object, so we know the
// returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificError(err)
}
};
Self(ptr)
}
/// Create a status object from an exception code
pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status {
if let Some(message) = message {
let ptr = unsafe {
sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr())
};
Self(ptr)
} else {
exception.into()
}
}
/// Create a status object from a raw `AStatus` pointer.
///
/// # Safety
///
/// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`.
pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self {
Self(ptr)
}
/// Returns `true` if this status represents a successful transaction.
pub fn is_ok(&self) -> bool {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_isOk` here.
sys::AStatus_isOk(self.as_native())
}
}
/// Returns a description of the status.
pub fn get_description(&self) -> String {
let description_ptr = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getDescription`
// here.
//
// `AStatus_getDescription` always returns a valid pointer to a null
// terminated C string. Rust is responsible for freeing this pointer
// via `AStatus_deleteDescription`.
sys::AStatus_getDescription(self.as_native())
};
let description = unsafe {
// Safety: `AStatus_getDescription` always returns a valid C string,
// which can be safely converted to a `CStr`.
CStr::from_ptr(description_ptr)
};
let description = description.to_string_lossy().to_string();
unsafe {
// Safety: `description_ptr` was returned from
// `AStatus_getDescription` above, and must be freed via
// `AStatus_deleteDescription`. We must not access the pointer after
// this call, so we copy it into an owned string above and return
// that string.
sys::AStatus_deleteDescription(description_ptr);
}
description
}
/// Returns the exception code of the status.
pub fn exception_code(&self) -> ExceptionCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getExceptionCode`
// here.
sys::AStatus_getExceptionCode(self.as_native())
};
parse_exception_code(code)
}
/// Return a status code representing a transaction failure, or
/// `StatusCode::OK` if there was no transaction failure.
///
/// If this method returns `OK`, the status may still represent a different
/// exception or a service specific error. To find out if this transaction
/// as a whole is okay, use [`is_ok`](Self::is_ok) instead.
pub fn transaction_error(&self) -> StatusCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getStatus` here.
sys::AStatus_getStatus(self.as_native())
};
parse_status_code(code)
}
/// Return a service specific error if this status represents one.
///
/// This function will only ever return a non-zero result if
/// [`exception_code`](Self::exception_code) returns
/// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the
/// status object may still represent a different exception or status. To
/// find out if this transaction as a whole is okay, use
/// [`is_ok`](Self::is_ok) instead.
pub fn service_specific_error(&self) -> i32 {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to
// `AStatus_getServiceSpecificError` here.
sys::AStatus_getServiceSpecificError(self.as_native())
}
}
/// Calls `op` if the status was ok, otherwise returns an `Err` value of
/// `self`.
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status>
where
F: FnOnce() -> result::Result<T, Status>,
{
<result::Result<(), Status>>::from(self)?;
op()
}
}
impl error::Error for Status {}
|
impl Display for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
|
random_line_split
|
|
setup.rs
|
<'a, I, D: Dimensions, S> {
/// ID of this ship.
id: I,
/// Grid that the ship may occupy.
grid: &'a Grid<I, D>,
/// Placement info for the ship.
ship: &'a ShipPlacementInfo<S, D::Coordinate>,
}
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntry<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref. With a const ref, we can give back a ref that lives as long as self rather
// than just as long as this method call.
pub fn placement(&self) -> Option<&'a ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
}
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check its placement status and place or unplace it.
pub struct ShipEntryMut<'a, I, D: Dimensions, S> {
/// ID of this ship
id: I,
/// Grid that ships are being placed into.
grid: &'a mut Grid<I, D>,
/// Back ref to the ship.
ship: &'a mut ShipPlacementInfo<S, D::Coordinate>,
}
/// Implementation of the shared parts of ShipEntry.
macro_rules! ship_entry_shared {
($t:ident) => {
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> $t<'a, I, D, S> {
/// Get the ID of this ship.
pub fn id(&self) -> &I {
&self.id
}
/// Returns true if this ship has been placed.
pub fn placed(&self) -> bool {
self.ship.placement.is_some()
}
/// Get an iterator over possible projections of the shape for this ship that
/// start from the given [`Coordinate`]. If there are no possible placements
/// from the given coordinate, including if the coordinate is out of bounds,
/// the resulting iterator will be empty.
pub fn get_placements(
&self,
coord: D::Coordinate,
) -> ProjectIter<D, S::ProjectIterState> {
self.ship.shape.project(coord, &self.grid.dim)
}
/// Check if the specified placement is valid for this ship.
pub fn check_placement(
&self,
placement: &ShapeProjection<D::Coordinate>,
) -> Result<(), CannotPlaceReason> {
if self.placed() {
Err(CannotPlaceReason::AlreadyPlaced)
} else if !self
.ship
.shape
.is_valid_placement(placement, &self.grid.dim)
{
Err(CannotPlaceReason::InvalidProjection)
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => return Err(CannotPlaceReason::InvalidProjection),
Some(cell) if cell.ship.is_some() => {
return Err(CannotPlaceReason::AlreadyOccupied)
}
_ => {}
}
}
Ok(())
}
}
}
};
}
ship_entry_shared!(ShipEntry);
ship_entry_shared!(ShipEntryMut);
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntryMut<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref.
pub fn placement(&self) -> Option<&ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
/// Attempts to place the ship onto the given coordinates. If the ship is already
/// placed, returns `Err` with the attempted placement and reason placement failed,
/// otherwise returns `Ok(())`
pub fn place(
&mut self,
placement: ShapeProjection<D::Coordinate>,
) -> Result<(), PlaceError<ShapeProjection<D::Coordinate>>> {
if self.placed() {
Err(PlaceError::new(CannotPlaceReason::AlreadyPlaced, placement))
} else if !self
.ship
.shape
.is_valid_placement(&placement, &self.grid.dim)
{
Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
))
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => {
// ShipShape should ensure that all coordinates are valid, but don't
// trust it.
return Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
));
}
Some(cell) if cell.ship.is_some() => {
return Err(PlaceError::new(
CannotPlaceReason::AlreadyOccupied,
placement,
));
}
_ => {}
}
}
// Already ensured that every position is valid and not occupied.
for coord in placement.iter() {
self.grid[coord].ship = Some(self.id.to_owned());
}
self.ship.placement = Some(placement);
Ok(())
}
}
/// Attempt to clear the placement of the ship. Returns the previous placement of the
/// ship if any. Returns `None` if the ship has not been placed.
pub fn unplace(&mut self) -> Option<ShapeProjection<D::Coordinate>> {
self.ship.placement.take().map(|placement| {
for coord in placement.iter() {
                // We should only allow placement on valid cells, so indexing here will not panic.
self.grid[coord].ship = None;
}
placement
})
}
}
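// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the intended placement
// workflow against the API above. `u32` is used as an arbitrary ship ID and is
// assumed to satisfy the `ShipId` bound; the `Dimensions`/`ShipShape`
// implementations and the starting coordinate are supplied by the caller, and
// `ProjectIter` is assumed to be an `Iterator` over `ShapeProjection`s.
#[allow(dead_code)]
fn example_setup_flow<D: Dimensions, S: ShipShape<D>>(
    dim: D,
    shape: S,
    origin: D::Coordinate,
) -> Result<Board<u32, D>, ()> {
    let mut setup: BoardSetup<u32, D, S> = BoardSetup::new(dim);
    {
        // A fresh ID cannot collide, so the AddShipError case is ignored here.
        let mut ship = setup.add_ship(0, shape).map_err(|_| ())?;
        // Take the first projection offered from `origin`, if any.
        let placement = ship.get_placements(origin).next().ok_or(())?;
        ship.place(placement).map_err(|_| ())?;
    }
    // `start` consumes the setup; it fails if any ship is still unplaced.
    setup.start().map_err(|_| ())
}
// ---------------------------------------------------------------------------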
/// Contains a ship's shape and current placement status in the grid.
struct ShipPlacementInfo<S, C> {
/// Shape being placed.
shape: S,
/// Placement of this ship, if it has been placed.
placement: Option<ShapeProjection<C>>,
}
/// Setup phase for a [`Board`]. Allows placing ships and does not allow shooting.
pub struct BoardSetup<I: ShipId, D: Dimensions, S: ShipShape<D>> {
/// Grid for placement of ships.
grid: Grid<I, D>,
    /// Mapping of added ShipIds to corresponding placement info.
ships: HashMap<I, ShipPlacementInfo<S, D::Coordinate>>,
}
impl<I: ShipId, D: Dimensions, S: ShipShape<D>> BoardSetup<I, D, S> {
/// Begin game setup by constructing a new board with the given [`Dimensions`].
pub fn new(dim: D) -> Self {
Self {
grid: Grid::new(dim),
ships: HashMap::new(),
}
}
    /// Get the [`Dimensions`] of this [`Board`].
pub fn dimensions(&self) -> &D {
&self.grid.dim
}
/// Tries to start the game. If all ships are placed, returns a [`Board`] with the
    /// current placements. If no ships have been added or any ship has not been placed,
    /// returns `Err(self)`.
pub fn start(self) -> Result<Board<I, D>, Self> {
if !self.ready() {
Err(self)
} else {
Ok(Board {
grid: self.grid,
ships: self
.ships
.into_iter()
.map(|(id, info)| match info.placement {
Some(placement) => (id, placement),
None => unreachable!(),
})
.collect(),
})
}
}
/// Checks if this board is ready to start. Returns `true` if at least one ship has
/// been added and all ships are placed.
pub fn ready(&self) -> bool {
!self.ships.is_empty() && self.ships.values().all(|ship| ship.placement.is_some())
}
/// Get an iterator over the ships configured on this board.
pub fn iter_ships(&self) -> impl Iterator<Item = ShipEntry<I, D, S>> {
let grid = &self.grid;
self.ships.iter().map(move |(id, ship)| ShipEntry {
id: id.clone(),
grid,
ship,
})
}
    /// Attempts to add a ship with the given ID. If the given ShipId is already in use,
    /// returns an error containing the ID and shape passed to this function. Otherwise
    /// adds the shape and returns the ShipEntryMut for it to allow placement.
pub fn add_ship(
&mut self,
id: I,
shape: S,
) -> Result<ShipEntryMut<I, D, S>, AddShipError<I, S>> {
match self.ships.entry(id.clone()) {
Entry::Occupied(_) => Err(AddShipError::new(id, shape)),
Entry::Vacant(entry) => {
let ship = entry.insert(ShipPlacementInfo {
shape,
placement: None,
});
Ok(ShipEntryMut {
id,
grid: &mut self.grid,
ship,
})
}
}
}
/// Get the [`ShipEntry`] for the ship with the specified ID if such a ship exists.
pub fn
|
get_ship
|
identifier_name
|
|
setup.rs
|
setup phase of the board.
use std::collections::{hash_map::Entry, HashMap};
use crate::{
board::{AddShipError, Board, CannotPlaceReason, Dimensions, Grid, PlaceError},
ships::{ProjectIter, ShapeProjection, ShipId, ShipShape},
};
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check its placement status.
pub struct ShipEntry<'a, I, D: Dimensions, S> {
/// ID of this ship.
id: I,
/// Grid that the ship may occupy.
grid: &'a Grid<I, D>,
/// Placement info for the ship.
ship: &'a ShipPlacementInfo<S, D::Coordinate>,
}
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntry<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref. With a const ref, we can give back a ref that lives as long as self rather
// than just as long as this method call.
pub fn placement(&self) -> Option<&'a ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
}
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check its placement status and place or unplace it.
pub struct ShipEntryMut<'a, I, D: Dimensions, S> {
/// ID of this ship
id: I,
/// Grid that ships are being placed into.
grid: &'a mut Grid<I, D>,
/// Back ref to the ship.
ship: &'a mut ShipPlacementInfo<S, D::Coordinate>,
}
/// Implementation of the shared parts of ShipEntry.
macro_rules! ship_entry_shared {
($t:ident) => {
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> $t<'a, I, D, S> {
/// Get the ID of this ship.
pub fn id(&self) -> &I {
&self.id
}
/// Returns true if this ship has been placed.
pub fn placed(&self) -> bool {
self.ship.placement.is_some()
}
            /// Get an iterator over possible projections of the shape for this ship that
/// start from the given [`Coordinate`]. If there are no possible placements
/// from the given coordinate, including if the coordinate is out of bounds,
/// the resulting iterator will be empty.
pub fn get_placements(
&self,
coord: D::Coordinate,
) -> ProjectIter<D, S::ProjectIterState> {
self.ship.shape.project(coord, &self.grid.dim)
}
/// Check if the specified placement is valid for this ship.
pub fn check_placement(
&self,
placement: &ShapeProjection<D::Coordinate>,
) -> Result<(), CannotPlaceReason> {
if self.placed() {
Err(CannotPlaceReason::AlreadyPlaced)
} else if !self
.ship
.shape
.is_valid_placement(placement, &self.grid.dim)
{
Err(CannotPlaceReason::InvalidProjection)
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => return Err(CannotPlaceReason::InvalidProjection),
Some(cell) if cell.ship.is_some() => {
return Err(CannotPlaceReason::AlreadyOccupied)
}
_ => {}
}
}
Ok(())
}
}
}
};
}
ship_entry_shared!(ShipEntry);
ship_entry_shared!(ShipEntryMut);
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntryMut<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref.
pub fn placement(&self) -> Option<&ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
    /// Attempts to place the ship onto the given coordinates. If the ship is already
    /// placed, returns `Err` with the attempted placement and the reason placement failed,
    /// otherwise returns `Ok(())`.
pub fn place(
&mut self,
placement: ShapeProjection<D::Coordinate>,
) -> Result<(), PlaceError<ShapeProjection<D::Coordinate>>> {
if self.placed() {
Err(PlaceError::new(CannotPlaceReason::AlreadyPlaced, placement))
} else if !self
.ship
.shape
.is_valid_placement(&placement, &self.grid.dim)
{
Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
))
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => {
// ShipShape should ensure that all coordinates are valid, but don't
// trust it.
return Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
|
placement,
));
}
_ => {}
}
}
// Already ensured that every position is valid and not occupied.
for coord in placement.iter() {
self.grid[coord].ship = Some(self.id.to_owned());
}
self.ship.placement = Some(placement);
Ok(())
}
}
/// Attempt to clear the placement of the ship. Returns the previous placement of the
/// ship if any. Returns `None` if the ship has not been placed.
pub fn unplace(&mut self) -> Option<ShapeProjection<D::Coordinate>> {
self.ship.placement.take().map(|placement| {
for coord in placement.iter() {
                // We should only allow placement on valid cells, so indexing here will not panic.
self.grid[coord].ship = None;
}
placement
})
}
}
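// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): trying several candidate
// projections for one ship. `check_placement` is used to validate a candidate
// without consuming it, and only a passing candidate is handed to `place`.
// `u8` stands in for any type satisfying the `ShipId` bound; the shape and the
// candidate projections are supplied by the caller.
#[allow(dead_code)]
fn example_try_candidates<D, S, C>(
    setup: &mut BoardSetup<u8, D, S>,
    id: u8,
    shape: S,
    candidates: C,
) -> bool
where
    D: Dimensions,
    S: ShipShape<D>,
    C: IntoIterator<Item = ShapeProjection<D::Coordinate>>,
{
    match setup.add_ship(id, shape) {
        // The ID was already taken; nothing was added.
        Err(_) => false,
        Ok(mut ship) => {
            for candidate in candidates {
                // Validate first; a failing candidate is simply skipped.
                if ship.check_placement(&candidate).is_ok() {
                    return ship.place(candidate).is_ok();
                }
            }
            false
        }
    }
}
// ---------------------------------------------------------------------------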
/// Contains a ship's shape and current placement status in the grid.
struct ShipPlacementInfo<S, C> {
/// Shape being placed.
shape: S,
/// Placement of this ship, if it has been placed.
placement: Option<ShapeProjection<C>>,
}
/// Setup phase for a [`Board`]. Allows placing ships and does not allow shooting.
pub struct BoardSetup<I: ShipId, D: Dimensions, S: ShipShape<D>> {
/// Grid for placement of ships.
grid: Grid<I, D>,
    /// Mapping of added ShipIds to corresponding placement info.
ships: HashMap<I, ShipPlacementInfo<S, D::Coordinate>>,
}
impl<I: ShipId, D: Dimensions, S: ShipShape<D>> BoardSetup<I, D, S> {
/// Begin game setup by constructing a new board with the given [`Dimensions`].
pub fn new(dim: D) -> Self {
Self {
grid: Grid::new(dim),
ships: HashMap::new(),
}
}
    /// Get the [`Dimensions`] of this [`Board`].
pub fn dimensions(&self) -> &D {
&self.grid.dim
}
/// Tries to start the game. If all ships are placed, returns a [`Board`] with the
    /// current placements. If no ships have been added or any ship has not been placed,
    /// returns `Err(self)`.
pub fn start(self) -> Result<Board<I, D>, Self> {
if !self.ready() {
Err(self)
} else {
Ok(Board {
grid: self.grid,
ships: self
.ships
.into_iter()
.map(|(id, info)| match info.placement {
Some(placement) => (id, placement),
None => unreachable!(),
})
.collect(),
})
}
}
/// Checks if this board is ready to start. Returns `true` if at least one ship has
/// been added and all ships are placed.
pub fn ready(&self) -> bool {
!self.ships.is_empty() && self.ships.values().all(|ship| ship.placement.is_some())
}
/// Get an iterator over the ships configured on this board.
pub fn iter_ships(&self) -> impl Iterator<Item = ShipEntry<I, D, S>> {
let grid = &self.grid;
self.ships.iter().map(move |(id, ship)| ShipEntry {
id: id.clone(),
grid,
ship,
})
}
    /// Attempts to add a ship with the given ID. If the given ShipId is already in use,
    /// returns an error containing the ID and shape passed to this function. Otherwise
    /// adds the shape and returns the ShipEntryMut for it to allow placement.
pub fn add_ship(
&mut self,
id: I,
shape: S,
) -> Result<ShipEntryMut<I, D, S>, AddShipError<I, S>> {
match self.ships.entry(id.clone()) {
Entry::Occupied(_) => Err(AddShipError::
|
));
}
Some(cell) if cell.ship.is_some() => {
return Err(PlaceError::new(
CannotPlaceReason::AlreadyOccupied,
|
random_line_split
|
ingredientsScraper.py
|
(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
R[food][recipename] = {}
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in
|
IngredientScraper2
|
identifier_name
|
|
ingredientsScraper.py
|
.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('casserole')
##ax = fig.add_subplot(523)
##r = plt.bar(range(440),Recipes['deviled%eggs'])
##ax.set_xticks([])
##plt.xlabel('deviledegg')
##ax = fig.add_subplot(524)
##r = plt.bar(range(440),Recipes['fried%rice'])
##ax.set_xticks([])
##plt.xlabel('friedrice')
##ax = fig.add_subplot(525)
##r = plt.bar(range(440),Recipes['kebab'])
##ax.set_xticks([])
##plt.xlabel('kebab')
##ax = fig.add_subplot(526)
##r = plt.bar(range(440),Recipes['samosa'])
##ax.set_xticks([])
##plt.xlabel('samosa')
##ax = fig.add_subplot(527)
##r = plt.bar(range(440),Recipes['pasta%salad'])
##ax.set_xticks([])
##plt.xlabel('pastasalad')
##ax = fig.add_subplot(528)
##r = plt.bar(range(440),Recipes['paella'])
##ax.set_xticks([])
##plt.xlabel('Paella')
##ax = fig.add_subplot(529)
##r = plt.bar(range(440),Recipes['spaghetti'])
##ax.set_xticks([])
##plt.xlabel('spaghetti')
##ax = fig.add_subplot(5,2,10)
##r = plt.bar(range(440),Recipes['roulade'])
##ax.set_xticks([])
##plt.xlabel('roulade')
#============== script to get top features ============================
#from sklearn.multiclass import OneVsRestClassifier
#import random as rnd
#recipedict='AllRecipesIng.npy'
#fooddish = fooddish[0]
#dataset = 'vlg_extractor/ImageNetSurveyMC/ImageNetSurveyMC'
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float32)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float32)
#testlabels = var['testlabels'].flatten()
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.uint8)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#k=5
##split training data into k-folds
#kfold = cross_validation.StratifiedKFold(trainlabels,k)
#param_grid = [
#{'estimator__C': [0.001, 0.01, 1, 10, 100], 'estimator__kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#svc = OneVsRestClassifier(svm.SVC(kernel='linear',C=1))
#svc.fit(X,trainlabels)
##clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=kfold, n_jobs=-1)
##clf.fit(np.concatenate((traindata,X),1),trainlabels)
#svm_weights = svc.coef_
|
#topfeatures = [None]*svm_weights.shape[0] #topfeatures for each class
#for i in xrange(svm_weights.shape[0]):
#featureIdx=np.argsort(abs(svm_weights[i,:]))
#topfeatures[i] = featureIdx[::-1][0:30] #get top 30
|
random_line_split
|
|
ingredientsScraper.py
|
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('c
|
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt
|
conditional_block
|
|
ingredientsScraper.py
|
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
R[food][recipename] = {}
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j
|
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
|
identifier_body
|
|
buffered.rs
|
is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// By wrapping a writer such as a [`TcpStream`] in a `BufWriter<W>`, a series of
/// small writes can be grouped together by the buffer and written out in a single
/// system call when the `BufWriter<W>` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
self.flush_buf()?;
}
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
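// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the batching behaviour
// described in the `BufWriter` docs above. Ten one-byte writes are absorbed by
// the in-memory buffer and only reach the inner writer when `flush` is called.
// A `Vec<u8>` is used as the inner writer purely to keep the sketch
// self-contained; in practice the win comes from wrapping something like a
// `TcpStream` or a file.
#[allow(dead_code)]
fn example_batched_writes() -> io::Result<()> {
    let mut writer = BufWriter::new(Vec::new());
    for byte in 0u8..10 {
        // Each call lands in the internal buffer; nothing reaches the Vec yet.
        writer.write(&[byte])?;
    }
    assert!(writer.get_ref().is_empty()); // still buffered
    writer.flush()?; // a single batched write to the inner Vec
    assert_eq!(writer.get_ref().len(), 10);
    Ok(())
}
// ----------------------------------------------------------------------------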
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
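// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): recovering from a failed
// `into_inner` call via `IntoInnerError`, as described above. `BrokenPipeWriter`
// is a hypothetical sink defined only for this sketch; every write it receives
// fails, so flushing the buffer during `into_inner` must fail as well.
#[allow(dead_code)]
fn example_into_inner_recovery() {
    struct BrokenPipeWriter;
    impl Write for BrokenPipeWriter {
        fn write(&mut self, _buf: &[u8]) -> io::Result<usize> {
            Err(Error::new(ErrorKind::BrokenPipe, "sink has gone away"))
        }
        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }

    let mut writer = BufWriter::new(BrokenPipeWriter);
    let _ = writer.write(b"unflushed bytes");
    match writer.into_inner() {
        Ok(_inner) => unreachable!("the sink always fails"),
        Err(err) => {
            // The write error is still available for inspection...
            assert_eq!(err.error().kind(), ErrorKind::BrokenPipe);
            // ...and so is the writer, with its buffer contents intact.
            let recovered = err.into_inner();
            assert_eq!(recovered.buffer(), b"unflushed bytes");
        }
    }
}
// ----------------------------------------------------------------------------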
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool,
}
impl<W: Write> LineWriter<W> {
/// Creates a new `LineWriter`.
///
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineWriter::with_capacity(1024, inner)
}
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
///
pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false }
}
/// Gets a reference to the underlying writer.
///
pub fn ge
|
t_ref(&
|
identifier_name
|
|
buffered.rs
|
of our internal buffer then we need to fetch
// some more data from the underlying reader.
// Branch using `>=` instead of the more correct `==`
// to tell the compiler that the pos..cap slice is always valid.
if self.pos >= self.cap {
debug_assert!(self.pos == self.cap);
self.cap = self.inner.read(&mut self.buf)?;
self.pos = 0;
}
Ok(&self.buf[self.pos..self.cap])
}
fn consume(&mut self, amt: usize) {
self.pos = cmp::min(self.pos + amt, self.cap);
}
}
impl<R> fmt::Debug for BufReader<R>
where
R: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufReader")
.field("reader", &self.inner)
.field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
.finish()
}
}
impl<R: Seek> Seek for BufReader<R> {
/// Seek to an offset, in bytes, in the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying reader would be at if the `BufReader<R>` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
///
/// See [`std::io::Seek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
///
/// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative
/// [`std::io::Seek`]: trait.Seek.html
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result: u64;
if let SeekFrom::Current(n) = pos {
let remainder = (self.cap - self.pos) as i64;
// it should be safe to assume that remainder fits within an i64 as the alternative
// means we managed to allocate 8 exbibytes and that's absurd.
// But it's not out of the realm of possibility for some weird underlying reader to
// support seeking by i64::min_value() so we need to handle underflow when subtracting
// remainder.
if let Some(offset) = n.checked_sub(remainder) {
result = self.inner.seek(SeekFrom::Current(offset))?;
} else {
// seek backwards by our remainder, and then by the offset
self.inner.seek(SeekFrom::Current(-remainder))?;
self.discard_buffer();
result = self.inner.seek(SeekFrom::Current(n))?;
}
} else {
// Seeking with Start/End doesn't care about our buffer length.
result = self.inner.seek(pos)?;
}
self.discard_buffer();
Ok(result)
}
}
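// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the seek semantics
// documented above. `SeekFrom::Current` is interpreted relative to the logical
// position (as if no buffer existed), and the seek discards whatever is
// currently buffered. An `io::Cursor` over an in-memory byte vector is used as
// the inner reader so the sketch is self-contained.
#[allow(dead_code)]
fn example_buffered_seek() -> io::Result<()> {
    let data: Vec<u8> = (0u8..32).collect();
    let mut reader = BufReader::new(io::Cursor::new(data));

    let mut first = [0u8; 4];
    reader.read_exact(&mut first)?; // logical position is now 4
    // Skip 8 bytes relative to the logical position. Internally this becomes a
    // single seek on the cursor and the buffered bytes are thrown away.
    let pos = reader.seek(SeekFrom::Current(8))?;
    assert_eq!(pos, 12);

    let mut next = [0u8; 1];
    reader.read_exact(&mut next)?;
    assert_eq!(next[0], 12); // data[12]
    Ok(())
}
// ----------------------------------------------------------------------------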
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// By wrapping a writer such as a [`TcpStream`] in a `BufWriter<W>`, a series of
/// small writes can be grouped together by the buffer and written out in a single
/// system call when the `BufWriter<W>` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity()
|
{
self.flush_buf()?;
}
|
conditional_block
|
|
buffered.rs
|
Writer<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
self.flush_buf()?;
}
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool,
}
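// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the newline-driven
// flushing described in the `LineWriter` docs above. Everything up to and
// including the last `'\n'` in a write is pushed through to the inner writer,
// while the trailing partial line stays buffered until a later newline, an
// explicit flush, or drop. A `Vec<u8>` sink keeps the sketch self-contained.
#[allow(dead_code)]
fn example_line_writer() -> io::Result<()> {
    let mut writer = LineWriter::new(Vec::new());

    writer.write(b"first line\nsecond ")?;
    // The completed line has reached the Vec; "second " is still buffered.
    assert_eq!(&writer.get_ref()[..], b"first line\n");

    writer.write(b"line\n")?;
    // The second newline pushes the rest of that line through as well.
    assert_eq!(&writer.get_ref()[..], b"first line\nsecond line\n");
    Ok(())
}
// ----------------------------------------------------------------------------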
impl<W: Write> LineWriter<W> {
/// Creates a new `LineWriter`.
///
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineWriter::with_capacity(1024, inner)
}
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
///
pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false }
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.get_ref()
}
/// Gets a mutable reference to the underlying writer.
///
/// Caution must be taken when calling methods on the mutable reference
/// returned as extra writes could corrupt the output stream.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.get_mut()
}
/// Unwraps this `LineWriter`, returning the underlying writer.
///
/// The internal buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
|
self.inner.into_inner().map_err(|IntoInnerError(buf, e)| {
IntoInnerError(LineWriter { inner: buf, need_flush: false }, e)
})
}
}
|
identifier_body
|
|
buffered.rs
|
output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
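/// For example, here is a hedged sketch of the scenario the next paragraph
/// refers to: ten one-byte writes to a [`TcpStream`]. The address is only a
/// placeholder, so the snippet is marked `no_run`.
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufWriter;
/// use std::net::TcpStream;
///
/// fn main() -> std::io::Result<()> {
///     let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254")?);
///     for i in 0..10u8 {
///         stream.write_all(&[i + 1])?; // buffered in memory, no syscall yet
///     }
///     stream.flush()?; // all ten bytes go out together here
///     Ok(())
/// }
/// ```
///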
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
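/// # Examples
///
/// A hedged sketch (not taken from the original docs): buffer a few bytes, then
/// take the inner `Vec<u8>` back; the buffered data is flushed into it first.
///
/// ```
/// use std::io::{BufWriter, Write};
///
/// fn main() -> std::io::Result<()> {
///     let mut writer = BufWriter::new(Vec::<u8>::new());
///     writer.write_all(b"abc")?; // sits in the buffer, not yet in the Vec
///     let inner = writer.into_inner().expect("flush failed");
///     assert_eq!(inner, b"abc".to_vec());
///     Ok(())
/// }
/// ```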
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
self.flush_buf()?;
}
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
|
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool,
|
random_line_split
|
|
util.ts
|
uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
}
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
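// Worked example (hedged, not from the original author): scaleInto(0, 100, 10, 55)
// -> minMaxScale(0, 100, 55) = 0.55, times toMax 10 = 5.5, floored to 5, clamped into [0, 10] -> 5.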
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) {
break
}
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
|
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
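// Round-trip sketch (hypothetical short tag, ignoring the tagPattern check):
// in '0289PYLQGRJCUV', '2' has index 1 and 'G' index 8, so tagToId('#2G')
// computes 1*14 + 8 = 22; decoding 22 gives 22 % 14 = 8 -> 'G', then
// 1 % 14 = 1 -> '2', i.e. idToTag('22') === '#2G'.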
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
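// Example (hedged): 2020-07-20T00:00:00Z is ~6.7 days past the anchor, so
// seasonsSince = ceil(6.7 / 14) = 1 and the result is 2020-07-27T08:00:00Z,
// the end of the 2-week season containing the timestamp.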
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/**
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
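// Example (hedged): 2020-07-14T12:00:00Z is ~1.1 days past the anchor, so
// daysSince = ceil(1.1) = 2 and the result is 2020-07-14T09:30:00Z, i.e. the
// timestamp rounded down to that day's 09:30 UTC boundary.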
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown', '
|
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
|
random_line_split
|
util.ts
|
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) {
break
}
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/**
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'solo
|
{
const uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
}
|
identifier_body
|
|
util.ts
|
.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) {
break
}
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/**
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown', 'brawlBall', 'bounty', 'heist', 'gemGrab']
const dayStart = new Date(Date.parse('2021-04-24T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.floor(diff/1000/60/60/24)
return order[daysSince % order.length]
}
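// Example (hedged): 2021-04-25T12:00:00Z is one full day past the anchor, so
// daysSince = 1 and the winner mode is order[1] = 'siege'; the eight modes
// then keep rotating, one per day.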
/**
* Get the end date of the current and the last database-season
*/
export function
|
getMonthSeasonEnd
|
identifier_name
|
|
util.ts
|
uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
}
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value)
|
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/**
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown
|
{
break
}
|
conditional_block
|
parameter_noise.py
|
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does its step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
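"""Builds a tf op that re-samples `self.noise` in-graph.
Hedged summary of the body below: one `tf1.assign` of fresh Gaussian noise
(stddev = `self.stddev`) per noise variable, grouped into a single op. Also
used by the tf-eager code path.
"""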
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the currently applied parameter noise from the model's weights.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remove the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
|
return {"cur_stddev": self.stddev_val}
|
identifier_body
|
|
parameter_noise.py
|
()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
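# Hedged reading of the schedule above: epsilon is pinned at 1.0 for the
# first `random_timesteps` steps (pure random exploration), then drops to
# a constant 0.01 for the rest of training.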
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does its step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def
|
_add_stored_noise
|
identifier_name
|
|
parameter_noise.py
|
[1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
|
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does its step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <=
|
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
|
conditional_block
|
parameter_noise.py
|
see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
|
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does its step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta
|
random_line_split
|
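The noise bookkeeping in the constructor above (one stored Gaussian tensor per trainable variable, added at episode start and removed again before optimizer updates) can be illustrated without any framework. The numpy sketch below is only a toy model of that sample/add/remove cycle; the class and attribute names are illustrative, not RLlib's.

```python
import numpy as np


class ParamNoiseModel:
    """Toy model of parameter-space noise: perturb the weights, then restore them."""

    def __init__(self, weights, stddev=0.05):
        self.weights = [w.astype(np.float64) for w in weights]
        self.stddev = stddev
        self.noise = [np.zeros_like(w) for w in self.weights]
        self.noisy = False

    def sample_new_noise(self):
        self.noise = [np.random.normal(0.0, self.stddev, w.shape) for w in self.weights]

    def add_stored_noise(self):
        if not self.noisy:
            self.weights = [w + n for w, n in zip(self.weights, self.noise)]
            self.noisy = True

    def remove_noise(self):
        # Subtracting the exact stored noise restores the original weights.
        if self.noisy:
            self.weights = [w - n for w, n in zip(self.weights, self.noise)]
            self.noisy = False


model = ParamNoiseModel([np.ones((2, 2)), np.zeros(3)])
model.sample_new_noise()
model.add_stored_noise()   # run the noisy episode
model.remove_noise()       # restore clean weights before the optimizer step
```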
|
hazard2.py
|
(instr):
return Cat(C(0, 12), instr[12:])
def imm_j(instr):
return Cat(C(0, 1), instr[21:31], instr[20], instr[12:20], Repl(instr[-1], 12))
class Hazard2Shifter(Elaboratable):
def __init__(self):
self.i = Signal(XLEN)
self.shamt = Signal(range(XLEN))
self.right = Signal()
self.arith = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
accum = Signal(XLEN, name="shift_pre_reverse")
m.d.comb += accum.eq(Mux(self.right, self.i, self.i[::-1]))
for i in range(self.shamt.width):
accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so we need to perform the write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
m = Module()
stall = ~self.hready
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5]
|
imm_u
|
identifier_name
|
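Hazard2Shifter above builds a single right shifter and reuses it for left shifts by bit-reversing the operand on the way in and out, with one mux stage per shift-amount bit. The pure-Python model below mirrors that structure (XLEN = 32 assumed) so the stage-by-stage behaviour can be sanity-checked against ordinary Python shifts.

```python
XLEN = 32
MASK = (1 << XLEN) - 1


def bit_reverse(x):
    return int(f"{x & MASK:0{XLEN}b}"[::-1], 2)


def hazard2_shift(value, shamt, right, arith):
    """Log-shifter model: mux in a 2**i right shift per shift-amount bit."""
    accum = value & MASK if right else bit_reverse(value)
    for i in range(5):  # 5 shift-amount bits for XLEN = 32
        if (shamt >> i) & 1:
            fill = ((1 << (1 << i)) - 1) if (arith and accum >> (XLEN - 1)) else 0
            accum = (accum >> (1 << i)) | (fill << (XLEN - (1 << i)))
    return accum & MASK if right else bit_reverse(accum)


assert hazard2_shift(0x80000000, 4, right=True, arith=True) == 0xF8000000   # SRA
assert hazard2_shift(0x80000000, 4, right=True, arith=False) == 0x08000000  # SRL
assert hazard2_shift(0x00000001, 4, right=False, arith=False) == 0x00000010 # SLL
```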
|
hazard2.py
|
.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so we need to perform the write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
|
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq(ALUOp.XOR)
with m.Case(0b101):
m.d.comb += aluop_r_i.eq(Mux(funct7[5], ALUOp.SRA, ALUOp.SRL))
with m.Case(0b110):
m.d.comb += aluop_r_i.eq(ALUOp.OR)
with m.Case(0b111):
m.d.comb += aluop_r_i.eq(ALUOp.AND)
with m.Switch(opc):
with m.Case(RVOpc.OP):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.OP_IMM):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(imm_i(cir)),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.JAL):
m.d.com
|
m = Module()
stall = ~self.hready
|
random_line_split
|
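The three-operand `adder` in Hazard2ALU folds add, subtract and an optional "minus 4" into one sum: the second operand is XORed with all-ones when the op is not ADD, bit 0 of the third operand supplies the +1 that completes the two's-complement negation, and replicating `take4` into bits 2..XLEN-1 adds 2**XLEN - 4, i.e. subtracts 4 modulo 2**XLEN. A small arithmetic model of that identity (XLEN = 32 assumed):

```python
XLEN = 32
MASK = (1 << XLEN) - 1


def hazard2_adder(i0, i1, is_add, take4):
    """Model of the 3-input sum: i0 +/- i1, optionally minus 4, mod 2**XLEN."""
    op2 = i1 ^ (0 if is_add else MASK)        # invert i1 for subtraction
    op3 = (0 if is_add else 1)                # +1 completes the two's complement
    op3 |= (MASK & ~0x3) if take4 else 0      # bits 2.. set == adding -4 mod 2**XLEN
    return (i0 + op2 + op3) & MASK


assert hazard2_adder(10, 3, is_add=True, take4=False) == 13
assert hazard2_adder(10, 3, is_add=False, take4=False) == 7
assert hazard2_adder(10, 3, is_add=True, take4=True) == 9      # 10 + 3 - 4
assert hazard2_adder(0, 1, is_add=False, take4=False) == MASK  # 0 - 1 wraps to all-ones
```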
hazard2.py
|
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so we need to perform the write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
m = Module()
stall = ~self.hready
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq(ALUOp.XOR)
with m.Case(0b101):
m.d.comb += aluop_r_i.eq(Mux(funct7[5], ALUOp.SRA, ALUOp.SRL))
|
accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next
|
conditional_block
|
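`less_than` above avoids a second comparator by reusing the subtraction: when the operands' sign bits agree, the MSB of `i0 - i1` already gives the answer for both signed and unsigned compares; when they differ, the result is read directly from one operand's MSB (`i1` for the unsigned case, `i0` for the signed case). A brute-force check of that identity at a reduced width (the identity itself is width-independent):

```python
WIDTH = 6  # small width so the check can be exhaustive
MASK = (1 << WIDTH) - 1
MSB = 1 << (WIDTH - 1)


def hazard2_less_than(i0, i1, unsigned):
    diff_msb = bool(((i0 - i1) & MASK) & MSB)
    if (i0 & MSB) == (i1 & MSB):          # same sign bit: subtraction MSB decides
        return diff_msb
    return bool(i1 & MSB) if unsigned else bool(i0 & MSB)


def as_signed(x):
    return x - (1 << WIDTH) if x & MSB else x


for a in range(1 << WIDTH):
    for b in range(1 << WIDTH):
        assert hazard2_less_than(a, b, unsigned=True) == (a < b)
        assert hazard2_less_than(a, b, unsigned=False) == (as_signed(a) < as_signed(b))
```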
|
hazard2.py
|
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so we need to perform the write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
m = Module()
stall = ~self.hready
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq
|
m = Module()
accum = Signal(XLEN, name="shift_pre_reverse")
m.d.comb += accum.eq(Mux(self.right, self.i, self.i[::-1]))
for i in range(self.shamt.width):
accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m
|
identifier_body
|
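Hazard2Regfile above works around the lack of read-enable on transparent read ports by registering the colliding write data for one cycle and muxing it in front of the non-transparent read data. The cycle-level Python sketch below is a toy model of that bypass, assuming synchronous reads that return the memory contents as of the previous edge.

```python
class RegfileModel:
    """Toy model of a 2R1W register file with manual write-to-read forwarding."""

    def __init__(self, depth=32):
        self.mem = [0] * depth
        self.rdata = [0, 0]        # registered read outputs (one per read port)
        self._prev_wdata = 0       # write data captured on a read/write collision

    def clock(self, raddr, ren, waddr, wdata, wen):
        ram_out = [self.mem[raddr[0]], self.mem[raddr[1]]]  # non-transparent: old contents
        forwarded = wen and ren and waddr != 0
        if forwarded:
            self._prev_wdata = wdata
        if ren:
            self.rdata = [
                self._prev_wdata if (forwarded and waddr == a) else d
                for a, d in zip(raddr, ram_out)
            ]
        if wen and waddr != 0:     # x0 is never written
            self.mem[waddr] = wdata


rf = RegfileModel()
rf.clock(raddr=[5, 6], ren=True, waddr=5, wdata=123, wen=True)
print(rf.rdata)  # [123, 0]: the colliding write is forwarded to read port 1
```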
|
server.go
|
, error) {
// check message and broadcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic matches, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
}
s.cache = []string{}
}
// then publish current message
s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
} else {
// offline, save data to cache, check cache quantity
if len(s.cache) < sys.GetBusManagerCfg().Cache.MaxMessage {
log.Printf("save to cache, current number: %d", len(s.cache))
s.cache = append(s.cache, in.Topic+"&"+in.Payload)
} else {
log.Printf("save to file")
// save to file
if err := saveCacheToFile(s.cache); err != nil {
log.Printf("save cache faield: %s", err)
}
s.cache = []string{}
}
}
} else {
if err := s.MqttClient.PublishSampleValues(in.Topic, in.Payload); err != nil {
return &pb.PublishReply{Status: public.StatusErr, Message: public.MessageErrUnknown}, nil
}
}
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// Subscribe subscribe implement
func (s *BusServer) Subscribe(ctx context.Context, in *pb.SubscribeRequest) (*pb.SubscribeReply, error) {
if err := s.MqttClient.Subscribe(in.Topic); err != nil {
return &pb.SubscribeReply{Status: public.StatusErr, Message: err.Error()}, nil
}
return &pb.SubscribeReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// get uuid
func getUUID() string {
// address := config.Configuration.SystemServer.Host + ":" + config.Configuration.SystemServer.Port
// // get info from hmu
// var client public.SystemClient
// if err := client.ConnectSystemDaemon(address); err != nil {
// log.Fatalf("connect system server failed, errmsg {%v}", err)
// }
// resp, err := client.UUID()
// if err != nil {
// log.Fatalf("get uuid failed, errmsg {%v}", err)
// }
// defer client.Disconnect()
// return resp.UUID
// read id from config file
return sys.GetMonitoringUnitCfg().ID
}
// control app LED status
func controlAppLEDStatus() {
var appled extend.AppLED
if err := appled.Prepare(sys.GetBusManagerCfg().Model); err != nil {
buslog.LOG.Warningf("prepare app led failed, errmsg: %v", err)
return
}
defer appled.CleanUp()
status := 0
// loop
for {
// toggle status
status = status ^ 1
// sleep for a moment, interval set by mqtt connect/disconnect handler
time.Sleep(time.Millisecond * time.Duration(LEDSetInterval))
if err := appled.SetLEDStatus(status); err != nil {
// log.Printf("set appled %v", err)
}
}
}
func checkMessage(topic, payload string) {
s := strings.Split(topic, "/")
if len(s) != 4 {
return
}
suid := s[2]
channelid := s[3]
if channelid != "_state" {
return
}
// parse payload, get value
var p public.MessagePayload
if err := json.Unmarshal([]byte(payload), &p); err != nil {
log.Printf("parse payload fail, payload: %s, errmsg: %v", payload, err)
return
}
v := int(p.Value.(float64))
if v == -1 {
v = 0
} else {
v = 1
}
// set status
lastvalue, ok := web.DeviceStatus[suid]
if !ok {
log.Printf("channel id `%s` do not exist", suid)
return
}
if v != lastvalue {
// update status, broadcast
web.DeviceStatus[suid] = v
bs, _ := web.DeviceStatusToBytes()
web.WSHub.BroadcastMessage(bs)
}
}
func checkNetworkStatus() {
timer := time.NewTicker(5 * time.Second)
lastRestartTime := time.Now()
cfg := sys.GetBusManagerCfg()
netCheckList := []string{cfg.MQTT.Host + ":" + cfg.MQTT.Port}
netCheckList = append(netCheckList, cfg.Web.NetChecking.Hosts...)
doTimeout := cfg.Web.NetChecking.Timeout
if doTimeout == 0 {
doTimeout = 5
}
doTimes := cfg.Web.NetChecking.DoTimes
for {
select {
case <-timer.C:
status := GetNetworkStatus()
// Most of the time the network is healthy, so optimize for this path.
if status == Online {
lastRestartTime = time.Now()
continue
}
if status == WaitingOnline && len(cfg.Web.NetChecking.Hosts) > 0 {
// Handle the ticker event once the network has been detected as healthy.
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
SetNetworkStatus(WaitingOnline, false)
} else {
// If the network is found to be down again while waiting for MQTT to come online, fall back to the offline state.
SetNetworkStatus(Offline, false)
}
sysd.Disconnect()
continue
}
if status == Offline {
if doTimes > 0 && len(cfg.Web.NetChecking.Hosts) > 0 {
doTimes--
// Try the network first.
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
// Network is healthy; run the wait-for-MQTT-online logic.
sysd.Disconnect()
SetNetworkStatus(WaitingOnline, false)
continue
}
sysd.Disconnect()
// Network check failed; take the failure path.
}
now := time.Now()
d := now.Sub(lastRestartTime)
rd := time.Duration(cfg.Web.Restart.Duration) * time.Second
if d >= rd {
lastRestartTime = now
rt := cfg.Web.Restart.Times
if rt < cfg.Web.Restart.Max {
// increment the restart counter
cfg.Web.Restart.Times++
|
if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
|
random_line_split
|
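The offline branch above buffers messages as `topic&payload` strings and, once `Cache.MaxMessage` is reached, spills them to a file named with the next integer after the highest existing one, pruning anything beyond `Cache.MaxFile`. Sketched here in Python (the language used for the other examples in this document) rather than Go; the directory layout is assumed, and the MQTT client and config types are not reproduced.

```python
import os


def save_cache_to_file(cache, directory, max_file):
    """Write cache lines to the next numbered file; keep at most max_file files."""
    existing = sorted(
        (int(name) for name in os.listdir(directory) if name.isdigit()),
        reverse=True,
    )
    # Prune the oldest (lowest-numbered) files beyond the limit.
    for stale in existing[max_file:]:
        os.remove(os.path.join(directory, str(stale)))
    next_name = 0 if not existing else existing[0] + 1
    path = os.path.join(directory, str(next_name))
    with open(path, "w") as f:
        f.write("\n".join(cache))
    return path


# Usage sketch: spill once the in-memory cache reaches its limit.
# cache = ["sample-values/mu/su/ch&{...}", ...]
# if len(cache) >= max_message:
#     save_cache_to_file(cache, "/var/cache/bus", max_file=10)
#     cache = []
```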
|
server.go
|
: -1,
// Timestamp: public.UTCTimeStamp(), // this value conflicts with the server, so it is not reported
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
func (s *BusServer) publishCacheFile() error {
files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
topic := "sample-block/" + getUUID()
for _, filename := range files {
f, err := os.Open(filename)
if err != nil {
continue
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
continue
}
sd := string(data)
ms := strings.Split(sd, "\n")
sp := []string{}
for _, m := range ms {
ps := strings.Split(m, "&")
if len(ps) < 2 {
continue
}
p := ps[1]
sp = append(sp, p)
}
d := strings.Join(sp, ",")
d = "[" + d + "]"
log.Printf("publish file: %s", filename)
// publish data
if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
return err
}
// remove cache file
os.Remove(filename)
}
return nil
}
func saveCacheToFile(cache []string) error {
cfg := sys.GetBusManagerCfg()
files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
if err != nil {
return err
}
var ifl []int
for _, f := range files {
fn := filepath.Base(f)
i, _ := strconv.Atoi(fn)
ifl = append(ifl, i)
}
sort.Sort(sort.Reverse(sort.In
|
fs := ifl[cfg.Cache.MaxFile:]
// remove files
for _, f := range rfs {
os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
}
}
var nf int
if l == 0 {
nf = 0
} else {
nf = ifl[0] + 1
}
filepath := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
f, err := os.Create(filepath)
if err != nil {
return err
}
defer f.Close()
data := strings.Join(cache, "\n")
if _, err = f.Write([]byte(data)); err != nil {
return fmt.Errorf("write file [%s] failed: %s", filepath, err)
}
return nil
}
// Publish publish implement
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
// check message and broadcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic matches, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
}
s.cache = []string{}
}
// then publish current message
s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
} else {
// offline, save data to cache, check cache quantity
if len(s.cache) < sys.GetBusManagerCfg().Cache.MaxMessage {
log.Printf("save to cache, current number: %d", len(s.cache))
s.cache = append(s.cache, in.Topic+"&"+in.Payload)
} else {
log.Printf("save to file")
// save to file
if err := saveCacheToFile(s.cache); err != nil {
log.Printf("save cache faield: %s", err)
}
s.cache = []string{}
}
}
} else {
if err := s.MqttClient.PublishSampleValues(in.Topic, in.Payload); err != nil {
return &pb.PublishReply{Status: public.StatusErr, Message: public.MessageErrUnknown}, nil
}
}
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// Subscribe subscribe implement
func (s *BusServer) Subscribe(ctx context.Context, in *pb.SubscribeRequest) (*pb.SubscribeReply, error) {
if err := s.MqttClient.Subscribe(in.Topic); err != nil {
return &pb.SubscribeReply{Status: public.StatusErr, Message: err.Error()}, nil
|
tSlice(ifl)))
log.Println(ifl)
l := len(ifl)
if l > cfg.Cache.MaxFile {
r
|
conditional_block
|
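`checkMessage` above only reacts to four-segment topics ending in `_state`, maps a payload value of -1 to 0 (offline) and anything else to 1 (online), and broadcasts the status table only when a unit's state actually changes. A hedged Python model of that filter; the JSON field name `value` and the `broadcast` callback are assumptions, not the Go code's exact types.

```python
import json

device_status = {}  # sample-unit id -> 0 (offline) / 1 (online)


def check_message(topic, payload, broadcast):
    """Update device_status from a '<kind>/<mu>/<su>/_state' message; broadcast on change."""
    parts = topic.split("/")
    if len(parts) != 4 or parts[3] != "_state":
        return
    su_id = parts[2]
    try:
        value = json.loads(payload)["value"]   # field name assumed; the Go struct tag may differ
    except (ValueError, KeyError):
        return
    new_state = 0 if int(value) == -1 else 1
    if su_id in device_status and device_status[su_id] != new_state:
        device_status[su_id] = new_state
        broadcast(json.dumps(device_status))


device_status["su-01"] = 0
check_message("sample-values/mu-1/su-01/_state", '{"value": 3}', broadcast=print)
```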
server.go
|
y
downloadDependentDeviceLibrary()
if err := web.ReadVideoConfig(); err != nil {
log.Printf("open video config file failed, errmsg {%v}", err)
}
s.enableCache = true
// check directory exist or not
_, err := os.Stat(sys.GetBusManagerCfg().Cache.Directory)
if err != nil {
if os.IsNotExist(err) {
// do not exist, create
if err := os.Mkdir(cfg.Cache.Directory, os.ModeDir); err != nil {
log.Printf("create directory failed: %s", err)
s.enableCache = false
}
} else {
s.enableCache = false
}
}
log.Printf("enable cache: %v", s.enableCache)
muid := getUUID()
willtopic := "sample-values/" + muid + "/_/_state"
conntopic := willtopic
payload := public.MessagePayload{
MonitoringUnitID: muid,
SampleUnitID: "_",
ChannelID: "_state",
Name: "采集器连接状态",
Value: -1,
// Timestamp: public.UTCTimeStamp(), // this value conflicts with the server, so it is not reported
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
func (s *BusServer) publishCacheFile() error {
files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
topic := "sample-block/" + getUUID()
for _, filename := range files {
f, err := os.Open(filename)
if err != nil {
continue
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
continue
}
sd := string(data)
ms := strings.Split(sd, "\n")
sp := []string{}
for _, m := range ms {
ps := strings.Split(m, "&")
if len(ps) < 2 {
continue
}
p := ps[1]
sp = append(sp, p)
}
d := strings.Join(sp, ",")
d = "[" + d + "]"
log.Printf("publish file: %s", filename)
// publish data
if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
return err
}
// remove cache file
os.Remove(filename)
}
return nil
}
func saveCacheToFile(cache []string) error {
cfg := sys.GetBusManagerCfg()
files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
if err != nil {
return err
}
var ifl []int
for _, f := range files {
fn := filepath.Base(f)
i, _ := strconv.Atoi(fn)
ifl = append(ifl, i)
}
sort.Sort(sort.Reverse(sort.IntSlice(ifl)))
log.Println(ifl)
l := len(ifl)
if l > cfg.Cache.MaxFile {
rfs := ifl[cfg.Cache.MaxFile:]
// remove files
for _, f := range rfs {
os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
}
}
var nf int
if l == 0 {
nf = 0
} else {
nf = ifl[0] + 1
}
filepath := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
f, err := os.Create(filepath)
if err != nil {
return err
}
defer f.Close()
data := strings.Join(cache, "\n")
if _, err = f.Write([]byte(data)); err != nil {
return fmt.Errorf("write file [%s] failed: %s", filepath, err)
}
return nil
}
// Publish publish implement
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
// check message and broadcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic matches, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
}
s.cache = []string{}
}
// then publish current message
s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
} else {
// offline, save data to cache, check cache quantity
if len(s.cache) < sys.GetBusManagerCfg().
|
brar
|
identifier_name
|
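The capture loop above compares each configured signal's expected value (a string) against the incoming payload value, which may arrive as an int, float or string, so numeric values are stringified first and floats are truncated to integers. A Python model of that normalisation and match; the dict-based capture-option structure is an assumption standing in for the Go config types.

```python
def normalize_value(value):
    """Stringify a payload value the way the Go type switch does (floats truncated)."""
    if isinstance(value, bool):        # guard: bool is an int subclass in Python
        return ""
    if isinstance(value, int):
        return str(value)
    if isinstance(value, float):
        return str(int(value))
    if isinstance(value, str):
        return value
    return ""


def capture_matches(topic, payload_value, capture_option):
    """True if any configured signal's topic substring and value both match."""
    for signal in capture_option["signals"]:
        if signal["topic"] in topic and signal["value"] == normalize_value(payload_value):
            return True
    return False


option = {"suid": "su-07", "signals": [{"topic": "door/_state", "value": "1"}]}
print(capture_matches("sample-values/mu-1/door/_state", 1.0, option))  # True
```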
|
server.go
|
fg := sys.GetBusManagerCfg()
// download element library
downloadDependentDeviceLibrary()
if err := web.ReadVideoConfig(); err != nil {
log.Printf("open video config file failed, errmsg {%v}", err)
}
s.enableCache = true
// check directory exist or not
_, err := os.Stat(sys.GetBusManagerCfg().Cache.Directory)
if err != nil {
if os.IsNotExist(err) {
// do not exist, create
if err := os.Mkdir(cfg.Cache.Directory, os.ModeDir); err != nil {
log.Printf("create directory failed: %s", err)
s.enableCache = false
}
} else {
s.enableCache = false
}
}
log.Printf("enable cache: %v", s.enableCache)
muid := getUUID()
willtopic := "sample-values/" + muid + "/_/_state"
conntopic := willtopic
payload := public.MessagePayload{
MonitoringUnitID: muid,
SampleUnitID: "_",
ChannelID: "_state",
Name: "采集器连接状态",
Value: -1,
// Timestamp: public.UTCTimeStamp(), // this value conflicts with the server, so it is not reported
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
func (s *BusServer) publishCacheFile() error {
files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
topic := "sample-block/" + getUUID()
for _, filename := range files {
f, err := os.Open(filename)
if err != nil {
continue
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
continue
}
sd := string(data)
ms := strings.Split(sd, "\n")
sp := []string{}
for _, m := range ms {
ps := strings.Split(m, "&")
if len(ps) < 2 {
continue
}
p := ps[1]
sp = append(sp, p)
}
d := strings.Join(sp, ",")
d = "[" + d + "]"
log.Printf("publish file: %s", filename)
// publish data
if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
return err
}
// remove cache file
os.Remove(filename)
}
return nil
}
func saveCacheToFile(cache []string) error {
cfg := sys.GetBusManagerCfg()
files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
if err != nil {
return err
}
var ifl []int
for _, f := range files {
fn := filepath.Base(f)
i, _ := strconv.Atoi(fn)
ifl = append(ifl, i)
}
sort.Sort(sort.Reverse(sort.IntSlice(ifl)))
log.Println(ifl)
l := len(ifl)
if l > cfg.Cache.MaxFile {
rfs := ifl[cfg.Cache.MaxFile:]
// remove files
for _, f := range rfs {
os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
}
}
var nf int
if l == 0 {
nf = 0
} else {
nf = ifl[0] + 1
}
filepath := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
f, err := os.Create(filepath)
if err != nil {
return err
}
defer f.Close()
data := strings.Join(cache, "\n")
if _, err = f.Write([]byte(data)); err != nil {
return fmt.Errorf("write file [%s] failed: %s", filepath, err)
}
return nil
}
// Publish publish implement
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
// check message and broadcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic matches, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
|
atusSync.Unlock()
if networkStatus == Online && !mqtt {
// 当mqtt在线时,只能由mqtt处理
return
}
networkStatus = status
}
// Init do some init operation
func (s *BusServer) Init() {
c
|
identifier_body
|
|
TempTaker_old.py
|
#ControlActionThreshold = [1,1,1,1,1,1,1,1]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
ControlActionThreshold = [2,2,4,3,2,2,2,2]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
PTab=[0,0,0,1] # contribution from table temperature to P gain
PSup=[1,1,1,0] # contribution from supply air (=incoming air) to P gain
PCoolingWater=[0,0,0,0] # contribution from cooling water to P gain
ColdValveGain=[1,1,1,0] # correction for cold valve gain
HotValveGain=[1,1,1,0] # correction for hot valve gain, should be 1, however, better results with higher gain?
integfile = 'lastInteg.txt'#name of the file where last integrator array is kept.
pifile = 'PIparams.txt'#where PI parameters are kept and can be modified on the fly
mancontrolfile = 'manual_valve.txt' #used for manual control of valve positions
ser = serial.Serial('/dev/ttyUSB1') # open USB-serial port
if(not ser.isOpen()):
print 'Error: Serial Port Not Open'
ser.flushInput()
ser.flushOutput()
ser.baudrate = 9600;
ser.timeout = 0.1; #sets timeout of the serial port
counter = SignalDelay+1 #so that data is outputted on the first loop
WriteCounter = SignalDelay+1 #so that data is written on the first loop
errors_count = 0 #initial number of errors
notifier = email_notifier.notifier()
notifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com'])
officialnotifier = email_notifier.notifier()
officialnotifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com','physics-support@lists.berkeley.edu'])
class Valves():
def __init__(self):
self.previousSignal = zeros(ControlledValves)
self.newSignal = zeros(ControlledValves)
def sign(x):
if(x > 0.01):
return 1
if(x < -0.01):
return -1
else:
return 0
def ApplyValveSignal(self,incoming_signal):
self.newSignal = self.testResponseChange(incoming_signal)
for i in range(ControlledValves): # taking care of the hysteresis ....
newdirection = sign(self.newSignal[i] - oldvalvesignal[i])
if((newdirection != direction[i]) and (newdirection)): # valve turns around
direction[i] = newdirection
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Direction change: Valve ' + str(i) + ' ' + str(direction[i])
oldvalvesignal[i] = self.newSignal[i]
self.newSignal[i] = clip(self.newSignal[i] + direction[i] * hysteresis[i]/2,ValveMin[i],ValveMax)
self.communicateSend()
return self.newSignal
# test the response change to minimize valve motion and reduce wear and tear
def testResponseChange(self,signal):
for i in range(len(signal)):
if abs(signal[i]-self.previousSignal[i]) >= ControlActionThreshold[i]:
signal[i] = int(round(signal[i]))
self.previousSignal[i] = signal[i]
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Changing Valve ' + str(i) + ' to ' + str(signal[i])
else:
signal[i] = int(round(self.previousSignal[i]))
return signal
def communicateSend(self):
signal = self.newSignal
for i in range(ControlledValves):
ser.write("d")
time.sleep(ComDelay)
ser.write(str(i))
time.sleep(ComDelay)
vsig = self.dec2hex(signal[i])
ser.write(vsig)
time.sleep(ComDelay)
ser.flushInput()
time.sleep(ComDelay)
def dec2hex(self, n):#"""return the hexadecimal string representation of integer n as a two digits representation in lowercase"""
string = "%x" % n
string = string.zfill(2)
return string
class ResponseCalculator():
def __init__(self):
self.lastErrSigArr = zeros(Ch) #initial vale of lastErrorSignal, used to disable Diff gain for the first time
self.loadExternalParams()
def loadExternalParams(self):
if(os.path.isfile(integfile)):#if integ file exists (with information about last integration), open it in read/write mode and read in last integrator setting
self.INTEGFILE = open(integfile,"r+");
self.integralerrorSigArr = array(pickle.load(self.INTEGFILE))
else: #if file does not exist, create it and specify initial integrator parameters.
self.INTEGFILE = open(integfile,"w");
self.integralerrorSigArr = zeros(Ch)
if(os.path.isfile(pifile)): #if file exists, load the PI parameters
self.PIFILE = open(pifile,"r+")
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
else:
self.PIFILE = open(pifile,"w") #if file doesn't not exist, create it
#proportionality constant for PID in the format [#144 big room / #140 small room / #144B Laser Room / #144A office]
self.P = array([-15,-15,-15,-0])
self.I = array([-.1,-.1,-.1,-0])
self.D = array([-40,-40,-40,0])
pickle.dump(self.P.tolist(),self.PIFILE)
pickle.dump(self.I.tolist(),self.PIFILE)
pickle.dump(self.D.tolist(),self.PIFILE)
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile) #time when pifile is last modified
def updateExternalPIDParams(self):
if(os.path.getmtime(pifile) != self.PImodtime): #if PI parameters have been modified externally, update them
self.PIFILE = open(pifile, 'r')
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile)
print("new P,I,D parameters are")
print self.P
print self.I
print self.D
def getResponse(self):
return [self.PIDresponseArr,self.valvesignalArr]
def calculateResponse(self, curTempArr):
self.errorSigArr = self.finderrorSig(curTempArr)
self.integralerrorSigArr = self.calcintegrator(self.integralerrorSigArr, self.errorSigArr)
self.saveIntegralError(self.integralerrorSigArr)
self.PIDresponseArr = self.findPIDresponse(self.errorSigArr, self.integralerrorSigArr,self.lastErrSigArr)
self.lastErrSigArr= self.errorSigArr
self.valvesignalArr = self.CalcValveSignal(self.PIDresponseArr, curTempArr)
|
def saveIntegralError(self,integError):
#print integError
self.INTEGFILE.seek(0) #moves position to the beginning of the file
pickle.dump(integError, self.INTEGFILE)
self.INTEGFILE.truncate()
def finderrorSig(self, CurTemp): #takes array with current temperatures and finds the error signal array
error = CurTemp - SetPoint
return error
def calcintegrator(self,oldArr, newArr):
TotalArr = oldArr + newArr
# Normalize maximum by the mean of the integration constants
minim = IntegrationMin/(-sum(self.I)/len(self.I))
maxim = IntegrationMax/(-sum(self.I)/len(self.I))
TotalArr=clip(TotalArr,minim,maxim)
return TotalArr
def findPIDresponse(self,curErrArr, IntErrArr, lastErrArr): #produces the array containing the signal to be sent to the valves in the format [Control1, Control2, ...], where each value ranges from -255 to 255; positive means hotter, negative means colder
P = self.P
I = self.I
D = self.D
propArr = zeros(ControlCh)
propArr[bigroomctrl] = PSup[bigroomctrl]*curErrArr[SupplyBigRoom-1]#0 + PTab[bigroomctrl]*curErrArr[Table1-1
|
random_line_split
|
|
TempTaker_old.py
|
IFILE)
pickle.dump(self.D.tolist(),self.PIFILE)
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile) #time when pifile is last modified
def updateExternalPIDParams(self):
if(os.path.getmtime(pifile) != self.PImodtime): #if the PID parameters have been modified externally, update them
self.PIFILE = open(pifile, 'r')
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile)
print("new P,I,D parameters are")
print self.P
print self.I
print self.D
def getResponse(self):
return [self.PIDresponseArr,self.valvesignalArr]
def calculateResponse(self, curTempArr):
self.errorSigArr = self.finderrorSig(curTempArr)
self.integralerrorSigArr = self.calcintegrator(self.integralerrorSigArr, self.errorSigArr)
self.saveIntegralError(self.integralerrorSigArr)
self.PIDresponseArr = self.findPIDresponse(self.errorSigArr, self.integralerrorSigArr,self.lastErrSigArr)
self.lastErrSigArr= self.errorSigArr
self.valvesignalArr = self.CalcValveSignal(self.PIDresponseArr, curTempArr)
def saveIntegralError(self,integError):
#print integError
self.INTEGFILE.seek(0) #moves position to the beginning of the file
pickle.dump(integError, self.INTEGFILE)
self.INTEGFILE.truncate()
def finderrorSig(self, CurTemp): #takes array with current temperatures and finds the error signal array
error = CurTemp - SetPoint
return error
def calcintegrator(self,oldArr, newArr):
TotalArr = oldArr + newArr
# Normalize maximum by the mean of the integration constants
minim = IntegrationMin/(-sum(self.I)/len(self.I))
maxim = IntegrationMax/(-sum(self.I)/len(self.I))
TotalArr=clip(TotalArr,minim,maxim)
return TotalArr
def findPIDresponse(self,curErrArr, IntErrArr, lastErrArr): #produces the array containing the signal to be sent to the valves in the format [Control1, Control2, ...], where each value ranges from -255 to 255; positive means hotter, negative means colder
P = self.P
I = self.I
D = self.D
propArr = zeros(ControlCh)
propArr[bigroomctrl] = PSup[bigroomctrl]*curErrArr[SupplyBigRoom-1]#0 + PTab[bigroomctrl]*curErrArr[Table1-1]#0 + PCoolingWater[bigroomctrl]*curErrArr[ColdWaterBigRoom]
propArr[smlroomctrl] = PSup[smlroomctrl]*curErrArr[SupplySmallRoom-1]#0 + PTab[smlroomctrl]*curErrArr[Table3-1]#0 + PCoolingWater[smlroomctrl]*curErrArr[ColdWaterSmallRoom]
propArr[laserroomctrl] = PSup[laserroomctrl]*curErrArr[SupplyLaserRoom-1]#0 + PTab[laserroomctrl]*curErrArr[Table4-1]#0 + PCoolingWater[laserroomctrl]*curErrArr[ColdWaterLaserRoom]
propArr[officectrl] = 0 #no control in office
propArr = propArr - clip(propArr, -PropActionThreshold,PropActionThreshold)
proprespArr = (P * propArr) # with numpy arrays, * is element-wise (component by component) multiplication
integArr = zeros(ControlCh)
integArr[bigroomctrl] = IntErrArr[Table1-1]
integArr[smlroomctrl] = IntErrArr[Table3-1]
integArr[laserroomctrl] = IntErrArr[Table4-1]
integArr[officectrl] = 0 #no control in office
integrespArr = (I * integArr) # with numpy arrays, * is element-wise (component by component) multiplication
#print integArr
if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
diffrespArr = zeros(ControlCh)
else:
diffArr = zeros(ControlCh)
DiffErrArr = curErrArr - lastErrArr
diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
diffArr[officectrl] = 0 # no control in office
diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
diffrespArr = (D * diffArr)
diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
print 'P', proprespArr
print 'I', integrespArr
print 'D', diffrespArr
responseArr = proprespArr + integrespArr + diffrespArr
return responseArr
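findPIDresponse() applies a deadband to the proportional and differential error terms by subtracting the clipped value, x - clip(x, -t, t): errors smaller than the threshold produce no action, and larger errors act only on the excess. A small self-contained example of that identity, with a made-up threshold and error values:

from numpy import array, clip

threshold = 0.05
errors = array([0.02, -0.04, 0.10, -0.30])
deadbanded = errors - clip(errors, -threshold, threshold)
print(deadbanded)   # [ 0.    0.    0.05 -0.25]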
def CalcValveSignal(self,responseArr,curTempArr): #hard-codes which control channel corresponds to which output number
valvesignalArr = zeros(ControlledValves)
#ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
#ColdWater = clip(ColdWater,0,20)
ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minimum heating power corresponds to a 20 degree temperature difference
ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
valvesignalArr[0] = ColdValveSignal[smlroomctrl]
valvesignalArr[1] = HotValveSignal[smlroomctrl]
valvesignalArr[2] = ColdValveSignal[bigroomctrl]
valvesignalArr[3] = HotValveSignal[bigroomctrl]
valvesignalArr[4] = ColdValveSignal[laserroomctrl]
valvesignalArr[5] = HotValveSignal[laserroomctrl]
valvesignalArr[6] = 0
valvesignalArr[7] = 0
# valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
# valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
# valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
# valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
# valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
# valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
# valvesignalArr[6] = 0
# valvesignalArr[7] = 0
valvesignal = valvesignalArr.tolist()
return valvesignalArr
def
|
__del__
|
identifier_name
|
|
TempTaker_old.py
|
SmallRoom]
propArr[laserroomctrl] = PSup[laserroomctrl]*curErrArr[SupplyLaserRoom-1]#0 + PTab[laserroomctrl]*curErrArr[Table4-1]#0 + PCoolingWater[laserroomctrl]*curErrArr[ColdWaterLaserRoom]
propArr[officectrl] = 0 #no control in office
propArr = propArr - clip(propArr, -PropActionThreshold,PropActionThreshold)
proprespArr = (P * propArr) # with numpy arrays, * is element-wise (component by component) multiplication
integArr = zeros(ControlCh)
integArr[bigroomctrl] = IntErrArr[Table1-1]
integArr[smlroomctrl] = IntErrArr[Table3-1]
integArr[laserroomctrl] = IntErrArr[Table4-1]
integArr[officectrl] = 0 #no control in office
integrespArr = (I * integArr) # with numpy arrays, * is element-wise (component by component) multiplication
#print integArr
if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
diffrespArr = zeros(ControlCh)
else:
diffArr = zeros(ControlCh)
DiffErrArr = curErrArr - lastErrArr
diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
diffArr[officectrl] = 0 # no control in office
diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
diffrespArr = (D * diffArr)
diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
print 'P', proprespArr
print 'I', integrespArr
print 'D', diffrespArr
responseArr = proprespArr + integrespArr + diffrespArr
return responseArr
def CalcValveSignal(self,responseArr,curTempArr): #hard-codes which control channel corresponds to which output number
valvesignalArr = zeros(ControlledValves)
#ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
#ColdWater = clip(ColdWater,0,20)
ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minimum heating power corresponds to a 20 degree temperature difference
ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
valvesignalArr[0] = ColdValveSignal[smlroomctrl]
valvesignalArr[1] = HotValveSignal[smlroomctrl]
valvesignalArr[2] = ColdValveSignal[bigroomctrl]
valvesignalArr[3] = HotValveSignal[bigroomctrl]
valvesignalArr[4] = ColdValveSignal[laserroomctrl]
valvesignalArr[5] = HotValveSignal[laserroomctrl]
valvesignalArr[6] = 0
valvesignalArr[7] = 0
# valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
# valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
# valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
# valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
# valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
# valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
# valvesignalArr[6] = 0
# valvesignalArr[7] = 0
valvesignal = valvesignalArr.tolist()
return valvesignalArr
def __del__(self):
self.INTEGFILE.close()
class DataAcquisition():
def binarytoTempC(self,bin, ch): #converts binary output to a physical temperature in C
Vin = 2.56*(float(bin)+1)/1024 #ADC reading mapped to volts: a reading of 1023 is 2.56 V, 0 is ~0 V
dV = (15/HardwareG[ch])*(Vin/1.2 - 1) #when G = 15 (most channels), Vin of 2.4 V corresponds to a bridge voltage of +1 and Vin of 0 to a bridge voltage of -1
#G = 5 for the low resolution channels (cold water, hot water supply)
#G is determined by the INA114 gain resistor
R = (dV/V0 +.5) / (- dV/V0 + .5) * 10 #convert bridge voltage to R in kohms
T = 1/(a + b*math.log(R/10.) + c * pow(math.log(R/10.),2) + d * pow(math.log(R/10.),3)) #consult datasheet for this
TempC = round(T - 273.15,2) #Kelvin to C
return TempC
def readTemp(self,ser):#processing the input in the format 03:1023<space>... where 03 is the number of the detector, 1023 is the voltage representation
#returns array with data
global errors_count
curTempArr = zeros(Ch)
ser.write('t') # command to output readings
curLine = ser.read(Ch*8) # reads 128 bytes, 16 channels 7 bytes each and 16 spaces
if(len(curLine)==128): # read everything correctly
for i in range(Ch):
# left and right ranges for number of voltages
lnum = 8*i + 0
rnum = 8*i + 2
lvol = 8*i + 3
rvol = 8*i + 7
num = curLine[lnum:rnum] #number of the detector is the first
vol = int(curLine[lvol:rvol]) #voltage readout
TempC = self.binarytoTempC(vol, i)
curTempArr[i] = TempC
else:
if(errors_count > 20):
notifier.set_content('AC ALARM','The program quit because there were too many errors with data acquisition')
notifier.send()
sys.exit()
errors_count = errors_count + 1
print "Error: Data not collected"
print curLine
time.sleep(DataFreq)
curTempArr = self.readTemp(ser)
return curTempArr
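readTemp() expects a 128-byte reply from the microcontroller: Ch = 16 channels of 8 bytes each, where every channel is encoded as "NN:VVVV " (two-digit detector number, colon, four-digit ADC value, trailing space). A minimal parsing sketch of that frame layout, using a made-up two-channel frame instead of the serial port:

def parse_frame(frame, channels):
    readings = []
    for i in range(channels):
        field = frame[8 * i:8 * i + 8]
        num = int(field[0:2])      # detector number
        vol = int(field[3:7])      # raw 10-bit ADC value, 0..1023
        readings.append((num, vol))
    return readings

print(parse_frame("00:0512 01:1023 ", 2))   # -> [(0, 512), (1, 1023)]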
class RunningAverage():
def __init__(self):
self.RunningAvgNum = RunningAvgNum
self.historyArr = zeros([self.RunningAvgNum,Ch])
self.binfull = 0
self.historyCounter = 0
self.printintro()
def printintro(self):
|
print '\n' + 'Filling up history for ' + str(self.RunningAvgNum) +' seconds \n'
|
identifier_body
|
|
TempTaker_old.py
|
1]
integArr[laserroomctrl] = IntErrArr[Table4-1]
integArr[officectrl] = 0 #no control in office
integrespArr = (I * integArr) # with numpy arrays, * is element-wise (component by component) multiplication
#print integArr
if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
diffrespArr = zeros(ControlCh)
else:
diffArr = zeros(ControlCh)
DiffErrArr = curErrArr - lastErrArr
diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
diffArr[officectrl] = 0 # no control in office
diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
diffrespArr = (D * diffArr)
diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
print 'P', proprespArr
print 'I', integrespArr
print 'D', diffrespArr
responseArr = proprespArr + integrespArr + diffrespArr
return responseArr
def CalcValveSignal(self,responseArr,curTempArr): #hard-codes which control channel corresponds to which output number
valvesignalArr = zeros(ControlledValves)
#ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
#ColdWater = clip(ColdWater,0,20)
ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minimum heating power corresponds to a 20 degree temperature difference
ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
valvesignalArr[0] = ColdValveSignal[smlroomctrl]
valvesignalArr[1] = HotValveSignal[smlroomctrl]
valvesignalArr[2] = ColdValveSignal[bigroomctrl]
valvesignalArr[3] = HotValveSignal[bigroomctrl]
valvesignalArr[4] = ColdValveSignal[laserroomctrl]
valvesignalArr[5] = HotValveSignal[laserroomctrl]
valvesignalArr[6] = 0
valvesignalArr[7] = 0
# valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
# valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
# valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
# valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
# valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
# valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
# valvesignalArr[6] = 0
# valvesignalArr[7] = 0
valvesignal = valvesignalArr.tolist()
return valvesignalArr
def __del__(self):
self.INTEGFILE.close()
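CalcValveSignal() turns one PID response per room into a cold-valve and a hot-valve setting by dividing the response by an estimate of the available cooling or heating power (setpoint minus cold-water temperature, hot-water temperature minus setpoint) and adding a fixed valve offset. A simplified one-room sketch of that scaling; every constant below is an illustrative assumption, not a value from this file.

def valve_signals(response, setpoint, cold_water, hot_water,
                  cold_gain=50.0, hot_gain=50.0, cold_offset=40.0, hot_offset=40.0):
    cooling_power = max(setpoint - cold_water, 1.0)    # always assume some cooling power
    heating_power = max(hot_water - setpoint, 20.0)    # at least 20 C of heating headroom
    cold = -response / cooling_power * cold_gain + cold_offset
    hot = response / heating_power * hot_gain + hot_offset
    return cold, hot

print(valve_signals(response=30.0, setpoint=21.0, cold_water=13.0, hot_water=45.0))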
class DataAcquisition():
def binarytoTempC(self,bin, ch): #converts binary output to a physical temperature in C
Vin = 2.56*(float(bin)+1)/1024 #ADC reading mapped to volts: a reading of 1023 is 2.56 V, 0 is ~0 V
dV = (15/HardwareG[ch])*(Vin/1.2 - 1) #when G = 15 (most channels), Vin of 2.4 V corresponds to a bridge voltage of +1 and Vin of 0 to a bridge voltage of -1
#G = 5 for the low resolution channels (cold water, hot water supply)
#G is determined by the INA114 gain resistor
R = (dV/V0 +.5) / (- dV/V0 + .5) * 10 #convert bridge voltage to R in kohms
T = 1/(a + b*math.log(R/10.) + c * pow(math.log(R/10.),2) + d * pow(math.log(R/10.),3)) #consult datasheet for this
TempC = round(T - 273.15,2) #Kelvin to C
return TempC
def readTemp(self,ser):#processing the input in the format 03:1023<space>... where 03 is the number of the detector, 1023 is the voltage representation
#returns array with data
global errors_count
curTempArr = zeros(Ch)
ser.write('t') # command to output readings
curLine = ser.read(Ch*8) # reads 128 bytes, 16 channels 7 bytes each and 16 spaces
if(len(curLine)==128): # read everything correctly
for i in range(Ch):
# left and right ranges for number of voltages
lnum = 8*i + 0
rnum = 8*i + 2
lvol = 8*i + 3
rvol = 8*i + 7
num = curLine[lnum:rnum] #number of the detector is the first
vol = int(curLine[lvol:rvol]) #voltage readout
TempC = self.binarytoTempC(vol, i)
curTempArr[i] = TempC
else:
if(errors_count > 20):
notifier.set_content('AC ALARM','The program quit because there were too many errors with data acquisition')
notifier.send()
sys.exit()
errors_count = errors_count + 1
print "Error: Data not collected"
print curLine
time.sleep(DataFreq)
curTempArr = self.readTemp(ser)
return curTempArr
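binarytoTempC() converts a raw 10-bit ADC count into a bridge voltage, then into a thermistor resistance, and finally into a temperature with an a, b, c, d polynomial in ln(R/10k) (a Steinhart-Hart style fit). A standalone sketch of the same chain; the coefficients, bridge reference V0 and gain are defined elsewhere in this file, so the values used here are illustrative assumptions only.

import math

def counts_to_celsius(counts, gain=15.0, V0=1.0,
                      a=3.354016e-3, b=2.569850e-4, c=2.620131e-6, d=6.383091e-8):
    Vin = 2.56 * (float(counts) + 1) / 1024          # 0..1023 maps onto 0..2.56 V
    dV = (15.0 / gain) * (Vin / 1.2 - 1)             # amplifier output back to bridge voltage
    R = (dV / V0 + 0.5) / (-dV / V0 + 0.5) * 10.0    # bridge voltage to resistance in kOhm
    lnr = math.log(R / 10.0)
    T = 1.0 / (a + b * lnr + c * lnr ** 2 + d * lnr ** 3)   # polynomial fit, result in Kelvin
    return round(T - 273.15, 2)

print(counts_to_celsius(512))   # about 18.8 with these made-up coefficients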
class RunningAverage():
def __init__(self):
self.RunningAvgNum = RunningAvgNum
self.historyArr = zeros([self.RunningAvgNum,Ch])
self.binfull = 0
self.historyCounter = 0
self.printintro()
def printintro(self):
print '\n' + 'Filling up history for ' + str(self.RunningAvgNum) +' seconds \n'
def printbinfull(self):
print 'Running Average Operational'
def addNumber(self,newnumber):
self.historyArr[self.historyCounter,:] = newnumber #updates history by cycling through rows of historyArr and replacing old data with readTemp
self.historyCounter = (self.historyCounter + 1) % self.RunningAvgNum
if(self.historyCounter == 0):
if(self.binfull == 0):
self.printbinfull()
self.binfull = 1
def getAverage(self):
if(self.binfull): #if bin is full, take the mean
average = mean(self.historyArr,axis=0) #current temperature is the average of the columns of the history array
else: #if bin is not filled, return mean of existing elements
|
average = sum(self.historyArr[0:(self.historyCounter+1),:],axis=0)/(self.historyCounter)
|
conditional_block
|
|
Decryption.py
|
()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop through all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
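MyEncryptMAC() is an encrypt-then-MAC construction: the message is AES-CBC encrypted first and the HMAC-SHA256 tag is computed over the ciphertext only, so a receiver can reject tampered data before attempting decryption. A hedged round-trip sketch using the functions defined in this file (MyDecryptMAC appears further down); it assumes KEY_LENGTH is 32 and that MyEncrypt, defined earlier in this file, applies PKCS7 padding internally.

import os

message = b"attack at dawn"                               # example plaintext
EncKey = os.urandom(32)                                   # KEY_LENGTH is assumed to be 32
HMACKey = os.urandom(32)
C, IV, tag = MyEncryptMAC(message, EncKey, HMACKey)       # encrypt, then MAC the ciphertext
plaintext = MyDecryptMAC(C, IV, tag, HMACKey, EncKey)     # recomputes and checks the tag first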
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
raise Exception("HMACKey less than 32 bytes!")
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# return the key file paths so callers (e.g. DirectoryEncrypt) can use key[0] as the public key path
return (RSA_PUBLIC_KEY_PATH, RSA_PRIVATE_KEY_PATH)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
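MyRSAEncrypt() is a hybrid scheme: the file is encrypted with AES-CBC plus an HMAC tag, and the concatenated EncKey || HMACKey (64 bytes) is then wrapped with RSA-OAEP under the public key, so only the private-key holder can recover the symmetric keys. A usage sketch under the assumption that the key files from CheckRSAKeys() exist; the file name and key path below are made up.

# the wrapped key blob (RSACipher) protects EncKey || HMACKey; C, IV and tag come from the AES/HMAC layer
RSACipher, C, IV, tag, ext = MyRSAEncrypt("notes.txt", "rsa_public_key.pem")
print(len(RSACipher), len(C), ext)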
# AES-CBC requires the plaintext to be a multiple of the 16-byte block size
# We pad the message so its length is a multiple of 16 (the block/IV length)
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
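PKCS7 pads a message up to the 16-byte AES block size by appending N copies of the byte value N: an 11-byte message gains five 0x05 bytes, and a message that is already a multiple of 16 gains a whole extra block of 0x10 bytes. A self-contained example with the same library call used above:

from cryptography.hazmat.primitives import padding

padder = padding.PKCS7(128).padder()
padded = padder.update(b"hello world") + padder.finalize()
print(padded)          # b'hello world\x05\x05\x05\x05\x05'
print(len(padded))     # 16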
def DirectoryEncrypt(directory):
|
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open(encryptedFilepath, 'w') as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root, file), key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
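MyDecryptMAC() compares the recomputed tag with !=, which is a variable-time comparison. The cryptography library also offers HMAC.verify(), which compares in constant time and raises InvalidSignature on mismatch; a hedged sketch of that variant, reusing MyDecrypt() defined above:

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac

def verify_then_decrypt(C, IV, tag, HMACKey, EncKey):
    h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
    h.update(C)
    try:
        h.verify(tag)                      # constant-time comparison
    except InvalidSignature:
        raise Exception("Tags do not align")
    return MyDecrypt(C, IV, EncKey)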
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
jsonFile = filename + ".json"
#open file to decrypt
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp
|
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
|
identifier_body
|
Decryption.py
|
()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop through all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
raise Exception("HMACKey less than 32 bytes!")
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# return the key file paths so callers (e.g. DirectoryEncrypt) can use key[0] as the public key path
return (RSA_PUBLIC_KEY_PATH, RSA_PRIVATE_KEY_PATH)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES-CBC requires the plaintext to be a multiple of the 16-byte block size
# We pad the message so its length is a multiple of 16 (the block/IV length)
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open(encryptedFilepath, 'w') as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root, file), key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
|
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp4
|
jsonFile = filename + ".json"
#open file to decrypt
|
random_line_split
|
Decryption.py
|
RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# return the key file paths so callers (e.g. DirectoryEncrypt) can use key[0] as the public key path
return (RSA_PUBLIC_KEY_PATH, RSA_PRIVATE_KEY_PATH)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES-CBC requires the plaintext to be a multiple of the 16-byte block size
# We pad the message so its length is a multiple of 16 (the block/IV length)
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open(encryptedFilepath, 'w') as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root, file), key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
jsonFile = filename + ".json"
#open file to decrypt
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp437')
tag = (data['tag']).encode('cp437')
EncKey = (data['EncKey']).encode('cp437')
message = MyDecryptMAC(C, IV, tag, HMACKey, EncKey)
#write recovered data to file
recoveredFile = open(originalfilepath, "wb")
recoveredFile.write(message)
recoveredFile.close()
#remove json file
os.remove(jsonFile)
return message
# RSA Decrypt # using AES CBC 256 Decryption with HMAC
'''
def MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
'''
def MyRSADecrypt(filepath, RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
with open(RSA_Privatekey_filepath, 'rb') as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend = default_backend()
)
key = private_key.decrypt(RSACipher, asymmetric.padding.OAEP(
mgf = asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
EncKey_start = 0
EncKey_end = int(len(key) / 2)
HMACKey_start = EncKey_end
HMACKey_end = len(key)
EncKey = key[EncKey_start:EncKey_end]       # first half of the unwrapped blob is the AES key
HMACKey = key[HMACKey_start:HMACKey_end]    # second half is the HMAC key
key_file.close()
MyFileDecryptMAC(filepath, HMACKey)         # matches the signature of MyFileDecryptMAC defined above
def
|
main
|
identifier_name
|
|
Decryption.py
|
()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop through all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
|
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# return the key file paths so callers (e.g. DirectoryEncrypt) can use key[0] as the public key path
return (RSA_PUBLIC_KEY_PATH, RSA_PRIVATE_KEY_PATH)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES-CBC requires the plaintext to be a multiple of the 16-byte block size
# We pad the message so its length is a multiple of 16 (the block/IV length)
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open(encryptedFilepath, 'w') as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root, file), key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
jsonFile = filename + ".json"
#open file to decrypt
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp
|
raise Exception("HMACKey less than 32 bytes!")
|
conditional_block
|
skim.rs
|
BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maximum penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64>
|
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
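fuzzy_indices() recovers the matched character positions by starting from the best-scoring cell of the last pattern row and walking the back_ref links upwards, one pattern character per step. A tiny Python sketch of that back-reference walk over a made-up two-row table (not the crate's real scores):

rows = [
    [{"idx": 0, "back_ref": 0}, {"idx": 2, "back_ref": 0}],   # candidates for pattern char 0
    [{"idx": 3, "back_ref": 1}, {"idx": 5, "back_ref": 1}],   # candidates for pattern char 1
]
col = 0                       # column of the best final score in the last row (assumed)
picked = []
for row in reversed(rows):
    cell = row[col]
    picked.append(cell["idx"])
    col = cell["back_ref"]
picked.reverse()
print(picked)                 # -> [2, 3]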
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // ensures that pushed chars are still able to match the rest of the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// determine how much score the character at the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
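fuzzy_score() combines a base match bonus with case, camelCase-head and after-separator bonuses, and for the first pattern character it penalizes every skipped leading character, capped at PENALTY_MAX_LEADING. The following is a simplified Python rendering of the same idea, not the crate's implementation; the character-class checks are reduced to simple predicates.

BONUS_MATCHED, BONUS_CASE_MATCH, BONUS_UPPER_MATCH = 4, 4, 6
BONUS_SEPARATOR, BONUS_CAMEL = 8, 8
PENALTY_CASE_UNMATCHED, PENALTY_LEADING, PENALTY_MAX_LEADING = -1, -6, -18

def fuzzy_score(choice_ch, choice_idx, choice_prev_ch, pat_ch, pat_idx):
    score = BONUS_MATCHED
    if pat_ch == choice_ch:                                    # exact case match
        score += BONUS_UPPER_MATCH if pat_ch.isupper() else BONUS_CASE_MATCH
    else:                                                      # case-insensitive match only
        score += PENALTY_CASE_UNMATCHED
    if choice_prev_ch.islower() and choice_ch.isupper():       # camelCase head
        score += BONUS_CAMEL
    if choice_prev_ch in "_-/ .":                              # match right after a separator
        score += BONUS_SEPARATOR
    if pat_idx == 0:                                           # capped leading-gap penalty
        score += max(choice_idx * PENALTY_LEADING, PENALTY_MAX_LEADING)
    return score

print(fuzzy_score("C", 5, "l", "C", 1))   # the 'C' of "camelCase" matched by the second char of "cC"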
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "
|
{
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
|
identifier_body
|
skim.rs
|
(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // ensures that pushed chars are still able to match the rest of the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// determine how much score the character at the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
assert_eq!(None, fuzzy_match("abc", "abx"));
assert!(fuzzy_match("axbycz", "abc").is_some());
assert!(fuzzy_match("axbycz", "xyz").is_some());
assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
assert_eq!(
"[H]ello, [世]界",
&wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
);
}
#[test]
fn test_match_quality() {
// case
// assert_order("monad", &["monad", "Monad", "mONAD"]);
// initials
assert_order("ab", &["ab", "aoo_boo", "acb"]);
assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
assert_order(
"cc",
|
random_line_split
|
||
skim.rs
|
BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maximum penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed chars are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how much score the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch
|
else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中
|
{
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
}
|
conditional_block
|
skim.rs
|
BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maximum penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed chars are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how much score the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn
|
(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中
|
wrap_fuzzy_match
|
identifier_name
|
ex1a_sim_vanillaSEIR_model_old.py
|
computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and initial infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def
|
(x):
return np.log(x) + r0_test*(1-x)
###################################################
######## SEIR Model Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
# Unpack timeseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated final epidemic size (analytic), not dependent on the simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if
|
epi_size
|
identifier_name
|
ex1a_sim_vanillaSEIR_model_old.py
|
they computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and initial infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def epi_size(x):
return np.log(x) + r0_test*(1-x)
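# Illustrative note (not part of the original script): epi_size encodes the classical
# final-size relation of SIR/SEIR-type models. Writing x = S_inf/S_0 and using r0_test in
# the role of R_0, the root of epi_size solves, in LaTeX notation,
#   \ln\!\left(\tfrac{S_\infty}{S_0}\right) + R_0\left(1 - \tfrac{S_\infty}{S_0}\right) = 0,
# so 1 - x (computed below as One_SinfN) is the fraction of the population ever infected.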
###################################################
######## SEIR Model Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
|
# Unpack timeseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated final epidemic size (analytic), not dependent on the simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if do
|
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
|
random_line_split
|
ex1a_sim_vanillaSEIR_model_old.py
|
computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and initial infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def epi_size(x):
return np.log(x) + r0_test*(1-x)
###################################################
######## SEIR Model Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
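# Reference note (an assumption, for readability only): simulate_seirModel presumably
# integrates the standard vanilla SEIR system with the parameters assembled above,
#   \dot S = -\beta S I / N, \qquad \dot E = \beta S I / N - \sigma E,
#   \dot I = \sigma E - \gamma I, \qquad \dot R = \gamma I,
# with a separate death compartment split out of R when seir_type == 1.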
# Unpack timeseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated final epidemic size (analytic), not dependent on the simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
|
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if do
|
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
|
conditional_block
|
ex1a_sim_vanillaSEIR_model_old.py
|
computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and initial infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def epi_size(x):
|
###################################################
######## SEIR Model Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
# Unpack timeseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated final epidemic size (analytic), not dependent on the simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if
|
return np.log(x) + r0_test*(1-x)
|
identifier_body
|
clustering.py
|
# list of all maneuvers
maneuvers = ['Obstacles15', 'Obstacles35', 'RampA', 'StraightF', 'Turn90FR', 'Turn90FL', 'Turn180L', 'Turn180R']
# choose the feature subsets for clustering
featureSet_list = ['ALL', 'ALL_TORQUE', '2D_TORQUE', 'LR_TORQUE', 'LR_TORQUE_MEAN', '2D_TORQUE_MEAN']
dataset_to_import = 'featured_data' # choose dataset/datasets to import
featured_columns = ['AngVel_L', 'AngVel_R', 'Chair_LinVel', 'Chair_AngVel', 'Torque_L', 'Torque_R',
'Torque_sum', 'Torque_diff', 'Torque_L_roc', 'Torque_R_roc']
time_features = ['Mean', 'Std', 'Max', 'Min', 'RMS']
# clustering model parameters
clus_params = {'covar_types': 'full', 'n_components': 6, 'feat_list': 'ALL_TORQUE'}
# path to save labeled data and corresponding figures
CURR_PATH = os.path.abspath('.')
# Import processed data
dataset_paths = glob.glob(os.path.join(CURR_PATH, dataset_to_import, USER, 'WinSize' + str(WIN_SIZE), '*.csv'))
# create a color palette
cmap = matplotlib.cm.get_cmap('tab10')
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if ('Torque_sum' in col or 'Torque_diff' in col) and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster and evaluate the clustering performance
input: dataframe consisting of different maneuvers to be clustered, feature sets to be used for clustering,
and the clustering model
output: labeled dataframe and three performance measures
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # create directory for the current time
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predict labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# adding all dropped features (for plotting purposes) of the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # export all gmm models and a dictionary of all labeled datasets
def labeling_func(df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
def range_dic_(df_):
"""
get the start index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index
|
# DEFINITIONS
USER = 'Mahsa' # ['Mahsa', 'Jaimie'] # participant name
WIN_SIZE = 32 # window size
|
random_line_split
|
|
clustering.py
|
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if ('Torque_sum' in col or 'Torque_diff' in col) and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
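# The helper below is an illustrative addition (hypothetical name, not part of the
# original pipeline); it shows that the StandardScaler step in prep_func is plain
# column-wise z-scoring, (x - mean) / std.
def _demo_standard_scaler_equivalence():
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    X_demo = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])  # toy feature matrix
    scaled = StandardScaler().fit_transform(X_demo)
    manual = (X_demo - X_demo.mean(axis=0)) / X_demo.std(axis=0)
    assert np.allclose(scaled, manual)
    return scaled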
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster and evaluate the clustering performance
input: dataframe consisting of different maneuvers to be clustered, feature sets to be used for clustering,
and the clustering model
output: labeled dataframe and three performance measures
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # create directory for the current time
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predict labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# adding all dropped features (for plotting purposes) of the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # export all gmm models and a dictionary of all labeled datasets
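# clus_func above loads a pre-trained model from 'gmm.joblib'; the fitting step itself is
# commented out. As an illustrative sketch (assumed workflow, hypothetical helper name),
# such a model could be fit and saved with scikit-learn and joblib like this:
def _demo_fit_and_save_gmm(X_train, out_path='gmm.joblib'):
    import joblib
    from sklearn import mixture
    gmm = mixture.GaussianMixture(n_components=6, covariance_type='full',
                                  random_state=1, n_init=10)
    gmm.fit(X_train)
    joblib.dump(gmm, out_path)  # later reloaded via joblib.load
    return gmm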
def
|
(df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
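# An illustrative helper (hypothetical name, not in the original code) isolating the
# covariance-to-ellipse conversion used in plt_gm_clusters above: take the eigendecomposition
# of a 2x2 covariance, read the orientation off the first eigenvector, and scale the axes to
# roughly a 2-sigma contour.
def _demo_cov_to_ellipse(cov2d):
    import numpy as np
    from scipy import linalg
    v, w = linalg.eigh(cov2d)                              # eigenvalues / eigenvectors
    angle = 180.0 * np.arctan2(w[0][1], w[0][0]) / np.pi   # orientation in degrees
    width, height = 2.0 * np.sqrt(2.0) * np.sqrt(v)        # ellipse axis lengths
    return width, height, angle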
def range_dic_(df_):
"""
get the start index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index[df_['maneuver'] == man].tolist()
range_ = (min(trial_indx), max(trial_indx))
range_dic.update({man: range_})
return range_dic
def plt_ts_cluster(df_, features_to_plot):
"""
input: original dataframe (with maneuver columns), clustered dataframe, number of clusters,
and selected features to plot
output: plotting clustered time series data with different colors
"""
df_clus = df_.copy()
plt_num = 2
fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
axs = axs.ravel()
states = df_clus['Clus_label']
clusterNum = clus_params['n_components']
color_dict = {i:cmap(i) for i in range(clusterNum)}
color_array = [color_dict[i] for i in states]
for i, feature in enumerate(features_to_plot):
axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
axs[i].set_xlim([-1, len(df_clus)+1])
axs[i].tick_params(direction='out', labelsize=15)
axs[i].yaxis.grid(True)
if 'Torque' in feature:
axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
elif 'Lin' in feature:
axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
elif 'Ang' in feature:
axs[i].set_ylabel(feature + ' (rad/s)', fontsize=1
|
labeling_func
|
identifier_name
|
clustering.py
|
in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if ('Torque_sum' in col or 'Torque_diff' in col) and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster and evaluate the clustering performance
input: dataframe consisting of different maneuvers to be clustered, feature sets to be used for clustering,
and the clustering model
output: labeled dataframe and three performance measures
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # create directory for the current time
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predict labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# adding all dropped features (for plotting purposes) of the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # export all gmm models and a dictionary of all labeled datasets
def labeling_func(df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
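# Helper sketch (illustrative only): how plt_gm_clusters converts a 2x2 covariance
# matrix into Ellipse parameters. sqrt(eigenvalue) is the standard deviation along each
# principal axis, so scaling by 2*sqrt(2) gives full axis lengths of 2*sqrt(2)*sigma
# (roughly a 1.4-sigma contour). The w[0] indexing mirrors the convention used above.
def cov_to_ellipse_params(cov):
    v, w = linalg.eigh(cov)                            # eigenvalues (ascending) and eigenvectors
    angle = np.degrees(np.arctan2(w[0][1], w[0][0]))   # ellipse orientation in degrees
    width, height = 2. * np.sqrt(2.) * np.sqrt(v)      # full lengths of the two ellipse axes
    return width, height, angle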
def range_dic_(df_):
"""
get the start and end index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index[df_['maneuver'] == man].tolist()
range_ = (min(trial_indx), max(trial_indx))
range_dic.update({man: range_})
return range_dic
def plt_ts_cluster(df_, features_to_plot):
"""
input: original dataframe (with maneuver columns), clustered dataframe, number of clusters,
and selected features to plot
output: plot of the clustered time series data with each cluster shown in a different color
"""
df_clus = df_.copy()
plt_num = 2
fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
axs = axs.ravel()
states = df_clus['Clus_label']
clusterNum = clus_params['n_components']
color_dict = {i:cmap(i) for i in range(clusterNum)}
color_array = [color_dict[i] for i in states]
for i, feature in enumerate(features_to_plot):
axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
axs[i].set_xlim([-1, len(df_clus)+1])
axs[i].tick_params(direction='out', labelsize=15)
axs[i].yaxis.grid(True)
if 'Torque' in feature:
axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
elif 'Lin' in feature:
axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
elif 'Ang' in feature:
axs[i].set_ylabel(feature + ' (rad/s)', fontsize=15)
fig.suptitle(clus_params['feat_list'], fontsize=16)
range_dic = range_dic_(df_clus)
for trial, range_ in range_dic.items():
axs[0].text(range_[0], axs[0].get_ylim()[1]+0.2, trial, fontsize=15, rotation=45)
for i in range(plt_num):
axs[i].axvline(x=range_[0], linestyle='--', linewidth=0.5)
plt.show()
# function to plot clusters in time series data
def plt_ts_cluster_subset(df_, features_to_plot, man_list=maneuvers):
|
"""
input: original dataframe (with maneuver columns), clustered dataframe, number of clusters,
and selected features to plot
output: plot of the clustered time series data with each cluster shown in a different color
"""
clusterNum = clus_params['n_components']
color_dict = {i: cmap(i) for i in range(clusterNum)}
figsize = (15, 15)
legend_size = 15
if len(man_list) == 1:
figsize = (15, 8)
fig, axs = plt.subplots(len(man_list), 1, figsize=figsize, constrained_layout=True)
fig.suptitle(clus_params['feat_list'], fontsize=16)
if len(man_list) != 1:
axs = axs.ravel()
|
identifier_body
|
|
clustering.py
|
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
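# Usage sketch (assumption): the featured CSVs could be discovered with glob and passed
# to import_func. The 'featured_data' directory name is a placeholder, not taken from
# the original code.
def load_featured_datasets(data_dir):
    import glob
    dataset_paths = sorted(glob.glob(os.path.join(data_dir, '*.csv')))
    return import_func(dataset_paths)  # returns (datasets_dic, dataset_names)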
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features of the sum and diff signals (mean/std/... of Torque_sum/Torque_diff), excluding roc
columns_2d_torque = [col for col in df_all.columns.tolist()
                     if ('Torque_sum' in col or 'Torque_diff' in col) and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque sum and diff only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
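# Sanity-check sketch (illustrative): the StandardScaler call above is the column-wise
# z-score with the population standard deviation (ddof=0); for columns with non-zero
# variance this manual version produces the same values as scaler.fit_transform(X).
def zscore_manually(X):
    X = np.asarray(X, dtype=float)
    return (X - X.mean(axis=0)) / X.std(axis=0)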
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster the maneuver data with a pre-trained Gaussian mixture model
input: dataframe consisting of the different maneuvers to be clustered, the number of components,
and the feature subset to use for clustering
output: the loaded GMM model and the dataframe labeled with cluster assignments and membership probabilities
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # directory of the stored clustering model
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predict labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# add back all dropped features (for plotting purposes) from the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # return the loaded gmm model and the labeled dataframe
def labeling_func(df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label'] = df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
|
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
def range_dic_(df_):
"""
get the start and end index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index[df_['maneuver'] == man].tolist()
range_ = (min(trial_indx), max(trial_indx))
range_dic.update({man: range_})
return range_dic
def plt_ts_cluster(df_, features_to_plot):
"""
input: original dataframe (with maneuver columns), clustered dataframe, number of clusters,
and selected features to plot
output: plot of the clustered time series data with each cluster shown in a different color
"""
df_clus = df_.copy()
plt_num = 2
fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
axs = axs.ravel()
states = df_clus['Clus_label']
clusterNum = clus_params['n_components']
color_dict = {i:cmap(i) for i in range(clusterNum)}
color_array = [color_dict[i] for i in states]
for i, feature in enumerate(features_to_plot):
axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
axs[i].set_xlim([-1, len(df_clus)+1])
axs[i].tick_params(direction='out', labelsize=15)
axs[i].yaxis.grid(True)
if 'Torque' in feature:
axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
elif 'Lin' in feature:
axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
elif 'Ang' in feature:
axs[i].set_ylabel(feature + ' (rad/s)', fontsize=15
|
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
|
conditional_block
|
dataflows.rs
|
self.index_imports.insert(
id,
IndexImport {
desc,
typ,
monotonic,
usage_types: None,
},
);
}
/// Imports a source and makes it available as `id`.
pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) {
// Import the source with no linear operators applied to it.
// They may be populated by whole-dataflow optimization.
self.source_imports.insert(
id,
(
SourceInstanceDesc {
storage_metadata: (),
arguments: SourceInstanceArguments { operators: None },
typ,
},
monotonic,
),
);
}
/// Binds to `id` the relation expression `plan`.
pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
self.objects_to_build.push(BuildDesc { id, plan });
}
/// Exports as `id` an index described by `description`.
///
/// Future uses of `import_index` in other dataflow descriptions may use `id`,
/// as long as this dataflow has not been terminated in the meantime.
pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) {
// We first create a "view" named `id` that ensures that the
// data are correctly arranged and available for export.
self.insert_plan(
id,
OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
input: Box::new(MirRelationExpr::global_get(
description.on_id,
on_type.clone(),
)),
keys: vec![description.key.clone()],
}),
);
self.index_exports.insert(id, (description, on_type));
}
/// Exports as `id` a sink described by `description`.
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
self.sink_exports.insert(id, description);
}
/// Returns true iff `id` is already imported.
pub fn is_imported(&self, id: &GlobalId) -> bool {
self.objects_to_build.iter().any(|bd| &bd.id == id)
|| self.source_imports.keys().any(|i| i == id)
}
/// Assigns the `as_of` frontier to the supplied argument.
///
/// This method allows the dataflow to indicate a frontier up through
/// which all times should be advanced. This can be done for at least
/// two reasons: 1. correctness and 2. performance.
///
/// Correctness may require an `as_of` to ensure that historical detail
/// is consolidated at representative times that do not present specific
/// detail that is not specifically correct. For example, updates may be
/// compacted to times that are no longer the source times, but instead
/// some byproduct of when compaction was executed; we should not present
/// those specific times as meaningfully different from other equivalent
/// times.
///
/// Performance may benefit from an aggressive `as_of` as it reduces the
/// number of distinct moments at which collections vary. Differential
/// dataflow will refresh its outputs at each time its inputs change and
/// to moderate that we can minimize the volume of distinct input times
/// as much as possible.
///
/// Generally, one should consider setting `as_of` at least to the `since`
/// frontiers of contributing data sources and as aggressively as the
/// computation permits.
pub fn set_as_of(&mut self, as_of: Antichain<T>) {
self.as_of = Some(as_of);
}
/// The number of columns associated with an identifier in the dataflow.
pub fn arity_of(&self, id: &GlobalId) -> usize {
for (source_id, (source, _monotonic)) in self.source_imports.iter() {
if source_id == id {
return source.typ.arity();
}
}
for IndexImport { desc, typ, .. } in self.index_imports.values() {
if &desc.on_id == id {
return typ.arity();
}
}
for desc in self.objects_to_build.iter() {
if &desc.id == id {
return desc.plan.arity();
}
}
panic!("GlobalId {} not found in DataflowDesc", id);
}
/// Calls r and s on any sub-members of those types in self. Halts at the first error return.
pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
where
R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
S: Fn(&mut MirScalarExpr) -> Result<(), E>,
{
for BuildDesc { plan, .. } in &mut self.objects_to_build {
r(plan)?;
}
for (source_instance_desc, _) in self.source_imports.values_mut() {
let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else {
continue;
};
for expr in mfp.expressions.iter_mut() {
s(expr)?;
}
for (_, expr) in mfp.predicates.iter_mut() {
s(expr)?;
}
}
Ok(())
}
}
impl<P, S, T> DataflowDescription<P, S, T>
where
P: CollectionPlan,
{
/// Identifiers of exported objects (indexes and sinks).
pub fn
|
(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.index_exports
.keys()
.chain(self.sink_exports.keys())
.cloned()
}
/// Identifiers of exported subscribe sinks.
pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.sink_exports
.iter()
.filter_map(|(id, desc)| match desc.connection {
ComputeSinkConnection::Subscribe(_) => Some(*id),
_ => None,
})
}
/// Returns the description of the object to build with the specified
/// identifier.
///
/// # Panics
///
/// Panics if `id` is not present in `objects_to_build` exactly once.
pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
let build = builds
.next()
.unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
assert!(builds.next().is_none());
build
}
/// Computes the set of identifiers upon which the specified collection
/// identifier depends.
///
/// `collection_id` must specify a valid object in `objects_to_build`.
///
/// This method includes identifiers for e.g. intermediate views, and should be filtered
/// if one only wants sources and indexes.
///
/// This method is safe for mutually recursive view definitions.
pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let mut out = BTreeSet::new();
self.depends_on_into(collection_id, &mut out);
out
}
/// Like `depends_on`, but appends to an existing `BTreeSet`.
pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
out.insert(collection_id);
if self.source_imports.contains_key(&collection_id) {
// The collection is provided by an imported source. Report the
// dependency on the source.
out.insert(collection_id);
return;
}
// NOTE(benesch): we're not smart enough here to know *which* index
// for the collection will be used, if one exists, so we have to report
// the dependency on all of them.
let mut found_index = false;
for (index_id, IndexImport { desc, .. }) in &self.index_imports {
if desc.on_id == collection_id {
// The collection is provided by an imported index. Report the
// dependency on the index.
out.insert(*index_id);
found_index = true;
}
}
if found_index {
return;
}
// The collection is not provided by a source or imported index.
// It must be a collection whose plan we have handy. Recurse.
let build = self.build_desc(collection_id);
for id in build.plan.depends_on() {
if !out.contains(&id) {
self.depends_on_into(id, out)
}
}
}
/// Computes the set of imports upon which the specified collection depends.
///
/// This method behaves like `depends_on` but filters out internal dependencies that are not
/// included in the dataflow imports.
pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let is_import = |id: &GlobalId| {
self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
};
let deps = self.depends_on(collection_id);
deps.into_iter().filter(is_import).collect()
}
}
impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> {
/// Determine if a dataflow description is compatible with this
|
export_ids
|
identifier_name
|
dataflows.rs
|
self.index_imports.insert(
id,
IndexImport {
desc,
typ,
monotonic,
usage_types: None,
},
);
}
/// Imports a source and makes it available as `id`.
pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) {
// Import the source with no linear operators applied to it.
// They may be populated by whole-dataflow optimization.
self.source_imports.insert(
id,
(
SourceInstanceDesc {
storage_metadata: (),
arguments: SourceInstanceArguments { operators: None },
typ,
},
monotonic,
),
);
}
/// Binds to `id` the relation expression `plan`.
pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
self.objects_to_build.push(BuildDesc { id, plan });
}
/// Exports as `id` an index described by `description`.
///
/// Future uses of `import_index` in other dataflow descriptions may use `id`,
/// as long as this dataflow has not been terminated in the meantime.
pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) {
// We first create a "view" named `id` that ensures that the
// data are correctly arranged and available for export.
self.insert_plan(
id,
OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
input: Box::new(MirRelationExpr::global_get(
description.on_id,
on_type.clone(),
)),
keys: vec![description.key.clone()],
}),
);
self.index_exports.insert(id, (description, on_type));
}
/// Exports as `id` a sink described by `description`.
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
self.sink_exports.insert(id, description);
}
/// Returns true iff `id` is already imported.
pub fn is_imported(&self, id: &GlobalId) -> bool {
self.objects_to_build.iter().any(|bd| &bd.id == id)
|| self.source_imports.keys().any(|i| i == id)
}
/// Assigns the `as_of` frontier to the supplied argument.
///
/// This method allows the dataflow to indicate a frontier up through
/// which all times should be advanced. This can be done for at least
/// two reasons: 1. correctness and 2. performance.
///
/// Correctness may require an `as_of` to ensure that historical detail
/// is consolidated at representative times that do not present specific
/// detail that is not specifically correct. For example, updates may be
/// compacted to times that are no longer the source times, but instead
/// some byproduct of when compaction was executed; we should not present
/// those specific times as meaningfully different from other equivalent
/// times.
///
/// Performance may benefit from an aggressive `as_of` as it reduces the
/// number of distinct moments at which collections vary. Differential
/// dataflow will refresh its outputs at each time its inputs change and
/// to moderate that we can minimize the volume of distinct input times
/// as much as possible.
///
/// Generally, one should consider setting `as_of` at least to the `since`
/// frontiers of contributing data sources and as aggressively as the
/// computation permits.
pub fn set_as_of(&mut self, as_of: Antichain<T>) {
self.as_of = Some(as_of);
}
/// The number of columns associated with an identifier in the dataflow.
pub fn arity_of(&self, id: &GlobalId) -> usize
|
/// Calls r and s on any sub-members of those types in self. Halts at the first error return.
pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
where
R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
S: Fn(&mut MirScalarExpr) -> Result<(), E>,
{
for BuildDesc { plan, .. } in &mut self.objects_to_build {
r(plan)?;
}
for (source_instance_desc, _) in self.source_imports.values_mut() {
let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else {
continue;
};
for expr in mfp.expressions.iter_mut() {
s(expr)?;
}
for (_, expr) in mfp.predicates.iter_mut() {
s(expr)?;
}
}
Ok(())
}
}
impl<P, S, T> DataflowDescription<P, S, T>
where
P: CollectionPlan,
{
/// Identifiers of exported objects (indexes and sinks).
pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.index_exports
.keys()
.chain(self.sink_exports.keys())
.cloned()
}
/// Identifiers of exported subscribe sinks.
pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.sink_exports
.iter()
.filter_map(|(id, desc)| match desc.connection {
ComputeSinkConnection::Subscribe(_) => Some(*id),
_ => None,
})
}
/// Returns the description of the object to build with the specified
/// identifier.
///
/// # Panics
///
/// Panics if `id` is not present in `objects_to_build` exactly once.
pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
let build = builds
.next()
.unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
assert!(builds.next().is_none());
build
}
/// Computes the set of identifiers upon which the specified collection
/// identifier depends.
///
/// `collection_id` must specify a valid object in `objects_to_build`.
///
/// This method includes identifiers for e.g. intermediate views, and should be filtered
/// if one only wants sources and indexes.
///
/// This method is safe for mutually recursive view definitions.
pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let mut out = BTreeSet::new();
self.depends_on_into(collection_id, &mut out);
out
}
/// Like `depends_on`, but appends to an existing `BTreeSet`.
pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
out.insert(collection_id);
if self.source_imports.contains_key(&collection_id) {
// The collection is provided by an imported source. Report the
// dependency on the source.
out.insert(collection_id);
return;
}
// NOTE(benesch): we're not smart enough here to know *which* index
// for the collection will be used, if one exists, so we have to report
// the dependency on all of them.
let mut found_index = false;
for (index_id, IndexImport { desc, .. }) in &self.index_imports {
if desc.on_id == collection_id {
// The collection is provided by an imported index. Report the
// dependency on the index.
out.insert(*index_id);
found_index = true;
}
}
if found_index {
return;
}
// The collection is not provided by a source or imported index.
// It must be a collection whose plan we have handy. Recurse.
let build = self.build_desc(collection_id);
for id in build.plan.depends_on() {
if !out.contains(&id) {
self.depends_on_into(id, out)
}
}
}
/// Computes the set of imports upon which the specified collection depends.
///
/// This method behaves like `depends_on` but filters out internal dependencies that are not
/// included in the dataflow imports.
pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let is_import = |id: &GlobalId| {
self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
};
let deps = self.depends_on(collection_id);
deps.into_iter().filter(is_import).collect()
}
}
impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> {
/// Determine if a dataflow description is compatible with
|
{
for (source_id, (source, _monotonic)) in self.source_imports.iter() {
if source_id == id {
return source.typ.arity();
}
}
for IndexImport { desc, typ, .. } in self.index_imports.values() {
if &desc.on_id == id {
return typ.arity();
}
}
for desc in self.objects_to_build.iter() {
if &desc.id == id {
return desc.plan.arity();
}
}
panic!("GlobalId {} not found in DataflowDesc", id);
}
|
identifier_body
|
dataflows.rs
|
{
self.index_imports.insert(
id,
IndexImport {
desc,
typ,
monotonic,
usage_types: None,
},
);
}
/// Imports a source and makes it available as `id`.
pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) {
// Import the source with no linear operators applied to it.
// They may be populated by whole-dataflow optimization.
self.source_imports.insert(
id,
(
SourceInstanceDesc {
storage_metadata: (),
arguments: SourceInstanceArguments { operators: None },
typ,
},
monotonic,
),
);
}
/// Binds to `id` the relation expression `plan`.
pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
self.objects_to_build.push(BuildDesc { id, plan });
}
/// Exports as `id` an index described by `description`.
///
/// Future uses of `import_index` in other dataflow descriptions may use `id`,
/// as long as this dataflow has not been terminated in the meantime.
pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) {
// We first create a "view" named `id` that ensures that the
// data are correctly arranged and available for export.
self.insert_plan(
id,
OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
input: Box::new(MirRelationExpr::global_get(
description.on_id,
on_type.clone(),
)),
keys: vec![description.key.clone()],
}),
);
self.index_exports.insert(id, (description, on_type));
}
/// Exports as `id` a sink described by `description`.
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
self.sink_exports.insert(id, description);
}
|
/// Returns true iff `id` is already imported.
pub fn is_imported(&self, id: &GlobalId) -> bool {
self.objects_to_build.iter().any(|bd| &bd.id == id)
|| self.source_imports.keys().any(|i| i == id)
}
/// Assigns the `as_of` frontier to the supplied argument.
///
/// This method allows the dataflow to indicate a frontier up through
/// which all times should be advanced. This can be done for at least
/// two reasons: 1. correctness and 2. performance.
///
/// Correctness may require an `as_of` to ensure that historical detail
/// is consolidated at representative times that do not present specific
/// detail that is not specifically correct. For example, updates may be
/// compacted to times that are no longer the source times, but instead
/// some byproduct of when compaction was executed; we should not present
/// those specific times as meaningfully different from other equivalent
/// times.
///
/// Performance may benefit from an aggressive `as_of` as it reduces the
/// number of distinct moments at which collections vary. Differential
/// dataflow will refresh its outputs at each time its inputs change and
/// to moderate that we can minimize the volume of distinct input times
/// as much as possible.
///
/// Generally, one should consider setting `as_of` at least to the `since`
/// frontiers of contributing data sources and as aggressively as the
/// computation permits.
pub fn set_as_of(&mut self, as_of: Antichain<T>) {
self.as_of = Some(as_of);
}
/// The number of columns associated with an identifier in the dataflow.
pub fn arity_of(&self, id: &GlobalId) -> usize {
for (source_id, (source, _monotonic)) in self.source_imports.iter() {
if source_id == id {
return source.typ.arity();
}
}
for IndexImport { desc, typ, .. } in self.index_imports.values() {
if &desc.on_id == id {
return typ.arity();
}
}
for desc in self.objects_to_build.iter() {
if &desc.id == id {
return desc.plan.arity();
}
}
panic!("GlobalId {} not found in DataflowDesc", id);
}
/// Calls r and s on any sub-members of those types in self. Halts at the first error return.
pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
where
R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
S: Fn(&mut MirScalarExpr) -> Result<(), E>,
{
for BuildDesc { plan, .. } in &mut self.objects_to_build {
r(plan)?;
}
for (source_instance_desc, _) in self.source_imports.values_mut() {
let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else {
continue;
};
for expr in mfp.expressions.iter_mut() {
s(expr)?;
}
for (_, expr) in mfp.predicates.iter_mut() {
s(expr)?;
}
}
Ok(())
}
}
impl<P, S, T> DataflowDescription<P, S, T>
where
P: CollectionPlan,
{
/// Identifiers of exported objects (indexes and sinks).
pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.index_exports
.keys()
.chain(self.sink_exports.keys())
.cloned()
}
/// Identifiers of exported subscribe sinks.
pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.sink_exports
.iter()
.filter_map(|(id, desc)| match desc.connection {
ComputeSinkConnection::Subscribe(_) => Some(*id),
_ => None,
})
}
/// Returns the description of the object to build with the specified
/// identifier.
///
/// # Panics
///
/// Panics if `id` is not present in `objects_to_build` exactly once.
pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
let build = builds
.next()
.unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
assert!(builds.next().is_none());
build
}
/// Computes the set of identifiers upon which the specified collection
/// identifier depends.
///
/// `collection_id` must specify a valid object in `objects_to_build`.
///
/// This method includes identifiers for e.g. intermediate views, and should be filtered
/// if one only wants sources and indexes.
///
/// This method is safe for mutually recursive view definitions.
pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let mut out = BTreeSet::new();
self.depends_on_into(collection_id, &mut out);
out
}
/// Like `depends_on`, but appends to an existing `BTreeSet`.
pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
out.insert(collection_id);
if self.source_imports.contains_key(&collection_id) {
// The collection is provided by an imported source. Report the
// dependency on the source.
out.insert(collection_id);
return;
}
// NOTE(benesch): we're not smart enough here to know *which* index
// for the collection will be used, if one exists, so we have to report
// the dependency on all of them.
let mut found_index = false;
for (index_id, IndexImport { desc, .. }) in &self.index_imports {
if desc.on_id == collection_id {
// The collection is provided by an imported index. Report the
// dependency on the index.
out.insert(*index_id);
found_index = true;
}
}
if found_index {
return;
}
// The collection is not provided by a source or imported index.
// It must be a collection whose plan we have handy. Recurse.
let build = self.build_desc(collection_id);
for id in build.plan.depends_on() {
if !out.contains(&id) {
self.depends_on_into(id, out)
}
}
}
/// Computes the set of imports upon which the specified collection depends.
///
/// This method behaves like `depends_on` but filters out internal dependencies that are not
/// included in the dataflow imports.
pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let is_import = |id: &GlobalId| {
self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
};
let deps = self.depends_on(collection_id);
deps.into_iter().filter(is_import).collect()
}
}
impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> {
/// Determine if a dataflow description is compatible with this
|
random_line_split
|
|
rpc.rs
|
>,
stop_flag: Arc<Notify>,
) -> crate::Result<(
(WorkerId, WorkerConfiguration),
impl Future<Output = crate::Result<()>>,
)> {
let (_listener, port) = start_listener().await?;
configuration.listen_address = format!("{}:{}", configuration.hostname, port);
let ConnectionDescriptor {
mut sender,
mut receiver,
mut opener,
mut sealer,
..
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?;
{
let message = ConnectionRegistration::Worker(RegisterWorker {
configuration: configuration.clone(),
});
let data = serialize(&message)?.into();
sender.send(seal_message(&mut sealer, data)).await?;
}
let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>();
let heartbeat_interval = configuration.heartbeat_interval;
let overview_configuration = configuration.overview_configuration.clone();
let time_limit = configuration.time_limit;
let (worker_id, state, start_task_notify) = {
match timeout(Duration::from_secs(15), receiver.next()).await {
Ok(Some(data)) => {
let WorkerRegistrationResponse {
worker_id,
other_workers,
resource_names,
server_idle_timeout,
server_uid,
} = open_message(&mut opener, &data?)?;
sync_worker_configuration(&mut configuration, server_idle_timeout);
let start_task_notify = Rc::new(Notify::new());
let comm = WorkerComm::new(queue_sender, start_task_notify.clone());
let state_ref = WorkerStateRef::new(
comm,
worker_id,
configuration.clone(),
secret_key,
ResourceMap::from_vec(resource_names),
launcher_setup,
server_uid,
);
{
let mut state = state_ref.get_mut();
for worker_info in other_workers {
state.new_worker(worker_info);
}
}
(worker_id, state_ref, start_task_notify)
}
Ok(None) => panic!("Connection closed without receiving registration response"),
Err(_) => panic!("Did not receive worker registration response"),
}
};
let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone());
let idle_timeout_fut = match configuration.idle_timeout {
Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())),
None => Either::Right(futures::future::pending()),
};
let overview_fut = match overview_configuration {
None => Either::Left(futures::future::pending()),
Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)),
};
let time_limit_fut = match time_limit {
None => Either::Left(futures::future::pending::<()>()),
Some(d) => Either::Right(tokio::time::sleep(d)),
};
let future = async move {
let try_start_tasks = task_starter_process(state.clone(), start_task_notify);
let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer);
tokio::pin! {
let send_loop = send_loop;
let try_start_tasks = try_start_tasks;
}
let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! {
r = worker_message_loop(state.clone(), receiver, opener) => {
log::debug!("Server read connection has disconnected");
r.map(|_| None)
}
r = &mut send_loop => {
log::debug!("Server write connection has disconnected");
r.map_err(|e| e.into()).map(|_| None)
},
_ = time_limit_fut => {
log::info!("Time limit reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached)))
}
_ = idle_timeout_fut => {
log::info!("Idle timeout reached");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout)))
}
_ = stop_flag.notified() => {
log::info!("Worker received an external stop notification");
Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted)))
}
_ = &mut try_start_tasks => { unreachable!() }
_ = heartbeat_fut => { unreachable!() }
_ = overview_fut => { unreachable!() }
};
// Handle sending stop info to the server and finishing running tasks gracefully.
let result = match result {
Ok(Some(msg)) => {
// Worker wants to end gracefully, send message to the server
{
state.get_mut().comm().send_message_to_server(msg);
state.get_mut().comm().drop_sender();
}
send_loop.await?;
Ok(())
}
Ok(None) => {
// Graceful shutdown from server
Ok(())
}
Err(e) => {
// Server has disconnected
tokio::select! {
_ = &mut try_start_tasks => { unreachable!() }
r = finish_tasks_on_server_lost(state.clone()) => r
}
Err(e)
}
};
// At this point, there can still be some tasks that are running.
// We cancel them here to make sure that we do not leak their spawned processes, if possible.
// The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local,
// therefore we do not need to await any specific future to drive them forward.
// try_start_tasks is not being polled, therefore no new tasks should be started.
cancel_running_tasks_on_worker_end(state).await;
result
};
// Provide a local task set for spawning futures
let future = async move {
let set = tokio::task::LocalSet::new();
set.run_until(future).await
};
Ok(((worker_id, configuration), future))
}
async fn finish_tasks_on_server_lost(state: WorkerStateRef) {
let on_server_lost = state.get().configuration.on_server_lost.clone();
match on_server_lost {
ServerLostPolicy::Stop => {}
ServerLostPolicy::FinishRunning => {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
if !state.is_empty() {
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
Some(notify)
} else {
None
}
};
if let Some(notify) = notify {
log::info!("Waiting for finishing running tasks");
notify.notified().await;
log::info!("All running tasks were finished");
} else {
log::info!("No running tasks remain")
}
}
}
}
async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) {
let notify = {
let mut state = state.get_mut();
state.drop_non_running_tasks();
for task in state.running_tasks.clone() {
state.cancel_task(task);
}
if state.running_tasks.is_empty() {
return;
}
let notify = Rc::new(Notify::new());
state.comm().set_idle_worker_notify(notify.clone());
notify
};
log::info!("Waiting for stopping running tasks");
match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await {
Ok(_) => {
log::info!("All running tasks were stopped");
}
Err(_) => {
log::info!("Timed out while waiting for running tasks to stop");
}
}
}
/// Tries to start tasks after a new task appears or some task finishes.
async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) {
loop {
notify.notified().await;
let mut state = state_ref.get_mut();
state.start_task_scheduled = false;
let remaining_time = if let Some(limit) = state.configuration.time_limit {
let life_time = std::time::Instant::now() - state.start_time;
if life_time >= limit {
log::debug!("Trying to start a task after time limit");
break;
}
Some(limit - life_time)
} else {
None
};
loop {
let (task_map, ready_task_queue) = state.borrow_tasks_and_queue();
let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time);
if allocations.is_empty() {
break;
}
for (task_id, allocation, resource_index) in allocations {
run_task(&mut state, &state_ref, task_id, allocation, resource_index);
}
}
}
}
/// Repeatedly sends a heartbeat message to the server.
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) {
let mut interval = tokio::time::interval(heartbeat_interval);
loop {
interval.tick().await;
state_ref
.get_mut()
.comm()
.send_message_to_server(FromWorkerMessage::Heartbeat);
log::debug!("Heartbeat sent");
}
}
/// Runs until an idle timeout happens.
/// Idle timeout occurs when the worker doesn't have anything to do for the specified duration.
async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>)
|
{
let mut interval = tokio::time::interval(Duration::from_secs(1));
loop {
interval.tick().await;
let state = state_ref.get();
if !state.has_tasks() && !state.reservation {
let elapsed = state.last_task_finish_time.elapsed();
if elapsed > idle_timeout {
break;
}
}
}
}
|
identifier_body
|