file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 distinct values)
---|---|---|---|---|
mod.rs
|
, then broadcast addresses should use the
/// `Broadcast` variant.
Broadcast,
}
impl FrameDestination {
/// Is this `FrameDestination::Multicast`?
pub(crate) fn is_multicast(self) -> bool {
self == FrameDestination::Multicast
}
/// Is this `FrameDestination::Broadcast`?
pub(crate) fn is_broadcast(self) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
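// Added usage sketch (not part of the original source): how the builder above is
// intended to be used. `my_ndp_configs` is a hypothetical `ndp::NdpConfigurations`
// value; only the methods shown above are assumed to exist.
//
//     let mut builder = DeviceStateBuilder::default();
//     builder.set_default_ndp_configs(my_ndp_configs);
//     let device_layer_state = builder.build();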
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add_ethernet_device` adds a new `EthernetDeviceState` with the given MAC
/// address and MTU. The MTU will be taken as a limit on the size of Ethernet
/// payloads - the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independent state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independent) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independent) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4.
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
/// A timer event in the NDP layer.
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note that until `device` has been initialized, the netstack promises not
/// to send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialization step is kept separate from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation-specific IDs or
/// state, configuring the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// RFC 4861 section 6.3.7 implies that only hosts send router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("initialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
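// Added usage sketch (assumption, not from the original source): the expected call
// order when bringing a device up. `ctx`, `mac`, and the MTU value are hypothetical;
// `StackState::add_ethernet_device` is the method referenced in the docs above.
//
//     let device = ctx.state_mut().add_ethernet_device(mac, 1500);
//     // implementation-specific setup (driver configuration, ID bookkeeping, ...)
//     initialize_device(ctx, device); // panics if `device` is already initialized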
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
|
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note that tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx
|
random_line_split
|
|
mod.rs
|
.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note that tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including a
/// tentative address.
pub fn get_ip_addr_subnet_with_tentative<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<Tentative<AddrSubnet<A>>> {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::get_ip_addr_subnet_with_tentative(ctx, device.id)
}
}
}
/// Set the IP address and subnet associated with this device.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn set_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
addr_sub: AddrSubnet<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("set_ip_addr_subnet: setting addr {:?} for device {:?}", addr_sub, device);
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::set_ip_addr_subnet(ctx, device.id, addr_sub),
}
}
/// Add `device` to a multicast group `multicast_addr`.
///
/// If `device` is already in the multicast group `multicast_addr`,
/// `join_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn join_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} joining multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::join_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Remove `device` from a multicast group `multicast_addr`.
///
/// If `device` is not in the multicast group `multicast_addr`,
/// `leave_ip_multicast` does nothing.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn leave_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &mut Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
trace!("join_ip_multicast: device {:?} leaving multicast {:?}", device, multicast_addr);
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::leave_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Is `device` part of the IP multicast group `multicast_addr`?
pub(crate) fn is_in_ip_multicast<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
multicast_addr: MulticastAddr<A>,
) -> bool {
match device.protocol {
DeviceProtocol::Ethernet => {
self::ethernet::is_in_ip_multicast(ctx, device.id, multicast_addr)
}
}
}
/// Get the MTU associated with this device.
pub(crate) fn get_mtu<D: EventDispatcher>(state: &StackState<D>, device: DeviceId) -> u32 {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_mtu(state, device.id),
}
}
/// Gets the IPv6 link-local address associated with this device.
// TODO(brunodalbo) when our device model allows for multiple IPs we can have
// a single function to get all the IP addresses associated with a device, which
// would be cleaner and remove the need for this function.
pub fn get_ipv6_link_local_addr<D: EventDispatcher>(
ctx: &Context<D>,
device: DeviceId,
) -> LinkLocalAddr<Ipv6Addr> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ipv6_link_local_addr(ctx, device.id),
}
}
/// Determine if an IP address is considered tentative on a device.
///
/// Returns `true` if the address is tentative on a device; `false` otherwise.
/// Note that if `addr` is not assigned to `device` but is considered tentative
/// on another device, `is_addr_tentative_on_device` will return `false`.
pub(crate) fn is_addr_tentative_on_device<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
addr: A,
device: DeviceId,
) -> bool {
get_ip_addr_subnet_with_tentative::<_, A>(ctx, device)
.map(|x| (x.inner().addr() == addr) && x.is_tentative())
.unwrap_or(false)
}
/// Get a reference to the common device state for a `device`.
fn get_common_device_state<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> &CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common(),
}
}
/// Get a mutable reference to the common device state for a `device`.
fn get_common_device_state_mut<D: EventDispatcher>(
state: &mut StackState<D>,
device: DeviceId,
) -> &mut CommonDeviceState {
match device.protocol {
DeviceProtocol::Ethernet => state
.device
.ethernet
.get_mut(device.id)
.unwrap_or_else(|| panic!("no such Ethernet device: {}", device.id))
.common_mut(),
}
}
/// An address that may be "tentative" in that it has not yet passed
/// duplicate address detection (DAD).
///
/// A tentative address is one for which DAD is currently being performed.
/// An address is only considered assigned to an interface once DAD has
/// completed without detecting any duplicates. See [RFC 4862] for more details.
///
/// [RFC 4862]: https://tools.ietf.org/html/rfc4862
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Tentative<T>(T, bool);
impl<T> Tentative<T> {
/// Create a new address that is marked as tentative.
pub(crate) fn new_tentative(t: T) -> Self {
Self(t, true)
}
/// Create a new address that is marked as permanent/assigned.
pub(crate) fn new_permanent(t: T) -> Self {
Self(t, false)
}
/// Returns whether the value is tentative.
pub(crate) fn is_tentative(&self) -> bool {
self.1
}
/// Gets the value that is stored inside.
pub(crate) fn into_inner(self) -> T {
self.0
}
/// Converts a `Tentative<T>` into an `Option<T>`, where a tentative value
/// corresponds to `None`.
pub(crate) fn try_into_permanent(self) -> Option<T>
|
{
if self.is_tentative() {
None
} else {
Some(self.into_inner())
}
}
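// Added usage sketch (assumption): how `Tentative` values are consumed. A tentative
// address yields `None` from `try_into_permanent` until DAD has completed.
// `candidate` is a hypothetical address value.
//
//     let pending = Tentative::new_tentative(candidate);
//     assert!(pending.is_tentative());
//     assert_eq!(pending.try_into_permanent(), None);
//
//     let assigned = Tentative::new_permanent(candidate);
//     assert_eq!(assigned.try_into_permanent(), Some(candidate));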
|
identifier_body
|
|
mod.rs
|
) -> bool {
self == FrameDestination::Broadcast
}
}
/// Builder for a [`DeviceLayerState`].
#[derive(Clone)]
pub struct DeviceStateBuilder {
/// Default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`].
default_ndp_configs: ndp::NdpConfigurations,
}
impl Default for DeviceStateBuilder {
fn default() -> Self {
Self { default_ndp_configs: ndp::NdpConfigurations::default() }
}
}
impl DeviceStateBuilder {
/// Set the default values for NDP's configurations for new interfaces.
///
/// See [`ndp::NdpConfigurations`] for more details.
pub fn set_default_ndp_configs(&mut self, v: ndp::NdpConfigurations) {
self.default_ndp_configs = v;
}
/// Build the [`DeviceLayerState`].
pub(crate) fn build(self) -> DeviceLayerState {
DeviceLayerState { ethernet: IdMap::new(), default_ndp_configs: self.default_ndp_configs }
}
}
/// The state associated with the device layer.
pub(crate) struct DeviceLayerState {
ethernet: IdMap<DeviceState<EthernetDeviceState>>,
default_ndp_configs: ndp::NdpConfigurations,
}
impl DeviceLayerState {
/// Add a new ethernet device to the device layer.
///
/// `add_ethernet_device` adds a new `EthernetDeviceState` with the given MAC
/// address and MTU. The MTU will be taken as a limit on the size of Ethernet
/// payloads - the Ethernet header is not counted towards the MTU.
pub(crate) fn add_ethernet_device(&mut self, mac: Mac, mtu: u32) -> DeviceId {
let mut builder = EthernetDeviceStateBuilder::new(mac, mtu);
builder.set_ndp_configs(self.default_ndp_configs.clone());
let mut ethernet_state = DeviceState::new(builder.build());
let id = self.ethernet.push(ethernet_state);
debug!("adding Ethernet device with ID {} and MTU {}", id, mtu);
DeviceId::new_ethernet(id)
}
// TODO(rheacock, NET-2140): Add ability to remove inactive devices
}
/// Common state across devices.
#[derive(Default)]
pub(crate) struct CommonDeviceState {
/// Is the device initialized?
is_initialized: bool,
}
/// Device state.
///
/// `D` is the device-specific state.
pub(crate) struct DeviceState<D> {
/// Device-independent state.
common: CommonDeviceState,
/// Device-specific state.
device: D,
}
impl<D> DeviceState<D> {
/// Create a new `DeviceState` with a device-specific state `device`.
pub(crate) fn new(device: D) -> Self {
Self { common: CommonDeviceState::default(), device }
}
/// Get a reference to the common (device-independent) state.
pub(crate) fn common(&self) -> &CommonDeviceState {
&self.common
}
/// Get a mutable reference to the common (device-independent) state.
pub(crate) fn common_mut(&mut self) -> &mut CommonDeviceState {
&mut self.common
}
/// Get a reference to the inner (device-specific) state.
pub(crate) fn device(&self) -> &D {
&self.device
}
/// Get a mutable reference to the inner (device-specific) state.
pub(crate) fn device_mut(&mut self) -> &mut D {
&mut self.device
}
}
/// The identifier for timer events in the device layer.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub(crate) enum DeviceLayerTimerId {
/// A timer event in the ARP layer with a protocol type of IPv4.
ArpIpv4(arp::ArpTimerId<usize, Ipv4Addr>),
/// A timer event in the NDP layer.
Ndp(ndp::NdpTimerId),
}
impl From<arp::ArpTimerId<usize, Ipv4Addr>> for DeviceLayerTimerId {
fn from(id: arp::ArpTimerId<usize, Ipv4Addr>) -> DeviceLayerTimerId {
DeviceLayerTimerId::ArpIpv4(id)
}
}
/// Handle a timer event firing in the device layer.
pub(crate) fn handle_timeout<D: EventDispatcher>(ctx: &mut Context<D>, id: DeviceLayerTimerId) {
match id {
DeviceLayerTimerId::ArpIpv4(inner_id) => arp::handle_timer(ctx, inner_id),
DeviceLayerTimerId::Ndp(inner_id) => ndp::handle_timeout(ctx, inner_id),
}
}
/// An event dispatcher for the device layer.
///
/// See the `EventDispatcher` trait in the crate root for more details.
pub trait DeviceLayerEventDispatcher<B: BufferMut> {
/// Send a frame to a device driver.
///
/// If there was an MTU error while attempting to serialize the frame, the
/// original serializer is returned in the `Err` variant. All other errors
/// (for example, errors in allocating a buffer) are silently ignored and
/// reported as success.
///
/// Note that until `device` has been initialized, the netstack promises not
/// to send any outbound traffic to it. See [`initialize_device`] for more
/// information.
fn send_frame<S: Serializer<Buffer = B>>(
&mut self,
device: DeviceId,
frame: S,
) -> Result<(), S>;
}
/// Is `device` initialized?
pub(crate) fn is_device_initialized<D: EventDispatcher>(
state: &StackState<D>,
device: DeviceId,
) -> bool {
get_common_device_state(state, device).is_initialized
}
/// Initialize a device.
///
/// `initialize_device` will start soliciting IPv6 routers on the link if `device` is configured to
/// be a host.
///
/// `initialize_device` MUST be called after adding the device to the netstack. A device MUST NOT
/// be used until it has been initialized.
///
/// This initialization step is kept separate from the device creation/allocation step so that
/// implementations have a chance to do some work (such as updating implementation-specific IDs or
/// state, configuring the device or driver, etc.) before the device is actually initialized and used
/// by this netstack.
///
/// See [`StackState::add_ethernet_device`] for information about adding ethernet devices.
///
/// # Panics
///
/// Panics if `device` is already initialized.
pub fn initialize_device<D: EventDispatcher>(ctx: &mut Context<D>, device: DeviceId) {
let state = get_common_device_state_mut(ctx.state_mut(), device);
// `device` must not already be initialized.
assert!(!state.is_initialized);
state.is_initialized = true;
// RFC 4861 section 6.3.7 implies that only hosts send router
// solicitation messages, so if this node is a router, do nothing.
if crate::ip::is_router::<_, Ipv6>(ctx) {
trace!("initialize_device: node is a router so not starting router solicitations");
return;
}
match device.protocol {
DeviceProtocol::Ethernet => {
ndp::start_soliciting_routers::<_, ethernet::EthernetNdpDevice>(ctx, device.id)
}
}
}
/// Send an IP packet in a device layer frame.
///
/// `send_ip_frame` accepts a device ID, a local IP address, and a
/// `SerializationRequest`. It computes the routing information and serializes
/// the request in a new device layer frame and sends it.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub(crate) fn send_ip_frame<B: BufferMut, D: BufferDispatcher<B>, A, S>(
ctx: &mut Context<D>,
device: DeviceId,
local_addr: A,
body: S,
) -> Result<(), S>
where
A: IpAddress,
S: Serializer<Buffer = B>,
{
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::send_ip_frame(ctx, device.id, local_addr, body),
}
}
/// Receive a device layer frame from the network.
///
/// # Panics
///
/// Panics if `device` is not initialized.
pub fn receive_frame<B: BufferMut, D: BufferDispatcher<B>>(
ctx: &mut Context<D>,
device: DeviceId,
buffer: B,
) {
// `device` must be initialized.
assert!(is_device_initialized(ctx.state(), device));
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::receive_frame(ctx, device.id, buffer),
}
}
/// Get the IP address and subnet associated with this device.
///
/// Note that tentative IP addresses (addresses which are not yet fully bound to a
/// device) will not be returned by `get_ip_addr_subnet`.
pub fn get_ip_addr_subnet<D: EventDispatcher, A: IpAddress>(
ctx: &Context<D>,
device: DeviceId,
) -> Option<AddrSubnet<A>> {
match device.protocol {
DeviceProtocol::Ethernet => self::ethernet::get_ip_addr_subnet(ctx, device.id),
}
}
/// Get the IP address and subnet associated with this device, including a
/// tentative address.
pub fn
|
get_ip_addr_subnet_with_tentative
|
identifier_name
|
|
aula-principal.js
|
amy[0]); //shows what is at position 0 of this array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//increment and decrement
console.log(idade++); // prints 16, because since the ++ comes after the display, it does not show the updated age
console.log(idade); //prints 17, because of the operation on the previous line
console.log(++idade); //prints 18, because it performs the operation before displaying
idade -= 2;
//equality operators
//strict equality (most recommended)
console.log(1 === 1); //prints true, because it is the same value and the same type
console.log('1' === 1) //prints false, because although the value is the same, they are of different types
//loose equality
console.log(1 == 1); //prints true because it has the same value
console.log('1' == 1); //prints true because it still has the same value
//ternary operator
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//that is, if the condition is true, the variable ex receives 'a', otherwise it receives 'b'
//logical operators
//AND operator (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//if both compared variables are true, the result is true, but if either of them is false the result is false; in this case it is false
console.log(podeAplicar); //will print false because possuiCarteira is false
//OR operator (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//if at least one of the compared variables is true, it returns true
console.log('candidato pode aplicar: ', podeAplicar); //will print true because maiorDeIdade is true
//NOT operator (!)
let candidatoRecusado = !podeAplicar;
//if podeAplicar is true, then candidatoRecusado receives false, but if podeAplicar is false, candidatoRecusado will be true
console.log('candidato recusado: ', candidatoRecusado);//returns false because the candidate can apply
//Conditionals: if... else and switch... case
numero = 263;
//This small piece of code checks whether the condition in parentheses is true; if so it runs what is in the if, otherwise it runs what is in the "else"
if(numero % 2 == 0){ //in this case it checks whether the variable numero is even
console.log('Par');
}
else if (numero == 0){//If it does not fall into the previous condition, it performs this check
console.log('Zero');
}
else{
console.log('ímpar');
}
//Loops: for, while, do... while, for... in and for... of
for (let i = 0; i < 5; i++){
//i = index, run while the condition is true (i<5) and perform the increment
console.log(i);
}
let i = 0; //index
while (i < 5){
console.log(i);
i++; //increment
}
//The only difference is that it runs at least once before checking the index
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: create a "standard" object more easily through a method
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
marcaCel,
bateriaCel,
tamanhoT
|
);
//Constructor function: same goal as the factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instantiating the object through the constructor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//the dynamic nature of objects
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adds a property to the already existing object
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
//deleting properties and functions from the existing object:
delete mouse.trocarDPI;
delete mouse.velocidade;
console.log(mouse);
//cloning objects
//in this example the new object receives the already created phone object plus the temDigital property
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> generates a random number from 0 to 1
console.log(Math.random() * (10 - 1) + 1); // -> generates a random number from 1 to 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> returns the largest of the numbers passed as parameters
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> does the opposite of max, returns the smallest number listed
console.log(Math.pow(5,2)); // -> computes the first parameter raised to the power of the second parameter
//String: there is the primitive type and the object type
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> shows the number of characters in a string
console.log(primitivo[2]); // -> shows the character at the given index, remembering that it always starts at 0
console.log(primitivo.includes('Tipo')); // -> returns true, because the given string contains 'Tipo'
console.log(primitivo.includes('azul')); // -> returns false, because the given string does not contain 'azul'
console.log(primitivo.startsWith('Tipo')); // -> returns true, because the given string starts with 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> returns true, because the given string ends with 'primitivo'
console.log(primitivo.indexOf('v')); // -> returns the index where 'v' is
console.log(primitivo.replace('Tipo', ''));// -> replaces part of the String; in this case it becomes just 'primitivo'
primitivo.trim(); //-> removes unnecessary spaces at the start or end of the string
console.log(obj.split(' ')); //-> splits the string at each space it finds; in this case it shows 'Tipo' and 'objeto' as two separate items
//String with escape sequences
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String with a template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //filled with the current date and time
const data1 = new Date('March 06 2019 09:30'); //date and time defined when the constant is declared
const data2 = new Date(2019, 02, 06, 09, 30, 05); //date and time defined by numbers in the order: year, month, day, hour, minute, second and ms
data2.getFullYear(); //returns the year
data2.setFullYear(2021); //changes the year of this date
//Ways to convert to String:
data1.toDateString(); //returns just the date in
|
ela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1
|
identifier_body
|
aula-principal.js
|
amy[0]); //shows what is at position 0 of this array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//increment and decrement
console.log(idade++); // prints 16, because since the ++ comes after the display, it does not show the updated age
console.log(idade); //prints 17, because of the operation on the previous line
console.log(++idade); //prints 18, because it performs the operation before displaying
idade -= 2;
//equality operators
//strict equality (most recommended)
console.log(1 === 1); //prints true, because it is the same value and the same type
console.log('1' === 1) //prints false, because although the value is the same, they are of different types
//loose equality
console.log(1 == 1); //prints true because it has the same value
console.log('1' == 1); //prints true because it still has the same value
//ternary operator
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//that is, if the condition is true, the variable ex receives 'a', otherwise it receives 'b'
//logical operators
//AND operator (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//if both compared variables are true, the result is true, but if either of them is false the result is false; in this case it is false
console.log(podeAplicar); //will print false because possuiCarteira is false
//OR operator (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//if at least one of the compared variables is true, it returns true
console.log('candidato pode aplicar: ', podeAplicar); //will print true because maiorDeIdade is true
//NOT operator (!)
let candidatoRecusado = !podeAplicar;
//if podeAplicar is true, then candidatoRecusado receives false, but if podeAplicar is false, candidatoRecusado will be true
console.log('candidato recusado: ', candidatoRecusado);//returns false because the candidate can apply
//Conditionals: if... else and switch... case
numero = 263;
//This small piece of code checks whether the condition in parentheses is true; if so it runs what is in the if, otherwise it runs what is in the "else"
if(numero % 2 == 0){ //in this case it checks whether the variable numero is even
console.log('Par');
}
else if (numero == 0){//If it does not fall into the previous condition, it performs this check
console.log('Zero');
}
else{
console.log('ímpar');
}
//Loops: for, while, do... while, for... in and for... of
for (let i = 0; i < 5; i++){
//i = index, run while the condition is
|
i++; //increment
}
//The only difference is that it runs at least once before checking the index
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: create a "standard" object more easily through a method
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
marcaCel,
bateriaCel,
tamanhoTela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1);
//Constructor function: same goal as the factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instantiating the object through the constructor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
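//Added example (not in the original lesson): one observable difference between
//the factory function and the constructor function is the prototype chain.
//Objects created with new Celular(...) are instances of Celular, while the
//factory function returns plain objects.
console.log(cel2 instanceof Celular); //true
console.log(cel1 instanceof Celular); //false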
//the dynamic nature of objects
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adds a property to the already existing object
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
//deleting properties and functions from the existing object:
delete mouse.trocarDPI;
delete mouse.velocidade;
console.log(mouse);
//cloning objects
//in this example the new object receives the already created phone object plus the temDigital property
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> generates a random number from 0 to 1
console.log(Math.random() * (10 - 1) + 1); // -> generates a random number from 1 to 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> returns the largest of the numbers passed as parameters
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> does the opposite of max, returns the smallest number listed
console.log(Math.pow(5,2)); // -> computes the first parameter raised to the power of the second parameter
//String: there is the primitive type and the object type
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> shows the number of characters in a string
console.log(primitivo[2]); // -> shows the character at the given index, remembering that it always starts at 0
console.log(primitivo.includes('Tipo')); // -> returns true, because the given string contains 'Tipo'
console.log(primitivo.includes('azul')); // -> returns false, because the given string does not contain 'azul'
console.log(primitivo.startsWith('Tipo')); // -> returns true, because the given string starts with 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> returns true, because the given string ends with 'primitivo'
console.log(primitivo.indexOf('v')); // -> returns the index where 'v' is
console.log(primitivo.replace('Tipo', ''));// -> replaces part of the String; in this case it becomes just 'primitivo'
primitivo.trim(); //-> removes unnecessary spaces at the start or end of the string
console.log(obj.split(' ')); //-> splits the string at each space it finds; in this case it shows 'Tipo' and 'objeto' as two separate items
//String with escape sequences
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String with a template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //filled with the current date and time
const data1 = new Date('March 06 2019 09:30'); //date and time defined when the constant is declared
const data2 = new Date(2019, 02, 06, 09, 30, 05); //date and time defined by numbers in the order: year, month, day, hour, minute, second and ms
data2.getFullYear(); //returns the year
data2.setFullYear(2021); //changes the year of this date
//Ways to convert to String:
data1.toDateString(); //returns just the date
|
true (i<5) and perform the increment
console.log(i);
}
let i = 0; //index
while (i < 5){
console.log(i);
|
conditional_block
|
aula-principal.js
|
amy[0]); //shows what is at position 0 of this array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//increment and decrement
console.log(idade++); // prints 16, because since the ++ comes after the display, it does not show the updated age
console.log(idade); //prints 17, because of the operation on the previous line
console.log(++idade); //prints 18, because it performs the operation before displaying
idade -= 2;
//equality operators
//strict equality (most recommended)
console.log(1 === 1); //prints true, because it is the same value and the same type
console.log('1' === 1) //prints false, because although the value is the same, they are of different types
//loose equality
console.log(1 == 1); //prints true because it has the same value
console.log('1' == 1); //prints true because it still has the same value
//ternary operator
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//that is, if the condition is true, the variable ex receives 'a', otherwise it receives 'b'
//logical operators
//AND operator (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//if both compared variables are true, the result is true, but if either of them is false the result is false; in this case it is false
console.log(podeAplicar); //will print false because possuiCarteira is false
//OR operator (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//if at least one of the compared variables is true, it returns true
console.log('candidato pode aplicar: ', podeAplicar); //will print true because maiorDeIdade is true
//NOT operator (!)
let candidatoRecusado = !podeAplicar;
//if podeAplicar is true, then candidatoRecusado receives false, but if podeAplicar is false, candidatoRecusado will be true
console.log('candidato recusado: ', candidatoRecusado);//returns false because the candidate can apply
//Conditionals: if... else and switch... case
numero = 263;
//This small piece of code checks whether the condition in parentheses is true; if so it runs what is in the if, otherwise it runs what is in the "else"
if(numero % 2 == 0){ //in this case it checks whether the variable numero is even
console.log('Par');
}
else if (numero == 0){//If it does not fall into the previous condition, it performs this check
console.log('Zero');
}
else{
console.log('ímpar');
}
//Loops: for, while, do... while, for... in and for... of
for (let i = 0; i < 5; i++){
//i = index, run while the condition is true (i<5) and perform the increment
console.log(i);
}
let i = 0; //index
while (i < 5){
console.log(i);
i++; //increment
}
//The only difference is that it runs at least once before checking the index
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: create a "standard" object more easily through a method
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
|
bateriaCel,
tamanhoTela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1);
//Constructor function: same goal as the factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instantiating the object through the constructor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//the dynamic nature of objects
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adds a property to the already existing object
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
//deleting properties and functions from the existing object:
delete mouse.trocarDPI;
delete mouse.velocidade;
console.log(mouse);
//cloning objects
//in this example the new object receives the already created phone object plus the temDigital property
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> generates a random number from 0 to 1
console.log(Math.random() * (10 - 1) + 1); // -> generates a random number from 1 to 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> returns the largest of the numbers passed as parameters
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> does the opposite of max, returns the smallest number listed
console.log(Math.pow(5,2)); // -> computes the first parameter raised to the power of the second parameter
//String: there is the primitive type and the object type
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> shows the number of characters in a string
console.log(primitivo[2]); // -> shows the character at the given index, remembering that it always starts at 0
console.log(primitivo.includes('Tipo')); // -> returns true, because the given string contains 'Tipo'
console.log(primitivo.includes('azul')); // -> returns false, because the given string does not contain 'azul'
console.log(primitivo.startsWith('Tipo')); // -> returns true, because the given string starts with 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> returns true, because the given string ends with 'primitivo'
console.log(primitivo.indexOf('v')); // -> returns the index where 'v' is
console.log(primitivo.replace('Tipo', ''));// -> replaces part of the String; in this case it becomes just 'primitivo'
primitivo.trim(); //-> removes unnecessary spaces at the start or end of the string
console.log(obj.split(' ')); //-> splits the string at each space it finds; in this case it shows 'Tipo' and 'objeto' as two separate items
//String with escape sequences
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String with a template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //filled with the current date and time
const data1 = new Date('March 06 2019 09:30'); //date and time defined when the constant is declared
const data2 = new Date(2019, 02, 06, 09, 30, 05); //date and time defined by numbers in the order: year, month, day, hour, minute, second and ms
data2.getFullYear(); //returns the year
data2.setFullYear(2021); //changes the year of this date
//Ways to convert to String:
data1.toDateString(); //returns just the date
|
marcaCel,
|
identifier_name
|
aula-principal.js
|
amy[0]); //shows what is at position 0 of this array
//function
let corSite = "azul";
function resetaCor(cor, tonalidade){
corSite = cor + ' ' + tonalidade;
return corSite;
};
console.log(corSite);
resetaCor('verde', 'escuro');
console.log(corSite);
//increment and decrement
console.log(idade++); // prints 16, because since the ++ comes after the display, it does not show the updated age
console.log(idade); //prints 17, because of the operation on the previous line
console.log(++idade); //prints 18, because it performs the operation before displaying
idade -= 2;
//equality operators
//strict equality (most recommended)
console.log(1 === 1); //prints true, because it is the same value and the same type
console.log('1' === 1) //prints false, because although the value is the same, they are of different types
//loose equality
console.log(1 == 1); //prints true because it has the same value
console.log('1' == 1); //prints true because it still has the same value
//ternary operator
let pontos = 100;
let tipo = pontos > 100 ? 'premium' : 'comum';
//let ex = (condicao) ? 'a' : 'b';
//that is, if the condition is true, the variable ex receives 'a', otherwise it receives 'b'
//logical operators
//AND operator (&&)
let maiorDeIdade = true;
let possuiCarteira = false;
let podeAplicar = maiorDeIdade && possuiCarteira;
//if both compared variables are true, the result is true, but if either of them is false the result is false; in this case it is false
console.log(podeAplicar); //will print false because possuiCarteira is false
//OR operator (||)
podeAplicar = maiorDeIdade || possuiCarteira;
//if at least one of the compared variables is true, it returns true
console.log('candidato pode aplicar: ', podeAplicar); //will print true because maiorDeIdade is true
//NOT operator (!)
let candidatoRecusado = !podeAplicar;
//if podeAplicar is true, then candidatoRecusado receives false, but if podeAplicar is false, candidatoRecusado will be true
console.log('candidato recusado: ', candidatoRecusado);//returns false because the candidate can apply
//Conditionals: if... else and switch... case
numero = 263;
//This small piece of code checks whether the condition in parentheses is true; if so it runs what is in the if, otherwise it runs what is in the "else"
if(numero % 2 == 0){ //in this case it checks whether the variable numero is even
console.log('Par');
}
else if (numero == 0){//If it does not fall into the previous condition, it performs this check
console.log('Zero');
}
else{
console.log('ímpar');
}
//Loops: for, while, do... while, for... in and for... of
for (let i = 0; i < 5; i++){
//i = index, run while the condition is true (i<5) and perform the increment
console.log(i);
}
let i = 0; //index
while (i < 5){
console.log(i);
i++; //increment
}
//The only difference is that it runs at least once before checking the index
i = 0;
do{
console.log(i);
i++;
}while(i < 5);
//For in
for(let chave in pessoa){
console.log(chave, pessoa[chave]);
}
//For of
for(let indice of camy){
console.log(indice);
}
//Factory Function: create a "standard" object more easily through a method
function criarCelular (marcaCel, bateriaCel, tamanhoTela){
return {
marcaCel,
bateriaCel,
tamanhoTela,
ligar(){
console.log('Fazendo ligação...');
}
}
}
const cel1 = criarCelular('Samsung A10', 5000, 5.5);
console.log(cel1);
//Constructor function: same goal as the factory function
function Celular (marcaCel, bateriaCel, tamanhoTela) {
this.marcaCel = marcaCel,
this.tamanhoTela = tamanhoTela,
this.bateriaCel = bateriaCel,
this.ligar = function() {
console.log('Fazendo ligação...');
}
}
//instantiating the object through the constructor
const cel2 = new Celular('Xiaomi Redmi', 70000, 6.0);
console.log(cel2);
//the dynamic nature of objects
const mouse = {
cor: 'preto',
marca: 'dazz'
}
mouse.velocidade = 5000; //adds a property to the already existing object
mouse.trocarDPI = function() {
console.log('Trocando DPI...');
}
|
console.log(mouse);
//cloning objects
//in this example the new object receives the already created phone object plus the temDigital property
const objCopia = Object.assign({temDigital: true}, cel1);
console.log(objCopia);
//Math
console.log(Math.random()); // -> generates a random number from 0 to 1
console.log(Math.random() * (10 - 1) + 1); // -> generates a random number from 1 to 10
console.log(Math.max(1000, 10, 3, 2, 1, 10000)); // -> returns the largest of the numbers passed as parameters
console.log(Math.min(1000, 10, 3, 2, 1, 10000)); // -> does the opposite of max, returns the smallest number listed
console.log(Math.pow(5,2)); // -> computes the first parameter raised to the power of the second parameter
//String: there is the primitive type and the object type
let primitivo = 'Tipo primitivo';
let obj = new String('Tipo objeto');
console.log(primitivo.length); // -> shows the number of characters in a string
console.log(primitivo[2]); // -> shows the character at the given index, remembering that it always starts at 0
console.log(primitivo.includes('Tipo')); // -> returns true, because the given string contains 'Tipo'
console.log(primitivo.includes('azul')); // -> returns false, because the given string does not contain 'azul'
console.log(primitivo.startsWith('Tipo')); // -> returns true, because the given string starts with 'Tipo'
console.log(primitivo.endsWith('primitivo')); // -> returns true, because the given string ends with 'primitivo'
console.log(primitivo.indexOf('v')); // -> returns the index where 'v' is
console.log(primitivo.replace('Tipo', ''));// -> replaces part of the String; in this case it becomes just 'primitivo'
primitivo.trim(); //-> removes unnecessary spaces at the start or end of the string
console.log(obj.split(' ')); //-> splits the string at each space it finds; in this case it shows 'Tipo' and 'objeto' as two separate items
//String with escape sequences
let msg = 'Bom dia '+ nome +'. \nEssa é minha \'mensagem\'';
//String with a template literal
let msgL = `Bom dia ${nome}.
Essa é minha 'mensagem'`
//Date
const dataAtual = new Date(); //filled with the current date and time
const data1 = new Date('March 06 2019 09:30'); //date and time defined when the constant is declared
const data2 = new Date(2019, 02, 06, 09, 30, 05); //date and time defined by numbers in the order: year, month, day, hour, minute, second and ms
data2.getFullYear(); //returns the year
data2.setFullYear(2021); //changes the year of this date
//Ways to convert to String:
data1.toDateString(); //returns just the date in
|
//deleting properties and functions from the existing object:
delete mouse.trocarDPI;
delete mouse.velocidade;
|
random_line_split
|
tree_estimator.py
|
"max_len" : FLAGS.max_len,
"num_rels" : 53,
"batch_size" : FLAGS.batch_size,
"l2_coef" : 1e-4,
}
def load_vocab():
vocab_file = os.path.join(FLAGS.out_dir, FLAGS.vocab_file)
vocab = []
vocab2id = {}
with open(vocab_file) as f:
for id, line in enumerate(f):
token = line.strip()
vocab.append(token)
vocab2id[token] = id
tf.logging.info("load vocab, size: %d" % len(vocab))
return vocab, vocab2id
def load_relation():
path = os.path.join(FLAGS.data_dir, FLAGS.relation_file)
relations = []
relation2id = {}
with open(path) as f:
for line in f:
parts = line.strip().split()
rel, id = parts[0], int(parts[1])
relations.append(rel)
relation2id[rel] = id
tf.logging.info("load relation, relation size %d" % len(relations))
return relations, relation2id
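# Added sketch (assumption, not in the original file): based on the parsing above,
# the relation file is expected to contain one "<relation> <id>" pair per line.
# Typical use of the two loaders:
#
#   vocab, vocab2id = load_vocab()
#   relations, relation2id = load_relation()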
def _parse_example(example_proto):
context_features = {
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'bag_size': tf.FixedLenFeature([], tf.int64),}
sequence_features = {
# "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "seq_len": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"tokens": tf.VarLenFeature(dtype=tf.int64),
"children": tf.VarLenFeature(dtype=tf.int64),
"e1_dist": tf.VarLenFeature(dtype=tf.int64),
"e2_dist": tf.VarLenFeature(dtype=tf.int64),
"seq_len": tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=example_proto,
context_features=context_features,
sequence_features=sequence_features)
# e1 = context_parsed['e1']
# e2 = context_parsed['e2']
label = context_parsed['label']
bag_size = context_parsed['bag_size']
tokens = sequence_parsed['tokens']
children = sequence_parsed['children']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = sequence_parsed['seq_len']
# tokens = tf.sparse_tensor_to_dense(tokens)
# children = tf.sparse_tensor_to_dense(children)
# e1_dist = tf.sparse_tensor_to_dense(e1_dist)
# e2_dist = tf.sparse_tensor_to_dense(e2_dist)
# seq_len = tf.sparse_tensor_to_dense(seq_len)
return label, bag_size, tokens, e1_dist, e2_dist, seq_len, children
def batch_sparse_idx(n_sent, seq_len, n_channel=1):
'''
[ [ 0 0] [ 0 1] [ 0 2] [ 0 3] [ 0 4] [ 0 5]
[ 1 0] [ 1 1] [ 1 2] [ 1 3] [ 1 4] [ 1 5] [ 1 6] [ 1 7]
]
'''
idx0 = tf.constant([], dtype=tf.int64)
idx1 = tf.constant([], dtype=tf.int64)
i = tf.constant(0, dtype=tf.int64)
shape_invariants=[i.get_shape(), tf.TensorShape([None]),tf.TensorShape([None])]
def body(i, a, b):
length = seq_len.values[i]
a = tf.concat([a, i*tf.ones([tf.cast(length*n_channel, tf.int32)], dtype=tf.int64)], axis=0)
b = tf.concat([b, tf.range(length*n_channel, dtype=tf.int64)], axis=0)
return i+1, a, b
_, idx0, idx1 = tf.while_loop(lambda i, a, b: i<n_sent,
body, [i, idx0, idx1], shape_invariants)
idx = tf.stack([idx0,idx1], axis=-1)
return idx
def _parse_batch_sparse(*args):
labels, bag_size, tokens, e1_dist, e2_dist, seq_len, children=args
n_sent = tf.reduce_sum(bag_size)
max_len = tf.reduce_max(seq_len.values)
# reshape 2d tensor: tokens, e1_dist, e2_dist
idx2d = batch_sparse_idx(n_sent, seq_len)
dense_shape_2d = [n_sent, max_len]
tokens = tf.SparseTensor(idx2d, tokens.values, dense_shape_2d)
e1_dist = tf.SparseTensor(idx2d, e1_dist.values, dense_shape_2d)
e2_dist = tf.SparseTensor(idx2d, e2_dist.values, dense_shape_2d)
# map sparse tensor to 2d dense tensor
tokens = tf.sparse_tensor_to_dense(tokens) # [n_sent, len]
e1_dist = tf.sparse_tensor_to_dense(e1_dist) # [n_sent, len]
e2_dist = tf.sparse_tensor_to_dense(e2_dist) # [n_sent, len]
# reshape 3d tensor: children
idx3d = batch_sparse_idx(n_sent, seq_len, n_channel=FLAGS.max_children)
dense_shape_3d = [n_sent, max_len*FLAGS.max_children]
children = tf.SparseTensor(idx3d, children.values, dense_shape_3d)
# map sparse tensor to 3d dense tensor
children = tf.sparse_tensor_to_dense(children) # [n_sent, len*n_channel]
shape2d = tf.shape(tokens)
children = tf.reshape(children, [shape2d[0], shape2d[1], FLAGS.max_children])
# idx to restore bag
bag_idx = tf.scan(lambda a, x: a+x, tf.pad(bag_size, [[1,0]]))
bag_idx = tf.cast(bag_idx, tf.int32)
features = bag_size, bag_idx, seq_len.values, tokens, e1_dist, e2_dist, children
return features, labels
def _input_fn(filenames, epochs, batch_size, shuffle=False):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_example) # Parse the record into tensors.
if shuffle:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(_parse_batch_sparse)
#iterator = dataset.make_initializable_iterator()
#batch_data = iterator.get_next()
return dataset
def train_input_fn():
"""An input function for training"""
# Initialize `iterator` with training data.
train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]
return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)
def test_input_fn():
test_filenames = [os.path.join(FLAGS.out_dir, FLAGS.test_records) ]
return _input_fn(test_filenames, 1, FLAGS.batch_size, shuffle=False)
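# Added sketch (assumption, not in the original file): how these input functions
# would typically be wired into a TF 1.x Estimator. `model_fn` and `params` are
# assumed to be defined elsewhere in this file.
#
#   estimator = tf.estimator.Estimator(model_fn=model_fn,
#                                      model_dir=FLAGS.out_dir,
#                                      params=params)
#   estimator.train(input_fn=train_input_fn)
#   estimator.evaluate(input_fn=test_input_fn)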
class PatTopKHook(tf.train.SessionRunHook):
def __init__(self, prob_tensor, labels_tensor):
self.prob_tensor = prob_tensor
self.labels_tensor = labels_tensor
self.all_prob=[]
self.all_labels = []
def before_run(self, run_context):
|
def after_run(self, run_context, run_values):
prob, label = run_values.results
self.all_prob.append(prob)
self.all_labels.append(label)
def end(self, session):
all_prob = np.concatenate(self.all_prob, axis=0)
all_labels = np.concatenate(self.all_labels,axis=0)
np.save('prob.npy', all_prob)
np.save('labels.npy', all_labels)
tf.logging.info('save results to .npy file')
bag_size, num_class = all_prob.shape
mask = np.ones([num_class])
mask[0]=0
mask_prob = np.reshape(all_prob*mask, [-1])
idx_prob = mask_prob.argsort()
one_hot_labels = np.zeros([bag_size, num_class])
one_hot_labels[np.arange(bag_size), all_labels] = 1
one_hot_labels = np.reshape(one_hot_labels, [-1])
idx = idx_prob[-100:][::-1]
p100 = np.mean(one_hot_labels[idx])
idx = idx_prob[-200:][::-1]
p200 = np.mean(one_hot_labels[idx])
idx = idx_prob[-500:][::-1]
p500 = np.mean(one_hot_labels[idx])
tf.logging.info("p@100: %.3f p@200
|
return tf.train.SessionRunArgs([self.prob_tensor, self.labels_tensor])
|
identifier_body
|
tree_estimator.py
|
"max_len" : FLAGS.max_len,
"num_rels" : 53,
"batch_size" : FLAGS.batch_size,
"l2_coef" : 1e-4,
}
def load_vocab():
vocab_file = os.path.join(FLAGS.out_dir, FLAGS.vocab_file)
vocab = []
vocab2id = {}
with open(vocab_file) as f:
for id, line in enumerate(f):
token = line.strip()
vocab.append(token)
vocab2id[token] = id
tf.logging.info("load vocab, size: %d" % len(vocab))
return vocab, vocab2id
def load_relation():
path = os.path.join(FLAGS.data_dir, FLAGS.relation_file)
relations = []
relation2id = {}
with open(path) as f:
for line in f:
parts = line.strip().split()
rel, id = parts[0], int(parts[1])
relations.append(rel)
relation2id[rel] = id
tf.logging.info("load relation, relation size %d" % len(relations))
return relations, relation2id
def _parse_example(example_proto):
context_features = {
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'bag_size': tf.FixedLenFeature([], tf.int64),}
sequence_features = {
# "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
# "seq_len": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"tokens": tf.VarLenFeature(dtype=tf.int64),
"children": tf.VarLenFeature(dtype=tf.int64),
"e1_dist": tf.VarLenFeature(dtype=tf.int64),
"e2_dist": tf.VarLenFeature(dtype=tf.int64),
"seq_len": tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=example_proto,
context_features=context_features,
sequence_features=sequence_features)
# e1 = context_parsed['e1']
# e2 = context_parsed['e2']
label = context_parsed['label']
bag_size = context_parsed['bag_size']
tokens = sequence_parsed['tokens']
children = sequence_parsed['children']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = sequence_parsed['seq_len']
# tokens = tf.sparse_tensor_to_dense(tokens)
# children = tf.sparse_tensor_to_dense(children)
# e1_dist = tf.sparse_tensor_to_dense(e1_dist)
# e2_dist = tf.sparse_tensor_to_dense(e2_dist)
# seq_len = tf.sparse_tensor_to_dense(seq_len)
return label, bag_size, tokens, e1_dist, e2_dist, seq_len, children
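# Writer-side sketch (an assumption, not taken from this repo): one possible way to
# serialize a record that the VarLenFeature schema above can parse. Real records bag
# several sentences per entity pair; values here are placeholders.
def make_sequence_example(label, tokens, children, e1_dist, e2_dist):
    def int64_feature(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
    context = tf.train.Features(feature={
        'e1': int64_feature([0]),        # placeholder entity ids
        'e2': int64_feature([0]),
        'label': int64_feature([label]),
        'bag_size': int64_feature([1]),  # a single-sentence bag in this sketch
    })
    feature_lists = tf.train.FeatureLists(feature_list={
        'tokens': tf.train.FeatureList(feature=[int64_feature(tokens)]),
        'children': tf.train.FeatureList(feature=[int64_feature(children)]),
        'e1_dist': tf.train.FeatureList(feature=[int64_feature(e1_dist)]),
        'e2_dist': tf.train.FeatureList(feature=[int64_feature(e2_dist)]),
        'seq_len': tf.train.FeatureList(feature=[int64_feature([len(tokens)])]),
    })
    return tf.train.SequenceExample(context=context, feature_lists=feature_lists)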
def batch_sparse_idx(n_sent, seq_len, n_channel=1):
'''
Build the flat 2-D indices of a ragged batch: sentence i contributes
seq_len[i]*n_channel index pairs [i, 0], [i, 1], ..., e.g. for lengths 6 and 8:
[ [ 0 0] [ 0 1] [ 0 2] [ 0 3] [ 0 4] [ 0 5]
[ 1 0] [ 1 1] [ 1 2] [ 1 3] [ 1 4] [ 1 5] [ 1 6] [ 1 7]
]
'''
idx0 = tf.constant([], dtype=tf.int64)
idx1 = tf.constant([], dtype=tf.int64)
i = tf.constant(0, dtype=tf.int64)
shape_invariants=[i.get_shape(), tf.TensorShape([None]),tf.TensorShape([None])]
def body(i, a, b):
length = seq_len.values[i]
a = tf.concat([a, i*tf.ones([tf.cast(length*n_channel, tf.int32)], dtype=tf.int64)], axis=0)
b = tf.concat([b, tf.range(length*n_channel, dtype=tf.int64)], axis=0)
return i+1, a, b
_, idx0, idx1 = tf.while_loop(lambda i, a, b: i<n_sent,
body, [i, idx0, idx1], shape_invariants)
idx = tf.stack([idx0,idx1], axis=-1)
return idx
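# A loop-free equivalent, offered only as an untested sketch (assumes TF 1.x APIs); the
# while_loop version above stays the authoritative one. n_sent is kept for signature parity
# even though the mask already encodes it.
def batch_sparse_idx_v2(n_sent, seq_len, n_channel=1):
    lengths = tf.cast(seq_len.values, tf.int32) * n_channel
    mask = tf.sequence_mask(lengths, tf.reduce_max(lengths))  # [n_sent, max_len*n_channel]
    return tf.where(mask)  # int64 [nnz, 2] in row-major order, matching the loop above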
utils.js
function tryLogin() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d == true) {
redirectIndex();
}
});
}
function tryLogout() {
$geta("/isUserLogined.htm?r=" + Math.random(), function (d) {
if (d != true) {
redirectLogin();
}
});
}
function validateLogin(d, fun) {
var _d;
if ("string" == typeof d) {
_d = JSON.parse(d);
} else {
_d = d;
}
if (_d.code == -999) {
redirectLogin();
return;
}
fun(_d);
}
function $get0(url, param, fun, async) {
if (fun == undefined) {
fun = param;
}
$.ajax({
cache: false,
type: "post",
url: ctxPath + url,
data: param,
async: async,
success: function (d) {
validateLogin(d, fun);
},
error: function (r) {
errorMsgbox("请求超时"); // "request timed out"
}
});
}
// synchronous request (blocking)
function $get(url, param, fun) {
$get0(url, param, fun, false);
}
// asynchronous request
function $geta(url, param, fun) {
$get0(url, param, fun, true);
}
function createCombobox(pid, id, list, cl) {
if (!cl) {
cl = "sel w150";
}
var p = "<select id='" + id + "' name='" + id + "' class='" + cl + "'>";
if (list.length > 0) {
p += "<option></option>";
for (var i in list) {
var item = list[i];
p += "<option value='" + item["VALUE"] + "'>" + item["NAME"] + "</option>"
}
} else {
p += "<option></option>";
}
p += "</select>";
$("#" + pid).html("");
$("#" + pid).append(p);
}
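// Added notes on createTable (inferred from the code below, not original documentation):
//   headers   - array of "Label:FIELD" or "Label:FIELD:widthPx" strings
//   buttonsFn - either an array of "Label:type:cssClass" strings, or a function(item)
//               returning such an array for each row
//   callbacks - map from "type" to a click handler, invoked with the row's cid value
//   cid       - property used for the buttons' data-id attribute (defaults to "ID")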
function createTable(pid, id, headers, buttonsFn, callbacks, list, cid) {
var buttons;
var hasButtons;
var isButtonsFn = false;
if ((typeof buttonsFn) == "function") {
hasButtons = true;
isButtonsFn = true;
} else if (buttonsFn.length > 0 && (typeof buttonsFn[0] == "string")) {
hasButtons = true;
buttons = buttonsFn;
}
if (!cid) {
cid = "ID";
}
var headersMap = [];
var p = "<table id=" + id + " class='showtablelist' style='width: 90%'>";
p += "<thead>";
p += "<tr>";
for (var i in headers) {
var header = headers[i];
var arr = header.split(":");
var width = "auto;";
if (arr.length == 2) {
headersMap.push(arr[1]);
} else if (arr.length == 3) {
headersMap.push(arr[1]);
width = arr[2] + "px;"
}
p += "<th nowrap='nowrap' style='width:" + width + "'>" + arr[0] + "</th>";
}
p += "</tr>";
p += "</thead>";
p += "<tbody>";
if (list.length > 0) {
for (var i in list) {
p += "<tr>";
var item = list[i];
if (hasButtons) {
if (isButtonsFn) {
buttons = buttonsFn(item);
}
var buttonsMap = [];
for (var k in buttons) { // use a separate index so the outer row loop's "i" is not clobbered
buttonsMap.push(buttons[k].split(":"));
}
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
p += "<td nowrap='nowrap'>";
for (var j in buttonsMap) {
p += "<a href='javascript:void(0);' data-id='" + item[cid] + "' data-type='" + buttonsMap[j][1];
p += "' class='" + buttonsMap[j][2] + "'>" + buttonsMap[j][0] + "</a> ";
}
p += "</td>";
} else {
for (var j in headersMap) {
p += "<td nowrap='nowrap'>" + (item[headersMap[j]] || "") + "</td>";
}
}
p += "</tr>";
}
$("#paging").show();
} else {
var noDataTip = ctxPath + "/static/images/noDataTip.jpg";
p += "<tr>";
p += "<td class='tc' colspan='10'><img class='vm' src='" + noDataTip + "'/></td>";
p += "</tr>";
$("#paging").hide();
}
p += "</tbody>";
p += "</table>";
p += "<br>";
$("#" + pid).html("");
$("#" + pid).append(p);
$("#" + pid).find("a").click(function () {
callbacks[$(this).data("type")].call(this, $(this).data("id"));
});
}
function createPage(url, param, callback) {
if (param != null) {
param = encodeURI(obj2httpParam(param));
}
$("#paging").myPagination({
cssStyle: 'bspagination',
currPage: 1,
pageNumber: 10,
ajax: {
on: true,
type: "POST",
url: ctxPath + url,
dataType: "json",
param: param,
ajaxStart: function () {
ZENG.msgbox.show(" 正在加载中,请稍后...", 6, 10000);
},
onClick: function (page) {
$.fn.debug(page);
},
ajaxStop: function () {
setTimeout(function () {
ZENG.msgbox.hide();
}, 1);
},
callback: function (d) {
validateLogin(d, callback);
}
}
});
}
function validateNotNull(param) {
for (var i in param) {
if (param[i] == '' || param[i] == null) {
errorMsgbox(i);
return false;
}
}
return true;
}
function validateNotNullForm(form) {
var v = true;
$("#" + form).find("input,select").each(function () {
if (this.type == "button") {
return;
}
if ($(this).attr("hint") != "none") { // original "!attr == 'none'" always compared false, skipping validation
if (!$(this).val()) {
v = false;
errorMsgbox(this.name || this.id);
return false;
}
}
});
return v;
}
function mapList(list, id) {
if (!id) {
id = "ID";
}
var map = {};
for (var i in list) {
var item = list[i];
map[item[id]] = item;
}
return map;
}
function getInputValues(id) {
var map = {};
$("#" + id).find("input,select").each(function () {
if (this.type == "button") {
return;
}
map[this.name || this.id] = $(this).val();
});
return map;
}
function setInputValues(id, data, mapping) {
// $("#"+id).find("input").each(function(){
// if(this.type == "button"){
// return;
// }
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// this.value = "";
// return;
// }
// this.value = v;
// });
// $("#"+id).find("select").each(function(){
// var v = mapping == null ? data[this.name] : data[mapping[this.name]];
// if(v == null || v == undefined){
// $(this).val("");
// return;
// }
// $(this).val(v);
// });
$("#" + id).find("input,select,textarea").each(function () {
var n = this.name || this.id;
var v = mapping == null ? data[n] : data[mapping[n]];
if (v == null || v == undefined) {
$(this).val("");
return;
}
$(this).val(v);
});
}
function errorMsgbox(msg) {
sysMsgbox(msg, true);
}
function msgbox(msg) {
sysMsgbox(msg, false);
}
function sysMsgbox(msg, error) {
var el = $("#sys-msgbox");
if (el.length == 0) {
$("body").append("<div id='sys-msgbox' title='系统提示' style='display:none;'></div>");
el = $("#sys-msgbox");
}
var color = error == true ? "red" : "black";
el.css("color", color);
el.html(msg);
setTimeout(function () {
el.dialog({
resizable: false,
height: "auto",
width: 400,
modal: true,
buttons: {
"关 闭": function () {
$(this).dialog("close");
}
utils.js
Date.prototype.format = function (fmt) { // author: meizz (header reconstructed; original opening line truncated in this excerpt)
var o = {
"M+": this.getMonth() + 1, // 月份
"d+": this.getDate(), // 日
"h+": this.getHours(), // 小时
"m+": this.getMinutes(), // 分
"s+": this.getSeconds(), // 秒
"q+": Math.floor((this.getMonth() + 3) / 3), // 季度
"S": this.getMilliseconds()
// 毫秒
};
if (/(y+)/.test(fmt))
fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "")
.substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt))
fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k])
: (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
}
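// Typical call (assumed): new Date().format("yyyy-MM-dd hh:mm:ss")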
function createSelect($select, data) {
$select.empty();
for (var i = 0; i < data.length; i++) {
var d = data[i];
var option = $("<option>" + d.TEXT + "</option>").val(d.VALUE);
$select.append(option);
}
}
function getRootParent() {
var _this = window;
var _parent = window.parent;
for (; _this != _parent;) {
_this = _parent;
_parent = _this.parent;
}
return _parent;
}
function redirectLogin() {
var expect = ctxPath + "/login.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
function redirectIndex() {
var expect = ctxPath + "/index.html";
var location = getRootParent().location;
if (location.pathname != expect) {
location.href = expect;
}
}
bikeshare.py
restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
if restart_month == 'y':
get_filters()
else:
exit()
# Get user input for day of the week
global day
day=()
day_choice = input("which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
day_choice = day_choice.lower()
if day_choice in valid_days:
day = day_choice
print ('For days, you have selected {}'.format(day))
else:
print('This does not seem to be a valid choice!')
restart_days = input("Do you wish to repick filters? y/n?\n").lower()
if restart_days == 'y':
get_filters()
else:
exit()
print('-'*40)
return city, month, day
def load_data(city, month, day):
# load data file into a dataframe
global df
df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
# convert the Start Time and end Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
# extract month and day of week from Start Time to create new columns
df['Start_Hour'] = df['Start Time'].dt.hour
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name  # .weekday_name was removed in newer pandas; .dt.day_name() is the modern equivalent
df['Start Time'] = df['Start Time'].dt.time
df['End Time'] = df['End Time'].dt.time
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
def time_stats(df):
#Displays statistics on the most frequent times of travel.
print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display the most common month
most_common_month = df['month'].mode()[0]
print('Most Common month: \n', most_common_month)
#display the most common day of week
most_common_day = df['day_of_week'].mode()[0]
print('Most Common Day: \n', most_common_day)
#display the most common start hour
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:\n', most_common_start_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
#Displays statistics on the most popular stations and trip.
print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print('Most Common Start Station:{}\n'.format(most_common_start_station))
#print('Most Common Start Hour:', most_common_start_hour)
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:{}: '.format(most_common_start_hour))
#display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print('Most Common End Station:{}: '.format(most_common_end_station))
#display most frequent combination of start station and end station trip
time_delay_short()
most_common_start_end_station = df[['Start Station', 'End Station']].mode(0)
print('Most Common Start and End Station: \n',most_common_start_end_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
#Displays statistics on the total and average trip duration.
print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# TO DO: display total travel time
Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ' , display_time(Total_travel_time))
time_delay_short()
# TO DO: display mean travel time
Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ', display_time(Mean_travel_time))
time_delay_short()
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
#Displays statistics on bikeshare users.
print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# Display counts of user type
x = 'User Type'
print('\nCount of User Type:\n',df[x].value_counts())
time_delay_short()
# Display counts of gender
y = 'Gender'
print('\nCount of Gender:\n',df[y].value_counts())
# Display earliest, most recent, and most common year of birth
z = 'Birth Year'
currentYear = datetime.now().year
oldest_biker = currentYear - df[z].min()
print('\nOldest User is {} years old!'.format(oldest_biker))
print('Wow that\'s old!')
youngest_biker = currentYear - df[z].max()
print('\nYoungest User is {} years old!'.format(youngest_biker))
print('Wow that\'s young!')
common_year = currentYear - df[z].mode()[0]  # take the modal value, as done for the other mode() uses above
print('\nMost common age of users in data set is {} years old'.format(str(common_year)))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def display_raw_data():
# get user input whether to displays cycle through 5 rows of data
raw_data_display = input("Would you like to see 5 records of the data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
if raw_data_display != 'pass':
i = 5
while raw_data_display !='pass':
print(df.iloc[i-5:i, :])
raw_data_display = input("Would you like to see the next 5 records of raw data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
i = i + 5
else:
print("....skipping ahead to descriptive stats\n")
def drop_na_values():
global df
# get number of rows in dataframe
numOfRows = df.shape[0]
print('\nThe raw data set is {} rows long!\n'.format(numOfRows))
time_delay_short()
print('\nAnalyzing for number of blank fields in the raw dataset...\n')
time_delay_short()
nan_count = df.isnull().sum()
print ('\nNumber of blank fields of each column in our dataset:\n', nan_count)
time_delay_short()
count_of_non_nan = df.count()
print ('\nCount of number of completed fields in our data set:\n', count_of_non_nan)
print ('\nWe will now drop the rows with blanks from the dataset so that the calculated statistics will not be skewed...\n')
df.dropna(axis = 0, inplace = True)
time_delay_short()
numOfRows = df.shape[0]
print('\nThe modified data set is now {} rows long!'.format(numOfRows))
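# Driver sketch (an assumption; the script's real main() is outside this excerpt). It only
# illustrates how the functions above are typically chained together.
def _sketch_main():
    while True:
        city, month, day = get_filters()
        load_data(city, month, day)
        drop_na_values()
        display_raw_data()
        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        if input('\nWould you like to restart? Enter yes or no.\n').lower() != 'yes':
            break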
#def time_delay_long():
#to add time delay to slow down the bombard of text to the user (and for fun!)
# time.sleep(1)
# print('...executing task...')
# time.sleep(2)
# print('.........................Complete!\n')
# time.sleep(1)
def time_delay_short():
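    # Assumed reconstruction: the original body is elided from this excerpt; it simply
    # mirrors the commented-out time_delay_long() pattern above with shorter pauses.
    time.sleep(0.5)
    print('...executing task...')
    print('.........................Complete!\n')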
bikeshare.py
* 24 * 7 * 30
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
#function to convert seconds to years,months,weeks,days,hours,seconds
def display_time(seconds, granularity=6):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
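# Quick sanity check (assumed, not from the original script), taking the elided leading
# intervals (years/months) to contribute zero here:
#   display_time(90061)  ->  '1 day, 1 hour, 1 minute, 1 second'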
def get_filters():
#City Choice Input
city_choice = input("Which city are you interested in?\n\nChoose a city by entering the corresponding number:\n1 for Chicago or\n2 for New York city or\n3 for Washington?")
global city
if city_choice == '1':
city = 'chicago'
print('You have chosen Chicago!\n')
elif city_choice == '2':
city = 'new york city'
print('You have chosen New York City!\n')
elif city_choice == '3':
city = 'washington'
print('You have chosen Washington!\n')
else:
print('This does not seem to be a valid choice!')
restart = input("Do you wish to reselect filters? y/n?\n").lower()
if restart == 'y':
get_filters()
else:
exit()
# TO DO: get user input for month (all, january, february, ... , june)
# Month Choice Input
global month
month =()
month_choice = input("Which month are you interested in?\n\nChoose a month by entering the following choices:\n (all, january, february, march, april, may, june) ")
valid_months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
month_choice = month_choice.lower()
if month_choice in valid_months:
month = month_choice
print ('For months, you have selected {}'.format(month))
else:
print('This does not seem to be a valid choice!')
restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
if restart_month == 'y':
get_filters()
else:
exit()
# Get user input for day of the week
global day
day=()
day_choice = input("Which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
day_choice = day_choice.lower()
if day_choice in valid_days:
day = day_choice
print ('For days, you have selected {}'.format(day))
else:
print('This does not seem to be a valid choice!')
restart_days = input("Do you wish to repick filters? y/n?\n").lower()
if restart_days == 'y':
get_filters()
else:
exit()
print('-'*40)
return city, month, day
def load_data(city, month, day):
# load data file into a dataframe
global df
df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
# convert the Start Time and end Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
# extract month and day of week from Start Time to create new columns
df['Start_Hour'] = df['Start Time'].dt.hour
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.day_name() # .dt.weekday_name was removed in newer pandas versions
df['Start Time'] = df['Start Time'].dt.time
df['End Time'] = df['End Time'].dt.time
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week'] == day.title()]
return df
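# Example wiring for get_filters()/load_data(). CITY_DATA is defined near the top of the original
# script and is not part of this excerpt, so the mapping below is only an assumed sketch using the
# usual bikeshare CSV file names:
#   CITY_DATA = {'chicago': 'chicago.csv',
#                'new york city': 'new_york_city.csv',
#                'washington': 'washington.csv'}
#   get_filters()                      # sets the city/month/day globals
#   df = load_data(city, month, day)   # e.g. load_data('chicago', 'march', 'friday')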
def time_stats(df):
#Displays statistics on the most frequent times of travel.
print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display the most common month
most_common_month = df['month'].mode()[0]
print('Most Common month: \n', most_common_month)
#display the most common day of week
most_common_day = df['day_of_week'].mode()[0]
print('Most Common Day: \n', most_common_day)
#display the most common start hour
most_common_start_hour = df['Start_Hour'].mode()[0]
print('Most Common Start Hour:\n', most_common_start_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df):
#Displays statistics on the most popular stations and trip.
print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
start_time = time.time()
time_delay_short()
#display most commonly used start station
most_common_start_station = df['Start Station'].mode()[0]
print('Most Common Start Station:{}\n'.format(most_common_start_station))
#display most commonly used end station
most_common_end_station = df['End Station'].mode()[0]
print('Most Common End Station:{}: '.format(most_common_end_station))
#display most frequent combination of start station and end station trip
time_delay_short()
most_common_start_end_station = df.groupby(['Start Station', 'End Station']).size().idxmax() # most frequent (start, end) pair; .mode(0) only gives per-column modes
print('Most Common Start and End Station trip: \n', most_common_start_end_station)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
#Displays statistics on the total and average trip duration.
print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# TO DO: display total travel time
Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ' , display_time(Total_travel_time))
time_delay_short()
# TO DO: display mean travel time
Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
print('is... ', display_time(Mean_travel_time))
time_delay_short()
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
#Displays statistics on bikeshare users.
print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
time_delay_short()
start_time = time.time()
# Display counts of user type
x = 'User Type'
print('\nCount of User Type:\n',df[x].value_counts())
time_delay_short()
# Display counts of gender (the Washington data set has no Gender column)
if 'Gender' in df.columns:
print('\nCount of Gender:\n', df['Gender'].value_counts())
else:
print('\nNo gender data available for this city.')
# Display earliest, most recent, and most common year of birth (also missing for Washington)
if 'Birth Year' in df.columns:
currentYear = datetime.now().year
oldest_biker = currentYear - df['Birth Year'].min()
youngest_biker = currentYear - df['Birth Year'].max()
most_common_birth_year = int(df['Birth Year'].mode()[0])
print('\nOldest User is {} years old!'.format(int(oldest_biker)))
print('Youngest User is {} years old!'.format(int(youngest_biker)))
print('Most Common Birth Year is {}!'.format(most_common_birth_year))
else:
print('\nNo birth year data available for this city.')
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
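# The script's entry point is not included in this excerpt; a typical (hypothetical) main loop
# tying the functions above together would look like:
#   def main():
#       while True:
#           get_filters()
#           load_data(city, month, day)
#           time_stats(df)
#           station_stats(df)
#           trip_duration_stats(df)
#           user_stats(df)
#           if input('\nWould you like to restart? y/n\n').lower() != 'y':
#               break
#   if __name__ == "__main__":
#       main()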
MyGraph.py
def degree(self, v): #number of edges entering or leaving v, i.e. incident edges
#self.get_adjacents(v) -> successors plus predecessors, giving the full adjacency list
return len(self.get_adjacents(v)) #count the nodes in the adjacency list
def all_degrees(self, deg_type = "inout"): #everything that leaves plus everything that enters
''' Computes the in, out (or both) degrees for every node of the network.
deg_type can be "in", "out", or "inout" '''
degs = {}
for v in self.graph.keys(): #for every key (node) in the graph
if deg_type == "out" or deg_type == "inout": #out degrees, or both
degs[v] = len(self.graph[v]) #initialise the entry with the number of successors (out degree)
else: degs[v] = 0
if deg_type == "in" or deg_type == "inout": #in degrees, or both
for v in self.graph.keys(): #for every key (metabolite or reaction) in the graph
for d in self.graph[v]: #for every successor of v
if deg_type == "in" or v not in self.graph[d]: #"in", or v is not a successor of d
#-> if v is not among d's successors, the edge only counts as incoming (predecessor), not outgoing
degs[d] = degs[d] + 1 #add 1 to d's entry in the degs dictionary
return degs #returns every key with its degree (in + out)
def highest_degrees(self, all_deg= None, deg_type = "inout", top= 10): #top 10 by degree
'''Returns the top 10 nodes with the highest degree'''
if all_deg is None: #no degrees supplied
all_deg = self.all_degrees(deg_type) #fetch the dictionary from all_degrees
ord_deg = sorted(list(all_deg.items()), key=lambda x : x[1], reverse = True)
#sort the dictionary from the largest to the smallest degree; .items() turns it into a list of
#(key, value) tuples, which is what makes sorting by degree possible
return list(map(lambda x:x[0], ord_deg[:top])) #returns a list with the top 10 nodes
#x[0]-> key; x[1]-> value
## topological metrics over degrees
def mean_degree(self, deg_type = "inout"): #mean of the degrees
degs = self.all_degrees(deg_type) #in/out (or both) degrees for every node of the network
return sum(degs.values()) / float(len(degs)) #sum of all values divided by the number of nodes in the graph
def prob_degree(self, deg_type = "inout"): #probability of each degree occurring in the graph
'''For each degree, how many nodes have it'''
degs = self.all_degrees(deg_type) #in/out (or both) degrees for every node of the network
res = {} #open the dictionary
for k in degs.keys(): #go through every key of degs
if degs[k] in res.keys(): #check whether this degree is already a key of res
res[degs[k]] += 1 #add 1 to that entry of res
else: #otherwise
res[degs[k]] = 1 #add that degree to the dictionary with value 1
#degs[k] = key and res[degs[k]] = value
for k in res.keys():
res[k] /= float(len(degs)) #probability of each degree
return res
'''EXAMPLE:
c = {'a':1,'b':1,'c':3}
v = {}
for k in c.keys():
if c[k] in v.keys():
v[c[k]] += 1
else:
v[c[k]] = 1
v = {1: 2, 3: 1} '''
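# Worked example of all_degrees()/prob_degree() on a small made-up graph (assuming self.graph is
# a dict of {node: list of successors}, as the methods above use it):
#   g = {1: [2, 3], 2: [3], 3: [], 4: [1]}
#   all_degrees("out")   -> {1: 2, 2: 1, 3: 0, 4: 1}
#   all_degrees("inout") -> {1: 3, 2: 2, 3: 2, 4: 1}
#   prob_degree("inout") -> {3: 0.25, 2: 0.5, 1: 0.25}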
## BFS and DFS searches
def reachable_bfs(self, v):
'''Starts at the source node, then explores all of its successors,
then their successors, and so on until every reachable node
has been explored'''
'''top to bottom'''
l = [v] #starts at the source node
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node) #if the node is not v itself, add it to res
for elem in self.graph[node]: #look at the successors of node
if elem not in res and elem not in l and elem != node: #not seen yet and not a self-loop
l.append(elem) #add it to the queue
return res
def reachable_dfs(self, v):
'''Starts at the source node and explores its first successor,
then that node's first successor, and so on until there are no
more successors and it has to backtrack'''
'''left to right'''
l = [v] #starts at the source node
res = []
while len(l) > 0:
node = l.pop(0)
if node != v: res.append(node)
s = 0 #insertion position
for elem in self.graph[node]:
if elem not in res and elem not in l:
l.insert(s, elem) #s = position, elem = element
s += 1
return res
def distance(self, s, d): #returns the distance between nodes s and d
if s == d:
return 0
l = [(s,0)] #queue with the node and its distance from the source
visited = [s] #nodes already visited
while len(l) > 0:
node, dist = l.pop(0) #removes and returns the first item in the queue (isolate the first node)
for elem in self.graph[node]: #go through the successors of the current node
if elem == d: return dist + 1 #if a successor is d, return the distance immediately
elif elem not in visited: #if the successor has not been visited yet
l.append((elem,dist+1)) #add it to the queue (path so far plus one step)
visited.append(elem) #and mark it as visited
return None #returns None if d is not reachable
def shortest_path(self, s, d): #returns the shortest path between s and d (list of nodes it passes through)
'''Returns the shortest path between s and d (list of the nodes it passes through)'''
if s == d: return 0
l = [(s,[])] #queue of (node, predecessors), starting at the source
visited = [s] #nodes already visited
while len(l) > 0:
node, preds = l.pop(0) #removes and returns the first item in the queue
for elem in self.graph[node]: #go through the successors of the current node
if elem == d: return preds+[node,elem] #if a successor is d, return the full path right away
elif elem not in visited: #if the successor has not been visited yet
l.append((elem,preds+[node])) #add it to the queue with the extended path
visited.append(elem) #and mark it as visited
return None #returns None if there is no path from s to d
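# Tiny trace of distance()/shortest_path() on a made-up adjacency dict:
#   g = {1: [2], 2: [3, 4], 3: [4], 4: []}
#   distance(1, 4)      -> 2
#   shortest_path(1, 4) -> [1, 2, 4]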
def reachable_with_dist(self, s):
'''Returns the list of nodes reachable from s together with their distance (list of (node, distance) pairs)'''
#the for loop already runs on the first iteration
res = [] #list of nodes reachable from s with their distance
l = [(s,0)] #queue with the tuple (s, 0), i.e. the distance from s to itself
while len(l) > 0:
node, dist = l.pop(0)
if node != s: #skip the source itself
res.append((node,dist)) #s is not counted
for elem in self.graph[node]: #look at the nodes the current node connects to
if not is_in_tuple_list(l,elem) and not is_in_tuple_list(res,elem): #check whether elem is already in l or in res
l.append((elem,dist+1)) #add the connected node, one step further away
return res
## mean distances ignoring unreachable nodes
def mean_distances(self):
tot = 0 #total
num_reachable = 0 #number of node pairs that are connected
for k in self.graph.keys():
distsk = self.reachable_with_dist(k) #[(node,dist)] -> nodes reachable from k with their distance
for _, dist in distsk:
tot += dist
num_reachable += len(distsk) #accumulate the number of reachable pairs
meandist = float(tot) / num_reachable #mean of the connection distances
n = len(self.get_nodes()) #number of nodes in the graph
return meandist, float(num_reachable)/((n-1)*n) #mean distance, and proportion of reachable pairs (num_reachable) over the number of possible pairs ((n-1)*n)
def closeness_centrality(self, node): #node = s
'''Based on how close a node is to all the remaining nodes'''
dist = self.reachable_with_dist(node) #[(node,dist)] -> nodes reachable from node with their distance
if len(dist)==0:
return 0.0 #nothing is reachable, so the closeness is 0
s = 0.0 #total distance
for d in dist: #d = (node, distance)
s += d[1] #e.g. the tuple (t,6)
return len(dist) / s #number of reachable nodes divided by the total distance
#closeness centrality = number of reachable nodes / total distance to them
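# Quick check of closeness_centrality() on a directed chain (made-up graph):
#   g = {'a': ['b'], 'b': ['c'], 'c': []}
#   from 'a': reachable nodes are [('b', 1), ('c', 2)] -> closeness = 2 / (1 + 2) = 0.67
#   from 'c': nothing is reachable -> closeness = 0.0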
def highest_closeness(self, top = 10):
'''Top 10 nodes with the highest closeness centrality'''
cc = {} #dictionary with every key of the graph and its closeness centrality
for k in self.graph.keys(): #for every key in the graph
cc[k] = self.closeness_centrality(k) #the value of k = the closeness centrality of that node
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #sort by closeness centrality, highest first (as a list of tuples)
return list(map(lambda x:x[0], ord_cl[:top])) #return the top 10 nodes
def betweenness_centrality(self, node):
'''Based on the proportion of shortest paths between all node pairs that pass through the node'''
'''Sum over all possible pairs'''
total_sp = 0 #all shortest paths that exist
sps_with_node = 0 #shortest paths that pass through node
for s in self.graph.keys():
for t in self.graph.keys():
if s != t and s != node and t != node:
sp = self.shortest_path(s, t) #path from s to t, if one exists
if sp is not None: #i.e. there is a path
total_sp += 1 #add 1 to the total number of paths
if node in sp: #if node lies on that path
sps_with_node += 1 #count it
return sps_with_node / total_sp #paths through node / total number of paths
def highest_betweenness(self, top = 10):
'''Top 10 nodes with the highest betweenness centrality'''
cc = {} #dictionary with every key of the graph and its betweenness centrality
for k in self.graph.keys(): #for every key in the graph
cc[k] = self.betweenness_centrality(k) #the value of k = the betweenness centrality of that node
print(cc)
ord_cl = sorted(list(cc.items()), key=lambda x : x[1], reverse = True) #sort by betweenness centrality, highest first (as a list of tuples)
return list(map(lambda x:x[0], ord_cl[:top])) #return the top 10 nodes
def centralidade_de_grau_no(self,v):
'''The degree centrality of a node is given by its degree'''
alldegree = self.all_degrees()
return(alldegree[v]) #look up the degree of node v
## cycles
def node_has_cycle (self, v):
l = [v]
res = False
visited = [v]
while len(l) > 0:
node = l.pop(0)
for elem in self.graph[node]:
if elem == v: return True
elif elem not in visited:
l.append(elem)
visited.append(elem)
return res
def has_cycle(self):
res = False
for v in self.graph.keys():
if self.node_has_cycle(v): return True
return res
## clustering
def clustering_coef(self, v): #new function
adjs = self.get_adjacents(v) #list of adjacent nodes
if len(adjs) <= 1: #with 0 or 1 neighbours there is no possible clustering
return 0.0 #so the coefficient is 0
ligs = 0 #links between neighbours
for i in adjs: #take one neighbour of v
for j in adjs: #take another neighbour of v
if i != j: #skip the pair when both are the same node
if j in self.graph[i] or i in self.graph[j]: #if j is a successor of i, or i is a successor of j
ligs = ligs + 1 #count the link
return float(ligs)/(len(adjs)*(len(adjs)-1)) #edges that exist between neighbours of v / total edges that could exist between them
'''EXAMPLE:
l = [1,2,3]
for i in l:
for j in l:
if i != j:
print('sum:',i+j)
RESULT:
i: 1,
j: 1, j: 2 -> sum: 3
j: 3 -> sum: 4
i: 2
j: 1 -> sum: 3
j: 2, j: 3 -> sum: 5'''
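# Worked example of clustering_coef() on a made-up graph (assuming get_adjacents(v) returns
# predecessors plus successors, as used above):
#   g = {1: [2, 3], 2: [3], 3: [], 4: []}
#   the neighbours of 1 are [2, 3]; the pair is linked (2 -> 3) and both orderings are counted,
#   so ligs = 2 and clustering_coef(1) = 2 / (2 * 1) = 1.0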
def all_clustering_coefs(self): #new function
ccs = {} #dictionary with every clustering coefficient
for k in self.graph.keys(): #go through the nodes
ccs[k] = self.clustering_coef(k) #store each node's clustering value
return ccs
def mean_clustering_coef(self): #new function
'''Mean over all nodes'''
ccs = self.all_clustering_coefs() #dictionary {node: coefficient}
return sum(ccs.values()) / float(len(ccs)) #sum of the values divided by the number of entries
def mean_clustering_perdegree(self, deg_type = "inout"): #new function
'''Mean of the coefficients over the nodes of each degree k.'''
degs = self.all_degrees(deg_type) #dictionary {node: degree (in + out)}
ccs = self.all_clustering_coefs() #dictionary {node: coefficient}
degs_k = {} #{degree: [nodes]}
for k in degs.keys(): #go through the nodes
if degs[k] in degs_k.keys(): #if this degree is already a key of degs_k
degs_k[degs[k]].append(k) #append the node to that degree's list
else: degs_k[degs[k]] = [k] #otherwise start the list with this node
ck = {} #{degree: mean coefficient}
for k in degs_k.keys(): #for each degree in degs_k
tot = 0
for v in degs_k[k]: #for each node with that degree
tot += ccs[v] #accumulate the coefficients of all nodes with that degree
ck[k] = float(tot) / len(degs_k[k]) #mean clustering per degree: {degree: mean coefficient}
return ck
def is_in_tuple_list(tl, val):
res = False
for (x,y) in tl: #check whether val appears as the first element of any tuple in the list
if val == x: return True
return res
queries.py
# COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))
except:
pass
finally:
cursor.close()
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
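# Illustrative usage; the database path below is a placeholder, not a file shipped with this module:
#   q = SpecimenQueries(database_path='specimen.db')
#   countries = q.users_and_countries()   # frame with one most-likely country per userid
#   countries.groupby('country').size()   # e.g. number of users per country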
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print("Warning: This is only valid for data from the json files! Timestamps in csv are dummies")
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
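# Usage sketch (illustrative only, not part of the original file): the database
# path and event ids below are placeholders. Both helpers take a list of event
# ids and return pandas dataframes that can be joined on the shared event id.
example_queries = SpecimenQueries(database_path='specimen.db')  # hypothetical path
example_event_ids = [101, 102, 103]  # hypothetical ids
example_offsets = example_queries.get_time_offset(example_event_ids, get_extra_info=True)
example_devices = example_queries.get_devices(example_event_ids)
example_joined = example_offsets.merge(example_devices, left_on='id', right_on='eventid', how='left')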
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be >= 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if which not in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Ret
|
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
# sqlite drops one table per statement, so drop each separately
for t in tables:
cursor.execute('DROP TABLE IF EXISTS %s' % t)
except db.Error:
pass
finally:
cursor.close()
|
identifier_body
|
queries.py
|
OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
# read sql with pandas but make sure column names are lowercase
df = pd.read_sql(sql, conn)
df.columns = df.columns.map(lambda x: x.lower())
return df
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
def __init__(self, database_path=None):
"""
Provides wrapper for queries. Caches queries where possible.
:param database_path: Path to SQLITE database file
"""
self.database_path = database_path
self.conn = db.connect(database=self.database_path)
# start use of foreign keys
_cursor = self.conn.cursor()
_cursor.execute('PRAGMA foreign_keys = ON')
_cursor.close()
self.cache = {}
def _clear_cache(self):
""" Clear cache, which stores prior query results """
self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
# sqlite drops one table per statement, so drop each separately
for t in tables:
cursor.execute('DROP TABLE IF EXISTS %s' % t)
except db.Error:
pass
finally:
cursor.close()
def _get_unknown_userid(self):
"""
Retrieve user id associated with unknown user
"""
cursor = self.conn.cursor()
unknown_user_str = dbtypes.User.null
cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
|
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be >= 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if which not in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving
|
random_line_split
|
|
retryable.go
|
Config = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay sets the initial and maximum time to wait between retries (the delay grows with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
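// Sketch of how these functional options are typically consumed (an assumption,
// not from this file): a constructor applies each Opts in order over a *retryable
// initialized with defaults. The NewRetryable name and default values below are
// hypothetical.
//
//	func NewRetryable(opts ...Opts) *retryable {
//		r := &retryable{limit: 5, delayInit: time.Second, delayMax: 30 * time.Second}
//		for _, opt := range opts {
//			opt(r)
//		}
//		return r
//	}
//
//	r := NewRetryable(WithLimit(3), WithDelay(time.Second, 10*time.Second), WithUserAgent("example-agent"))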
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
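// Worked example of the backoff math above (a sketch; values are illustrative):
// with delayInit = 1s, delayMax = 30s, and limit = 5, successive failures sleep
// 2s, 4s, 8s, 16s, 30s (the shifted value of 32s is capped). A Retry-After header
// can raise the sleep, but never beyond delayMax:
//
//	sleep := delayInit << backoffCur // backoffCur was incremented first
//	if sleep > delayMax {
//		sleep = delayMax
//	}
//	if ra, err := time.ParseDuration(retryAfterHeader + "s"); err == nil {
//		if ra > delayMax {
//			sleep = delayMax
//		} else if ra > sleep {
//			sleep = ra
//		}
//	}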
// BackoffUntil returns the time at which the current backoff completes
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
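// Usage sketch (an assumption, not from this file): DoRequest, defined above,
// takes an HTTP method, a list of candidate mirror URLs, and per-request options.
// The URL below is purely illustrative; the returned Response is read like an
// io.Reader.
//
//	u, _ := url.Parse("https://registry.example.com/v2/")
//	resp, err := r.DoRequest(ctx, "GET", []url.URL{*u},
//		WithHeader("Accept", []string{"application/json"}))
//	if err == nil {
//		data, _ := ioutil.ReadAll(resp)
//		_ = data
//	}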
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq
|
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
|
{
return func(req *request) {
req.getBody = getbody
}
}
|
identifier_body
|
retryable.go
|
Config = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay sets the initial and maximum time to wait between retries (the delay grows with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
// BackoffUntil returns the time at which the current backoff completes
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq {
return func(req *request) {
req.getBody = getbody
}
}
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now())
|
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
|
{
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
|
conditional_block
|
retryable.go
|
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len(req.responses)-1].Body.Close()
}
// send the new request
httpErr = req.httpDo()
if httpErr != nil {
req.r.backoffSet(nil)
req.nextURL(true)
continue
}
// check the response
lastURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
statusCode := lastResp.StatusCode
removeURL := false
runBackoff := false
switch {
case 200 <= statusCode && statusCode < 300:
// all 200 status codes are successful
req.r.BackoffClear()
return nil
case statusCode == http.StatusUnauthorized:
err := req.handleAuth()
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
runBackoff = true
removeURL = true
}
case statusCode == http.StatusForbidden:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Forbidden")
runBackoff = true
removeURL = true
case statusCode == http.StatusNotFound:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Not found")
removeURL = true
case statusCode == http.StatusTooManyRequests:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Rate limit exceeded")
runBackoff = true
case statusCode == http.StatusRequestTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Timeout")
runBackoff = true
case statusCode == http.StatusGatewayTimeout:
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
}).Debug("Gateway timeout")
runBackoff = true
default:
body, _ := ioutil.ReadAll(lastResp.Body)
req.log.WithFields(logrus.Fields{
"URL": lastURL.String(),
"Status": lastResp.Status,
"Body": string(body),
}).Debug("Unexpected status")
runBackoff = true
removeURL = true
}
// remove url and trigger backoff if needed
if removeURL {
req.nextURL(removeURL)
}
if runBackoff {
req.r.backoffSet(lastResp)
}
}
}
func (req *request) handleAuth() error {
curURL := req.urls[req.curURL]
lastResp := req.responses[len(req.responses)-1]
// for unauthorized requests, try to setup auth and retry without backoff
if req.r.auth == nil {
return ErrUnauthorized
}
err := req.r.auth.HandleResponse(lastResp)
if err != nil {
req.log.WithFields(logrus.Fields{
"URL": curURL.String(),
"Err": err,
}).Warn("Failed to handle auth request")
return err
}
return nil
}
func (req *request) httpDo() error {
// build the http request for the current mirror url
httpReq, err := http.NewRequestWithContext(req.context, req.method, req.urls[req.curURL].String(), nil)
if err != nil {
return err
}
if req.getBody != nil {
httpReq.Body, err = req.getBody()
if err != nil {
return err
}
httpReq.GetBody = req.getBody
httpReq.ContentLength = req.contentLen
}
if len(req.header) > 0 {
httpReq.Header = req.header
}
if req.r.useragent != "" && httpReq.Header.Get("User-Agent") == "" {
httpReq.Header.Add("User-Agent", req.r.useragent)
}
if req.offset > 0 {
// TODO: implement range requests
return ErrNotImplemented
}
// include auth header
if req.r.auth != nil {
err = req.r.auth.UpdateRequest(httpReq)
if err != nil {
return err
}
}
req.log.WithFields(logrus.Fields{
"method": req.method,
"url": req.urls[req.curURL].String(),
"withAuth": (len(httpReq.Header.Values("Authorization")) > 0),
}).Debug("Sending request")
resp, err := req.r.httpClient.Do(httpReq)
if err != nil {
return err
}
req.responses = append(req.responses, resp)
// update reader
if req.digester == nil {
req.reader = resp.Body
} else {
req.reader = io.TeeReader(resp.Body, req.digester.Hash())
}
return nil
}
func (req *request) nextURL(removeLast bool) {
// next mirror based on whether remove flag is set
if removeLast {
req.urls = append(req.urls[:req.curURL], req.urls[req.curURL+1:]...)
if req.curURL >= len(req.urls) {
req.curURL = 0
}
} else {
if len(req.urls) > 0 {
req.curURL = (req.curURL + 1) % len(req.urls)
} else {
req.curURL = 0
}
}
}
func (req *request) Read(b []byte) (int, error) {
// if done, return eof
if req.done {
return 0, io.EOF
}
// if no responses, error
if len(req.responses) == 0 {
return 0, ErrNotFound
}
// fetch block
lastResp := req.responses[len(req.responses)-1]
i, err := req.reader.Read(b)
req.curRead += int64(i)
if err == io.EOF && lastResp.ContentLength > 0 {
if lastResp.Request.Method == "HEAD" {
// no body on a head request
req.done = true
} else if req.curRead < lastResp.ContentLength {
// TODO: handle early EOF or other failed connection with a retry
// req.offset += req.curRead
// err = req.retryLoop()
// if err != nil {
// return i, err
// }
req.log.WithFields(logrus.Fields{
"curRead": req.curRead,
"contentLen": lastResp.ContentLength,
}).Debug("EOF before reading all content, retrying")
return i, err
} else if req.curRead >= lastResp.ContentLength {
req.done = true
}
}
// if eof, verify digest, set error on mismatch
if req.digester != nil && err == io.EOF && req.digest != req.digester.Digest() {
req.log.WithFields(logrus.Fields{
"expected": req.digest,
"computed": req.digester.Digest(),
}).Warn("Digest mismatch")
req.done = true
return i, ErrDigestMismatch
}
// pass through read on the last response
return i, err
}
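// verifyDigestSketch is an illustrative helper (not part of the original file)
// showing the same digest check Read performs above: stream the body through a
// TeeReader into a digester, then compare the computed digest at EOF.
func verifyDigestSketch(expected digest.Digest, body io.Reader) error {
	digester := digest.Canonical.Digester()
	if _, err := io.Copy(ioutil.Discard, io.TeeReader(body, digester.Hash())); err != nil {
		return err
	}
	if digester.Digest() != expected {
		return ErrDigestMismatch
	}
	return nil
}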
func (req *request)
|
Close
|
identifier_name
|
|
retryable.go
|
}).Warn("Failed to load root certificate")
}
}
t.TLSClientConfig = tlsc
r.httpClient.Transport = t
}
}
return r
}
// WithAuth adds authentication to retryable methods
func WithAuth(auth Auth) Opts {
return func(r *retryable) {
r.auth = auth
}
}
// WithCerts adds certificates
func WithCerts(certs [][]byte) Opts {
return func(r *retryable) {
for _, c := range certs {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
// WithCertFiles adds certificates by filename
func WithCertFiles(files []string) Opts {
return func(r *retryable) {
for _, f := range files {
c, err := ioutil.ReadFile(f)
if err != nil {
r.log.WithFields(logrus.Fields{
"err": err,
"file": f,
}).Warn("Failed to read certificate")
} else {
r.rootCAPool = append(r.rootCAPool, c)
}
}
}
}
// WithDelay sets the initial and maximum time to wait between retries (the delay grows with exponential backoff)
func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts {
return func(r *retryable) {
if delayInit > 0 {
r.delayInit = delayInit
}
// delayMax must be at least delayInit, if 0 initialize to 30x delayInit
if delayMax > r.delayInit {
r.delayMax = delayMax
} else if delayMax > 0 {
r.delayMax = r.delayInit
} else {
r.delayMax = r.delayInit * 30
}
}
}
// WithHTTPClient uses a specific http client with retryable requests
func WithHTTPClient(h *http.Client) Opts {
return func(r *retryable) {
r.httpClient = h
}
}
// WithLimit restricts the number of retries (defaults to 5)
func WithLimit(l int) Opts {
return func(r *retryable) {
if l > 0 {
r.limit = l
}
}
}
// WithLog injects a logrus Logger configuration
func WithLog(log *logrus.Logger) Opts {
return func(r *retryable) {
r.log = log
}
}
// WithTransport uses a specific http transport with retryable requests
func WithTransport(t *http.Transport) Opts {
return func(r *retryable) {
r.httpClient = &http.Client{Transport: t}
}
}
// WithUserAgent sets a user agent header
func WithUserAgent(ua string) Opts {
return func(r *retryable) {
r.useragent = ua
}
}
func (r *retryable) BackoffClear() {
if r.backoffCur > r.limit {
r.backoffCur = r.limit
}
if r.backoffCur > 0 {
r.backoffCur--
if r.backoffCur == 0 {
r.backoffUntil = time.Time{}
}
}
r.backoffNeeded = false
}
func (r *retryable) backoffSet(lastResp *http.Response) error {
r.backoffCur++
// sleep for backoff time
sleepTime := r.delayInit << r.backoffCur
// limit to max delay
if sleepTime > r.delayMax {
sleepTime = r.delayMax
}
// check rate limit header
if lastResp != nil && lastResp.Header.Get("Retry-After") != "" {
ras := lastResp.Header.Get("Retry-After")
ra, _ := time.ParseDuration(ras + "s")
if ra > r.delayMax {
sleepTime = r.delayMax
} else if ra > sleepTime {
sleepTime = ra
}
}
r.backoffUntil = time.Now().Add(sleepTime)
r.backoffNeeded = true
if r.backoffCur == r.limit {
return fmt.Errorf("%w: backoffs %d", ErrBackoffLimit, r.backoffCur)
}
return nil
}
// BackoffUntil returns the time at which the current backoff completes
func (r *retryable) BackoffUntil() time.Time {
return r.backoffUntil
}
type request struct {
r *retryable
context context.Context
method string
urls []url.URL
curURL int
header http.Header
getBody func() (io.ReadCloser, error)
contentLen int64
chunking bool
offset int64
curRead int64
done bool
digest digest.Digest
digester digest.Digester
progressCB func(int64, error)
responses []*http.Response
reader io.Reader
log *logrus.Logger
}
func (r *retryable) DoRequest(ctx context.Context, method string, u []url.URL, opts ...OptsReq) (Response, error) {
req := &request{
r: r,
context: ctx,
method: method,
urls: u,
curURL: 0,
header: http.Header{},
getBody: nil,
contentLen: -1,
chunking: false,
offset: 0,
curRead: 0,
done: false,
digest: "",
digester: nil,
progressCB: nil,
responses: []*http.Response{},
reader: nil,
log: r.log,
}
// apply opts
for _, opt := range opts {
opt(req)
}
// run the request until successful or non-recoverable error
err := req.retryLoop()
return req, err
}
// WithBodyBytes converts a bytes slice into a body func and content length
func WithBodyBytes(body []byte) OptsReq {
return func(req *request) {
req.contentLen = int64(len(body))
req.getBody = func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(body)), nil
}
}
}
// WithBodyFunc includes body content in a request
func WithBodyFunc(getbody func() (io.ReadCloser, error)) OptsReq {
return func(req *request) {
req.getBody = getbody
}
}
// WithChunking allows content to be divided into multiple smaller chunks
func WithChunking() OptsReq {
return func(req *request) {
req.chunking = true
}
}
// WithContentLen sets the content length
func WithContentLen(l int64) OptsReq {
return func(req *request) {
req.contentLen = l
}
}
// WithDigest verifies the returned content digest matches.
// Note that the digest is only calculated upon EOF from the downloaded
// content, so the reader may receive an error rather than EOF from a
// digest mismatch. The content itself must still be read.
func WithDigest(d digest.Digest) OptsReq {
return func(req *request) {
req.digest = d
req.digester = digest.Canonical.Digester()
}
}
// WithHeader sets a header
func WithHeader(key string, values []string) OptsReq {
return func(req *request) {
for _, v := range values {
req.header.Add(key, v)
}
}
}
// WithHeaders includes a header object
func WithHeaders(headers http.Header) OptsReq {
return func(req *request) {
for key := range headers {
for _, val := range headers.Values(key) {
req.header.Add(key, val)
}
}
}
}
// WithProgressCB calls the CB function as data is received
func WithProgressCB(cb func(int64, error)) OptsReq {
return func(req *request) {
req.progressCB = cb
}
}
func WithScope(repo string, push bool) OptsReq {
scope := "repository:" + repo + ":pull"
if push {
scope = scope + ",push"
}
return func(req *request) {
for _, url := range req.urls {
req.r.auth.AddScope(url.Host, scope)
}
}
}
func (req *request) retryLoop() error {
req.r.mu.Lock()
defer req.r.mu.Unlock()
curRetry := 0
var httpErr error
for {
// handle backoffs and errors
if len(req.urls) == 0 {
if httpErr != nil {
return httpErr
}
return ErrAllRequestsFailed
}
curRetry++
if curRetry > req.r.limit {
return ErrAllRequestsFailed
}
if !req.r.backoffUntil.IsZero() && req.r.backoffUntil.After(time.Now()) {
sleepTime := time.Until(req.r.backoffUntil)
req.log.WithFields(logrus.Fields{
"Host": req.urls[req.curURL].Host,
"Seconds": sleepTime.Seconds(),
}).Warn("Sleeping for backoff")
select {
case <-req.context.Done():
return ErrCanceled
case <-time.After(sleepTime):
}
}
// close any previous responses before making a new request
if len(req.responses) > 0 {
req.responses[len
|
"cert": string(ca),
|
random_line_split
|
|
Telecom_customer_churn.py
|
= df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (high tenure or low tenure) are getting churned. We need to know whether recently joined customers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance of churning. We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charges, total charges should increase according to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more likely to churn
#
# 5)PaymentMethod - to check whether the payment method is creating any transaction issues that cause churning, and which method is causing the issue
#
# 6)PaperlessBilling - to see how many customers use paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that the company provides, like phone, internet, multiple lines, etc. Check which particular service or combination of services is driving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a higher tendency to churn and that high monthly charges lead to churning
"""
def kde(feature):
plt.figure(figsize=(9,4)
|
hlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
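# Equivalent shortcut (a sketch, not part of the original notebook): pd.crosstab
# can normalize within each row directly, giving the same percentages in one call.
tenure_pct = round(pd.crosstab(df.tenure_groups, df.Churn, normalize="index") * 100)
print(tenure_pct)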
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning. This means that newly joined customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univariate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increases, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges is showing high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge when compared to others and so is the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning is there for people having phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges, as both are proportional and taking either one is sufficient
# **Summary:As monthly charges and total charges increases, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that fewer senior citizens (1142) joined when compared to youngsters
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here among the senior citizens, around 42% have churned whereas youngsters have churned less (among youngsters, only 24% churned)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted electronic check more when compared to other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who have churned
#whereas the average is lower, around 65 dollars, for people who haven't churned
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparatively few, i.e. around 16%. Among these, around 48% churned.
# Their monthly charges look comparatively higher for those who churned among the senior citizens.
# Also, the payment method used was electronic check. We need to further analyse whether electronic check is creating an issue for them that causes churning**
# # All other services including:
|
)
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("Mont
|
identifier_body
|
Telecom_customer_churn.py
|
= df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (high tenure or low tenure) are getting churned. We need to know whether recently joined customers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance of churning. We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charges, total charges should increase according to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more likely to churn
#
# 5)PaymentMethod - to check whether the payment method is creating any transaction issues that cause churning, and which method is causing the issue
#
# 6)PaperlessBilling - to see how many customers use paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that the company provides, like phone, internet, multiple lines, etc. Check which particular service or combination of services is driving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a higher tendency to churn and that high monthly charges lead to churning
"""
def kde(feature):
plt.fi
|
e(figsize=(9,4))
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("MonthlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning. This means that newly joined customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univariate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increases, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges is showing high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge when compared to others and so is the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning is there for people having phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges, as both are proportional and taking either one is sufficient
# **Summary:As monthly charges and total charges increases, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that fewer senior citizens (1142) joined when compared to youngsters
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here among the senior citizens, around 42% have churned whereas youngsters have churned less (among youngsters, only 24% churned)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted electronic check more when compared to other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who have churned
#whereas the average is lower, around 65 dollars, for people who haven't churned
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparatively few, i.e. around 16%. Among these, around 48% churned.
# Their monthly charges look comparatively higher for those who churned among the senior citizens.
# Also, the payment method used was electronic check. We need to further analyse whether electronic check is creating an issue for them that causes churning**
# # All other services including:
|
gur
|
identifier_name
|
Telecom_customer_churn.py
|
27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (high tenure or low tenure) are getting churned. We need to know whether recently joined customers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance of churning. We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - Same as monthly charges, total charges should increase according to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more likely to churn
#
# 5)PaymentMethod - to check whether the payment method is creating any transaction issues that cause churning, and which method is causing the issue
#
# 6)PaperlessBilling - to see how many customers use paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that the company provides, like phone, internet, multiple lines, etc. Check which particular service or combination of services is driving more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a higher tendency to churn and that high monthly charges lead to churning
"""
def kde(feature):
plt.figure(figsize=(9,4))
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("MonthlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good no: of people with less than 10 months of tenure approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning.Around 44% among the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning. This means that newly joined customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univariate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increase, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges show a high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge compared to others, and so does the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning occurs for people who have phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges, as both are proportional and taking either one of them is sufficient
# **Summary: As monthly charges and total charges increase, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that fewer senior citizens (1142) have joined when compared to younger customers
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here, among the senior citizens, around 42% have churned, whereas younger customers have churned less (only around 24%)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted for electronic check more than for other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who churned,
#whereas the average is lower, around 65 dollars, for those who haven't churned
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparatively few, i.e., around 16% of customers. Among these, around 48% have churned.
# Their monthly charges look comparatively higher for those who churned among the senior citizens.
# Also, the payment method most used was electronic check. We need to further analyse whether electronic check is creating an issue for them and causing churn.**
# # All other services including:
# **OnlineSecurity,OnlineBackup,DeviceProtection,TechSupport,StreamingTV,StreamingMovies**
#
# In[41]:
replace_cols=['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
'StreamingTV', 'StreamingMovies']
#To display these columns together as subplots using a for loop
x=0
y=0
num=0
plt.tight_layout()
fig, axes =plt.subplots(2,3,figsize=(15,8))
for x in range(2):
for y in range(3):
|
sns.countplot(x=replace_cols[num],data=df,hue = "Churn",ax=axes[x,y],palette="coolwarm")
num +=1
#for people who have op
|
conditional_block
|
|
Telecom_customer_churn.py
|
] = df[i].replace('No internet service' , 'No')
df["MultipleLines"]=df["MultipleLines"].replace("No phone service","No")
# In[16]:
#to check if duplicated rows are present
df.duplicated().sum()
# In[17]:
y = pd.crosstab(df["Churn"],columns = "Frequency")
print(y)
#no: of customers churned = 1869
#no: of customers not churned = 5174
# In[18]:
#bar plot showing the customers who churned and who didn't
y_bar = y.plot(kind="bar")
y_percent = y/len(df)*100
print(round(y_percent,2))
#27% churned
#73% not churned
# In[19]:
#categorical columns and numerical columns
categorical_cols = ["gender","Partner","Dependents","PhoneService","MultipleLines","InternetService","OnlineSecurity","OnlineBackup","DeviceProtection","TechSupport","StreamingTV","StreamingMovies","Contract","PaperlessBilling","PaymentMethod","Churn"]
numerical_cols = ["SeniorCitizen","tenure","MonthlyCharges","TotalCharges"]
# # Hypothesis Generation
# Possible Questions or variables to be checked:
#
# 1)tenure - which category of people (those with high tenure or low tenure) is getting churned. We need to know whether recently joined customers are churning or not
#
# 2)MonthlyCharges - if the monthly charges are high, there is a chance of churning. We need to analyse whether monthly charges are high or not
#
# 3)TotalCharges - same as monthly charges; total charges should increase according to monthly charges
#
# 4)SeniorCitizen - need to check whether senior citizens are more prone to churn
#
# 5)PaymentMethod - to check whether the payment method is creating any transaction issues which cause churning, and which of them is causing the issue
#
# 6)PaperlessBilling - to see how many customers are using paperless billing and analyse it with respect to churning
#
# 7)There are multiple services that the company is providing, like phone, internet, multiple lines, etc. Check which particular service (or combination of services) gives more churning
# **KDE PLOT on tenure, MonthlyCharges and TotalCharges.**
# In[20]:
"""
checking the churn status of other numerical fields using kde plot
we can see that recent joiners have a higher tendency to churn and that high monthly charges lead to churning
"""
def kde(feature):
plt.figure(figsize=(9,4))
plt.title("kde plot for {}".format(feature))
ax0=sns.kdeplot(df[df["Churn"]=="Yes"][feature],color="red",label= "Churn - Yes")
ax1=sns.kdeplot(df[df["Churn"]=="No"][feature],color="green",label="Churn - No")
kde("tenure")
kde("MonthlyCharges")
kde("TotalCharges")
# # Tenure
# In[21]:
#Univariate Analysis
#histogram
sns.distplot(df["tenure"])
# In[22]:
# there is a good number of people with less than 10 months of tenure, approximately 26%
df[df["tenure"]<10]["tenure"].count()/len(df)*100
# In[23]:
#summary of tenure
df["tenure"].describe()
# In[24]:
#dividing tenure into 3 categories for further analysis
#tenure>=60 months-->highest
#tenure 20 to 60 months-->medium
#tenure 0 to 20 months--->lowest
df["tenure_groups"] = np.where(df["tenure"]>=60,"highest",np.where(df["tenure"]<20,"lowest","medium"))
# In[25]:
sns.countplot(df["tenure_groups"],data=df)
pd.crosstab(df["tenure_groups"],columns="frequency")
# In[26]:
#Multivariate Analysis
#checking which tenure period gives more churning. Around 44% of the lowest tenure group has churned
tenure_Crosstab = pd.crosstab(df.tenure_groups, columns=df.Churn)
row_tot = tenure_Crosstab.sum(axis=1)
tenure_Crosstab_prop = round(tenure_Crosstab.div(row_tot,axis=0)*100)
print("---------------------------------------------------------------------------------------------------------------------------")
print("The proportion of churning in different tenure groups namley lowest,medium and highest in the order of their tenure period is: ")
print("---------------------------------------------------------------------------------------------------------------------------")
print(tenure_Crosstab_prop)
tenure_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
#lowest tenure period gives more churning
# **tenure vs Monthly charges and total charges**
# In[27]:
#as tenure is less and monthly or total charges increases, churning happens
g=sns.PairGrid(df,x_vars=["MonthlyCharges","TotalCharges"],y_vars="tenure",hue="Churn",palette="coolwarm",height=8)
g.map(plt.scatter,alpha=0.5)
plt.legend(loc=(-0.3,0.6))
# **Summary:
# low tenure is a reason for churning. This means that newly joined customers are getting churned.**
# # MonthlyCharges
# In[28]:
#univariate analysis
#summary of Monthly Charges
df["MonthlyCharges"].describe()
# In[29]:
#histogram showing the distribution of monthly charges
sns.distplot(df["MonthlyCharges"])
# In[30]:
#we can see that as monthly charges increase, churning increases
sns.boxplot(x="Churn",y="MonthlyCharges",data=df,palette="coolwarm")
# **Monthly Charges vs Multiple Lines**
# In[31]:
df.MultipleLines.value_counts()
# In[32]:
"""
multiple lines with high monthly charges show a high churning rate.
Whether or not the person has multiple lines, if he has high monthly charges, he has a tendency to churn.
"""
print(sns.boxplot(x="MultipleLines",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm"))
# **Monthly Charges vs Internet Service**
# In[33]:
#Fibre optic services have a high monthly charge compared to others, and so does the churn rate
sns.boxplot(x="InternetService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Phone Service**
# In[34]:
#churning occurs for people who have phone service and high monthly charges
sns.boxplot(x="PhoneService",y="MonthlyCharges",hue="Churn",data=df,palette="coolwarm")
# **Monthly Charges vs Total Charges**
# In[35]:
plt.figure(figsize=(13,8))
sns.scatterplot(x="MonthlyCharges",y="TotalCharges",data = df,palette="coolwarm",hue = "Churn")
# using monthly charges for further analysis instead of total charges, as both are proportional and taking either one of them is sufficient
# **Summary: As monthly charges and total charges increase, churning increases**
# # Senior Citizen
# In[36]:
#We can infer that fewer senior citizens (1142) have joined when compared to younger customers
sns.countplot(x="SeniorCitizen",data=df)
pd.crosstab(df["SeniorCitizen"],columns="frequency")
# In[37]:
#here, among the senior citizens, around 42% have churned, whereas younger customers have churned less (only around 24%)
SeniorCitizen_Crosstab = pd.crosstab(df.SeniorCitizen, columns=df.Churn)
row_tot = SeniorCitizen_Crosstab.sum(axis=1)
print("------------------------------------------------------------------------------------")
SeniorCitizen_Crosstab_prop = round(SeniorCitizen_Crosstab.div(row_tot,axis=0)*100)
print("Percentage of people who got attrited among the senior citizen and youngsters: ")
print("------------------------------------------------------------------------------------")
print(SeniorCitizen_Crosstab_prop)
|
# In[38]:
#senior citizen vs payment method
# In[39]:
#senior citizens have opted for electronic check more than for other payment methods.
#So we need to know if there was any issue regarding electronic check
sns.barplot(x="SeniorCitizen",y="PaymentMethod",data=df)
# In[40]:
#The average monthly charges were around 90 dollars for senior citizens who churned,
#whereas the average is lower, around 65 dollars, for those who haven't churned
sns.boxplot(x="SeniorCitizen",y="MonthlyCharges",data=df)
# **Summary:
# Senior citizens are comparatively few, i.e., around 16% of customers. Among these, around 48% have churned.
# Their monthly charges look comparatively higher for those who churned among the senior citizens.
# Also, the payment method most used was electronic check. We need to further analyse whether electronic check is creating an issue for them and causing churn.**
# # All other services including:
#
|
SeniorCitizen_Crosstab_prop.plot(kind = 'bar' ,rot=0 , figsize = [16,5])
|
random_line_split
|
export_saved_model.py
|
load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
  [height, width] of the input image size. The image_size should be divisible
  by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary containing the unpacked labels at different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
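  # For example, with the settings used by generate_anchors_info below
  # (num_scales=1, aspect_ratios=[1.0, 2.0, 0.5]), anchors_per_location above evaluates to 3.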
@property
def multilevel_boxes(self):
|
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
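  # Each tuple is (text embedding dim, AttentionPool num_heads, RoI output size),
  # matching the unpacking into text_dim, model_num_heads, roi_size below.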
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
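# Usage sketch (added for illustration, not executed by the exporter): splitting a base key
# yields one rng per stochastic-layer stream, e.g.
#   example_rngs = generate_rng_dict(jax.random.PRNGKey(0))
#   assert set(example_rngs) == {'dropout', 'stochastic_depth', 'rng'}
# The prediction step below passes such a dictionary via the `rngs` argument of `apply`.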
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
|
return self.unpack_labels(self.boxes, is_box=True)
|
identifier_body
|
export_saved_model.py
|
load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
  [height, width] of the input image size. The image_size should be divisible
  by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def
|
(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary containing the unpacked labels at different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
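# multilevel_boxes above returns a dict keyed by pyramid level, each value a
# [num_anchors_at_level, 4] box tensor (unpack_labels with is_box=True reshapes to [-1, 4]);
# generate_anchors_info below converts each entry to a numpy array.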
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
|
unpack_labels
|
identifier_name
|
export_saved_model.py
|
load the JAX model.'
)
_OUTPUT_DIR = flags.DEFINE_string(
'output_dir', None, 'Path under which to save the SavedModel.'
)
_MODEL_NAME = flags.DEFINE_string(
'model_name', 'resnet_50', 'The name of the backbone model to export.'
)
_IMAGE_SIZE = flags.DEFINE_integer(
'image_size', 1024, 'Image size to serve the model at.'
)
_VLM_WEIGHT = flags.DEFINE_float(
'vlm_weight',
0.65,
'A float between [0, 1] as a tradeoff between open/closed-set detection.',
)
_SERVING_BATCH_SIZE = flags.DEFINE_integer(
'serving_batch_size',
1,
'For what batch size to prepare the serving signature.',
)
_MAX_NUM_CLASSES = flags.DEFINE_integer(
'max_num_classes', 30, 'Maximum number of classes to feed in by the user.'
)
_INCLUDE_MASK = flags.DEFINE_bool(
'include_mask', True, 'Whether to include mask.'
)
_MODEL_CONFIG_PATH = flags.DEFINE_string(
'model_config_path',
'./configs/export_model.gin',
'The path to model gin config.',
)
@gin.constants_from_enum
class ExecutionMode(enum.Enum):
"""Defines the model execution mode."""
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
  [height, width] of the input image size. The image_size should be divisible
  by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
|
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary containing the unpacked labels at different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
|
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
|
conditional_block
|
export_saved_model.py
|
TRAIN = 1
EVAL = 2
PREDICT = 3
class Anchor:
"""Anchor class for anchor-based object detectors."""
def __init__(self,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds one additional
intermediate anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of float numbers representing the aspect ratio anchors
added on each level. The number indicates the ratio of width to height.
For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
scale level.
anchor_size: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: a list of integer numbers or Tensors representing
  [height, width] of the input image size. The image_size should be divisible
  by the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_size = anchor_size
self.image_size = image_size
self.boxes = self._generate_boxes()
def _generate_boxes(self):
"""Generates multiscale anchor boxes.
Returns:
a Tensor of shape [N, 4], representing anchor boxes of all levels
concatenated together.
"""
boxes_all = []
for level in range(self.min_level, self.max_level + 1):
boxes_l = []
for scale in range(self.num_scales):
for aspect_ratio in self.aspect_ratios:
stride = 2 ** level
intermidate_scale = 2 ** (scale / float(self.num_scales))
base_anchor_size = self.anchor_size * stride * intermidate_scale
aspect_x = aspect_ratio ** 0.5
aspect_y = aspect_ratio ** -0.5
half_anchor_size_x = base_anchor_size * aspect_x / 2.0
half_anchor_size_y = base_anchor_size * aspect_y / 2.0
x = tf.range(stride / 2, self.image_size[1], stride)
y = tf.range(stride / 2, self.image_size[0], stride)
xv, yv = tf.meshgrid(x, y)
xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
# Tensor shape Nx4.
boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
yv + half_anchor_size_y, xv + half_anchor_size_x],
axis=1)
boxes_l.append(boxes)
# Concat anchors on the same level to tensor shape NxAx4.
boxes_l = tf.stack(boxes_l, axis=1)
boxes_l = tf.reshape(boxes_l, [-1, 4])
boxes_all.append(boxes_l)
return tf.concat(boxes_all, axis=0)
def unpack_labels(self, labels,
is_box = False):
"""Unpacks an array of labels into multiscales labels.
Args:
labels: labels to unpack.
is_box: to unpack anchor boxes or not. If it is true, will unpack to 2D,
otherwise, will unpack to 3D.
Returns:
unpacked_labels: a dictionary containing the unpacked labels at different levels.
"""
unpacked_labels = {}
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)
feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)
steps = feat_size_y * feat_size_x * self.anchors_per_location
if is_box:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[-1, 4])
else:
unpacked_labels[level] = tf.reshape(labels[count:count + steps],
[feat_size_y, feat_size_x, -1])
count += steps
return unpacked_labels
@property
def anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
@property
def multilevel_boxes(self):
return self.unpack_labels(self.boxes, is_box=True)
def generate_anchors_info():
"""Generate anchors and image info."""
original_height, original_width = 512, 640
input_anchor = Anchor(
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[1.0, 2.0, 0.5],
anchor_size=8,
image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))
anchor_boxes = input_anchor.multilevel_boxes
for key in anchor_boxes:
anchor_boxes[key] = anchor_boxes[key].numpy()
scale = min(_IMAGE_SIZE.value / original_height,
_IMAGE_SIZE.value / original_width)
image_info = np.array([[[original_height, original_width],
[_IMAGE_SIZE.value, _IMAGE_SIZE.value],
[scale, scale], [0, 0]]])
return anchor_boxes, image_info
def load_fvlm_gin_configs():
"""Load gin configs for F-VLM model."""
clip_model_embed_dim = {
'resnet_50': (1024, 32, 7),
'resnet_50x4': (640, 40, 9),
'resnet_50x16': (768, 48, 12),
'resnet_50x64': (1024, 64, 14),
}
config_path = _MODEL_CONFIG_PATH.value
text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]
gin.parse_config_file(config_path)
gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')
gin.parse_config(f'CLIP_NAME = "{_MODEL_NAME.value}"')
gin.parse_config(f'TEXT_DIM = {text_dim}')
gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')
gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')
gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')
gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')
return _MAX_NUM_CLASSES.value, text_dim
def generate_rng_dict(base_rng):
"""Generates a dictionary of rngs to pass in to `nn.Module`s.
Stochastic layers in Flax Modules use separate stream of random number
generators (e.g. dropout requires an rng named 'dropout'). This function
generates all rngs needed for stochastic layers.
Args:
base_rng: The base rng to split.
Returns:
A dictionary of rngs to be used in calling modules.
"""
keys = ('dropout', 'stochastic_depth', 'rng')
rngs = jax.random.split(base_rng, len(keys))
return {key: rngs[i] for i, key in enumerate(keys)}
@gin.configurable
def create_predict_step(model_fn = gin.REQUIRED):
"""Get prediction step function.
Args:
model_fn: A flax.deprecated.nn.module of forward model to use.
Returns:
model_outputs: A dictionary of model_outputs.
"""
def predict_step_v2(variables, batch, rng):
features, _ = batch if isinstance(batch, tuple) else (batch, {})
rng, _ = jax.random.split(rng)
pred_model_fn = model_fn(mode=ExecutionMode.EVAL)
model_outputs = pred_model_fn.apply(
variables,
**features,
mutable=False,
_do_remap=True,
rngs=generate_rng_dict(rng))
return model_outputs
return predict_step_v2
def get_fvlm_predict_fn(serving_batch_size):
"""Get predict function and input signatures for F-VLM model."""
num_classes, text_dim = load_fvlm_gin_configs()
predict_step = create_predict_step()
anchor_boxes, image_info = generate_anchors_info()
def predict_fn(params, input_dict):
input_dict['labels'] = {
'detection': {
'anchor_boxes': anchor_boxes,
'image_info': image_info,
}
}
output = predict_step(params, input_dict, jax.random.PRNGKey(0))
output = output['detection']
output.pop('rpn_score_outputs')
output.pop('rpn_box_outputs')
output.pop('class_outputs')
output.pop('box_outputs')
return output
input_signatures = {
|
'image':
tf.TensorSpec(
shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,
|
random_line_split
|
|
old_main.rs
|
{
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this ends the recursion for this update
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
&self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt {
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
}
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
ellipse(CURSOR_COLOR, player, c.transform, gl);
}
});
}
// updates the app's knowledge of the mouse cursor, returning `true` if the cursor position has changed since last time
fn set_cursor(&mut self, cursor_screen: [f64; 2]) -> bool {
self.pcc.modify(|PccState { cursor_px, .. }| {
*cursor_px = cursor_screen;
});
self.pcc.dirty
}
fn regenerate(&mut self, width: i32, height: i32) {
// regenerate the "world"
self.world.regenerate(Rect::from_xywh(0, 0, width, height));
// reset any app state that depends on the previous "world"
self.nav.forget();
self.pointed_room.forget();
self.generate_requested = false;
// pick a random position for the player
let new_player_pos = self.world.floor_graph.as_ref().and_then(|graph| {
let mut rng = thread_rng();
graph.nodes().choose(&mut rng).map(|n| {
let point = graph.get_bounds(*n.id()).center();
[point.x, point.y]
}).clone()
});
if let Some(pos) = new_player_pos {
self.pcc.modify(|PccState { player_pos, .. }| {
*player_pos = pos;
});
}
}
}
struct
|
PointedRoom
|
identifier_name
|
|
old_main.rs
|
should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this ends the recursion for this update
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
&self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(color, rect, c.transform, gl);
}
// DEBUG: cursor target walkable area
if let Some(pointed_room) = pointed_room.current {
let bounds = floor_graph.get_bounds(pointed_room);
let rect = rectangle::rectangle_by_corners(bounds.mins().x, bounds.mins().y, bounds.maxs().x, bounds.maxs().y);
rectangle(POINTED_ROOM_COLOR, rect, c.transform, gl);
}
}
if let Some(nav) = nav_opt
|
// DEBUG: cursor
{
let [cx, cy] = cursor;
let vertical = rectangle::centered([cx, cy, 1.0, 4.0]);
let horizontal = rectangle::centered([cx, cy, 4.0, 1.0]);
rectangle(CURSOR_COLOR, vertical, c.transform, gl);
rectangle(CURSOR_COLOR, horizontal, c.transform, gl);
}
{
let [x, y] = player_pos;
let player = circle(*x, *y, 3.0);
|
{
let start = Some(player_pos.clone());
let lines = start.iter().chain(nav.waypoints().iter().skip(nav.progress)).sliding();
for (from, to) in lines {
line_from_to(PATH_COLOR, 1.0, *from, *to, c.transform, gl);
}
}
|
conditional_block
|
old_main.rs
|
e.update(|args| {
app.update(args.dt);
});
// handle keyboard/button presses
e.press(|button| {
if let Button::Keyboard(key) = button {
if key == Key::Space {
app.generate_requested = true;
}
println!("Typed key: {:?}", key);
}
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = true;
app.nav_requested = true;
}
});
e.release(|button| {
if let Button::Mouse(MouseButton::Left) = button {
app.mouse_pressed = false;
}
});
e.mouse_cursor(|pos| {
if app.set_cursor(pos) {
if app.mouse_pressed {
app.nav_requested = true;
}
}
});
e.mouse_relative(|change| {
// TODO: only do this if the cursor is "captured"
// if app.update_pointer(None, Some(&change)) { app.route_requested = true; }
});
}
}
struct NavController {
current: Option<Nav>,
last_goal: Option<Point>
}
impl NavController {
fn new() -> Self {
NavController {
current: None,
last_goal: None,
}
}
fn forget(&mut self) {
self.current = None;
self.last_goal = None;
}
fn update_nav(&mut self, goal: Point, player_pos: &Point, graph: &DungeonFloorGraph) {
let should_update = match self.last_goal {
Some(g) => !point_eq(&goal, &g),
None => true,
};
if should_update {
self.current = graph.find_route(player_pos, &goal).map(|route| Nav::new(route));
self.last_goal = Some(goal);
}
}
}
struct Nav {
waypoints: Vec<Point>,
progress: usize,
}
impl Nav {
fn new(waypoints: Vec<Point>) -> Self {
Nav { waypoints, progress: 0, }
}
fn waypoints(&self) -> &Vec<Point> {
&self.waypoints
}
fn progress(&self) -> usize {
self.progress
}
fn current_target(&self) -> Option<&Point> {
self.waypoints.get(self.progress)
}
fn is_complete(&self) -> bool {
self.progress >= self.waypoints.len()
}
/// Modify `pos` by moving it `step` units towards the next waypoint, or no-op if navigation is complete.
/// Returns `true` to indicate navigation is complete, or `false` to indicate there is further movement to do.
fn advance_by(&mut self, step: f64, pos: &mut Point) -> bool {
if let Some(&target) = self.current_target() {
let to_target = vec2_sub(target, *pos);
let dist = vec2_len(to_target);
if dist < step {
// `pos` has reached the current target, so we can update the `progress`,
// then recurse to spend the remaining `step` to progress to the next waypoint
*pos = target;
self.progress += 1;
self.advance_by(step - dist, pos)
} else {
// move as far as the player can in the direction of the target; this ends the recursion for this update
let movement = vec2_scale(to_target, step / dist);
pos[0] += movement[0];
pos[1] += movement[1];
// Navigation is not yet complete
false
}
} else {
// Navigation is complete
true
}
}
}
struct App {
gl: GlGraphics,
window: GlutinWindow,
world: MyGameWorld,
pcc: PlayerCameraCursor,
mouse_pressed: bool,
generate_requested: bool,
pointed_room: PointedRoom,
nav_requested: bool,
nav: NavController,
}
impl App {
fn new(opengl: OpenGL, window: GlutinWindow) -> Self {
let screen_size = window.size().into();
App {
gl: GlGraphics::new(opengl),
window,
world: MyGameWorld::new(),
pcc: PlayerCameraCursor::new(screen_size),
mouse_pressed: false,
generate_requested: true,
pointed_room: PointedRoom::new(),
nav_requested: false,
nav: NavController::new(),
}
}
fn update(&mut self, dt: f64) {
// if the world needs to regenerate, do it now
if self.generate_requested {
let Size { width, height } = self.window.size();
self.regenerate(width as i32, height as i32);
}
// update the navigation target as long as the mouse is down
if self.mouse_pressed {
if let Some(graph) = &self.world.floor_graph {
self.nav.update_nav(self.pcc.cursor_pos, &self.pcc.player_pos, graph);
}
}
// move the player along the current navigation path
if let Some(nav) = &mut self.nav.current {
self.pcc.modify(|PccState { player_pos, .. }| {
nav.advance_by(200.0 * dt, player_pos);
});
}
// update the player camera/cursor if it was modified since the last update
self.pcc.update();
// re-check the 'pointed room' if the mouse cursor's world position has changed
if let Some(graph) = &self.world.floor_graph {
self.pointed_room.update(self.pcc.cursor_pos, graph);
}
}
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let world = &self.world;
let pcc = &self.pcc;
let player_pos = &pcc.player_pos;
let cursor = pcc.cursor_pos;
let pointed_room = &self.pointed_room;
let nav_opt = &self.nav.current;
self.gl.draw(args.viewport(), |_c, gl| {
let c = _c.append_transform(pcc.camera);
clear(BACKGROUND_COLOR, gl);
// PRETTY room tiles + walls + doors
if let Some(dungeon) = world.dungeon() {
let tiles = dungeon.tiles();
let tile_size = world.tile_pixel_size() as f64;
// fill in a square for each room tile in the grid
for addr in tiles.tile_addresses() {
if let Some((_room_id, room_weight)) = tiles[addr] {
let color = {
if room_weight >= 1.0 && room_weight <= 2.0 {
lerp_color(&DEBUG_ROOM_LOW, &DEBUG_ROOM_HIGH, room_weight - 1.0)
} else if room_weight >= 1.0 {
WHITE
} else {
lerp_color(&WEIGHT_ROOM_LOW, &WEIGHT_ROOM_HIGH, room_weight)
}
};
let x = addr.x as f64 * tile_size;
let y = addr.y as f64 * tile_size;
let rect = [x, y, tile_size, tile_size];
rectangle(color, rect, c.transform, gl);
}
}
// draw an appropriate line(s) for each wall in the dungeon
for (wall_addr, wall_type) in dungeon.walls().iter() {
match *wall_type {
WallType::Clear => (),
WallType::Wall => {
let TileAddress { x, y } = wall_addr.tile();
let (base_to, base_from) = match wall_addr.direction() {
CompassDirection::North => ((0, 1), (1, 1)),
CompassDirection::East => ((1, 1), (1, 0)),
CompassDirection::South => ((0, 0), (1, 0)),
CompassDirection::West => ((0, 0), (0, 1)),
};
let to_px = |(dx, dy)| {
[(dx + x) as f64 * tile_size, (dy + y) as f64 * tile_size]
};
line_from_to(DEBUG_WALL_COLOR, 0.5, to_px(base_from), to_px(base_to), c.transform, gl);
}
WallType::Door => {
let TileAddress { x, y } = wall_addr.tile();
match wall_addr.direction() {
CompassDirection::North => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64 + 1.0),
CompassDirection::East => draw_vertical_door(&c, gl, tile_size, (x + 1) as f64, y as f64),
CompassDirection::South => draw_horizontal_door(&c, gl, tile_size, x as f64, y as f64),
CompassDirection::West => draw_vertical_door(&c, gl, tile_size, x as f64, y as f64),
}
}
}
}
}
// NAVIGATION-related debug
if let Some(floor_graph) = &world.floor_graph {
// DEBUG: walkable areas
for node in floor_graph.nodes().iter() {
let bounds = &floor_graph.get_bounds(*node.id());
let color = match node {
FloorNode::Room { .. } => WALKABLE_ROOM_COLOR,
FloorNode::Door { .. } => WALKABLE_DOOR_COLOR,
};
let rect = rectangle::rectangle_by_corners(bounds.mins().x,
|
app.render(args);
});
|
random_line_split
|
|
environ_config.py
|
"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
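# Illustrative usage sketch, not part of the original module: `tracking_mode_context`
# flips the project config's tracking flag for the duration of the `with` block and
# restores the previous value afterwards, even if the body raises.
def _tracking_mode_context_usage_sketch():
    # Assuming databand is not disabled, tracking is reported as on inside the block.
    with tracking_mode_context(tracking=True):
        inside = get_dbnd_project_config().is_tracking_mode()
    # Outside the block the previous _dbnd_tracking value is back in place.
    outside = get_dbnd_project_config().is_tracking_mode()
    return inside, outside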
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
r
|
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
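# Illustrative sketch, not from the original sources: a few of the environment knobs
# consumed by DbndProjectConfig above, using the variable names defined at the top of
# this module (the example values and the "true"/"false" parsing are assumptions about
# environ_enabled / environ_int).
#
#   export DBND_MAX_CALL_PER_FUNC=50             # DbndProjectConfig.max_calls_per_run
#   export DBND__AUTO_TRACKING=false             # DbndProjectConfig.airflow_auto_tracking
#   export DBND__ENABLE__SPARK_CONTEXT_ENV=true  # spark_tracking_enabled()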
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check whether we can find the project marker file by importing it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
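# Illustrative sketch, not from the original sources: `_databand_project` is expected to
# be an importable marker module at the project root; only its location matters here,
# since the code above uses nothing but its __file__.
#
#   # <project root>/_databand_project.py
#   # intentionally left (almost) empty - it only marks the dbnd project root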
def _process_cfg(folder):
# dbnd home can be pointed to from the [databand] section of 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
continue
config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
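# Illustrative sketch, not from the original sources: the kind of [databand] section that
# _process_cfg scans for in tox.ini / setup.cfg. The keys are the ones handled above;
# the paths are example values, resolved relative to the folder holding the config file.
#
#   [databand]
#   dbnd_home = .
#   dbnd_system = .dbnd
#   dbnd_config = databand-core.cfg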
def _has_marker_file(folder):
# dbnd home is the folder where the 'marker' files are located
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path):
return folder, file_path
return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file =
|
eturn os.environ.get(ENV_DBND_HOME) or os.curdir
|
identifier_body
|
environ_config.py
|
"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
return os.environ.get(ENV_DBND_HOME) or os.curdir
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check whether we can find the project marker file by importing it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
def _process_cfg(folder):
# dbnd home can be pointed to from the [databand] section of 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
continue
config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
def _
|
folder):
# dbnd home is the folder where the 'marker' files are located
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path):
return folder, file_path
return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file = _
|
has_marker_file(
|
identifier_name
|
environ_config.py
|
"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
return os.environ.get(ENV_DBND_HOME) or os.curdir
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check whether we can find the project marker file by importing it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
def _process_cfg(folder):
# dbnd home can be pointed to from the [databand] section of 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
continue
config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
def _has_marker_file(folder):
# dbnd home is the folder where the 'marker' files are located
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path):
|
return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file = _process
|
return folder, file_path
|
random_line_split
|
environ_config.py
|
"
DBND_RUN_UID = "DBND_RUN_UID"
DBND_RESUBMIT_RUN = "DBND_RESUBMIT_RUN"
DBND_TASK_RUN_ATTEMPT_UID = "DBND_TASK_RUN_ATTEMPT_UID"
DBND_TRACE_ID = "DBND_TRACE_ID"
DBND_MAX_CALLS_PER_RUN = "DBND_MAX_CALL_PER_FUNC"
ENV_DBND_DISABLE_SCHEDULED_DAGS_LOAD = "DBND_DISABLE_SCHEDULED_DAGS_LOAD"
ENV_DBND__ENV_MACHINE = "DBND__ENV_MACHINE"
ENV_DBND__ENV_IMAGE = "DBND__ENV_IMAGE"
ENV_DBND__CORE__PLUGINS = "DBND__CORE__PLUGINS"
ENV_SHELL_COMPLETION = "_DBND_COMPLETE"
ENV_DBND_FIX_PYSPARK_IMPORTS = "DBND__FIX_PYSPARK_IMPORTS"
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING = "DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING"
ENV_DBND__ENABLE__SPARK_CONTEXT_ENV = "DBND__ENABLE__SPARK_CONTEXT_ENV"
ENV_DBND__AUTO_TRACKING = "DBND__AUTO_TRACKING"
DEFAULT_MAX_CALLS_PER_RUN = 100
ENV_DBND_TRACKING_ATTEMPT_UID = "DBND__TRACKING_ATTEMPT_UID"
ENV_DBND_SCRIPT_NAME = "DBND__SCRIPT_NAME"
_databand_package = relative_path(__file__, "..", "..")
def is_databand_enabled():
return not get_dbnd_project_config().disabled
def disable_databand():
get_dbnd_project_config().disabled = True
def set_dbnd_unit_test_mode():
set_on(ENV_DBND__UNITTEST_MODE) # bypass to subprocess
get_dbnd_project_config().unit_test_mode = True
def get_max_calls_per_func():
return get_dbnd_project_config().max_calls_per_run
# User setup configs
def get_dbnd_environ_config_file():
return os.environ.get(ENV_DBND_CONFIG, None)
def get_user_preinit():
return os.environ.get(ENV_DBND__USER_PRE_INIT, None)
def in_quiet_mode():
"""
quiet mode was made for the scheduler to silence the launcher runners.
Don't want this flag to propagate into the actual scheduled cmd
"""
return get_dbnd_project_config().quiet_mode
def in_tracking_mode():
return get_dbnd_project_config().is_tracking_mode()
def in_airflow_tracking_mode():
return get_dbnd_project_config().is_in_airflow_tracking_mode()
def is_unit_test_mode():
return get_dbnd_project_config().unit_test_mode
def spark_tracking_enabled():
return environ_enabled(ENV_DBND__ENABLE__SPARK_CONTEXT_ENV)
def should_fix_pyspark_imports():
return environ_enabled(ENV_DBND_FIX_PYSPARK_IMPORTS)
_project_config = None # type: Optional[DbndProjectConfig]
def get_dbnd_project_config():
global _project_config
if not _project_config:
# initialize dbnd home first
_project_config = DbndProjectConfig()
_initialize_dbnd_home()
return _project_config
def get_dbnd_custom_config():
try:
import dbnd_custom_config
return dbnd_custom_config.get_config_file_path()
except Exception:
return ""
def reset_dbnd_project_config():
global _project_config
_project_config = None
@contextmanager
def tracking_mode_context(tracking=None):
"""
change the tracking mode for the scope of the `with`
"""
is_current_tracking = get_dbnd_project_config()._dbnd_tracking
get_dbnd_project_config()._dbnd_tracking = tracking
try:
yield
finally:
get_dbnd_project_config()._dbnd_tracking = is_current_tracking
def try_get_script_name():
# type: () -> Optional[str]
return os.environ.get(ENV_DBND_SCRIPT_NAME)
class DbndProjectConfig(object):
"""
very basic environment config!
"""
def __init__(self):
# IF FALSE - we will not modify decorated @task code
self._disabled = environ_enabled(ENV_DBND__DISABLED, False)
self.unit_test_mode = environ_enabled(ENV_DBND__UNITTEST_MODE)
self.max_calls_per_run = environ_int(
DBND_MAX_CALLS_PER_RUN, DEFAULT_MAX_CALLS_PER_RUN
)
self.shell_cmd_complete_mode = ENV_SHELL_COMPLETION in os.environ
self.quiet_mode = (
os.environ.pop(ENV_DBND_QUIET, None) is not None
or self.shell_cmd_complete_mode
)
# external process can create "wrapper run" (airflow scheduler)
# a run with partial information,
# when we have a subprocess, only nested run will have all actual details
# so we are going to "resubmit" them
self.resubmit_run = (
DBND_RESUBMIT_RUN in os.environ
and os.environ.pop(DBND_RESUBMIT_RUN) == "true"
)
self.is_no_modules = environ_enabled(ENV_DBND__NO_MODULES)
self.is_no_plugins = environ_enabled(ENV_DBND__NO_PLUGINS)
self.disable_pluggy_entrypoint_loading = environ_enabled(
ENV_DBND__DISABLE_PLUGGY_ENTRYPOINT_LOADING
)
self.is_sigquit_handler_on = environ_enabled(ENV_DBND__SHOW_STACK_ON_SIGQUIT)
self._dbnd_tracking = environ_enabled(ENV_DBND__TRACKING, default=None)
self._airflow_context = False
self._inline_tracking = None
self.disable_inline = False
self.airflow_auto_tracking = environ_enabled(
ENV_DBND__AUTO_TRACKING, default=True
)
self._is_airflow_runtime = None
@property
def disabled(self):
return self._disabled
@disabled.setter
def disabled(self, value):
set_on(ENV_DBND__DISABLED)
self._disabled = value
def airflow_context(self):
if not self._airflow_context:
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
try_get_airflow_context,
)
self._airflow_context = try_get_airflow_context()
return self._airflow_context
def is_tracking_mode(self):
if self.disabled:
return False
if self._dbnd_tracking is None:
return self.is_in_airflow_tracking_mode()
return self._dbnd_tracking
def is_in_airflow_tracking_mode(self):
if self._is_airflow_runtime is None:
self._is_airflow_runtime = bool(self.airflow_context())
return self._is_airflow_runtime
def is_verbose(self):
return dbnd_log.is_verbose()
def dbnd_home(self):
return os.environ.get(ENV_DBND_HOME) or os.curdir
def dbnd_lib_path(self, *path):
return abs_join(_databand_package, *path)
def dbnd_config_path(self, *path):
return self.dbnd_lib_path("conf", *path)
def dbnd_system_path(self, *path):
dbnd_system = os.environ.get(ENV_DBND_SYSTEM) or self.dbnd_home()
return abs_join(dbnd_system, *path)
def dbnd_project_path(self, *path):
return abs_join(self.dbnd_home(), *path)
def validate_init(self):
dbnd_log_init_msg("Successfully created dbnd project config")
def set_is_airflow_runtime(self):
self._is_airflow_runtime = True
class DatabandHomeError(Exception):
pass
def _find_project_by_import():
"""
check whether we can find the project marker file by importing it
"""
try:
import _databand_project
return abs_join(_databand_project.__file__, "..")
except ImportError:
dbnd_log_init_msg("Can't import `_databand_project` marker.")
return None
def _process_cfg(folder):
# dbnd home can be pointed to from the [databand] section of 'config' files
found_dbnd_home = False
config_file = None
config_files = ["tox.ini", "setup.cfg"]
for file in config_files:
config_path = os.path.join(folder, file)
try:
parser = ConfigParser()
parser.read(config_path)
config_root, config_name = os.path.split(config_path)
source = os.path.basename(config_path)
if not parser.has_section("databand"):
continue
for config_key in ["dbnd_home", "dbnd_system", "dbnd_config"]:
# TODO: hidden magic, do we need these setters?
if not parser.has_option("databand", config_key):
c
|
config_value = parser.get("databand", config_key)
config_value = os.path.abspath(os.path.join(config_root, config_value))
set_env_dir(config_key, config_value)
dbnd_log_init_msg("%s: %s=%s" % (source, config_key, config_value))
except Exception as ex:
print("Failed to process %s: %s" % (config_path, ex))
return found_dbnd_home, config_file
def _has_marker_file(folder):
# dbnd home is the folder where the 'marker' files are located
for file in _MARKER_FILES:
file_path = os.path.join(folder, file)
if os.path.exists(file_path):
return folder, file_path
return False, None
def __find_dbnd_home_at(folder):
dbnd_home, config_file =
|
ontinue
|
conditional_block
|
aarch64.rs
|
,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
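// Illustrative sketch, not part of the original file: the typical lifecycle of a KvmVcpu
// on aarch64, mirroring how the tests below drive it. `vm`, `guest_mem` and
// `kernel_load_addr` are assumed to come from the usual Vm / GuestMemoryMmap setup.
//
//   let mut vcpu = KvmVcpu::new(0, &vm)?;           // create the vcpu fd
//   vcpu.init(vm.fd())?;                            // KVM_ARM_VCPU_INIT with PSCI 0.2
//   vcpu.configure(&guest_mem, kernel_load_addr)?;  // boot registers + MPIDR
//   let state = vcpu.save_state()?;                 // snapshot registers and mp_state
//   vcpu.restore_state(&state)?;                    // restore from the snapshot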
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will pass this along for saving/restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm, vcpu, _) = setup_vcpu(0x10000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = vcpu.init(vm.fd());
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error retrieving the vcpu preferred target: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_vcpu_save_restore_state() {
let (mut vm, _vm_mem) = setup_vm(0x1000);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vm.setup_irqchip(1).unwrap();
// Calling KVM_GET_REGLIST before KVM_VCPU_INIT will result in error.
let res = vcpu.save_state();
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
"Failed to save the state of the vcpu: Failed to get X0 register: Exec format error \
(os error 8)"
.to_string()
);
// Try to restore the register using a faulty state.
let faulty_vcpu_state = VcpuState {
regs: vec![Aarch64Register { id: 0, value: 0 }],
..Default::default()
};
let res = vcpu.restore_state(&faulty_vcpu_state);
assert!(res.is_err());
assert_eq!(
res.err().unwrap().to_string(),
|
"Failed to restore the state of the vcpu: Failed to set register: Exec format error \
|
random_line_split
|
|
aarch64.rs
|
Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn
|
(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError> {
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will pass this along for saving/restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (vm
|
new
|
identifier_name
|
aarch64.rs
|
Arm.
GetPreferredTarget(kvm_ioctls::Error),
/// Error doing Vcpu Init on Arm.
Init(kvm_ioctls::Error),
/// Failed to set value for some arm specific register.
RestoreState(arch::aarch64::regs::Error),
/// Failed to fetch value for some arm specific register.
SaveState(arch::aarch64::regs::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
use self::Error::*;
match self {
ConfigureRegisters(err) => {
write!(
f,
"Error configuring the general purpose registers: {}",
err
)
}
CreateFd(err) => write!(f, "Error in opening the VCPU file descriptor: {}", err),
GetPreferredTarget(err) => {
write!(f, "Error retrieving the vcpu preferred target: {}", err)
}
Init(err) => write!(f, "Error initializing the vcpu: {}", err),
RestoreState(err) => write!(f, "Failed to restore the state of the vcpu: {}", err),
SaveState(err) => write!(f, "Failed to save the state of the vcpu: {}", err),
}
}
}
type Result<T> = result::Result<T, Error>;
/// A wrapper around creating and using a kvm aarch64 vcpu.
pub struct KvmVcpu {
pub index: u8,
pub fd: VcpuFd,
pub mmio_bus: Option<devices::Bus>,
mpidr: u64,
}
pub type KvmVcpuConfigureError = Error;
impl KvmVcpu {
/// Constructs a new kvm vcpu with arch specific functionality.
///
/// # Arguments
///
/// * `index` - Represents the 0-based CPU index between [0, max vcpus).
/// * `vm` - The vm to which this vcpu will get attached.
pub fn new(index: u8, vm: &Vm) -> Result<Self> {
let kvm_vcpu = vm.fd().create_vcpu(index.into()).map_err(Error::CreateFd)?;
Ok(KvmVcpu {
index,
fd: kvm_vcpu,
mmio_bus: None,
mpidr: 0,
})
}
/// Gets the MPIDR register value.
pub fn get_mpidr(&self) -> u64 {
self.mpidr
}
/// Configures an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory used by this microvm.
/// * `kernel_load_addr` - Offset from `guest_mem` at which the kernel is loaded.
pub fn configure(
&mut self,
guest_mem: &GuestMemoryMmap,
kernel_load_addr: GuestAddress,
) -> std::result::Result<(), KvmVcpuConfigureError>
|
/// Initializes an aarch64 specific vcpu for booting Linux.
///
/// # Arguments
///
/// * `vm_fd` - The kvm `VmFd` for this microvm.
pub fn init(&self, vm_fd: &VmFd) -> Result<()> {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
// This reads back the kernel's preferred target type.
vm_fd
.get_preferred_target(&mut kvi)
.map_err(Error::GetPreferredTarget)?;
// We already checked that the capability is supported.
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
// Non-boot cpus are powered off initially.
if self.index > 0 {
kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
}
self.fd.vcpu_init(&kvi).map_err(Error::Init)
}
/// Save the KVM internal state.
pub fn save_state(&self) -> Result<VcpuState> {
let mut state = VcpuState {
mp_state: arch::regs::get_mpstate(&self.fd).map_err(Error::SaveState)?,
..Default::default()
};
arch::regs::save_core_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
arch::regs::save_system_registers(&self.fd, &mut state.regs).map_err(Error::SaveState)?;
state.mpidr = arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::SaveState)?;
Ok(state)
}
/// Use provided state to populate KVM internal state.
pub fn restore_state(&self, state: &VcpuState) -> Result<()> {
arch::regs::restore_registers(&self.fd, &state.regs).map_err(Error::RestoreState)?;
arch::regs::set_mpstate(&self.fd, state.mp_state).map_err(Error::RestoreState)?;
Ok(())
}
/// Runs the vCPU in KVM context and handles the kvm exit reason.
///
/// Returns error or enum specifying whether emulation was handled or interrupted.
pub fn run_arch_emulation(&self, exit: VcpuExit) -> super::Result<VcpuEmulation> {
METRICS.vcpu.failures.inc();
// TODO: Are we sure we want to finish running a vcpu upon
// receiving a vm exit that is not necessarily an error?
error!("Unexpected exit reason on vcpu run: {:?}", exit);
Err(super::Error::UnhandledKvmExit(format!("{:?}", exit)))
}
}
/// Structure holding VCPU kvm state.
#[derive(Clone, Default, Versionize)]
pub struct VcpuState {
pub mp_state: kvm_bindings::kvm_mp_state,
pub regs: Vec<Aarch64Register>,
// We will be using the mpidr for passing it to the VmState.
// The VmState will pass this along for saving/restoring the icc and redistributor
// registers.
pub mpidr: u64,
}
#[cfg(test)]
mod tests {
#![allow(clippy::undocumented_unsafe_blocks)]
use std::os::unix::io::AsRawFd;
use vm_memory::GuestMemoryMmap;
use super::*;
use crate::vstate::vm::tests::setup_vm;
use crate::vstate::vm::Vm;
fn setup_vcpu(mem_size: usize) -> (Vm, KvmVcpu, GuestMemoryMmap) {
let (mut vm, vm_mem) = setup_vm(mem_size);
let vcpu = KvmVcpu::new(0, &vm).unwrap();
vcpu.init(vm.fd()).unwrap();
vm.setup_irqchip(1).unwrap();
(vm, vcpu, vm_mem)
}
fn init_vcpu(vcpu: &VcpuFd, vm: &VmFd) {
let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
vm.get_preferred_target(&mut kvi).unwrap();
vcpu.vcpu_init(&kvi).unwrap();
}
#[test]
fn test_create_vcpu() {
let (vm, _) = setup_vm(0x1000);
unsafe { libc::close(vm.fd().as_raw_fd()) };
let err = KvmVcpu::new(0, &vm);
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error in opening the VCPU file descriptor: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_configure_vcpu() {
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
assert!(vcpu
.configure(&vm_mem, GuestAddress(arch::get_kernel_start()),)
.is_ok());
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
let (_vm, mut vcpu, vm_mem) = setup_vcpu(0x10000);
unsafe { libc::close(vcpu.fd.as_raw_fd()) };
let err = vcpu.configure(&vm_mem, GuestAddress(arch::get_kernel_start()));
assert!(err.is_err());
assert_eq!(
err.err().unwrap().to_string(),
"Error configuring the general purpose registers: Failed to set processor state \
register: Bad file descriptor (os error 9)"
.to_string()
);
}
#[test]
fn test_faulty_init_vcpu() {
let (
|
{
arch::aarch64::regs::setup_boot_regs(
&self.fd,
self.index,
kernel_load_addr.raw_value(),
guest_mem,
)
.map_err(Error::ConfigureRegisters)?;
self.mpidr =
arch::aarch64::regs::read_mpidr(&self.fd).map_err(Error::ConfigureRegisters)?;
Ok(())
}
|
identifier_body
|
ic7406.rs
|
/// | H | **L** |
///
/// The chip comes in a 14-pin dual in-line package with the following pin assignments.
/// ```txt
/// +---+--+---+
/// A1 |1 +--+ 14| Vcc
/// Y1 |2 13| A6
/// A2 |3 12| Y6
/// Y2 |4 7406 11| A5
/// A3 |5 10| Y5
/// Y3 |6 9| A4
/// GND |7 8| Y4
/// +----------+
/// ```
/// GND and Vcc are ground and power supply pins respectively, and they are not emulated.
///
/// In the Commodore 64, U8 is a 7406. It's responsible for inverting logic signals that
/// other chips expect in inverted form, such as the 6567's AEC signal being turned into
/// the inverse AEC signal for the 82S100.
pub struct Ic7406 {
/// The pins of the 7406, along with a dummy pin (at index 0) to ensure that the vector
/// index of the others matches the 1-based pin assignments.
pins: RefVec<Pin>,
}
impl Ic7406 {
/// Creates a new 7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it.
pub fn new() -> DeviceRef {
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = pin!(A1, "A1", Input);
let a2 = pin!(A2, "A2", Input);
let a3 = pin!(A3, "A3", Input);
let a4 = pin!(A4, "A4", Input);
let a5 = pin!(A5, "A5", Input);
let a6 = pin!(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = pin!(Y1, "Y1", Output);
let y2 = pin!(Y2, "Y2", Output);
let y3 = pin!(Y3, "Y3", Output);
let y4 = pin!(Y4, "Y4", Output);
let y5 = pin!(Y5, "Y5", Output);
let y6 = pin!(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = pin!(GND, "GND", Unconnected);
let vcc = pin!(VCC, "VCC", Unconnected);
let device: DeviceRef = new_ref!(Ic7406 {
pins: pins![a1, a2, a3, a4, a5, a6, y1, y2, y3, y4, y5, y6, vcc, gnd],
});
// All outputs begin high since all of the inputs begin non-high.
set!(y1, y2, y3, y4, y5, y6);
attach_to!(device, a1, a2, a3, a4, a5, a6);
device
}
/// Creates a new Ic7406 hex inverter emulation and returns a shared, internally mutable
/// reference to it. This is identical to `new` except that this one is coded without
/// the benefit of crate-defined macros or type aliases (the vec! macro is still used,
/// but that's standard library). It's here in this struct only for demonstration
/// purposes.
pub fn new_no_macro() -> Rc<RefCell<dyn Device>> {
// Dummy pin, used as a spacer to put the index of the first real pin at 1.
let dummy = Pin::new(0, DUMMY, Unconnected);
// Input pins. In the TI data sheet, these are named "1A", "2A", etc., and the C64
// schematic does not suggest names for them. Since these names are not legal
// variable names, we've switched the letter and number.
let a1 = Pin::new(A1, "A1", Input);
let a2 = Pin::new(A2, "A2", Input);
let a3 = Pin::new(A3, "A3", Input);
let a4 = Pin::new(A4, "A4", Input);
let a5 = Pin::new(A5, "A5", Input);
let a6 = Pin::new(A6, "A6", Input);
// Output pins. Similarly, the TI data sheet refers to these as "1Y", "2Y", etc.
let y1 = Pin::new(Y1, "Y1", Output);
let y2 = Pin::new(Y2, "Y2", Output);
let y3 = Pin::new(Y3, "Y3", Output);
let y4 = Pin::new(Y4, "Y4", Output);
let y5 = Pin::new(Y5, "Y5", Output);
let y6 = Pin::new(Y6, "Y6", Output);
// Power supply and ground pins, not emulated
let gnd = Pin::new(GND, "GND", Unconnected);
let vcc = Pin::new(VCC, "VCC", Unconnected);
let device: Rc<RefCell<dyn Device>> = Rc::new(RefCell::new(Ic7406 {
pins: RefVec::with_vec(vec![
Rc::clone(&dummy),
Rc::clone(&a1),
Rc::clone(&y1),
Rc::clone(&a2),
Rc::clone(&y2),
Rc::clone(&a3),
Rc::clone(&y3),
Rc::clone(&gnd),
Rc::clone(&y4),
Rc::clone(&a4),
Rc::clone(&y5),
Rc::clone(&a5),
Rc::clone(&y6),
Rc::clone(&a6),
Rc::clone(&vcc),
]),
}));
// All outputs begin high since all of the inputs begin non-high.
y1.borrow_mut().set();
y2.borrow_mut().set();
y3.borrow_mut().set();
y4.borrow_mut().set();
y5.borrow_mut().set();
y6.borrow_mut().set();
a1.borrow_mut().attach(Rc::clone(&device));
a2.borrow_mut().attach(Rc::clone(&device));
a3.borrow_mut().attach(Rc::clone(&device));
a4.borrow_mut().attach(Rc::clone(&device));
a5.borrow_mut().attach(Rc::clone(&device));
a6.borrow_mut().attach(Rc::clone(&device));
device
}
}
/// Maps each input pin assignment to its corresponding output pin assignment.
fn output_for(input: usize) -> usize {
match input {
A1 => Y1,
A2 => Y2,
A3 => Y3,
A4 => Y4,
A5 => Y5,
A6 => Y6,
_ => 0,
}
}
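// Illustrative note, not part of the original file: `update` below uses `output_for` to
// dispatch a level change on an input pin to its paired output. Driving A3 high clears Y3
// (the inverter's output goes low); driving A3 low again sets Y3 high. The test module at
// the bottom of the file exercises exactly this behaviour through traces.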
impl Device for Ic7406 {
fn pins(&self) -> RefVec<Pin> {
self.pins.clone()
}
fn registers(&self) -> Vec<u8> {
Vec::new()
}
fn update(&mut self, event: &LevelChange) {
match event {
LevelChange(pin) if INPUTS.contains(&number!(pin)) => {
let o = output_for(number!(pin));
if high!(pin) {
clear!(self.pins[o]);
} else {
set!(self.pins[o]);
}
}
_ => {}
}
}
}
#[cfg(test)]
mod test {
use crate::{components::trace::Trace, test_utils::make_traces};
use super::*;
fn before_each() -> (DeviceRef, RefVec<Trace>) {
let chip = Ic7406::new();
let tr = make_traces(&chip);
(chip, tr)
}
#[test]
fn input_high() {
let (_, tr) = before_each();
set!(tr[A1]);
assert!(low!(tr[Y1]), "Y1 should be low when A1 is high");
set!(tr[A2]);
assert!(low!(tr[Y2]), "Y2 should be low when A2 is high");
set!(tr[A3]);
assert!(low!(tr[Y3]), "Y3 should be low when A3 is high");
set!(tr[A4]);
assert!(low!(tr[Y4]), "Y4 should be low when A4 is high");
set!(tr[A5]);
assert!(low!(tr[Y5]), "Y5 should be low when A5 is high");
set!(tr[A6]);
assert!(low!(tr[Y6]), "Y6 should be low when A6 is high");
}
}
render.go
func (r *renderer) renderToString(tree *ast.Tree) (string, error) {
subRenderer := &renderer{
template: r.template,
stack: r.stack,
depth: 0,
w: strings.Builder{},
indent: "",
indentNext: false,
}
err := subRenderer.walk(tree.Name, tree)
s := subRenderer.String()
// the subRenderer may have pushed and popped enough contexts onto the stack
// to cause the slice to allocate to a new larger underlying array. If this
// has happened, we want to keep the pointer to that larger array to minimize
// allocations.
r.stack = subRenderer.stack
return s, err
}
// write a string to the template output.
func (r *renderer) write(s string, unescaped bool) {
if r.indentNext {
r.indentNext = false
r.w.WriteString(r.indent)
}
if !unescaped {
s = html.EscapeString(s)
}
r.w.WriteString(s)
}
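// Illustrative note (not part of the original source): write escapes HTML by
// default, so write("<b>", false) emits "&lt;b&gt;" while write("<b>", true)
// emits the raw "<b>".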
// conceptually shifts a context onto the stack. Since the stack is actually in
// reverse order, the context is pushed.
func (r *renderer) push(context reflect.Value) {
r.stack = append(r.stack, context)
}
// conceptually unshifts a context from the stack. Since the stack is actually in
// reverse order, the context is popped.
func (r *renderer) pop() reflect.Value {
if len(r.stack) == 0 {
return reflect.Value{}
}
ctx := r.stack[len(r.stack)-1]
r.stack = r.stack[:len(r.stack)-1]
return ctx
}
// walk recursively walks each node of the tree, incrementally building the template
// string output.
func (r *renderer) walk(treeName string, node interface{}) error {
switch t := node.(type) {
case *ast.Tree:
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
case *ast.Text:
r.write(t.Text, true)
if t.EndOfLine {
r.indentNext = true
}
case *ast.Variable:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
s, err := r.toString(v, parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return err
}
r.write(s, t.Unescaped)
case *ast.Section:
v, err := r.lookup(treeName, t.Line, t.Column, t.Key)
if err != nil {
return err
}
v, err = r.toTruthyValue(v)
if err != nil {
return err
}
isTruthy := v.IsValid()
if !t.Inverted && isTruthy {
switch v.Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < v.Len(); i++ {
r.push(v.Index(i))
for j := range t.Nodes {
err := r.walk(treeName, t.Nodes[j])
if err != nil {
return err
}
}
r.pop()
}
case reflect.Func:
s := v.Call([]reflect.Value{reflect.ValueOf(t.Text)})[0].String()
tree, err := parse.Parse("lambda", s, t.LDelim, t.RDelim)
if err != nil {
return nil
}
err = r.walk(treeName, tree)
if err != nil {
return err
}
default:
r.push(v)
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
r.pop()
}
} else if t.Inverted && !isTruthy {
for i := range t.Nodes {
err := r.walk(treeName, t.Nodes[i])
if err != nil {
return err
}
}
}
case *ast.Partial:
tree, ok := r.template.treeMap[t.Key]
if !ok {
if r.template.ContextErrorsEnabled {
return fmt.Errorf("%s:%d:%d: partial not found: %s", treeName, t.Line, t.Column, t.Key)
}
return nil
}
origIndent := r.indent
r.indent += t.Indent
r.indentNext = true
r.depth++
if r.depth >= maxPartialDepth {
return fmt.Errorf("exceeded maximum partial depth: %d", maxPartialDepth)
}
err := r.walk(tree.Name, tree)
if err != nil {
return err
}
r.depth--
r.indent = origIndent
}
return nil
}
// toString transforms a reflect.Value into a string.
func (r *renderer) toString(v reflect.Value, ldelim, rdelim string) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%v", v.Complex()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Float32, reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Func:
if v.IsNil() {
return "", nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if !isArity0 {
return "", nil
}
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toString(v, ldelim, rdelim)
}
tree, err := parse.Parse("lambda", v.String(), ldelim, rdelim)
if err != nil {
return "", err
}
s, err := r.renderToString(tree)
if err != nil {
return "", err
}
return s, nil
case reflect.Ptr, reflect.Interface:
return r.toString(indirect(v), ldelim, rdelim)
case reflect.Chan:
return "", nil
case reflect.Invalid:
return "", nil
default:
return fmt.Sprintf("%v", v.Interface()), nil
}
}
// toTruthyValue returns a value when it is "truthy". If the value is
// falsey, the reflect zero value is returned.
func (r *renderer) toTruthyValue(v reflect.Value) (reflect.Value, error) {
switch v.Kind() {
case reflect.Bool:
if !v.Bool() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Uint() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Float32, reflect.Float64:
if math.Float64bits(v.Float()) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
if math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.String:
if v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Array, reflect.Slice:
if v.IsNil() || v.Len() == 0 {
return reflect.Value{}, nil
}
return v, nil
case reflect.Func:
if v.IsNil() {
return reflect.Value{}, nil
}
t := v.Type()
isArity0 := t.NumIn() == 0 && t.NumOut() == 1
if isArity0 {
v = v.Call(nil)[0]
if v.Kind() != reflect.String {
return r.toTruthyValue(v)
}
tree, err := parse.Parse("lambda", v.String(), parse.DefaultLeftDelim, parse.DefaultRightDelim)
if err != nil {
return reflect.Value{}, nil
}
s, err := r.renderToString(tree)
if err != nil {
return reflect.Value{}, nil
}
return r.toTruthyValue(reflect.ValueOf(s))
}
isArity1 := t.NumIn() == 1 && t.In(0).Kind() == reflect.String && t.NumOut() == 1 && t.Out(0).Kind() == reflect.String
if isArity1 {
return v, nil
}
return reflect.Value{}, nil
case reflect.Ptr, reflect.Interface:
return r.toTruthyValue(indirect(v))
case reflect.Map:
if v.IsNil() {
return reflect.Value{}, nil
}
return v, nil
case reflect.Struct:
return v, nil
case reflect.Invalid:
return reflect.Value{}, nil
default:
return reflect.Value{}, nil
}
}
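// Illustrative sketch (not part of the original source): a few values and the
// truthiness the switch above assigns to them.
//
//	r.toTruthyValue(reflect.ValueOf(""))        // zero Value: empty string is falsey
//	r.toTruthyValue(reflect.ValueOf(0))         // zero Value: numeric zero is falsey
//	r.toTruthyValue(reflect.ValueOf([]int{}))   // zero Value: empty slice is falsey
//	r.toTruthyValue(reflect.ValueOf(map[string]int{"a": 1})) // valid Value: non-nil map is truthy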
// indirect returns the value that v points to, or the concrete
// element underlying an interface.
func indirect(v reflect.Value) reflect.Value {
loop:
for v.IsValid() {
switch av := v; av.Kind() {
case reflect.Ptr:
v = av.Elem()
case reflect.Interface:
v = av.Elem()
default:
break loop
}
}
return v
}
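// Illustrative note (not part of the original source): indirect unwraps nested
// pointers and interfaces, e.g. a reflect.Value holding a *string ends up as the
// underlying string value, and an invalid Value is returned unchanged.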
// lookup a key in the context stack. If a value was not found, the reflect.Value zero
// type is returned.
func (r *renderer) lookup(name string, ln, col int, key []string) (reflect.Value, error) {
v := lookupKeysStack(key, r.stack)
if !v.IsValid() && r.template.ContextErrorsEnabled {
return v, fmt.Errorf("%s:%d:%d: cannot find value %s in context", name, ln, col, strings.Join(key, "."))
}
return v, nil
}
// lookupKeysStack obtains a value for a dotted key, e.g. a.b.c. If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeysStack(key []string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
if len(key) == 0 {
return v
}
for i := range key {
if i == 0 {
v = lookupKeyStack(key[i], contexts)
continue
}
v = lookupKeyContext(key[i], v)
if !v.IsValid() {
break
}
}
return v
}
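// Illustrative sketch (not part of the original source): resolving the dotted key
// "user.name" against a one-element context stack. Only the first segment consults
// the whole stack; later segments are looked up on the value found so far, using
// the key-lookup behaviour sketched in lookupKeyContext below.
//
//	ctx := reflect.ValueOf(map[string]interface{}{"user": map[string]string{"name": "Ada"}})
//	v := lookupKeysStack([]string{"user", "name"}, []reflect.Value{ctx})
//	// v is expected to hold the string "Ada"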
// lookupKeyStack returns a value from the first context in the stack that
// contains a value for that key. If a value was not found, the reflect.Value zero
// type is returned.
func lookupKeyStack(key string, contexts []reflect.Value) reflect.Value {
var v reflect.Value
for i := len(contexts) - 1; i >= 0; i-- {
ctx := contexts[i]
v = lookupKeyContext(key, ctx)
if v.IsValid() {
break
}
}
return v
}
// lookupKeyContext returns a value by key from the context. If a value
// was not found, the reflect.Value zero type is returned.
func lookupKeyContext(key string, ctx reflect.Value) reflect.Value {
if key == "." {
return ctx
}
// check context for method by name
if ctx.IsValid() {
method := ctx.MethodByName(key)
if method.IsValid() {
return method
}
}
// check for fields and keys on concrete types.
switch ctx.Kind() {
case reflect.Ptr, reflect.Interface:
return lookupKeyContext(key, indirect(ctx))
case reflect.Map:
return ctx.MapIndex(reflect.ValueOf(key))
case reflect.Struct:
return ctx.FieldByName(key)
}
return reflect.Value{}
}
utils.go
|
Value returns value of the string
func (s *SyncString) Value() string
|
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes address in url to make sure
// it's clickable, e.g. it replaces "undefined" address like
// 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts string to bool, in case of the value is empty
// or unknown, defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates advertise address,
// makes sure it's not an unreachable or multicast address
// returns address split into host and port, port could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether value is "on" or "off", parameterName is passed for error
// reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Close, it sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list, it panics if the value is not there
func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list, it panics if not enough values
// were allocated
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int,
|
{
s.Lock()
defer s.Unlock()
return s.string
}
|
identifier_body
|
utils.go
|
Value returns value of the string
func (s *SyncString) Value() string {
s.Lock()
defer s.Unlock()
return s.string
}
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes address in url to make sure
// it's clickable, e.g. it replaces "undefined" address like
// 0.0.0.0 used in network listeners format with loopback 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts string to bool, in case of the value is empty
// or unknown, defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates advertise address,
// makes sure it's not an unreachable or multicast address
// returns address split into host and port, port could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether value is "on" or "off", parameterName is passed for error
// reporting purposes, defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether currently logged user is a member of a group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// has no indication of port, return, note that
// it will not break ipv6 as it always has at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Close, it sequentially calls Close() on each object
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list; it panics if the list is empty
func (p *PortList)
|
() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list as an integer; it panics if the list is empty
// or the value is not a valid integer
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset
|
Pop
|
identifier_name
|
utils.go
|
// Value returns value of the string
func (s *SyncString) Value() string {
|
s.Lock()
defer s.Unlock()
return s.string
}
// Set sets the value of the string
func (s *SyncString) Set(v string) {
s.Lock()
defer s.Unlock()
s.string = v
}
// ClickableURL fixes the address in the URL to make sure it's clickable,
// e.g. it replaces an "undefined" listener address such as
// 0.0.0.0 with the loopback address 127.0.0.1
func ClickableURL(in string) string {
out, err := url.Parse(in)
if err != nil {
return in
}
host, port, err := net.SplitHostPort(out.Host)
if err != nil {
return in
}
ip := net.ParseIP(host)
// if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast,
// replace with localhost that is clickable
if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() {
out.Host = fmt.Sprintf("127.0.0.1:%v", port)
return out.String()
}
return out.String()
}
// AsBool converts a string to a bool; if the value is empty
// or unknown, it defaults to false
func AsBool(v string) bool {
if v == "" {
return false
}
out, _ := apiutils.ParseBool(v)
return out
}
// ParseAdvertiseAddr validates an advertise address and
// makes sure it is not an unreachable or multicast address.
// It returns the address split into host and port; the port may be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
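// Illustrative sketch (not part of the original file): a tiny helper showing how ParseAdvertiseAddr
// behaves for a few representative inputs. The function name and the sample addresses are
// assumptions added for illustration only; fmt is already imported by this file for ClickableURL.
func exampleParseAdvertiseAddr() {
	for _, in := range []string{"10.0.0.1:3025", "teleport.example.com", "0.0.0.0:3025"} {
		// prints "10.0.0.1 3025 <nil>", then "teleport.example.com  <nil>",
		// then an "unreachable advertise IP" BadParameter error for the unspecified address
		host, port, err := ParseAdvertiseAddr(in)
		fmt.Println(host, port, err)
	}
}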
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether the value is "on" or "off"; parameterName is passed for error
// reporting purposes, and defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
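// Illustrative sketch (not part of the original file): ParseOnOff maps "on"/"off"/"" to
// true/false/defaultValue and rejects anything else. The parameter name below is an assumption:
//   ParseOnOff("forward_agent", "on", false)  // -> true, nil
//   ParseOnOff("forward_agent", "", true)     // -> true, nil (the default applies)
//   ParseOnOff("forward_agent", "yes", false) // -> false, BadParameter error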
// IsGroupMember returns whether the currently logged-in user is a member of the given group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// if there is no indication of a port, return as is; note that
// this does not break IPv6, which always contains at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Closer; it sequentially calls Close() on each of the given closers
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list; it panics if the list is empty
func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list as an integer; it panics if the list is empty
// or the value is not a valid integer
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset
|
random_line_split
|
|
utils.go
|
could be empty
// if not specified
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) {
advertiseIP = strings.TrimSpace(advertiseIP)
host := advertiseIP
port := ""
if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") {
var err error
host, port, err = net.SplitHostPort(advertiseIP)
if err != nil {
return "", "", trace.BadParameter("failed to parse address %q", advertiseIP)
}
if _, err := strconv.Atoi(port); err != nil {
return "", "", trace.BadParameter("bad port %q, expected integer", port)
}
if host == "" {
return "", "", trace.BadParameter("missing host parameter")
}
}
ip := net.ParseIP(host)
if len(ip) != 0 {
if ip.IsUnspecified() || ip.IsMulticast() {
return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP)
}
}
return host, port, nil
}
// StringsSliceFromSet returns a sorted strings slice from set
func StringsSliceFromSet(in map[string]struct{}) []string {
if in == nil {
return nil
}
out := make([]string, 0, len(in))
for key := range in {
out = append(out, key)
}
sort.Strings(out)
return out
}
// StringsSet creates set of string (map[string]struct{})
// from a list of strings
func StringsSet(in []string) map[string]struct{} {
if in == nil {
return map[string]struct{}{}
}
out := make(map[string]struct{})
for _, v := range in {
out[v] = struct{}{}
}
return out
}
// ParseOnOff parses whether the value is "on" or "off"; parameterName is passed for error
// reporting purposes, and defaultValue is returned when no value is set
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) {
switch val {
case teleport.On:
return true, nil
case teleport.Off:
return false, nil
case "":
return defaultValue, nil
default:
return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val)
}
}
// IsGroupMember returns whether the currently logged-in user is a member of the given group
func IsGroupMember(gid int) (bool, error) {
groups, err := os.Getgroups()
if err != nil {
return false, trace.ConvertSystemError(err)
}
for _, group := range groups {
if group == gid {
return true, nil
}
}
return false, nil
}
// DNSName extracts DNS name from host:port string.
func DNSName(hostport string) (string, error) {
host, err := Host(hostport)
if err != nil {
return "", trace.Wrap(err)
}
if ip := net.ParseIP(host); len(ip) != 0 {
return "", trace.BadParameter("%v is an IP address", host)
}
return host, nil
}
// Host extracts host from host:port string
func Host(hostname string) (string, error) {
if hostname == "" {
return "", trace.BadParameter("missing parameter hostname")
}
// if this is IPv4 or V6, return as is
if ip := net.ParseIP(hostname); len(ip) != 0 {
return hostname, nil
}
// if there is no indication of a port, return as is; note that
// this does not break IPv6, which always contains at least one colon
if !strings.Contains(hostname, ":") {
return hostname, nil
}
host, _, err := SplitHostPort(hostname)
if err != nil {
return "", trace.Wrap(err)
}
return host, nil
}
// SplitHostPort splits host and port and checks that host is not empty
func SplitHostPort(hostname string) (string, string, error) {
host, port, err := net.SplitHostPort(hostname)
if err != nil {
return "", "", trace.Wrap(err)
}
if host == "" {
return "", "", trace.BadParameter("empty hostname")
}
return host, port, nil
}
// IsValidHostname checks if a string represents a valid hostname.
func IsValidHostname(hostname string) bool {
for _, label := range strings.Split(hostname, ".") {
if len(validation.IsDNS1035Label(label)) > 0 {
return false
}
}
return true
}
// ReadPath reads file contents
func ReadPath(path string) ([]byte, error) {
if path == "" {
return nil, trace.NotFound("empty path")
}
s, err := filepath.Abs(path)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
bytes, err := os.ReadFile(abs)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return nil, err
}
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
type multiCloser struct {
closers []io.Closer
}
func (mc *multiCloser) Close() error {
for _, closer := range mc.closers {
if err := closer.Close(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// MultiCloser implements io.Closer; it sequentially calls Close() on each of the given closers
func MultiCloser(closers ...io.Closer) io.Closer {
return &multiCloser{
closers: closers,
}
}
// IsHandshakeFailedError specifies whether this error indicates
// failed handshake
func IsHandshakeFailedError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: handshake failed")
}
// IsCertExpiredError specifies whether this error indicates
// expired SSH certificate
func IsCertExpiredError(err error) bool {
if err == nil {
return false
}
return strings.Contains(trace.Unwrap(err).Error(), "ssh: cert has expired")
}
// OpaqueAccessDenied returns a generic NotFound instead of AccessDenied
// so as to avoid leaking the existence of secret resources.
func OpaqueAccessDenied(err error) error {
if trace.IsAccessDenied(err) {
return trace.NotFound("not found")
}
return trace.Wrap(err)
}
// PortList is a list of TCP ports.
type PortList struct {
ports []string
sync.Mutex
}
// Pop returns a value from the list; it panics if the list is empty
func (p *PortList) Pop() string {
p.Lock()
defer p.Unlock()
if len(p.ports) == 0 {
panic("list is empty")
}
val := p.ports[len(p.ports)-1]
p.ports = p.ports[:len(p.ports)-1]
return val
}
// PopInt returns a value from the list as an integer; it panics if the list is empty
// or the value is not a valid integer
func (p *PortList) PopInt() int {
i, err := strconv.Atoi(p.Pop())
if err != nil {
panic(err)
}
return i
}
// PortStartingNumber is a starting port number for tests
const PortStartingNumber = 20000
// GetFreeTCPPorts returns n ports starting from port 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) {
list := make([]string, 0, n)
start := PortStartingNumber
if len(offset) != 0 {
start = offset[0]
}
for i := start; i < start+n; i++ {
list = append(list, strconv.Itoa(i))
}
return PortList{ports: list}, nil
}
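// Illustrative sketch (not part of the original file): typical test usage of GetFreeTCPPorts
// together with PortList. The function and variable names are assumptions for illustration only.
func examplePortListUsage() {
	ports, _ := GetFreeTCPPorts(3)           // allocates "20000", "20001", "20002"
	listenAddr := "127.0.0.1:" + ports.Pop() // takes "20002" off the end of the list
	nextPort := ports.PopInt()               // takes "20001" and converts it to the int 20001
	_, _ = listenAddr, nextPort
}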
// HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage.
func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
}
// ReadHostUUID reads host UUID from the file in the data dir
func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
}
// WriteHostUUID writes host UUID into a file
func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission)
|
{
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
}
|
conditional_block
|
|
graphics.rs
|
{
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelProperties {
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
let mut vertices = Vec::new();
let vertexA = Vertex {
position: [-0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 0.0],
};
let vertexB = Vertex {
position: [0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 0.0],
};
let vertexC = Vertex {
position: [-0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 1.0],
};
let vertexD = Vertex {
position: [0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 1.0],
};
vertices.push(vertexA);
vertices.push(vertexB);
vertices.push(vertexC);
vertices.push(vertexD);
let indices = vec!(2, 1, 0, 1, 2, 3);
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
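// Illustrative note (not part of the original file): the index list (2, 1, 0, 1, 2, 3) splits the
// quad into two triangles, C-B-A and B-C-D, both wound counter-clockwise. That matches the
// FrontFace::Ccw and cull_mode: Some(Face::Back) settings in new_pipeline below, so the quad is
// visible when viewed from the +Z side.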
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup
|
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu_types::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING), // enable standard alpha blending
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
// Specify any extra gpu feature. You can get a list of features supported by your device using adapter.features(), or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
// The limits field describes the limit of certain types of resource we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
// Define and create the swap_chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
// Defines how the swap_chain's textures will be stored on the GPU
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The
|
{
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
|
identifier_body
|
graphics.rs
|
gpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelProperties {
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
let mut vertices = Vec::new();
let vertexA = Vertex {
position: [-0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 0.0],
};
let vertexB = Vertex {
position: [0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 0.0],
};
let vertexC = Vertex {
position: [-0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 1.0],
};
let vertexD = Vertex {
position: [0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 1.0],
};
vertices.push(vertexA);
vertices.push(vertexB);
vertices.push(vertexC);
vertices.push(vertexD);
let indices = vec!(2, 1, 0, 1, 2, 3);
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu_types::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING), // enable standard alpha blending
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
// Specify any extra gpu feature. You can get a list of features supported by your device using adapter.features(), or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
// The limits field describes the limit of certain types of resource we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
// Define and create the swap_chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
// Defines how the swap_chain's textures will be stored on the GPU
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The present_mode uses the wgpu::PresentMode enum which determines how to sync the swap chain with the display.
present_mode: wgpu::PresentMode::Fifo,
|
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_descriptor);
|
random_line_split
|
|
graphics.rs
|
Attribute {
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: std::mem::size_of::<[f32; 6]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x2,
}
]
}
}
}
pub struct Mesh {
pub vertices: Vec<Vertex>,
pub indices: Vec<u16>,
pub vertex_buffer: Option<wgpu::Buffer>,
pub index_buffer: Option<wgpu::Buffer>,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct
|
{
pub model_matrix: [[f32; 4]; 4],
}
fn create_quad() -> Mesh {
let mut vertices = Vec::new();
let vertexA = Vertex {
position: [-0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 0.0],
};
let vertexB = Vertex {
position: [0.5, 0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 0.0],
};
let vertexC = Vertex {
position: [-0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [0.0, 1.0],
};
let vertexD = Vertex {
position: [0.5, -0.5, 0.0],
normal: [0.0, 0.0, 1.0],
tex_coords: [1.0, 1.0],
};
vertices.push(vertexA);
vertices.push(vertexB);
vertices.push(vertexC);
vertices.push(vertexD);
let indices = vec!(2, 1, 0, 1, 2, 3);
Mesh {
vertices,
indices,
vertex_buffer: None,
index_buffer: None,
}
}
impl Mesh {
fn upload_to_gpu(&mut self, device: &wgpu::Device) {
self.vertex_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.vertices),
usage: wgpu::BufferUsage::VERTEX,
}));
self.index_buffer = Some(device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&self.indices),
usage: wgpu::BufferUsage::INDEX,
}));
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct Uniforms {
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, matrix4: cgmath::Matrix4<f32>) {
self.view_proj = matrix4.into();
}
}
pub fn upload_texture_to_gpu(texture_name: &str, device: &wgpu::Device, queue: &wgpu::Queue, texture_bind_group_layout: &wgpu::BindGroupLayout) -> wgpu::BindGroup {
let texture = Texture::load_texture(texture_name, &device, &queue).unwrap();
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
}
],
label: Some(texture_name),
})
}
pub fn load_shader(shader_name: &str) -> Vec<u8> {
let mut shader_dir = std::env::current_dir().unwrap();
shader_dir.push("src\\resources\\shaders");
shader_dir.push(shader_name);
match fs::read(&shader_dir) {
Ok(v) => v,
Err(error) => panic!("Failed to read the file: {:?}. Error: {}", shader_dir.as_path(), error)
}
}
pub fn new_pipeline(device: &wgpu::Device, texture_format: wgpu::TextureFormat, vert_shader_name: &str, frag_shader_name: &str, texture_bind_group_layout: &wgpu::BindGroupLayout, uniform_bind_group_layout: &wgpu::BindGroupLayout, topology: wgpu::PrimitiveTopology, polygon_mode: wgpu::PolygonMode) -> wgpu::RenderPipeline {
let vert_shader_contents = load_shader(vert_shader_name);
let frag_shader_contents = load_shader(frag_shader_name);
let vertex_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(vert_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&vert_shader_contents),
});
let frag_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: Some(frag_shader_name),
flags: wgpu::ShaderFlags::all(),
source: wgpu::util::make_spirv(&frag_shader_contents),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu_types::ShaderStage::VERTEX,
range: 0..128,
}],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &vertex_shader,
entry_point: "main",
buffers: &[Vertex::Desc()],
},
fragment: Some(wgpu::FragmentState {
module: &frag_shader,
entry_point: "main",
targets: &[wgpu::ColorTargetState {
format: texture_format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING), // enable standard alpha blending
write_mask: wgpu::ColorWrite::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: topology,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE
polygon_mode: polygon_mode,
clamp_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
});
pipeline
}
impl Graphics {
pub async fn new(window: &sdl2::video::Window) -> Self {
let size = window.size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
// Surface is used to create the swap chain and adapter
let surface = unsafe { instance.create_surface(window) };
// Adapter is used to create the device and queue
let adapter = instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
}).await.unwrap();
let (device, queue) = adapter.request_device(&wgpu::DeviceDescriptor {
// Specify any extra gpu feature. You can get a list of features supported by your device using adapter.features(), or device.features().
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Features.html
features: wgpu::Features::PUSH_CONSTANTS,
// The limits field describes the limit of certain types of resource we can create.
// https://docs.rs/wgpu/0.7.0/wgpu/struct.Limits.html
limits: wgpu::Limits {
max_push_constant_size: 128,
..wgpu::Limits::default()
},
label: None,
},
None,
).await.unwrap();
// Define and create the swap_chain.
let swap_chain_descriptor = wgpu::SwapChainDescriptor {
// The usage field describes how the swap_chain's underlying textures will be used.
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
// Defines how the swap_chain's textures will be stored on the GPU
format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
width: size.0,
height: size.1,
// The
|
ModelProperties
|
identifier_name
|
MMM-Ilevia-Lille.js
|
rgb(0, 118,125)",
Yellow: "rgb(253,197,16)",
Purple: "rgb(153,51,255)",
White: "rgb(255,255,255)",
Orange: "rgb(236,114,0)"
},
size: "medium", // Text size, for example small, medium or large
stacked: true, // Show multiple buses on the same row if they share the same route and destination
showTimeLimit: 45, // If not stacked, show the departure time instead of minutes when the departure is more than this many minutes away.
debug: false, // console.log more things to help debugging
ileviaAPIURL: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-prochainspassages',
ileviaAPIURLColor: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-couleurslignes'
},
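// Illustrative sketch (not part of the original module): a typical MagicMirror config entry for
// this module. The field names mirror what getDom() reads from config.busStations (nomstation,
// codeligne, sensligne); the station, line and direction values are placeholders for illustration:
//   busStations: [
//     { nomstation: "MY STATION", codeligne: "L1", sensligne: "MY DESTINATION" }
//   ],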
// Define start sequence.
start: function() {
Log.info("Starting module: " + this.name);
//Get Timezone
this.config.timezone = Intl.DateTimeFormat().resolvedOptions().timeZone
//Send data to Node JS
this.sendSocketNotification('SET_CONFIG', this.config);
//Get color for buses lines from Ilevia
this.IleviaColor = []
this.sendSocketNotification('GET_COLOR', this.config.busStations);
this.busRecords = {};
this.loaded = false;
this.updateTimer = null;
var self = this;
setInterval(function () {
self.caller = 'updateInterval';
self.updateDom();
}, 1000);
},
getTranslations: function () {
return {
en: "translations/en.json",
fr: "translations/fr.json"
};
},
getHeader: function () {
var header = this.data.header;
if (this.config.showSecondsToNextUpdate && typeof(this.config.lastUpdate) !== 'undefined') {
var timeDifference = Math.round((this.config.updateInterval - new Date() + Date.parse(this.config.lastUpdate)) / 1000);
if (timeDifference > 0) {
header += ', ' + this.translate("NEXT_UPDATE_IN") + ' ' + timeDifference + ' s';
} else {
header += ', ' + this.translate("UPDATE_REQUESTED") + ' ' + Math.abs(timeDifference) + 's ago';
}
}
if (this.config.showLastUpdateTime && typeof(this.config.lastUpdate) !== 'undefined') {
var now = new Date(this.config.lastUpdate);
header += (now ? (' @ ' + now.getHours() + ':' + (now.getMinutes() > 9 ? '' : '0') + now.getMinutes() + ':' + (now.getSeconds() > 9 ? '' : '0') + now.getSeconds()) : '');
}
return header;
},
setColor: function(element, codeColor) {
if (this.config.useColor && codeColor != null) {
var color = null;
switch(codeColor) {
case 'blue':
color = this.config.colorCode.Blue;
break;
case 'green':
color = this.config.colorCode.Green;
break;
case 'yellow':
color = this.config.colorCode.Yellow;
break;
case 'purple':
color = this.config.colorCode.Purple;
break;
case 'white':
color = this.config.colorCode.White;
break;
case 'orange':
color = this.config.colorCode.Orange;
break;
default :
}
if (color != null) {
element.style="color:"+color+";";
}
}
},
setIleviaColor: function(element, codeligne) {
if (this.config.useColor) {
var colorHEX = null;
for (var index in this.IleviaColor) {
if(this.IleviaColor[index].codeligne === codeligne){
colorHEX = '#' + this.IleviaColor[index].colorHEX
break;
}
}
if (colorHEX != null) {
element.style="color:"+colorHEX+";";
}
}
},
stackBuses: function (buses) {
var stackedBuses = [];
var len = buses.length;
var previousStackvalue = '';
var stackedTimes = [];
if (len > 0) {
previousStackvalue = '' + buses[0].fields.nomstation + buses[0].fields.codeligne + buses[0].fields.sensligne;
stackedTimes.push(buses[0].fields.heureestimeedepart);
for (var i = 1; i < len; i++) {
|
stackedBuses.push({
from: buses[len - 1].fields.nomstation,
number: buses[len - 1].fields.codeligne,
to: buses[len - 1].fields.sensligne,
times: stackedTimes
});
}
return stackedBuses;
},
formatBuses: function (buses) {
var formatedBuses = [];
var len = buses.length;
if (len > 0) {
for (var i = 0; i < len; i++) {
formatedBuses.push({
from: buses[i].fields.nomstation,
number: buses[i].fields.codeligne,
to: buses[i].fields.sensligne,
time: buses[i].fields.heureestimeedepart
});
}
}
return formatedBuses;
},
// Override dom generator.
getDom: function() {
var self = this;
var wrapper = document.createElement("table");
wrapper.className = "small";
var first = true;
if (!this.loaded) {
wrapper.innerHTML = self.translate("LOADING");
wrapper.className = "medium dimmed";
return wrapper;
}
for (var busIndex = 0; busIndex < this.config.busStations.length; busIndex++) {
var stop = this.config.busStations[busIndex];
//#region Get stop index
var stopIndex = ''
if(typeof(stop.nomstation) !== 'undefined'){
stopIndex += stop.nomstation + '_'
}
if(typeof(stop.codeligne) !== 'undefined'){
stopIndex += stop.codeligne + '_'
}
if(typeof(stop.sensligne) !== 'undefined'){
stopIndex += stop.sensligne
}
//#endregion
var comingBuses = this.busRecords[stopIndex];
if(self.config.debug){
Log.info('MMM-Ilevia-Lille Debug : comingBuses')
Log.info(comingBuses)
Log.info(self.config.debug)
}
if(typeof(comingBuses) !== 'undefined'){
comingBuses.forEach(function (bus) {
//#region Get the next passage time
var now = new Date();
var minutes = '';
if(self.config.stacked) {
if(bus.times.length > 0) {
var busTime = new Date(bus.times[0]);
minutes = Math.round((busTime - now) / 60000);
if(minutes <= 1 && minutes > 0){
minutes = self.translate("CLOSE");
}
else if (minutes <= 0){
minutes = ''
}
}
for(var i=1; i < bus.times.length; i++){
var busTime = new Date(bus.times[i]);
if(minutes == ''){
minutes += Math.round((busTime - now) / 60000);
}else{
minutes += '/ ' + Math.round((busTime - now) / 60000);
}
}
minutes += " min";
} else {
var busTime = new Date(bus.time);
minutes = Math.round((busTime - now) / 60000);
if(minutes > self.config.showTimeLimit){
minutes = busTime.getHours() + ':' + (busTime.getMinutes() < 10 ? '0' : '') + busTime.getMinutes();
}else{
minutes += " min";
}
}
//#endregion
var busWrapper = document.createElement("tr");
busWrapper.className = first ? ' border_top' : '';
first = false; // Top border
|
var stackvalue = '' + buses[i].fields.nomstation + buses[i].fields.codeligne + buses[i].fields.sensligne;
if (stackvalue == previousStackvalue) {
stackedTimes.push(buses[i].fields.heureestimeedepart);
} else {
stackedBuses.push({
from: buses[i - 1].fields.nomstation,
number: buses[i - 1].fields.codeligne,
to: buses[i - 1].fields.sensligne,
times: stackedTimes
});
previousStackvalue = stackvalue;
stackedTimes = [];
stackedTimes.push(buses[i].fields.heureestimeedepart)
}
}
|
conditional_block
|
MMM-Ilevia-Lille.js
|
rgb(0, 118,125)",
Yellow: "rgb(253,197,16)",
Purple: "rgb(153,51,255)",
White: "rgb(255,255,255)",
Orange: "rgb(236,114,0)"
},
size: "medium", // Text size, for example small, medium or large
stacked: true, // Show multiple buses on the same row if they share the same route and destination
showTimeLimit: 45, // If not stacked, show the departure time instead of minutes when the departure is more than this many minutes away.
debug: false, // console.log more things to help debugging
ileviaAPIURL: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-prochainspassages',
ileviaAPIURLColor: 'https://opendata.lillemetropole.fr/api/records/1.0/search/?dataset=ilevia-couleurslignes'
},
// Define start sequence.
start: function() {
Log.info("Starting module: " + this.name);
//Get Timezone
this.config.timezone = Intl.DateTimeFormat().resolvedOptions().timeZone
//Send data to Node JS
this.sendSocketNotification('SET_CONFIG', this.config);
//Get color for buses lines from Ilevia
this.IleviaColor = []
this.sendSocketNotification('GET_COLOR', this.config.busStations);
this.busRecords = {};
this.loaded = false;
this.updateTimer = null;
var self = this;
setInterval(function () {
self.caller = 'updateInterval';
self.updateDom();
}, 1000);
},
getTranslations: function () {
return {
en: "translations/en.json",
fr: "translations/fr.json"
};
},
getHeader: function () {
var header = this.data.header;
if (this.config.showSecondsToNextUpdate && typeof(this.config.lastUpdate) !== 'undefined') {
var timeDifference = Math.round((this.config.updateInterval - new Date() + Date.parse(this.config.lastUpdate)) / 1000);
if (timeDifference > 0) {
header += ', ' + this.translate("NEXT_UPDATE_IN") + ' ' + timeDifference + ' s';
} else {
header += ', ' + this.translate("UPDATE_REQUESTED") + ' ' + Math.abs(timeDifference) + 's ago';
}
}
if (this.config.showLastUpdateTime && typeof(this.config.lastUpdate) !== 'undefined') {
var now = new Date(this.config.lastUpdate);
header += (now ? (' @ ' + now.getHours() + ':' + (now.getMinutes() > 9 ? '' : '0') + now.getMinutes() + ':' + (now.getSeconds() > 9 ? '' : '0') + now.getSeconds()) : '');
}
return header;
},
setColor: function(element, codeColor) {
if (this.config.useColor && codeColor != null) {
var color = null;
switch(codeColor) {
case 'blue':
color = this.config.colorCode.Blue;
break;
case 'green':
color = this.config.colorCode.Green;
break;
case 'yellow':
color = this.config.colorCode.Yellow;
break;
case 'purple':
color = this.config.colorCode.Purple;
break;
case 'white':
color = this.config.colorCode.White;
break;
case 'orange':
color = this.config.colorCode.Orange;
break;
default :
}
if (color != null) {
element.style="color:"+color+";";
}
}
},
setIleviaColor: function(element, codeligne) {
if (this.config.useColor) {
var colorHEX = null;
for (var index in this.IleviaColor) {
if(this.IleviaColor[index].codeligne === codeligne){
colorHEX = '#' + this.IleviaColor[index].colorHEX
break;
}
}
|
if (colorHEX != null) {
element.style="color:"+colorHEX+";";
}
}
},
stackBuses: function (buses) {
var stackedBuses = [];
var len = buses.length;
var previousStackvalue = '';
var stackedTimes = [];
if (len > 0) {
previousStackvalue = '' + buses[0].fields.nomstation + buses[0].fields.codeligne + buses[0].fields.sensligne;
stackedTimes.push(buses[0].fields.heureestimeedepart);
for (var i = 1; i < len; i++) {
var stackvalue = '' + buses[i].fields.nomstation + buses[i].fields.codeligne + buses[i].fields.sensligne;
if (stackvalue == previousStackvalue) {
stackedTimes.push(buses[i].fields.heureestimeedepart);
} else {
stackedBuses.push({
from: buses[i - 1].fields.nomstation,
number: buses[i - 1].fields.codeligne,
to: buses[i - 1].fields.sensligne,
times: stackedTimes
});
previousStackvalue = stackvalue;
stackedTimes = [];
stackedTimes.push(buses[i].fields.heureestimeedepart)
}
}
stackedBuses.push({
from: buses[len - 1].fields.nomstation,
number: buses[len - 1].fields.codeligne,
to: buses[len - 1].fields.sensligne,
times: stackedTimes
});
}
return stackedBuses;
},
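// Illustrative note (not part of the original module): stackBuses groups consecutive records that
// share station, line and direction, so several departures of the same line towards the same
// terminus collapse into one row whose `times` array holds all of their timestamps.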
formatBuses: function (buses) {
var formatedBuses = [];
var len = buses.length;
if (len > 0) {
for (var i = 0; i < len; i++) {
formatedBuses.push({
from: buses[i].fields.nomstation,
number: buses[i].fields.codeligne,
to: buses[i].fields.sensligne,
time: buses[i].fields.heureestimeedepart
});
}
}
return formatedBuses;
},
// Override dom generator.
getDom: function() {
var self = this;
var wrapper = document.createElement("table");
wrapper.className = "small";
var first = true;
if (!this.loaded) {
wrapper.innerHTML = self.translate("LOADING");
wrapper.className = "medium dimmed";
return wrapper;
}
for (var busIndex = 0; busIndex < this.config.busStations.length; busIndex++) {
var stop = this.config.busStations[busIndex];
//#region Get stop index
var stopIndex = ''
if(typeof(stop.nomstation) !== 'undefined'){
stopIndex += stop.nomstation + '_'
}
if(typeof(stop.codeligne) !== 'undefined'){
stopIndex += stop.codeligne + '_'
}
if(typeof(stop.sensligne) !== 'undefined'){
stopIndex += stop.sensligne
}
//#endregion
var comingBuses = this.busRecords[stopIndex];
if(self.config.debug){
Log.info('MMM-Ilevia-Lille Debug : comingBuses')
Log.info(comingBuses)
Log.info(self.config.debug)
}
if(typeof(comingBuses) !== 'undefined'){
comingBuses.forEach(function (bus) {
//#region Get the next passage time
var now = new Date();
var minutes = '';
if(self.config.stacked) {
if(bus.times.length > 0) {
var busTime = new Date(bus.times[0]);
minutes = Math.round((busTime - now) / 60000);
if(minutes <= 1 && minutes > 0){
minutes = self.translate("CLOSE");
}
else if (minutes <= 0){
minutes = ''
}
}
for(var i=1; i < bus.times.length; i++){
var busTime = new Date(bus.times[i]);
if(minutes == ''){
minutes += Math.round((busTime - now) / 60000);
}else{
minutes += '/ ' + Math.round((busTime - now) / 60000);
}
}
minutes += " min";
} else {
var busTime = new Date(bus.time);
minutes = Math.round((busTime - now) / 60000);
if(minutes > self.config.showTimeLimit){
minutes = busTime.getHours() + ':' + (busTime.getMinutes() < 10 ? '0' : '') + busTime.getMinutes();
}else{
minutes += " min";
}
}
//#endregion
var busWrapper = document.createElement("tr");
busWrapper.className = first ? ' border_top' : '';
first = false; // Top border only
|
random_line_split
|
|
test_supernet.py
|
=str)
parser.add_argument('--arch_start', default=1, type=int,
metavar='N', help='the start index of eval archs')
parser.add_argument('--arch_num', default=101, type=int,
metavar='N', help='the num of eval archs')
parser.add_argument('--workers', default=1, type=int, metavar='N',
                    help='number of data loading workers (default: 1)')
parser.add_argument('--batch_size', default=512, type=int,
                    metavar='N', help='mini-batch size (default: 512)')
parser.add_argument('--affine', action='store_true', help='BN affine')
parser.add_argument('--save_dir', help='The directory used to save the trained models',
default='./checkpoints', type=str)
parser.add_argument('--save_file', help='The file used to save the result',
default='eval-1_50000', type=str)
parser.add_argument(
'--save_every', help='Saves checkpoints at every specified number of epochs', type=int, default=1)
parser.add_argument('--convbn_type',
default='sample_channel',
type=str,
help='convbn forward with different mask: mix_channel or random_mix_channel or sample_channel or sample_random_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1,
help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
default=128, help='train epoch on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
    except RuntimeError:
        print("BN track_running_stats is False in the checkpoint but True in the model; loading with strict=False")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
new_loader.append((x.cuda(), y.cuda()))
return new_loader
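# Illustrative note (not part of the original script): get_loader materialises every batch on the GPU
# once, so validating many sampled sub-networks reuses the same device-resident tensors instead of
# paying the host-to-device copy on every pass. A sketch of the intended usage (the loader construction
# below is an assumption for illustration only):
#   val_loader = get_loader(torch.utils.data.DataLoader(dataset, batch_size=512))
#   for x, y in val_loader:  # x and y are already CUDA tensors
#       ...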
def
|
calibrate_bn
|
identifier_name
|
|
test_supernet.py
|
/model.th',
help='model checkpoint', type=str)
parser.add_argument('--arch_start', default=1, type=int,
metavar='N', help='the start index of eval archs')
parser.add_argument('--arch_num', default=101, type=int,
metavar='N', help='the num of eval archs')
parser.add_argument('--workers', default=1, type=int, metavar='N',
                    help='number of data loading workers (default: 1)')
parser.add_argument('--batch_size', default=512, type=int,
                    metavar='N', help='mini-batch size (default: 512)')
parser.add_argument('--affine', action='store_true', help='BN affine')
parser.add_argument('--save_dir', help='The directory used to save the trained models',
default='./checkpoints', type=str)
parser.add_argument('--save_file', help='The file used to save the result',
default='eval-1_50000', type=str)
parser.add_argument(
'--save_every', help='Saves checkpoints at every specified number of epochs', type=int, default=1)
parser.add_argument('--convbn_type',
default='sample_channel',
type=str,
help='convbn forward with different mask: mix_channel or random_mix_channel or sample_channel or sample_random_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1,
                    help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
                    default=128, help='train batch size on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
    except RuntimeError:
        print("BN track_running_stats was False in the checkpoint but True in the model; retrying with strict=False")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
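        # Note on the block below: when the supernet checkpoint was trained with
        # track_running_stats=False, its BatchNorm2d modules carry no usable
        # running_mean/running_var/num_batches_tracked buffers. They are re-registered
        # here (zeros/ones) so that calibrate_bn further down has buffers to
        # accumulate calibration statistics into for each sampled sub-network.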
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
|
new_loader.append((x.cuda(), y.cuda()))
|
conditional_block
|
|
test_supernet.py
|
train on supernet')
parser.add_argument('--train_batch_size', type=int,
                    default=128, help='train batch size on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
    except RuntimeError:
        print("BN track_running_stats was False in the checkpoint but True in the model; retrying with strict=False")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
new_loader.append((x.cuda(), y.cuda()))
return new_loader
def calibrate_bn(loader, model, lenlist, num):
model.train()
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
m.running_mean.data.fill_(0)
m.running_var.data.fill_(0)
m.num_batches_tracked.data.zero_()
m.momentum = None
for i, (input, _) in enumerate(loader):
# input = input.cuda()
if i < min(len(loader), num):
model(input, lenlist)
return model
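# A note on calibrate_bn above (sketch, not part of the original script): setting
# `m.momentum = None` makes BatchNorm2d keep a cumulative (equally weighted) average
# over every forward pass instead of an exponential moving average, so after zeroing
# the buffers the first `num` calibration batches fully determine the running stats.
# Minimal illustration with hypothetical shapes:
#
#     bn = torch.nn.BatchNorm2d(8)
#     bn.momentum = None           # cumulative average instead of EMA
#     bn.train()
#     for batch in calib_batches:  # e.g. tensors of shape (N, 8, H, W)
#         bn(batch)                # running_mean/var now average all batches seen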
def train(train_queue, model, lenlist, args):
model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(
), args.train_lr, momentum=args.train_momentum, weight_decay=args.train_weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.train_epochs, eta_min=args.train_min_lr)
logging.info('Train arch: {}'.format(lenlist))
for epoch in range(args.train_epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_queue):
data_time.update(time.time() - end)
target_var = target.cuda()
input_var = input.cuda()
optimizer.zero_grad() # zero gradient
output = model(input_var, lenlist) # compute output
loss = criterion(output, target_var) # compute loss
loss.backward() # compute gradient
optimizer.step() # do SGD step
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target_var.data)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.train_print_freq == 0 or i == len(train_queue) - 1:
logging.info('\tEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch, i, len(train_queue), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
lr_scheduler.step()
del criterion
del optimizer
del lr_scheduler
return model
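# Illustrative call sequence for a single sampled architecture (same names as in
# main() above; the concrete lenlist values are hypothetical):
#
#     lenlist = get_arch_lenlist(archs_info, arch_i)                  # e.g. [4, 8, 16]
#     model = train(train_loader, model_origin, lenlist, args)        # fine-tune a copy
#     model = calibrate_bn(calib_loader, model, lenlist, args.bn_calibrate_batch_num)
#     prec1 = validate(val_loader, model, lenlist)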
|
random_line_split
|
||
test_supernet.py
|
_channel or sample_sepmask_channel or sample_sepproject_channel or sample_localfree_channel')
parser.add_argument('--alpha_type', default='sample_uniform', type=str,
help='how to cal alpha in forward process: mix, sample_uniform, sample_fair, sample_flops_uniform, sample_flops_fair, sample_sandwich')
parser.add_argument('--mask_repeat', type=int, default=1,
help='used in random_mix_channel')
parser.add_argument('--prob_ratio', type=float, default=1.,
help='used in sample_flops_uniform or sample_flops_fair')
parser.add_argument('--r', type=int, default=1,
                    help='used in local sample_localfree_channel')
parser.add_argument('--localsep_layers', default=None,
type=str, help='used in sample_localsepmask_channel')
parser.add_argument('--localsep_portion', type=float,
default=1., help='used in sample_localsepmask_channel')
parser.add_argument('--sameshortcut', action='store_true',
help='same shortcut')
parser.add_argument('--track_running_stats',
action='store_true', help='bn track_running_stats')
parser.add_argument('--bn_calibrate', action='store_true', help='bn calibrate')
parser.add_argument('--bn_calibrate_batch', type=int,
default=10000, help='bn calibrate batch')
parser.add_argument('--bn_calibrate_batch_num', type=int,
default=1, help='bn calibrate batch num')
parser.add_argument('--train', action='store_true', help='train on supernet')
parser.add_argument('--train_batch_size', type=int,
                    default=128, help='train batch size on supernet')
parser.add_argument('--train_epochs', type=int, default=1,
help='train epoch on supernet')
parser.add_argument('--train_lr', type=float, default=1e-3,
help='train lr on supernet')
parser.add_argument('--train_momentum', type=float,
default=0.9, help='train momentum on supernet')
parser.add_argument('--train_min_lr', type=float, default=0,
help='train min_lr on supernet')
parser.add_argument('--train_weight_decay', type=float,
default=5e-4, help='train wd on supernet')
parser.add_argument('--train_print_freq', type=int, default=100,
help='train print freq epoch on supernet')
parser.add_argument('--seed', type=int, default=2, help='random seed')
args = parser.parse_args()
best_prec1 = 0
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if args.bn_calibrate:
args.track_running_stats = True
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(
args.save_dir, '{}.txt'.format(args.save_file)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(args)
def main():
global args, best_prec1
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
cudnn.benchmark = False
cudnn.enabled = True
cudnn.deterministic = True
model = resnet20(args.affine, args.convbn_type, args.mask_repeat,
args.alpha_type, localsep_layers=args.localsep_layers,
localsep_portion=args.localsep_portion,
same_shortcut=args.sameshortcut,
track_running_stats=args.track_running_stats)
model.cuda()
try:
model.load_state_dict(torch.load(args.model_path)['state_dict'])
    except RuntimeError:
        print("BN track_running_stats was False in the checkpoint but True in the model; retrying with strict=False")
model.load_state_dict(torch.load(args.model_path)[
'state_dict'], strict=False)
normalize = transforms.Normalize(
mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958])
if args.bn_calibrate:
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d) and m.track_running_stats is False:
del m.running_mean
del m.running_var
del m.num_batches_tracked
m.register_buffer('running_mean', torch.zeros(m.num_features))
m.register_buffer('running_var', torch.ones(m.num_features))
m.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
model.cuda()
calib_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
transform=transforms.Compose([
# transforms.RandomCrop(32, 4),
# transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(15),
transforms.ToTensor(),
normalize,
]),
download=True,
),
batch_size=args.bn_calibrate_batch,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
calib_loader = get_loader(calib_loader)
if args.train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
Cutout(16),
normalize,
]),
download=True,
),
batch_size=args.train_batch_size,
pin_memory=True,
shuffle=True,
num_workers=args.workers,
)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root='./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
val_loader = get_loader(val_loader)
with open(args.eval_json_path, 'r') as f:
archs_info = json.load(f)
sub_archs_info = {}
if args.train:
model_origin = model
for arch_i in range(args.arch_start, min(50001, args.arch_start + args.arch_num)):
if 'arch{}'.format(arch_i) in archs_info:
lenlist = get_arch_lenlist(archs_info, arch_i)
if args.train:
model = train(train_loader, model_origin, lenlist, args)
if args.bn_calibrate:
model = calibrate_bn(calib_loader, model,
lenlist, args.bn_calibrate_batch_num)
prec1 = validate(val_loader, model, lenlist)
sub_archs_info['arch{}'.format(arch_i)] = {}
sub_archs_info['arch{}'.format(arch_i)]['acc'] = prec1
sub_archs_info['arch{}'.format(
arch_i)]['arch'] = archs_info['arch{}'.format(arch_i)]['arch']
logging.info('Arch{}: [acc: {:.5f}][arch: {}]'.format(
arch_i, prec1, archs_info['arch{}'.format(arch_i)]['arch']))
save_json = os.path.join(args.save_dir, '{}.json'.format(args.save_file))
with open(save_json, 'w') as f:
json.dump(sub_archs_info, f)
def get_arch_lenlist(archs_dict, arch_i):
arch = archs_dict['arch{}'.format(arch_i)]
arch_list = arch['arch'].split('-')
for i, lenth in enumerate(arch_list):
arch_list[i] = int(lenth)
return arch_list
def get_loader(loader):
new_loader = []
for x, y in loader:
new_loader.append((x.cuda(), y.cuda()))
return new_loader
def calibrate_bn(loader, model, lenlist, num):
model.train()
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
m.running_mean.data.fill_(0)
m.running_var.data.fill_(0)
m.num_batches_tracked.data.zero_()
m.momentum = None
for i, (input, _) in enumerate(loader):
# input = input.cuda()
if i < min(len(loader), num):
model(input, lenlist)
return model
def train(train_queue, model, lenlist, args):
|
model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(
), args.train_lr, momentum=args.train_momentum, weight_decay=args.train_weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, args.train_epochs, eta_min=args.train_min_lr)
logging.info('Train arch: {}'.format(lenlist))
for epoch in range(args.train_epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_queue):
data_time.update(time.time() - end)
|
identifier_body
|
|
order.js
|
fenye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
        var temp = checkUser(add["userName"]);
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
// Save functionality
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
                return;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
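// Summary of the handler above: each row's button toggles between "编辑" (edit) and
// "保存" (save). On save it collects the row's <td> inputs into `arry`, checks that
// status is 0 or 1 and that the user and product exist, then POSTs the row to
// updateUrl; in edit mode the inputs are enabled except pkey and orderTime, which
// stay read-only.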
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
    // getTable(); // show the first page
    $("#nextpage").click(function () { // next page
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curPage").value);
if (npage > countpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
// Table body and pagination
// Fetch table data
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
|
=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0
|
var page
|
identifier_name
|
order.js
|
fenye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
        var temp = checkUser(add["userName"]);
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
// Save functionality
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
                return;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
|
arry.pagenow=pagenow;
getTable(arry);
}
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curPage").value);
if (npage > countpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
// Table body and pagination
// Fetch table data
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
var page=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
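    // Worked example of the page-count formula above (illustrative numbers): with
    // pageSize = 10, a count of 30 gives 30 % 10 == 0, so countpage = 30 / 10 = 3;
    // a count of 23 gives Math.floor(23 / 10) + 1 = 3 pages, the last one partially filled.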
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0
|
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
|
identifier_body
|
order.js
|
ye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
        var temp = checkUser(add["userName"]);
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
// Save functionality
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
                return;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curP
|
untpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
// Table body and pagination
// Fetch table data
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
var page=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize
|
age").value);
if (npage > co
|
conditional_block
|
order.js
|
ye({description:"", clientid:"", pagenow:1});
$(".Preservation").click(function () {
var add = {};
// arry.pkey;
// arry["pkey"];
// arry[html];
$(".zeng").children().each(function (idx, ele) {
// if (idx < 1) {
// return;
// }
var kay = $(this).children()[0].name;
var val = $(this).children()[0].value;
add[kay] = val;
});
if(add["userName"] == ""){
alert("帐号不能为空");
return;
}
if(add["productId"] == ""){
alert("产品ID不能为空");
return;
}
        var temp = checkUser(add["userName"]);
if(!temp){
alert("帐号不存在");
return;
}
var temp2 =checkProduct(add["productId"]);
if(!temp2){
alert("产品不存在");
return;
}
$.ajax({
url: saveUrl,
data: add,
type: "POST",
dataType: "text",
success: function (data) {
if (data == "ok") {alert("添加成功");location.reload();}
if (data == "error") {alert("添加失败")}
if (data == "2") {alert("该设备已存在")}
}
})
});
});
// Save functionality
function getenter() {
// $("#show_tab_tr ").each(function(i){
//var txt=$('input[type="text"]').value;
//var j = true;
$('input[kepp="dianji"]').click(function () {
//alert(toEdit);
var arry = {}
// var toEdit = $(this).attr("value") == "编辑";
// $(this).attr("value", toEdit ? "保存" : "编辑");
// var index = $(this).parent().parent().attr("idx");
// td_arr = $(this).parent().siblings().children().val();
var toEdit = $(this).attr("value") == "编辑";
var index = $(this).parent().parent().attr("idx");
td_arr=$(this).parent().siblings().children().val();
$(this).attr("value", toEdit ? "保存" : "编辑");
if (toEdit == false) {
$(this).parent().siblings().each(function (idx, ele) {
var td_arr = $(ele).children().val();
var key = $(this).attr("key");
arry[key] =td_arr;
});
if(arry["status"] == "0" || arry["status"] == "1"){
}else{
$(this).attr("value", "保存");
alert("类型只能为1或者0");
                return;
}
if(!checkUser(arry["userName"])){
$(this).attr("value", "保存");
alert("帐号不存在");
return;
}
if(!checkProduct(arry["productId"])){
$(this).attr("value", "保存");
alert("产品不存在");
return;
}
console.info(arry);
$.ajax({
url:updateUrl,
data:arry,
type:"POST",
dataType:"text",
success:function(data){
if(data=="ok")
{
alert("修改成功");
location.reload();
}
if (data=="error") {alert("修改失败");}
}
});
} else { }
var inputcss = $(this).parent().siblings().children()
if (toEdit) {
inputcss.attr("disabled", false);
inputcss.css("border", "1px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="pkey"]').children().attr("disabled", true);
$(this).parent().siblings('td[key="orderTime"]').children().css("border", "0px solid #51e5fb");
$(this).parent().siblings('td[key="orderTime"]').children().attr("disabled", true);
} else {
inputcss.attr("disabled", true);
inputcss.css("border", "0px solid #51e5fb");
}
});
}
function getfenye(arry){
// $(".search").children().each(function (idx, ele) {
// kay = $(this).children()[0].name;
// val = $(this).children()[0].value;
// arry[kay] = val;
console.info(arry);
// });
// // arry.pagenow = "1";
pagenow = 1;
document.getElementById("curPage").value = pagenow;
// getTable();//显示第一页
$("#nextpage").click(function () {//下一页
var arry = setarry();
//alert(page);
if (pagenow < countpage) {
pagenow += 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = countpage;
alert("已是最后一页");
}
console.info(arry);
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
|
//alert(pagenow);
});
$("#lastpage").click(function () {//上一页
var arry = setarry();
//alert(pagenow);
if (pagenow != 1) {
pagenow -= 1;
document.getElementById("curPage").value = pagenow;
} else {
pagenow = 1
alert("已是首页")
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
$("#npage").click(function () {//跳到固定某一页
var arry = setarry();
var npage = parseInt(document.getElementById("curPage").value);
if (npage > countpage || npage < 1) {
alert("请输入1-" + countpage + "页");
}
else {
pagenow = npage;
}
if (arry["description"] == "" && arry["clientid"] == "") {
getTable();
} else {
arry.pagenow=pagenow;
getTable(arry);
}
});
}
// Table body and pagination
// Fetch table data
function getTable(data) {
var pagenow = document.getElementById("curPage").value;
$.ajax({
url: "count",
data: { "name": "order" },
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
var page=contact % pageSize==0 ? contact/pageSize : Math.floor(contact/pageSize)+1;
countpage = page;
$("#pageSum").val(countpage);
}
})
data[ "pagenow"] = pagenow;
data[ "pagesize"] = pageSize;
$.ajax({
url: listUrl,
data: data,
type: "Post",
//contentType: "application/json; charset=utf-8",
dataType: "text",
success: function (contact) {
allData = JSON.parse(contact);
temp = allData;
//temp = allData.data;
//alert(pagenow);
//console.info(temp);
var html = '';
for (var i = 0; i < allData.length; i++) {
//alert(temp.length);
var data = temp[i];
// console.info(temp[i][0]);
html += '<tr id="show_tab_tr" idx="' + i + '" >' +
'<td key="pkey"><input type="text" value="' + data.pkey + '" disabled ></td>' +
'<td key="userName"><input type="text" value="' + data.userName + '" disabled ></td>' +
'<td key="productId" ><input type="text" value="' + data.productId + '" disabled ></td>' +
'<td key="price" ><input type="text" value="' + data.price + '" disabled ></td>' +
'<td key="status" ><input type="text" value="' + data.status + '" disabled ></td>' +
'<td key="orderTime" ><input type="text" value="' + data.orderTime + '" disabled ></td>' +
'<td><input id="center" style="display: inline-block;float:left;width:40px;color:#12a9ef;" kepp="dianji" type="button" value="编辑">    ' +
'<input type="button" class = "remove" style="display: inline-block;float:right; width:40px; color:#12a9ef;" det="detlet" value="删除" ></td>' +
'</tr>';
$("tbody").html(html);
}
getenter();
getdet();
// len =temp.length;
// page=len % pageSize==0
|
}
|
random_line_split
|
lib.rs
|
a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
    /// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each edge can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each node can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
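    // Usage sketch (not part of this crate's sources): any `std::io::Write` sink works,
    // for example an in-memory buffer:
    //
    //     let graph: petgraph::Graph<&str, ()> = petgraph::Graph::new();
    //     let mut out = Vec::new();
    //     GraphMl::new(&graph).pretty_print(true).to_writer(&mut out).unwrap();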
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
|
XmlEvent::start_element("key")
|
random_line_split
|
|
lib.rs
|
graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
    /// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each edge can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
    /// Each node can be converted into an arbitrary number of attributes.
    /// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!(""),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed()
|
{
"directed"
}
|
conditional_block
|
|
lib.rs
|
_graph();
//! // Configure output settings
//! // Enable pretty printing and exporting of node weights.
//! // Use the Display implementation of NodeWeights for exporting them.
//! let graphml = GraphMl::new(&graph)
//! .pretty_print(true)
//! .export_node_weights_display();
//!
//! assert_eq!(
//! graphml.to_string(),
//! r#"<?xml version="1.0" encoding="UTF-8"?>
//! <graphml xmlns="http://graphml.graphdrawing.org/xmlns">
//! <graph edgedefault="directed">
//! <node id="n0">
//! <data key="weight">0</data>
//! </node>
//! <node id="n1">
//! <data key="weight">1</data>
//! </node>
//! <node id="n2">
//! <data key="weight">2</data>
//! </node>
//! <edge id="e0" source="n0" target="n1" />
//! <edge id="e1" source="n1" target="n2" />
//! </graph>
//! <key id="weight" for="node" attr.name="weight" attr.type="string" />
//! </graphml>"#
//! );
//! # }
//! ```
//!
//! [`GraphMl`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html
//! [`GraphMl::to_string`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_string
//! [`GraphMl::to_writer`]: https://docs.rs/petgraph-graphml/*/petgraph_graphml/struct.GraphMl.html#method.to_writer
//! [graphmlwebsite]: http://graphml.graphdrawing.org/
//! [petgraph]: https://docs.rs/petgraph/
#![deny(
missing_debug_implementations,
missing_copy_implementations,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
#![allow(unknown_lints, clippy::return_self_not_must_use)]
#![doc(html_root_url = "https://docs.rs/petgraph-graphml/3.0.0")]
use petgraph::visit::{
EdgeRef, GraphProp, IntoEdgeReferences, IntoNodeReferences, NodeIndexable, NodeRef,
};
use std::borrow::Cow;
use std::collections::HashSet;
use std::fmt::{self, Debug, Display};
use std::io::{self, Cursor, Write};
use xml::common::XmlVersion;
use xml::writer::events::XmlEvent;
use xml::writer::{Error as XmlError, EventWriter, Result as WriterResult};
use xml::EmitterConfig;
static NAMESPACE_URL: &str = "http://graphml.graphdrawing.org/xmlns";
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
struct Attribute {
name: Cow<'static, str>,
for_: For,
}
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
enum For {
Node,
Edge,
}
impl For {
fn to_str(self) -> &'static str {
match self {
For::Node => "node",
For::Edge => "edge",
}
}
}
type PrintWeights<W> = dyn for<'a> Fn(&'a W) -> Vec<(Cow<'static, str>, Cow<'a, str>)>;
/// GraphML output printer
///
/// See the [main crate documentation](index.html) for usage instructions and examples.
pub struct GraphMl<G>
where
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
graph: G,
pretty_print: bool,
export_edges: Option<Box<PrintWeights<G::EdgeWeight>>>,
export_nodes: Option<Box<PrintWeights<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
/// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
/// Each edge can be converted into an arbitrary number of attributes.
/// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self
|
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
/// Each node can be converted into an arbitrary number of attributes.
/// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
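///
/// # Example
///
/// A minimal sketch that discards the output; useful for checking that
/// serialization itself succeeds:
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// let graph: Graph<(), ()> = Graph::new();
/// let graphml = GraphMl::new(&graph);
/// graphml.to_writer(std::io::sink()).expect("writing to a sink never fails");
/// ```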
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!("unexpected non-I/O error while writing GraphML"),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new
|
{
self.export_edges = Some(edge_weight);
self
}
|
identifier_body
|
lib.rs
|
<G::NodeWeight>>>,
}
impl<G> GraphMl<G>
where
G: GraphProp,
G: IntoNodeReferences,
G: IntoEdgeReferences,
G: NodeIndexable,
{
/// Create a new GraphML printer for the graph.
pub fn new(graph: G) -> Self {
Self {
graph,
pretty_print: true,
export_edges: None,
export_nodes: None,
}
}
/// Enable or disable pretty printing of the XML.
///
/// Pretty printing enables linebreaks and indentation.
pub fn pretty_print(mut self, state: bool) -> Self {
self.pretty_print = state;
self
}
/// Export the edge weights to GraphML.
///
/// This uses the [`Display`] implementation of the edge weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_edge_weights_display(self) -> Self
where
G::EdgeWeight: Display,
{
self.export_edge_weights(Box::new(|edge| {
vec![("weight".into(), edge.to_string().into())]
}))
}
/// Export the edge weights to GraphML.
///
/// This uses a custom conversion function.
/// Each edge can be converted into an arbitrary number of attributes.
/// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(), (String, u32)> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_edge_weights(Box::new(|edge| {
/// let &(ref s, i) = edge;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_edge_weights(mut self, edge_weight: Box<PrintWeights<G::EdgeWeight>>) -> Self {
self.export_edges = Some(edge_weight);
self
}
/// Export the node weights to GraphML.
///
/// This uses the [`Display`] implementation of the node weight type.
/// The attribute name defaults to "weight".
///
/// Once set this option cannot be disabled anymore.
///
/// [`Display`]: ::std::fmt::Display
pub fn export_node_weights_display(self) -> Self
where
G::NodeWeight: Display,
{
self.export_node_weights(Box::new(|node| {
vec![("weight".into(), node.to_string().into())]
}))
}
/// Export the node weights to GraphML.
///
/// This uses a custom conversion function.
/// Each node can be converted into an arbitrary number of attributes.
/// Each attribute is a key-value pair, represented as a tuple.
///
/// Once set this option cannot be disabled anymore.
///
/// # Example
///
/// A custom print function for the type `(String, u32)`.
/// It will create two attributes "str attr" and "int attr" containing the string and integer part.
///
/// ```
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # fn make_graph() -> Graph<(String, u32), ()> {
/// # Graph::new()
/// # }
/// let graph = make_graph();
/// let graphml = GraphMl::new(&graph).export_node_weights(Box::new(|node| {
/// let &(ref s, i) = node;
/// vec![
/// ("str attr".into(), s[..].into()),
/// ("int attr".into(), i.to_string().into()),
/// ]
/// }));
/// ```
///
/// Currently only string attribute types are supported.
pub fn export_node_weights(mut self, node_weight: Box<PrintWeights<G::NodeWeight>>) -> Self {
self.export_nodes = Some(node_weight);
self
}
/// Write the GraphML file to the given writer.
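///
/// # Example
///
/// A minimal sketch writing directly to a file (the file name is only an
/// illustration):
///
/// ```no_run
/// # use petgraph::Graph;
/// # use petgraph_graphml::GraphMl;
/// # use std::fs::File;
/// # fn main() -> std::io::Result<()> {
/// let graph: Graph<(), ()> = Graph::new();
/// let graphml = GraphMl::new(&graph);
/// let file = File::create("graph.graphml")?;
/// graphml.to_writer(file)?;
/// # Ok(())
/// # }
/// ```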
pub fn to_writer<W>(&self, writer: W) -> io::Result<()>
where
W: Write,
{
let mut writer = EventWriter::new_with_config(
writer,
EmitterConfig::new().perform_indent(self.pretty_print),
);
match self.emit_graphml(&mut writer) {
Ok(()) => Ok(()),
Err(XmlError::Io(ioerror)) => Err(ioerror),
_ => panic!("unexpected non-I/O error while writing GraphML"),
}
}
fn emit_graphml<W>(&self, writer: &mut EventWriter<W>) -> WriterResult<()>
where
W: Write,
{
// Store information about the attributes for nodes and edges.
// We cannot know in advance what the attribute names will be, so we just keep track of what gets emitted.
let mut attributes: HashSet<Attribute> = HashSet::new();
// XML/GraphML boilerplate
writer.write(XmlEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("UTF-8"),
standalone: None,
})?;
writer.write(XmlEvent::start_element("graphml").attr("xmlns", NAMESPACE_URL))?;
// emit graph with nodes/edges and possibly weights
self.emit_graph(writer, &mut attributes)?;
// Emit <key> tags for all the attributes
self.emit_keys(writer, &attributes)?;
writer.write(XmlEvent::end_element())?; // end graphml
Ok(())
}
fn emit_graph<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &mut HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
// convenience function to turn a NodeId into a String
let node2str_id = |node: G::NodeId| -> String { format!("n{}", self.graph.to_index(node)) };
// Emit an attribute for either node or edge
// This will also keep track of updating the global attributes list
let mut emit_attribute = |writer: &mut EventWriter<_>,
name: Cow<'static, str>,
data: &str,
for_: For|
-> WriterResult<()> {
writer.write(XmlEvent::start_element("data").attr("key", &*name))?;
attributes.insert(Attribute { name, for_ });
writer.write(XmlEvent::characters(data))?;
writer.write(XmlEvent::end_element()) // end data
};
// Each graph needs a default edge type
writer.write(XmlEvent::start_element("graph").attr(
"edgedefault",
if self.graph.is_directed() {
"directed"
} else {
"undirected"
},
))?;
// Emit nodes
for node in self.graph.node_references() {
writer.write(XmlEvent::start_element("node").attr("id", &*node2str_id(node.id())))?;
// Print weights
if let Some(ref node_labels) = self.export_nodes {
let datas = node_labels(node.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Node)?;
}
}
writer.write(XmlEvent::end_element())?; // end node
}
// Emit edges
for (i, edge) in self.graph.edge_references().enumerate() {
writer.write(
XmlEvent::start_element("edge")
.attr("id", &format!("e{}", i))
.attr("source", &*node2str_id(edge.source()))
.attr("target", &*node2str_id(edge.target())),
)?;
// Print weights
if let Some(ref edge_labels) = self.export_edges {
let datas = edge_labels(edge.weight());
for (name, data) in datas {
emit_attribute(writer, name, &*data, For::Edge)?;
}
}
writer.write(XmlEvent::end_element())?; // end edge
}
writer.write(XmlEvent::end_element()) // end graph
}
fn emit_keys<W>(
&self,
writer: &mut EventWriter<W>,
attributes: &HashSet<Attribute>,
) -> WriterResult<()>
where
W: Write,
{
for attr in attributes {
writer.write(
XmlEvent::start_element("key")
.attr("id", &*attr.name)
.attr("for", attr.for_.to_str())
.attr("attr.name", &*attr.name)
.attr("attr.type", "string"),
)?;
writer.write(XmlEvent::end_element())?; // end key
}
Ok(())
}
}
impl<G> Debug for GraphMl<G>
where
G: Debug,
G: IntoEdgeReferences,
G: IntoNodeReferences,
{
fn
|
fmt
|
identifier_name
|
|
main.rs
|
env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct HacspecCallbacks {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
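// A minimal sketch of the `cargo metadata --no-deps --format-version 1` JSON
// that the structs above deserialize. Only the fields used here are shown and
// the values are made up for illustration; cargo emits many more fields,
// which serde simply ignores.
//
//     {
//       "packages": [
//         {
//           "name": "my-spec",
//           "targets": [
//             { "name": "my-spec", "kind": ["lib"], "crate_types": ["lib"],
//               "src_path": "/path/to/src/lib.rs" }
//           ],
//           "dependencies": [{ "name": "hacspec-lib", "kind": null }]
//         }
//       ],
//       "target_directory": "/path/to/target"
//     }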
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error parsing cargo metadata output")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
|
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}' ...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacspec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
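// A minimal sketch of how `read_crate` is typically driven; the concrete
// values below are assumptions for illustration, not actual hacspec defaults:
//
//     let mut callbacks = HacspecCallbacks {
//         output_file: Some("output.fst".to_string()),
//         target_directory: String::new(),
//     };
//     let mut compiler_args = vec!["rustc".to_string()];
//     read_crate(None, None, &mut compiler_args, &mut callbacks);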
fn main() -> Result<(), usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index =
|
.iter()
|
random_line_split
|
main.rs
|
env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct
|
{
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error parsing cargo metadata output")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
.iter()
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}' ...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacspec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<(), usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file
|
HacspecCallbacks
|
identifier_name
|
main.rs
|
;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct HacspecCallbacks {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x != "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
)
|
}
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error parsing cargo metadata output")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
.iter()
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}' ...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacspec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<()
, usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output
|
{
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error);
|
identifier_body
|
get.go
|
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect type of result.
return (*knownClasses)["City"]
fmt.Printf("Resolver: WHOOPTYDOO\n")
return nil
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description
|
{
getKinds := graphql.Fields{}
if len(dbSchema.Actions.Classes) == 0 && len(dbSchema.Things.Classes) == 0 {
return nil, fmt.Errorf("There are not any Actions or Things classes defined yet.")
}
knownClasses := map[string]*graphql.Object{}
if len(dbSchema.Actions.Classes) > 0 {
localGetActions, err := buildGetClasses(dbSchema, kind.ACTION_KIND, dbSchema.Actions, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
identifier_body
|
|
get.go
|
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
|
if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect type of result.
return (*knownClasses)["City"]
fmt.Printf("Resolver: WHOOPTYDOO\n")
return nil
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description: "Pagination option, show the results after the first x results",
Type: graphql.Int,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- thing class (supposed to extract pagination, now return nil)\n")
filtersAndResolver := p.Source.(*filtersAndResolver)
pagination, err := common.ExtractPaginationFromArgs(p.Args)
if err != nil {
return nil, err
}
// There can
|
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses)
|
random_line_split
|
get.go
|
(dbSchema *schema.Schema) (*graphql.Field, error) {
getKinds := graphql.Fields{}
if len(dbSchema.Actions.Classes) == 0 && len(dbSchema.Things.Classes) == 0 {
return nil, fmt.Errorf("no Actions or Things classes are defined yet")
}
knownClasses := map[string]*graphql.Object{}
if len(dbSchema.Actions.Classes) > 0 {
localGetActions, err := buildGetClasses(dbSchema, kind.ACTION_KIND, dbSchema.Actions, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Actions"] = &graphql.Field{
Name: "WeaviateLocalGetActions",
Description: "Get Actions on the Local Weaviate",
Type: localGetActions,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetActions (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
if len(dbSchema.Things.Classes) > 0 {
localGetThings, err := buildGetClasses(dbSchema, kind.THING_KIND, dbSchema.Things, &knownClasses)
if err != nil {
return nil, err
}
getKinds["Things"] = &graphql.Field{
Name: "WeaviateLocalGetThings",
Description: "Get Things on the Local Weaviate",
Type: localGetThings,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGetThings (pass on Source)\n")
// Does nothing; pass through the filters
return p.Source, nil
},
}
}
field := graphql.Field{
Name: "WeaviateLocalGet",
Description: "Get Things or Actions on the local weaviate",
Args: graphql.FieldConfigArgument{
"where": &graphql.ArgumentConfig{
Description: "Filter options for the Get search, to convert the data to the filter input",
Type: graphql.NewInputObject(
graphql.InputObjectConfig{
Name: "WeaviateLocalGetWhereInpObj",
Fields: common_filters.Build(),
Description: "Filter options for the Get search, to convert the data to the filter input",
},
),
},
},
Type: graphql.NewObject(graphql.ObjectConfig{
Name: "WeaviateLocalGetObj",
Fields: getKinds,
Description: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect the type of the result instead of hard-coding "City".
return (*knownClasses)["City"]
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
|
Build
|
identifier_name
|
|
get.go
|
: "Type of Get function to get Things or Actions on the Local Weaviate",
}),
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- LocalGet (extract resolver from source, parse filters )\n")
resolver := p.Source.(map[string]interface{})["Resolver"].(Resolver)
filters, err := common_filters.ExtractFilters(p.Args)
if err != nil {
return nil, err
}
return &filtersAndResolver{
filters: filters,
resolver: resolver,
}, nil
},
}
return &field, nil
}
// Builds the classes below a Local -> Get -> (k kind.Kind)
func buildGetClasses(dbSchema *schema.Schema, k kind.Kind, semanticSchema *models.SemanticSchema, knownClasses *map[string]*graphql.Object) (*graphql.Object, error) {
classFields := graphql.Fields{}
var kindName string
switch k {
case kind.THING_KIND:
kindName = "Thing"
case kind.ACTION_KIND:
kindName = "Action"
}
for _, class := range semanticSchema.Classes {
classField, err := buildGetClass(dbSchema, k, class, knownClasses)
if err != nil {
return nil, fmt.Errorf("Could not build class for %s", class.Class)
}
classFields[class.Class] = classField
}
classes := graphql.NewObject(graphql.ObjectConfig{
Name: fmt.Sprintf("WeaviateLocalGet%ssObj", kindName),
Fields: classFields,
Description: fmt.Sprintf("Type of %ss i.e. %ss classes to Get on the Local Weaviate", kindName, kindName),
})
return classes, nil
}
// Build a single class in Local -> Get -> (k kind.Kind) -> (models.SemanticSchemaClass)
func buildGetClass(dbSchema *schema.Schema, k kind.Kind, class *models.SemanticSchemaClass, knownClasses *map[string]*graphql.Object) (*graphql.Field, error) {
classObject := graphql.NewObject(graphql.ObjectConfig{
Name: class.Class,
Fields: (graphql.FieldsThunk)(func() graphql.Fields {
classProperties := graphql.Fields{}
classProperties["uuid"] = &graphql.Field{
Description: "UUID of the thing or action given by the local Weaviate instance",
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("WHOOPTYDOO uuid\n")
return "uuid", nil
},
}
for _, property := range class.Properties {
propertyType, err := dbSchema.FindPropertyDataType(property.AtDataType)
if err != nil {
// We can't return an error in this FieldsThunk function, so we need to panic
panic(fmt.Sprintf("buildGetClass: wrong propertyType for %s.%s.%s; %s", k.Name(), class.Class, property.Name, err.Error()))
}
var propertyField *graphql.Field
if propertyType.IsPrimitive() {
switch propertyType.AsPrimitive() {
case schema.DataTypeString:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: string\n")
return "primitive string", nil
},
}
case schema.DataTypeInt:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Int,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: int\n")
return nil, nil
},
}
case schema.DataTypeNumber:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Float,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: float\n")
return 4.2, nil
},
}
case schema.DataTypeBoolean:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.Boolean,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: bool\n")
return true, nil
},
}
case schema.DataTypeDate:
propertyField = &graphql.Field{
Description: property.Description,
Type: graphql.String, // String since no graphql date datatype exists
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("GET PRIMITIVE PROP: date\n")
return "somedate", nil
},
}
default:
panic(fmt.Sprintf("buildGetClass: unknown primitive type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, propertyType.AsPrimitive()))
}
propertyField.Name = property.Name
classProperties[property.Name] = propertyField
} else {
// This is a reference
refClasses := propertyType.Classes()
propertyName := strings.Title(property.Name)
dataTypeClasses := make([]*graphql.Object, len(refClasses))
for index, refClassName := range refClasses {
refClass, ok := (*knownClasses)[string(refClassName)]
if !ok {
panic(fmt.Sprintf("buildGetClass: unknown referenced class type for %s.%s.%s; %s", k.Name(), class.Class, property.Name, refClassName))
}
dataTypeClasses[index] = refClass
}
classUnion := graphql.NewUnion(graphql.UnionConfig{
Name: fmt.Sprintf("%s%s%s", class.Class, propertyName, "Obj"),
Types: dataTypeClasses,
ResolveType: func(p graphql.ResolveTypeParams) *graphql.Object {
// TODO: inspect the type of the result instead of hard-coding "City".
return (*knownClasses)["City"]
},
Description: property.Description,
})
// TODO: Check cardinality
classProperties[propertyName] = &graphql.Field{
Type: classUnion,
Description: property.Description,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- Resolve action property field (ref?)\n")
fmt.Printf("WHOOPTYDOO2\n")
return true, nil
},
}
}
}
return classProperties
}),
Description: class.Description,
})
(*knownClasses)[class.Class] = classObject
classField := graphql.Field{
Type: graphql.NewList(classObject),
Description: class.Description,
Args: graphql.FieldConfigArgument{
"first": &graphql.ArgumentConfig{
Description: "Pagination option, show the first x results",
Type: graphql.Int,
},
"after": &graphql.ArgumentConfig{
Description: "Pagination option, show the results after the first x results",
Type: graphql.Int,
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
fmt.Printf("- thing class (supposed to extract pagination, now return nil)\n")
filtersAndResolver := p.Source.(*filtersAndResolver)
pagination, err := common.ExtractPaginationFromArgs(p.Args)
if err != nil {
return nil, err
}
// There can only be exactly one graphql_ast.Field; it is the class name.
if len(p.Info.FieldASTs) != 1 {
panic("Only one Field expected here")
}
selectionsOfClass := p.Info.FieldASTs[0].SelectionSet
properties, err := extractProperties(selectionsOfClass)
if err != nil {
return nil, err
}
params := LocalGetClassParams{
Filters: filtersAndResolver.filters,
Kind: k,
ClassName: class.Class,
Pagination: pagination,
Properties: properties,
}
promise, err := filtersAndResolver.resolver.LocalGetClass(¶ms)
return promise, err
},
}
return &classField, nil
}
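// For orientation, a query against the schema built above could look like the raw string below.
// extractProperties (next function) walks exactly this shape: plain fields for primitive
// properties, and inline fragments plus the optional __typename for reference properties. The
// class name "City" appears elsewhere in this file; the other names and the "Local"/"Get"/"Things"
// nesting are illustrative assumptions.
const exampleLocalGetQuery = `
{
  Local {
    Get {
      Things {
        City(first: 10) {
          uuid
          name
          InCountry {
            __typename
            ... on Country {
              name
            }
          }
        }
      }
    }
  }
}`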
func extractProperties(selections *graphql_ast.SelectionSet) ([]SelectProperty, error) {
//debugFieldAsts(fieldASTs)
var properties []SelectProperty
for _, selection := range selections.Selections {
field := selection.(*graphql_ast.Field)
name := field.Name.Value
property := SelectProperty{Name: name}
property.IsPrimitive = (field.SelectionSet == nil)
if !property.IsPrimitive
|
{
// We can interpret this property in different ways
for _, subSelection := range field.SelectionSet.Selections {
// Is it a field with the name __typename?
subsectionField, ok := subSelection.(*graphql_ast.Field)
if ok {
if subsectionField.Name.Value == "__typename" {
property.IncludeTypeName = true
continue
} else {
return nil, fmt.Errorf("Expected a InlineFragment, not a '%s' field ", subsectionField.Name.Value)
}
}
// Otherwise these _must_ be inline fragments
fragment, ok := subSelection.(*graphql_ast.InlineFragment)
if !ok {
return nil, fmt.Errorf("Expected a InlineFragment; you need to specify as which type you want to retrieve a reference %#v", subSelection)
}
|
conditional_block
|
|
configV2.go
|
[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
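// The validate functions above are the last step of a load sequence that, judging from the rest
// of this file, runs roughly as sketched here. Unmarshal's exact signature is an assumption based
// on its use in copy() further down; LoadEnvironments and AddDefaultsAndValidate are defined in
// this file.
func exampleLoadConfig(content []byte) (*Config, error) {
	cfg, err := Unmarshal(content)
	if err != nil {
		return nil, err
	}
	cfg.LoadEnvironments() // expand ${VAR} references in input and metric paths
	if err := AddDefaultsAndValidate(cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}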
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) > 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
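// A small sketch of what the webhook branch above accepts and rejects. The field names are the
// ones used in this file; a real config would normally go through addDefaults first, so only the
// fields relevant to this branch are set here.
func exampleWebhookValidation() {
	ok := &InputConfig{
		Type:                inputTypeWebhook,
		WebhookPath:         "/webhook",
		WebhookFormat:       "json_single",
		WebhookJsonSelector: ".message",
	}
	fmt.Println(ok.validate()) // <nil>

	bad := &InputConfig{
		Type:                inputTypeWebhook,
		WebhookPath:         "webhook", // missing the leading "/"
		WebhookFormat:       "json_single",
		WebhookJsonSelector: ".message",
	}
	fmt.Println(bad.validate()) // 'input.webhook_path' must start with "/"
}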
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) validate() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
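// Sketch of the duplicate-name check above: two otherwise valid counters sharing a name are
// rejected. The grok pattern is illustrative; any non-empty match string passes the field checks.
func exampleDuplicateMetricNames() error {
	metrics := MetricsConfig{
		{Type: "counter", Name: "http_errors_total", Help: "counts errors", Match: "ERROR %{GREEDYDATA}"},
		{Type: "counter", Name: "http_errors_total", Help: "counts errors", Match: "ERROR %{GREEDYDATA}"},
	}
	return metrics.validate() // "... metric 'http_errors_total' defined twice."
}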
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type {
case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false
case "histogram":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, true, false
case "summary":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, false, true
default:
return fmt.Errorf("Invalid 'metrics.type': '%v'. We currently only support 'counter' and 'gauge'.", c.Type)
}
switch {
case hasValue && len(c.Value) == 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' must not be empty for %v metrics.", c.Type)
case !hasValue && len(c.Value) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' cannot be used for %v metrics.", c.Type)
case !cumulativeAllowed && c.Cumulative:
return fmt.Errorf("Invalid metric configuration: 'metrics.cumulative' cannot be used for %v metrics.", c.Type)
case !bucketsAllowed && len(c.Buckets) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
case !quantilesAllowed && len(c.Quantiles) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
}
if len(c.DeleteMatch) > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_match' is only supported for metrics with labels.")
}
if len(c.DeleteMatch) == 0 && len(c.DeleteLabelTemplates) > 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_labels' can only be used when 'metrics.delete_match' is present.")
}
if c.Retention > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.retention' is only supported for metrics with labels.")
}
for _, deleteLabelTemplate := range c.DeleteLabelTemplates {
found := false
for _, labelTemplate := range c.LabelTemplates
|
{
if deleteLabelTemplate.Name() == labelTemplate.Name() {
found = true
break
}
}
|
conditional_block
|
|
configV2.go
|
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) validate() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type {
case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false
case "histogram":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, true, false
case "summary":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, false, false, true
default:
return fmt.Errorf("Invalid 'metrics.type': '%v'. We currently only support 'counter' and 'gauge'.", c.Type)
}
switch {
case hasValue && len(c.Value) == 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' must not be empty for %v metrics.", c.Type)
case !hasValue && len(c.Value) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.value' cannot be used for %v metrics.", c.Type)
case !cumulativeAllowed && c.Cumulative:
return fmt.Errorf("Invalid metric configuration: 'metrics.cumulative' cannot be used for %v metrics.", c.Type)
case !bucketsAllowed && len(c.Buckets) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
case !quantilesAllowed && len(c.Quantiles) > 0:
return fmt.Errorf("Invalid metric configuration: 'metrics.buckets' cannot be used for %v metrics.", c.Type)
}
if len(c.DeleteMatch) > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_match' is only supported for metrics with labels.")
}
if len(c.DeleteMatch) == 0 && len(c.DeleteLabelTemplates) > 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.delete_labels' can only be used when 'metrics.delete_match' is present.")
}
if c.Retention > 0 && len(c.Labels) == 0 {
return fmt.Errorf("Invalid metric configuration: 'metrics.retention' is only supported for metrics with labels.")
}
for _, deleteLabelTemplate := range c.DeleteLabelTemplates {
found := false
for _, labelTemplate := range c.LabelTemplates {
if deleteLabelTemplate.Name() == labelTemplate.Name() {
found = true
break
}
}
if !found {
return fmt.Errorf("Invalid metric configuration: '%v' cannot be used as a delete_label, because the metric does not have a label named '%v'.", deleteLabelTemplate.Name(), deleteLabelTemplate.Name())
}
}
// InitTemplates() validates that labels/delete_labels/value are present as grok_fields in the grok pattern.
return nil
}
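// Sketch of a metric that satisfies the delete_match / delete_labels rules checked above: every
// delete label must also exist as a label, and the template slices compared in the loop are
// filled by InitTemplates, so it has to run first. Pattern and template strings are illustrative.
func exampleDeleteLabels() error {
	m := MetricConfig{
		Type:         "gauge",
		Name:         "active_sessions",
		Help:         "currently active sessions per user",
		Match:        "session %{WORD:action} for %{USER:user}",
		Value:        "1",
		Labels:       map[string]string{"user": "{{.user}}"},
		DeleteMatch:  "session closed for %{USER:user}",
		DeleteLabels: map[string]string{"user": "{{.user}}"},
	}
	if err := m.InitTemplates(); err != nil {
		return err
	}
	return m.validate()
}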
func (c *ServerConfig) validate() error {
switch {
case c.Protocol != "https" && c.Protocol != "http":
return fmt.Errorf("Invalid 'server.protocol': '%v'. Expecting 'http' or 'https'.", c.Protocol)
case c.Port <= 0:
return fmt.Errorf("Invalid 'server.port': '%v'.", c.Port)
case !strings.HasPrefix(c.Path, "/"):
return fmt.Errorf("Invalid server configuration: 'server.path' must start with '/'.")
case c.Protocol == "https":
if c.Cert != "" && c.Key == "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' must not be specified without 'server.key'")
}
if c.Cert == "" && c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.key' must not be specified without 'server.cert'")
}
case c.Protocol == "http":
if c.Cert != "" || c.Key != "" {
return fmt.Errorf("Invalid server configuration: 'server.cert' and 'server.key' can only be configured for protocol 'https'.")
}
}
return nil
}
// Made this public so it can be called when converting config v1 to config v2.
func AddDefaultsAndValidate(cfg *Config) error {
var err error
cfg.addDefaults()
for i := range []MetricConfig(cfg.Metrics) {
err = cfg.Metrics[i].InitTemplates()
if err != nil {
return err
}
}
return cfg.validate()
}
// Made this public so MetricConfig can be initialized in tests.
func (metric *MetricConfig) InitTemplates() error {
var (
err error
tmplt template.Template
msg = "invalid configuration: failed to read metric %v: error parsing %v template: %v: " +
"don't forget to put a . (dot) in front of grok fields, otherwise it will be interpreted as a function."
)
for _, t := range []struct {
src map[string]string // label / template string as read from the config file
dest *[]template.Template // parsed template used internally in grok_exporter
}{
{
src: metric.Labels,
dest: &(metric.LabelTemplates),
},
{
src: metric.DeleteLabels,
dest: &(metric.DeleteLabelTemplates),
},
} {
*t.dest = make([]template.Template, 0, len(t.src))
for name, templateString := range t.src {
tmplt, err = template.New(name, templateString)
if err != nil {
return fmt.Errorf(msg, fmt.Sprintf("label %v", metric.Name), name, err.Error())
}
*t.dest = append(*t.dest, tmplt)
}
}
if len(metric.Value) > 0 {
metric.ValueTemplate, err = template.New("__value__", metric.Value)
if err != nil {
return fmt.Errorf(msg, "value", metric.Name, err.Error())
}
}
return nil
}
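// The "don't forget to put a . (dot)" hint above covers the most common mistake: writing
// {{user}} instead of {{.user}} in a label template. Assuming the template wrapper behaves like
// text/template, a config like the following would typically fail in InitTemplates with exactly
// that message (names are illustrative):
//
//	m := MetricConfig{
//		Name:   "http_requests",
//		Labels: map[string]string{"user": "{{user}}"}, // should be {{.user}}
//	}
//	err := m.InitTemplates() // error parsing user template: ... don't forget to put a . (dot) ...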
// YAML representation, does not include default values.
func (cfg *Config) String() string {
stripped := cfg.copy()
if stripped.Global.RetentionCheckInterval == defaultRetentionCheckInterval {
stripped.Global.RetentionCheckInterval = 0
}
if stripped.Server.Path == "/metrics" {
stripped.Server.Path = ""
}
return stripped.marshalToString()
}
func (cfg *Config) copy() *Config
|
{
result, _ := Unmarshal([]byte(cfg.marshalToString()))
return result
}
|
identifier_body
|
|
configV2.go
|
:"webhook_text_bulk_separator,omitempty"`
}
type GrokConfig struct {
PatternsDir string `yaml:"patterns_dir,omitempty"`
AdditionalPatterns []string `yaml:"additional_patterns,omitempty"`
}
type MetricConfig struct {
Type string `yaml:",omitempty"`
Name string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
Help string `yaml:",omitempty"`
Match string `yaml:",omitempty"`
Retention time.Duration `yaml:",omitempty"` // implicitly parsed with time.ParseDuration()
Value string `yaml:",omitempty"`
Cumulative bool `yaml:",omitempty"`
Buckets []float64 `yaml:",flow,omitempty"`
Quantiles map[float64]float64 `yaml:",flow,omitempty"`
Labels map[string]string `yaml:",omitempty"`
LabelTemplates []template.Template `yaml:"-"` // parsed version of Labels, will not be serialized to yaml.
ValueTemplate template.Template `yaml:"-"` // parsed version of Value, will not be serialized to yaml.
DeleteMatch string `yaml:"delete_match,omitempty"`
DeleteLabels map[string]string `yaml:"delete_labels,omitempty"` // TODO: Make sure that DeleteMatch is not nil if DeleteLabels are used.
DeleteLabelTemplates []template.Template `yaml:"-"` // parsed version of DeleteLabels, will not be serialized to yaml.
}
type MetricsConfig []MetricConfig
type ServerConfig struct {
Protocol string `yaml:",omitempty"`
Host string `yaml:",omitempty"`
Port int `yaml:",omitempty"`
Path string `yaml:",omitempty"`
Cert string `yaml:",omitempty"`
Key string `yaml:",omitempty"`
}
func (cfg *Config) LoadEnvironments() {
path := cfg.Input.Path
for i := range path {
path[i] = os.ExpandEnv(path[i])
}
excludes := cfg.Input.Excludes
for i := range excludes {
excludes[i] = os.ExpandEnv(excludes[i])
}
for i := range cfg.Metrics {
m := &cfg.Metrics[i]
for j, p := range m.Path {
m.Path[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
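// LoadEnvironments above delegates all substitution to the standard library: os.ExpandEnv
// replaces ${VAR} and $VAR references with values from the process environment. A short,
// self-contained illustration with a hypothetical variable name:
//
//	os.Setenv("GROK_LOG_DIR", "/var/log/myapp")
//	fmt.Println(os.ExpandEnv("${GROK_LOG_DIR}/*.log")) // /var/log/myapp/*.log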
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
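// With an empty server section, the defaults above produce the conventional exporter endpoint
// http://<host>:9144/metrics; a minimal sketch:
func exampleServerDefaults() ServerConfig {
	s := ServerConfig{}
	s.addDefaults()
	// now s.Protocol == "http", s.Port == 9144, s.Path == "/metrics"
	return s
}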
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) > 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig) validate() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not be empty.")
case c.Match == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.match' must not be empty.")
}
var hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed bool
switch c.Type {
|
case "counter":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = false, false, false, false
case "gauge":
hasValue, cumulativeAllowed, bucketsAllowed, quantilesAllowed = true, true, false, false
|
random_line_split
|
|
configV2.go
|
yaml:"max_line_size,omitempty"`
MaxLinesRatePerFile uint16 `yaml:"max_lines_rate_per_file,omitempty"`
IdleTimeout time.Duration `yaml:"idle_timeout,omitempty"`
WebhookPath string `yaml:"webhook_path,omitempty"`
WebhookFormat string `yaml:"webhook_format,omitempty"`
WebhookJsonSelector string `yaml:"webhook_json_selector,omitempty"`
WebhookTextBulkSeparator string `yaml:"webhook_text_bulk_separator,omitempty"`
}
type GrokConfig struct {
PatternsDir string `yaml:"patterns_dir,omitempty"`
AdditionalPatterns []string `yaml:"additional_patterns,omitempty"`
}
type MetricConfig struct {
Type string `yaml:",omitempty"`
Name string `yaml:",omitempty"`
Path []string `yaml:",omitempty"`
Excludes []string `yaml:",omitempty"`
Help string `yaml:",omitempty"`
Match string `yaml:",omitempty"`
Retention time.Duration `yaml:",omitempty"` // implicitly parsed with time.ParseDuration()
Value string `yaml:",omitempty"`
Cumulative bool `yaml:",omitempty"`
Buckets []float64 `yaml:",flow,omitempty"`
Quantiles map[float64]float64 `yaml:",flow,omitempty"`
Labels map[string]string `yaml:",omitempty"`
LabelTemplates []template.Template `yaml:"-"` // parsed version of Labels, will not be serialized to yaml.
ValueTemplate template.Template `yaml:"-"` // parsed version of Value, will not be serialized to yaml.
DeleteMatch string `yaml:"delete_match,omitempty"`
DeleteLabels map[string]string `yaml:"delete_labels,omitempty"` // TODO: Make sure that DeleteMatch is not nil if DeleteLabels are used.
DeleteLabelTemplates []template.Template `yaml:"-"` // parsed version of DeleteLabels, will not be serialized to yaml.
}
type MetricsConfig []MetricConfig
type ServerConfig struct {
Protocol string `yaml:",omitempty"`
Host string `yaml:",omitempty"`
Port int `yaml:",omitempty"`
Path string `yaml:",omitempty"`
Cert string `yaml:",omitempty"`
Key string `yaml:",omitempty"`
}
func (cfg *Config) LoadEnvironments() {
path := cfg.Input.Path
for i := range path {
path[i] = os.ExpandEnv(path[i])
}
excludes := cfg.Input.Excludes
for i := range excludes {
excludes[i] = os.ExpandEnv(excludes[i])
}
for i := range cfg.Metrics {
m := &cfg.Metrics[i]
for j, p := range m.Path {
m.Path[j] = os.ExpandEnv(p)
}
for j, p := range m.Excludes {
m.Excludes[j] = os.ExpandEnv(p)
}
}
cfg.Input.PositionFile = os.ExpandEnv(cfg.Input.PositionFile)
}
func (cfg *Config) addDefaults() {
cfg.Global.addDefaults()
cfg.Input.addDefaults()
cfg.Grok.addDefaults()
if cfg.Metrics == nil {
cfg.Metrics = MetricsConfig(make([]MetricConfig, 0))
}
cfg.Metrics.addDefaults()
cfg.Server.addDefaults()
}
func (c *GlobalConfig) addDefaults() {
if c.ConfigVersion == 0 {
c.ConfigVersion = 2
}
if c.RetentionCheckInterval == 0 {
c.RetentionCheckInterval = defaultRetentionCheckInterval
}
if c.LogLevel == "" {
c.LogLevel = defaultLogLevel
}
if c.LogTo == "" {
c.LogLevel = defaultLogTo
}
}
func (c *InputConfig) addDefaults() {
if len(c.CollectMode) == 0 {
c.CollectMode = "mixed"
}
if c.PollInterval == 0 {
c.PollInterval = defaultPollInterval
}
switch c.Type {
case "", inputTypeStdin:
c.Type = inputTypeStdin
case inputTypeFile:
if c.PositionFile == "" {
c.PositionFile = defaultPositionsFile
}
if c.SyncInterval == 0 {
c.SyncInterval = defaultPositionSyncIntervcal
}
case inputTypeWebhook:
if len(c.WebhookPath) == 0 {
c.WebhookPath = "/webhook"
}
if len(c.WebhookFormat) == 0 {
c.WebhookFormat = "text_single"
}
if len(c.WebhookJsonSelector) == 0 {
c.WebhookJsonSelector = ".message"
}
if len(c.WebhookTextBulkSeparator) == 0 {
c.WebhookTextBulkSeparator = "\n\n"
}
}
}
func (c *GrokConfig) addDefaults() {}
func (c *MetricsConfig) addDefaults() {}
func (c *ServerConfig) addDefaults() {
if c.Protocol == "" {
c.Protocol = "http"
}
if c.Port == 0 {
c.Port = 9144
}
if c.Path == "" {
c.Path = "/metrics"
}
}
func (cfg *Config) validate() error {
err := cfg.Input.validate()
if err != nil {
return err
}
err = cfg.Grok.validate()
if err != nil {
return err
}
err = cfg.Metrics.validate()
if err != nil {
return err
}
err = cfg.Server.validate()
if err != nil {
return err
}
return nil
}
func (c *InputConfig) validate() error {
switch {
case c.Type == inputTypeStdin:
if len(c.Path) > 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.path' when 'input.type' is stdin")
}
if c.PollInterval != 0 {
return fmt.Errorf("invalid input configuration: cannot use 'input.poll_interval_seconds' when 'input.type' is stdin")
}
case c.Type == inputTypeFile:
if len(c.Path) == 0 {
return fmt.Errorf("invalid input configuration: 'input.path' is required for input type \"file\"")
}
if c.PollInterval > 0 {
if c.MaxLinesRatePerFile != 0 {
return fmt.Errorf("cannot limit input speed when using poller")
}
}
fi, err := os.Stat(c.PositionFile)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
if fi.IsDir() {
return errors.New("expected a file for position_file")
}
}
if c.SyncInterval < time.Second {
return errors.New("expected sync_interval more than 1s")
}
case c.Type == inputTypeWebhook:
if c.WebhookPath == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' is required for input type \"webhook\"")
} else if c.WebhookPath[0] != '/' {
return fmt.Errorf("invalid input configuration: 'input.webhook_path' must start with \"/\"")
}
if c.WebhookFormat != "text_single" && c.WebhookFormat != "text_bulk" && c.WebhookFormat != "json_single" && c.WebhookFormat != "json_bulk" {
return fmt.Errorf("invalid input configuration: 'input.webhook_format' must be \"text_single|text_bulk|json_single|json_bulk\"")
}
if c.WebhookJsonSelector == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' is required for input type \"webhook\"")
} else if c.WebhookJsonSelector[0] != '.' {
return fmt.Errorf("invalid input configuration: 'input.webhook_json_selector' must start with \".\"")
}
if c.WebhookFormat == "text_bulk" && c.WebhookTextBulkSeparator == "" {
return fmt.Errorf("invalid input configuration: 'input.webhook_text_bulk_separator' is required for input type \"webhook\" and webhook_format \"text_bulk\"")
}
default:
return fmt.Errorf("unsupported 'input.type': %v", c.Type)
}
return nil
}
func (c *GrokConfig) validate() error {
if c.PatternsDir == "" && len(c.AdditionalPatterns) == 0 {
return fmt.Errorf("Invalid grok configuration: no patterns defined: one of 'grok.patterns_dir' and 'grok.additional_patterns' must be configured.")
}
return nil
}
func (c *MetricsConfig)
|
() error {
if len(*c) == 0 {
return fmt.Errorf("Invalid metrics configuration: 'metrics' must not be empty.")
}
metricNames := make(map[string]bool)
for _, metric := range *c {
err := metric.validate()
if err != nil {
return err
}
_, exists := metricNames[metric.Name]
if exists {
return fmt.Errorf("Invalid metric configuration: metric '%v' defined twice.", metric.Name)
}
metricNames[metric.Name] = true
}
return nil
}
func (c *MetricConfig) validate() error {
switch {
case c.Type == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.type' must not be empty.")
case c.Name == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.name' must not be empty.")
case c.Help == "":
return fmt.Errorf("Invalid metric configuration: 'metrics.help' must not
|
validate
|
identifier_name
|
jQuery-Validate-Extend.js
|
]).rules("add", { cantempty: newElements });
//eleresult = $(param[i]).valid();
//break;
}
}
var result = (value || eleresult) ? true : false;
return result;
}, "This fields can not be empty.");
/* Check that the value is not negative */
jQuery.validator.addMethod("negativeCheck", function (value, element, param) {
if (isNaN(value)) return false;
return parseFloat(value) >= 0;
}, "Please enter a non-negative number.");
/** Date validity check
Checks whether the date falls within a given period.
param: number || string || array
Created by Aaron [20140319]
*/
jQuery.validator.addMethod("inDate", function (value, element, param) {
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array") {
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate) {
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return _thisDate <= _inDate;
}, "This date is not in {0}");
/** check from and to should both have value in group
*/
jQuery.validator.addMethod("groupDateRequired", function (value, element, param)
{
var groupArray = param.split(',');
$.each(groupArray, function (i, o)
{
groupArray[i] = ('input[name=' + o + ']');
})
if (!value)
{
if (!$(groupArray[0]).val())
{
return !!($(groupArray[1]).val() && $(groupArray[2]).val());
}
else
{
return false;
}
}
if (value)
{
if (!$(groupArray[0]).val())
{
return false;
}
else
{
return !($(groupArray[1]).val() || $(groupArray[2]).val());
}
}
return true;
}, "From and To must be both have value!");
/** inFinlYear - Check whether is in the finance year
param:number || string || array
if(array)
{
[finlYearFromDate,finlYearToDate]
}
if(string)
{
if(element is FromDate)
[finlYearFromDate]
if(element is ToDate)
[finlYearToDate]
}
*/
jQuery.validator.addMethod("inFinlYear", function (value, element, param)
{
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array")
{
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate)
{
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return element.attributes.name.value.indexOf('To') > -1 ? (_thisDate <= _inDate) : (_inDate <= _thisDate);
}, "This date is not in {0}");
/** When the control has a certain value, the specified fields become required.
param: [{value:array, element:array}]
Created by Aaron [20140313]
*/
jQuery.validator.addMethod("dorequired", function (value, element, param) {
if ($.type(param) != "array") param = [param];
for (var i = 0; i < param.length; i++) {
var obj = param[i];
if (!obj.element || obj.element.length < 1) continue;
var _value = (obj.value && $.type(obj.value) != "array") ? [obj.value] : obj.value;
var _ele = $.type(obj.element) == "array" ? obj.element : obj.element.split(",");
for (var j = 0; j < _ele.length; j++) {
var $self = $(_ele[j]);
if (!$self.rules().required && (!obj.value || _value.length < 1 || $.inArray(value, _value) != -1)) {
$self.rules("add", { required: true });
return $self.valid();
} else if ($self.rules().required && $.inArray(value, _value) == -1) {
$self.removeClass("required").rules("remove", "required");
return $self.valid();
}
}
}
return true;
}, "");
/** Validates amounts formatted like 10,000,000.00
param: boolean
*/
jQuery.validator.addMethod("amount", function (value, element, param) {
var amountReg = /^[1-9](?:\d*,\d{3})*(?:(\.\d+|$))/;
if (param == true) return amountReg.test(value);
return true;
}, "Please enter a valid Amount");
/** Uniqueness validation; the returned message has the format "<XXX> is duplicated!"
param: same as the built-in remote rule
Created by Gary [20140327]
*/
jQuery.validator.addMethod("duplicatedRemote", function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
var validator = this;
if (previous.old != value) {
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
type: "post",
data: data,
success: function (response) {
//var tempResponse = response;
//if (tempResponse.result != undefined) {
// response = tempResponse.result;
//}
//if (tempResponse.code != undefined) {
// validator.settings.messages[element.name].remote = "<" + tempResponse.code + ">is duplicated!";
//} else {
// validator.settings.messages[element.name].remote =previous.originalMessage;
//}
var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted;
validator.prepareElement(element);
validator.formSubmitted = submitted;
validator.successList.push(element);
validator.showErrors();
} else {
var errors = {};
var message = value + " is duplicated!"; //response || validator.defaultMessage(element, "remote");
errors[element.name] = previous.message = message;// $.isFunction(message) ? message(value) :
validator.showErrors(errors);
}
previous.valid = valid;
validator.stopRequest(element, valid);
}
}, param));
return "pending";
} else if (this.pending[element.name]) {
return "pending";
}
if (previous.valid == true) {
return previous.valid;
} else {
var errors = {};
errors[element.name] = previous.message;
validator.showErrors(errors);
return "pending";
}
}, "This Field is duplicated!");
/*
Controls whether the duplication message shows the object's text or its value, e.g. for DropDown controls.
param.objID -> $('#'+objID) // if param.objID is not given, element.id is used by default
param.objType -> ['text'|'value'] 'text': get obj.text(), 'value': get obj.val() // if param.objType is not given, 'value' is used by default
Example:
param.objID: "popAgentID option:selected", // used for a DropDown control
param.objType:'text' // with 'text', the duplication message shows the text of the selected DropDown item
*/
jQuery.validator.addMethod("duplicatedRemoteCustomized", function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
var validator = this;
if (previous.old != value) {
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
type: "post",
data: data,
success: function (response) {
|
var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted;
|
random_line_split
|
|
jQuery-Validate-Extend.js
|
;
} else if (_type.match("<")) {
_elseRule.type = _type.replace("<", ">");
} else {
_elseRule.type = _type
}
_elseRule.object = "#" + element.id;
$elseDate.rules("add", { compareDate: _elseRule });
}
}
var result = eval("" + Date.parse(_thisDate) + _type + Date.parse(_elseDate));
return result;
});
//all cache false;
jQuery.validator.addMethod("htmltag", function (value, element, parm) {
var htmltag1 = /<(\/\s*)?((\w+:)?\w+)(\w+(\s*=\s*((["'])(\\["'tbnr]|[^\7])*?\7|\w+)|.{0})|\s)*?(\/\s*)?>/ig;
return this.optional(element) || !htmltag1.test(value);
}, "Not allowed to enter the HTML tag.");
// Validate that the two text boxes are not both empty
jQuery.validator.addMethod("bothEmpty", function (value, element, parm) {
if (value == '' && $("#" + parm).val() == '') return false;
else
return true;
}, "PaymentTerm1 and PaymentTerm2 can not both be empty.");
// Validate the value range; non-numeric characters except "." are stripped first, e.g. 2,010,000.00 is validated as 2010000.00
jQuery.validator.addMethod("range", function (value, element, parm) {
var reg = /[^\d+(.)]/g;
var value = value.replace(reg, "");
return (value < parm[0] || value > parm[1]) ? false : true;
}, "This Field value should be between {0} - {1} .");
// Validate that the date is not later than the specified range
jQuery.validator.addMethod("compareRangeDate", function (value, element, parm) {
var startDate = jQuery("#" + parm).val();
if (!$.trim(startDate) == "") {
var result = startDate.split("-");
//alert(Date.parse(result[0]));
var startDate = result[1] + "/12/31";
//alert(startDate);
var date1 = new Date(Date.parse(startDate.replace("-", "/")));
var date2 = new Date(Date.parse(value.replace("-", "/")));
return date1 >= date2;
} else {
return true;
}
}, "Date is invalid!!");
// Validate that the date is not earlier than the specified range
jQuery.validator.addMethod("compareRangeDateToDate", function (value, element, parm) {
var startDate = jQuery("#" + parm).val();
if (!$.trim(startDate) == "") {
var result = startDate.split("-");
//alert(Date.parse(result[0]));
var startDate = result[0] + "/01/01";
//alert(startDate);
var date1 = new Date(Date.parse(startDate.replace("-", "/")));
var date2 = new Date(Date.parse(value.replace("-", "/")));
return date1 <= date2;
} else {
return true;
}
}, "Date is invalid!!");
/** Validate that the specified elements are not all empty; supports validating several elements at once.
param: string || array
Created by Aaron [20140318]
*/
jQuery.validator.addMethod("cantempty", function (value, element, param) {
if ($.type(param) != "array") param = [param];
var eleresult = false;
// copy the array
var newElements = global.Fn.copy(param);
for (var i = 0; i < param.length; i++) {
if ($(param[i]).val()) {
eleresult = true;
break;
}
if (!$(param[i]).rules().cantempty) {
newElements.splice(i, 1, "#" + element.id);
$(param[i]).rules("add", { cantempty: newElements });
//eleresult = $(param[i]).valid();
//break;
}
}
var result = (value || eleresult) ? true : false;
return result;
}, "This fields can not be empty.");
/* Check that the value is not negative */
jQuery.validator.addMethod("negativeCheck", function (value, element, param) {
if (isNaN(value)) return false;
return parseFloat(value) >= 0;
}, "Please enter a non-negative number.");
/** Date validity check
Checks whether the date falls within a given period.
param: number || string || array
Created by Aaron [20140319]
*/
jQuery.validator.addMethod("inDate", function (value, element, param) {
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array") {
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate) {
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return _thisDate <= _inDate;
}, "This date is not in {0}");
/** check from and to should both have value in group
*/
jQuery.validator.addMethod("groupDateRequired", function (value, element, param)
{
var groupArray = param.split(',');
$.each(groupArray, function (i, o)
{
groupArray[i] = ('input[name=' + o + ']');
})
if (!value)
{
if (!$(groupArray[0]).val())
{
return !!($(groupArray[1]).val() && $(groupArray[2]).val());
}
else
{
return false;
}
}
if (value)
{
if (!$(groupArray[0]).val())
{
return false;
}
else
{
return !($(groupArray[1]).val() || $(groupArray[2]).val());
}
}
return true;
}, "From and To must be both have value!");
/** inFinlYear - Check whether is in the finance year
param:number || string || array
if(array)
{
[finlYearFromDate,finlYearToDate]
}
if(string)
{
if(element is FromDate)
[finlYearFromDate]
if(element is ToDate)
[finlYearToDate]
}
*/
jQuery.validator.addMethod("inFinlYear", function (value, element, param)
{
var _format = "dmy";
var _inDate = param, _inEndDate;
if ($.type(param) == "array")
{
_inDate = param[0];
_inEndDate = param[1];
}
var _thisDate = Date.parse(global.Fn.formatDate(value + ":" + _format, "MM/dd/yyyy"));
_inDate = Date.parse(_inDate);
if (_inEndDate)
{
_inEndDate = Date.parse(_inEndDate);
return (_thisDate >= _inDate && _thisDate <= _inEndDate);
}
return element.attributes.name.value.indexOf('To') > -1 ? (_thisDate <= _inDate) : (_inDate <= _thisDate);
}, "This date is not in {0}");
/** When the control has a certain value, the specified fields become required.
param: [{value:array, element:array}]
Created by Aaron [20140313]
*/
jQuery.validator.addMethod("dorequired", function (value, element, param) {
if ($.type(param) != "array") param = [param];
for (var i = 0; i < param.length; i++) {
var obj = param[i];
if (!obj.element || obj.element.length < 1) continue;
var _value = (obj.value && $.type(obj.value) != "array") ? [obj.value] : obj.value;
var _ele = $.type(obj.element) == "array" ? obj.element : obj.element.split(",");
for (var j = 0; j < _ele.length; j++) {
var $self = $(_ele[j]);
if (!$self.rules().required && (!obj.value || _value.length < 1 || $.inArray(value, _value) != -1)) {
$self.rules("add", { required: true });
return $self.valid();
} else if ($self.rules().required && $.inArray(value, _value) == -1) {
$self.removeClass("required").rules("remove", "required");
return $self.valid();
}
}
}
return true;
}, "");
/** Validates amounts formatted like 10,000,000.00
param: boolean
*/
jQuery.validator.addMethod("amount", function (value, element, param) {
var amountReg = /^[1-9](?:\d*,\d{3})*(?:(\.\d+|$))/;
if (param == true) return amountReg.test(value);
return true;
}, "Please enter a valid Amount");
/** Uniqueness validation; the returned message has the format "<XXX> is duplicated!"
param: same as the built-in remote rule
Created by Gary [20140327]
*/
jQuery.validator.addMethod("duplicatedRemote", function (value, element, param) {
if (this
|
_elseRule.type = _type.replace(">", "<")
|
conditional_block
|
|
inputprocessor.py
|
import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
return self._accumulator
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def ak_to_pandas(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
|
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
#
|
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
|
conditional_block
|
inputprocessor.py
|
import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
|
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def ak_to_pandas(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
# for
|
return self._accumulator
|
identifier_body
|
inputprocessor.py
|
import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
return self._accumulator
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def
|
(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
#
|
ak_to_pandas
|
identifier_name
|
inputprocessor.py
|
import runInferenceTriton
from .utils import FILL_NONE_VALUE, add_selection_no_cutflow, bkgs, sigs, tagger_gen_matching
warnings.filterwarnings("ignore", message="Found duplicate branch ")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", message="Missing cross-reference index ")
warnings.filterwarnings("ignore", message="divide by zero encountered in log")
np.seterr(invalid="ignore")
P4 = {
"eta": "eta",
"phi": "phi",
"mass": "mass",
"pt": "pt",
}
class InputProcessor(ProcessorABC):
"""
Produces a flat training ntuple from PFNano.
"""
def __init__(self, label, inference, output_location="./outfiles/"):
"""
:param num_jets: Number of jets to save
:type num_jets: int
"""
"""
Skimming variables
"""
self.label = label
self.inference = inference
self._output_location = output_location
self.skim_vars = {
"Event": {
"event": "event",
},
"FatJet": {
**P4,
"msoftdrop": "msoftdrop",
},
"GenPart": [
"fj_genjetmass",
"fj_genRes_pt",
"fj_genRes_eta",
"fj_genRes_phi",
"fj_genRes_mass",
"fj_nprongs",
"fj_ncquarks",
"fj_lepinprongs",
"fj_nquarks",
"fj_H_VV_4q",
"fj_H_VV_elenuqq",
"fj_H_VV_munuqq",
"fj_H_VV_leptauelvqq",
"fj_H_VV_leptaumuvqq",
"fj_H_VV_hadtauvqq",
"fj_QCDb",
"fj_QCDbb",
"fj_QCDc",
"fj_QCDcc",
"fj_QCDothers",
"fj_V_2q",
"fj_V_elenu",
"fj_V_munu",
"fj_V_taunu",
"fj_Top_nquarksnob",
"fj_Top_nbquarks",
"fj_Top_ncquarks",
"fj_Top_nleptons",
"fj_Top_nele",
"fj_Top_nmu",
"fj_Top_ntau",
"fj_Top_taudecay",
],
# formatted to match weaver's preprocess.json
"MET": {
"met_features": {
"var_names": [
"met_relpt",
"met_relphi",
],
},
"met_points": {"var_length": 1},
},
"Lep": {
"fj_features": {
"fj_lep_dR",
"fj_lep_pt",
"fj_lep_iso",
"fj_lep_miniiso",
},
},
}
self.tagger_resources_path = str(pathlib.Path(__file__).parent.resolve()) + "/tagger_resources/"
self.fatjet_label = "FatJet"
self.pfcands_label = "FatJetPFCands"
self.svs_label = "FatJetSVs"
self._accumulator = dict_accumulator({})
@property
def accumulator(self):
return self._accumulator
def save_dfs_parquet(self, df, fname):
if self._output_location is not None:
PATH = f"{self._output_location}/parquet/"
if not os.path.exists(PATH):
os.makedirs(PATH)
table = pa.Table.from_pandas(df)
if len(table) != 0: # skip dataframes with empty entries
pq.write_table(table, f"{PATH}/{fname}.parquet")
def ak_to_pandas(self, output_collection: ak.Array) -> pd.DataFrame:
output = pd.DataFrame()
for field in ak.fields(output_collection):
output[field] = ak.to_numpy(output_collection[field])
return output
def dump_root(self, skimmed_vars: Dict[str, np.array], fname: str) -> None:
"""
Saves ``jet_vars`` dict as a rootfile to './outroot'
"""
local_dir = os.path.abspath(os.path.join(self._output_location, "outroot"))
os.system(f"mkdir -p {local_dir}")
with uproot.recreate(f"{local_dir}/{fname}.root", compression=uproot.LZ4(4)) as rfile:
rfile["Events"] = ak.Array(skimmed_vars)
rfile["Events"].show()
def process(self, events: ak.Array):
import time
start = time.time()
def build_p4(cand):
return ak.zip(
{
"pt": cand.pt,
"eta": cand.eta,
"phi": cand.phi,
"mass": cand.mass,
"charge": cand.charge,
},
with_name="PtEtaPhiMCandidate",
behavior=candidate.behavior,
)
electrons = events["Electron"][events["Electron"].pt > 40]
muons = events["Muon"][events["Muon"].pt > 30]
leptons = ak.concatenate([electrons, muons], axis=1)
leptons = leptons[ak.argsort(leptons.pt, ascending=False)]
fatjets = events[self.fatjet_label]
|
# selection
selection = PackedSelection()
add_selection_no_cutflow("fjselection", (fatjet.pt > 200), selection)
if np.sum(selection.all(*selection.names)) == 0:
return {}
# variables
FatJetVars = {
f"fj_{key}": ak.fill_none(fatjet[var], FILL_NONE_VALUE) for (var, key) in self.skim_vars["FatJet"].items()
}
LepVars = {
**get_lep_features(
self.skim_vars["Lep"],
events,
fatjet,
candidatelep_p4,
),
}
METVars = {
**get_met_features(
self.skim_vars["MET"],
events,
fatjet,
"MET",
normalize=False,
),
}
genparts = events.GenPart
matched_mask, genVars = tagger_gen_matching(
events,
genparts,
fatjet,
# candidatelep_p4,
self.skim_vars["GenPart"],
label=self.label,
)
# add_selection_no_cutflow("gen_match", matched_mask, selection)
skimmed_vars = {**FatJetVars, **{"matched_mask": matched_mask}, **genVars, **METVars, **LepVars}
# apply selections
skimmed_vars = {
key: np.squeeze(np.array(value[selection.all(*selection.names)])) for (key, value) in skimmed_vars.items()
}
# fill inference
if self.inference:
from .run_tagger_inference import runInferenceTriton
for model_name in ["ak8_MD_vminclv2ParT_manual_fixwrap_all_nodes"]:
pnet_vars = runInferenceTriton(
self.tagger_resources_path,
events[selection.all(*selection.names)],
fj_idx_lep[selection.all(*selection.names)],
model_name=model_name,
)
# pnet_df = self.ak_to_pandas(pnet_vars)
pnet_df = pd.DataFrame(pnet_vars)
num = pnet_df[sigs].sum(axis=1)
den = pnet_df[sigs].sum(axis=1) + pnet_df[bkgs].sum(axis=1)
scores = {"fj_ParT_inclusive_score": (num / den).values}
reg_mass = {"fj_ParT_mass": pnet_vars["fj_ParT_mass"]}
hidNeurons = {}
for key in pnet_vars:
if "hidNeuron" in key:
hidNeurons[key] = pnet_vars[key]
skimmed_vars = {**skimmed_vars, **scores, **reg_mass, **hidNeurons}
for key in skimmed_vars:
skimmed_vars[key] = skimmed_vars[key].squeeze()
# convert output to pandas
df = pd.DataFrame(skimmed_vars)
df = df.dropna() # very few events would have genjetmass NaN for some reason
print(f"convert: {time.time() - start:.1f}s")
print(df)
# save the output
fname = events.behavior["__events_factory__"]._partition_key.replace("/", "_")
fname = "condor_" + fname
self.save_dfs_parquet(df, fname)
print(f"dump parquet: {time.time() - start:.1f}s")
# TODO: drop NaNs from rootfiles
self.dump_root(skimmed_vars, fname)
print(f"dump rootfile: {time.time() - start:.1f}s")
#
|
candidatelep_p4 = build_p4(ak.firsts(leptons))
fj_idx_lep = ak.argmin(fatjets.delta_r(candidatelep_p4), axis=1, keepdims=True)
fatjet = ak.firsts(fatjets[fj_idx_lep])
|
random_line_split
|
npm-utils.js
|
else {
set = [];
}
}
if(supportsSet) {
if(set.has(s)) {
return s;
} else {
set.add(s);
}
} else {
if(set.indexOf(s) !== -1) {
return s;
} else {
set.push(s);
}
}
}
for(var prop in s) {
val = s[prop];
if(deep) {
if(utils.isArray(val)) {
d[prop] = slice.call(val);
} else if(utils.isPlainObject(val)) {
d[prop] = utils.extend({}, val, deep, set);
} else {
d[prop] = s[prop];
}
} else {
d[prop] = s[prop];
}
}
return d;
},
map: function(arr, fn){
var i = 0, len = arr.length, out = [];
for(; i < len; i++) {
out.push(fn.call(arr, arr[i]));
}
return out;
},
filter: function(arr, fn){
var i = 0, len = arr.length, out = [], res;
for(; i < len; i++) {
res = fn.call(arr, arr[i]);
if(res) {
out.push(arr[i]);
}
}
return out;
},
forEach: function(arr, fn) {
var i = 0, len = arr.length;
for(; i < len; i++) {
fn.call(arr, arr[i], i);
}
},
isObject: function(obj){
return typeof obj === "object";
},
isPlainObject: function(obj){
// A plain object has a proto that is the Object
return utils.isObject(obj) && (!obj || obj.__proto__ === Object.prototype);
},
isArray: Array.isArray || function(arr){
return Object.prototype.toString.call(arr) === "[object Array]";
},
isEnv: function(name) {
return this.isEnv ? this.isEnv(name) : this.env === name;
},
isGitUrl: function(str) {
return gitUrlEx.test(str);
},
warnOnce: function(msg){
var w = this._warnings = this._warnings || {};
if(w[msg]) return;
w[msg] = true;
this.warn(msg);
},
warn: function(msg){
if(typeof steal !== "undefined" && typeof console !== "undefined" && console.warn) {
steal.done().then(function(){
if(steal.dev && steal.dev.warn){
steal.dev.warn(msg)
} else if(console.warn) {
console.warn("steal.js WARNING: "+msg);
} else {
console.log(msg);
}
});
}
},
relativeURI: function(baseURL, url) {
return typeof steal !== "undefined" ? steal.relativeURI(baseURL, url) : url;
},
moduleName: {
/**
* @function moduleName.create
* Converts a parsed module name to a string
*
* @param {system-npm/parsed_npm} descriptor
*/
create: function (descriptor, standard) {
if(standard) {
return descriptor.moduleName;
} else {
if(descriptor === "@empty") {
return descriptor;
}
var modulePath;
if(descriptor.modulePath) {
modulePath = descriptor.modulePath.substr(0,2) === "./" ? descriptor.modulePath.substr(2) : descriptor.modulePath;
}
return descriptor.packageName
+ (descriptor.version ? '@' + descriptor.version : '')
+ (modulePath ? '#' + modulePath : '')
+ (descriptor.plugin ? descriptor.plugin : '');
}
},
/**
* @function moduleName.isNpm
* Determines whether a moduleName is npm-like.
* @return {Boolean}
*/
isNpm: function(moduleName){
return npmModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isConditional
* Determines whether a moduleName includes a condition.
* @return {Boolean}
*/
isConditional: function(moduleName){
return conditionalModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isFullyConvertedModuleName
* Determines whether a moduleName is a fully npm name, not npm-like
* With a parsed module name we can make sure there is a package name,
* package version, and module path.
*/
isFullyConvertedNpm: function(parsedModuleName){
return !!(parsedModuleName.packageName &&
parsedModuleName.version && parsedModuleName.modulePath);
},
/**
* @function moduleName.isScoped
* Determines whether a moduleName is from a scoped package.
* @return {Boolean}
*/
isScoped: function(moduleName){
return moduleName[0] === "@";
},
/**
* @function moduleName.parse
* Breaks a string moduleName into parts.
* packageName@version!plugin#modulePath
* "./lib/bfs"
*
* @return {system-npm/parsed_npm}
*/
parse: function (moduleName, currentPackageName, global) {
var pluginParts = moduleName.split('!');
var modulePathParts = pluginParts[0].split("#");
var versionParts = modulePathParts[0].split("@");
// it could be something like `@empty`
if(!modulePathParts[1] && !versionParts[0]) {
versionParts = ["@"+versionParts[1]];
}
// it could be a scope package
if(versionParts.length === 3 && utils.moduleName.isScoped(moduleName)) {
versionParts.splice(0, 1);
versionParts[0] = "@"+versionParts[0];
}
var packageName,
modulePath;
// if the module name is relative
// use the currentPackageName
if (currentPackageName && utils.path.isRelative(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0];
// if the module name starts with the ~ (tilde) operator
// use the currentPackageName
} else if (currentPackageName && utils.path.startsWithTildeSlash(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0].split("/").slice(1).join("/");
} else {
if(modulePathParts[1]) { // foo@1.2#./path
packageName = versionParts[0];
modulePath = modulePathParts[1];
} else {
// test/abc
var folderParts = versionParts[0].split("/");
// Detect scoped packages
if(folderParts.length && folderParts[0][0] === "@") {
packageName = folderParts.splice(0, 2).join("/");
} else {
packageName = folderParts.shift();
}
modulePath = folderParts.join("/");
}
}
modulePath = utils.path.removeJS(modulePath);
return {
plugin: pluginParts.length === 2 ? "!"+pluginParts[1] : undefined,
version: versionParts[1],
modulePath: modulePath,
packageName: packageName,
moduleName: moduleName,
isGlobal: global
};
},
/**
* @function moduleName.parseFromPackage
*
* Given the package that loads the dependency, the dependency name,
* and the moduleName of what loaded the package, return
* a [system-npm/parsed_npm].
*
* @param {Loader} loader
* @param {NpmPackage} refPkg The package `name` is a dependency of.
* @param {moduleName} name
* @param {moduleName} parentName
* @return {system-npm/parsed_npm}
*
*/
parseFromPackage: function(loader, refPkg, name, parentName) {
// Get the name of the
var packageName = utils.pkg.name(refPkg),
parsedModuleName = utils.moduleName.parse(name, packageName),
isRelative = utils.path.isRelative(parsedModuleName.modulePath);
if(isRelative && !parentName) {
throw new Error("Cannot resolve a relative module identifier " +
"with no parent module:", name);
}
// If the module needs to be loaded relative.
if(isRelative) {
// get the location of the parent
var parentParsed = utils.moduleName.parse(parentName, packageName);
// If the parentModule and the currentModule are from the same parent
if( parentParsed.packageName === parsedModuleName.packageName && parentParsed.modulePath ) {
var makePathRelative = true;
if(name === "../" || name === "./" || name === "..") {
var relativePath = utils.path.relativeTo(
parentParsed.modulePath, name);
var isInRoot = utils.path.isPackageRootDir(relativePath);
if(isInRoot) {
parsedModuleName.modulePath = utils.pkg.main(refPkg);
makePathRelative = false;
} else {
parsedModuleName.modulePath = name +
(utils.path.endsWithSlash(name) ? "" : "/") +
"index";
}
}
if(make
|
{
set = new Set();
}
|
conditional_block
|
|
npm-utils.js
|
Slash(name)) {
// Todo .. first part of name
var curPackage = utils.path.depPackageDir(refPackage.fileUrl, name);
while(curPackage) {
var pkg = loader.npmPaths[curPackage];
if(pkg) {
return pkg;
}
var parentAddress = utils.path.parentNodeModuleAddress(curPackage);
if(!parentAddress) {
return;
}
curPackage = parentAddress+"/"+name;
}
}
},
findByName: function(loader, name) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
return loader.npm[name];
}
},
findByNameAndVersion: function(loader, name, version) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + version;
return loader.npm[nameAndVersion];
}
},
findByUrl: function(loader, url) {
if(loader.npm) {
url = utils.pkg.folderAddress(url);
return loader.npmPaths[url];
}
},
directoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
var lib = steal && steal.directories && steal.directories.lib;
var ignores = [".", "/"], ignore;
if(!lib) return undefined;
while(!!(ignore = ignores.shift())) {
if(lib[0] === ignore) {
lib = lib.substr(1);
}
}
return lib;
},
hasDirectoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
return steal && steal.directories && !!steal.directories.lib;
},
findPackageInfo: function(context, pkg){
var pkgInfo = context.pkgInfo;
if(pkgInfo) {
var out;
utils.forEach(pkgInfo, function(p){
if(pkg.name === p.name && pkg.version === p.version) {
out = p;
}
});
return out;
}
},
saveResolution: function(context, refPkg, pkg){
var npmPkg = utils.pkg.findPackageInfo(context, refPkg);
npmPkg.resolutions[pkg.name] = refPkg.resolutions[pkg.name] =
pkg.version;
},
config: function(pkg){
return pkg.steal || pkg.system;
}
},
path: {
makeRelative: function(path){
if( utils.path.isRelative(path) && path.substr(0,1) !== "/" ) {
return path;
} else {
return "./"+path;
}
},
removeJS: function(path) {
return path.replace(/\.js(!|$)/,function(whole, part){return part;});
},
removePackage: function (path){
return path.replace(/\/package\.json.*/,"");
},
addJS: function(path){
// Don't add `.js` for types that need to work without an extension.
if(/\.js(on)?$/.test(path)) {
return path;
} else {
return path+".js";
}
},
isRelative: function(path) {
return path.substr(0,1) === ".";
},
startsWithTildeSlash: function( path ) {
return path.substr(0,2) === "~/";
},
joinURIs: function(base, href) {
function removeDotSegments(input) {
var output = [];
input.replace(/^(\.\.?(\/|$))+/, '')
.replace(/\/(\.(\/|$))+/g, '/')
.replace(/\/\.\.$/, '/../')
.replace(/\/?[^\/]*/g, function (p) {
if (p === '/..') {
output.pop();
} else {
output.push(p);
}
});
return output.join('').replace(/^\//, input.charAt(0) === '/' ? '/' : '');
}
href = parseURI(href || '');
base = parseURI(base || '');
return !href || !base ? null : (href.protocol || base.protocol) +
(href.protocol || href.authority ? href.authority : base.authority) +
removeDotSegments(href.protocol || href.authority || href.pathname.charAt(0) === '/' ? href.pathname : (href.pathname ? ((base.authority && !base.pathname ? '/' : '') + base.pathname.slice(0, base.pathname.lastIndexOf('/') + 1) + href.pathname) : base.pathname)) +
(href.protocol || href.authority || href.pathname ? href.search : (href.search || base.search)) +
href.hash;
},
startsWithDotSlash: function( path ) {
return path.substr(0,2) === "./";
},
removeDotSlash: function(path) {
return utils.path.startsWithDotSlash(path) ?
path.substr(2) :
path;
},
endsWithSlash: function(path){
return path[path.length -1] === "/";
},
addEndingSlash: function(path){
return utils.path.endsWithSlash(path) ? path : path+"/";
},
// Returns a package.json path one node_modules folder deeper than the
// parentPackageAddress
depPackage: function (parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return (packageFolderName ? packageFolderName+"/" : "")+"node_modules/" + childName + "/package.json";
},
peerPackage: function(parentPackageAddress, childName){
var packageFolderName = parentPackageAddress.replace(/\/package\.json.*/,"");
return packageFolderName.substr(0, packageFolderName.lastIndexOf("/"))
+ "/" + childName + "/package.json";
},
// returns the package directory one level deeper.
depPackageDir: function(parentPackageAddress, childName){
return utils.path.depPackage(parentPackageAddress, childName).replace(/\/package\.json.*/,"");
},
peerNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules);
if(nodeModulesIndex >= 0) {
return address.substr(0, nodeModulesIndex+nodeModules.length - 1 );
}
},
// /node_modules/a/node_modules/b/node_modules/c -> /node_modules/a/node_modules/
parentNodeModuleAddress: function(address) {
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
prevModulesIndex = address.lastIndexOf(nodeModules, nodeModulesIndex-1);
if(prevModulesIndex >= 0) {
return address.substr(0, prevModulesIndex+nodeModules.length - 1 );
}
},
pkgDir: function(address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
// Scoped packages
if(address[nodeModulesIndex+nodeModules.length] === "@") {
nextSlash = address.indexOf("/", nextSlash+1);
}
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
basename: function(address){
var parts = address.split("/");
return parts[parts.length - 1];
},
relativeTo: function(modulePath, rel) {
var parts = modulePath.split("/");
var idx = 1;
while(rel[idx] === ".") {
parts.pop();
idx++;
}
return parts.join("/");
},
isPackageRootDir: function(pth) {
return pth.indexOf("/") === -1;
}
},
json: {
/**
* if a jsonOptions transformer is provided (by the System.config)
* use it for all json files, package.json's are also included
* @param loader
* @param load
* @param data
* @returns data
*/
transform: function(loader, load, data) {
// harmonize steal config
data.steal = utils.pkg.config(data);
var fn = loader.jsonOptions && loader.jsonOptions.transform;
if(!fn) return data;
return fn.call(loader, load, data);
}
},
includeInBuild: true
};
function parseURI(url)
|
{
var m = String(url).replace(/^\s+|\s+$/g, '').match(/^([^:\/?#]+:)?(\/\/(?:[^:@]*(?::[^:@]*)?@)?(([^:\/?#]*)(?::(\d*))?))?([^?#]*)(\?[^#]*)?(#[\s\S]*)?/);
// authority = '//' + user + ':' + pass '@' + hostname + ':' port
return (m ? {
href : m[0] || '',
protocol : m[1] || '',
authority: m[2] || '',
host : m[3] || '',
hostname : m[4] || '',
port : m[5] || '',
pathname : m[6] || '',
search : m[7] || '',
hash : m[8] || ''
} : null);
}
|
identifier_body
|
|
npm-utils.js
|
moduleName includes a condition.
* @return {Boolean}
*/
isConditional: function(moduleName){
return conditionalModuleRegEx.test(moduleName);
},
/**
* @function moduleName.isFullyConvertedModuleName
* Determines whether a moduleName is a fully npm name, not npm-like
* With a parsed module name we can make sure there is a package name,
* package version, and module path.
*/
isFullyConvertedNpm: function(parsedModuleName){
return !!(parsedModuleName.packageName &&
parsedModuleName.version && parsedModuleName.modulePath);
},
/**
* @function moduleName.isScoped
* Determines whether a moduleName is from a scoped package.
* @return {Boolean}
*/
isScoped: function(moduleName){
return moduleName[0] === "@";
},
/**
* @function moduleName.parse
* Breaks a string moduleName into parts.
* packageName@version!plugin#modulePath
* "./lib/bfs"
*
* @return {system-npm/parsed_npm}
*/
parse: function (moduleName, currentPackageName, global) {
var pluginParts = moduleName.split('!');
var modulePathParts = pluginParts[0].split("#");
var versionParts = modulePathParts[0].split("@");
// it could be something like `@empty`
if(!modulePathParts[1] && !versionParts[0]) {
versionParts = ["@"+versionParts[1]];
}
// it could be a scope package
if(versionParts.length === 3 && utils.moduleName.isScoped(moduleName)) {
versionParts.splice(0, 1);
versionParts[0] = "@"+versionParts[0];
}
var packageName,
modulePath;
|
// if the module name is relative
// use the currentPackageName
if (currentPackageName && utils.path.isRelative(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0];
// if the module name starts with the ~ (tilde) operator
// use the currentPackageName
} else if (currentPackageName && utils.path.startsWithTildeSlash(moduleName)) {
packageName = currentPackageName;
modulePath = versionParts[0].split("/").slice(1).join("/");
} else {
if(modulePathParts[1]) { // foo@1.2#./path
packageName = versionParts[0];
modulePath = modulePathParts[1];
} else {
// test/abc
var folderParts = versionParts[0].split("/");
// Detect scoped packages
if(folderParts.length && folderParts[0][0] === "@") {
packageName = folderParts.splice(0, 2).join("/");
} else {
packageName = folderParts.shift();
}
modulePath = folderParts.join("/");
}
}
modulePath = utils.path.removeJS(modulePath);
return {
plugin: pluginParts.length === 2 ? "!"+pluginParts[1] : undefined,
version: versionParts[1],
modulePath: modulePath,
packageName: packageName,
moduleName: moduleName,
isGlobal: global
};
},
/**
* @function moduleName.parseFromPackage
*
* Given the package that loads the dependency, the dependency name,
* and the moduleName of what loaded the package, return
* a [system-npm/parsed_npm].
*
* @param {Loader} loader
* @param {NpmPackage} refPkg The package `name` is a dependency of.
* @param {moduleName} name
* @param {moduleName} parentName
* @return {system-npm/parsed_npm}
*
*/
parseFromPackage: function(loader, refPkg, name, parentName) {
// Get the name of the
var packageName = utils.pkg.name(refPkg),
parsedModuleName = utils.moduleName.parse(name, packageName),
isRelative = utils.path.isRelative(parsedModuleName.modulePath);
if(isRelative && !parentName) {
throw new Error("Cannot resolve a relative module identifier " +
"with no parent module:", name);
}
// If the module needs to be loaded relative.
if(isRelative) {
// get the location of the parent
var parentParsed = utils.moduleName.parse(parentName, packageName);
// If the parentModule and the currentModule are from the same parent
if( parentParsed.packageName === parsedModuleName.packageName && parentParsed.modulePath ) {
var makePathRelative = true;
if(name === "../" || name === "./" || name === "..") {
var relativePath = utils.path.relativeTo(
parentParsed.modulePath, name);
var isInRoot = utils.path.isPackageRootDir(relativePath);
if(isInRoot) {
parsedModuleName.modulePath = utils.pkg.main(refPkg);
makePathRelative = false;
} else {
parsedModuleName.modulePath = name +
(utils.path.endsWithSlash(name) ? "" : "/") +
"index";
}
}
if(makePathRelative) {
// Make the path relative to the parentName's path.
parsedModuleName.modulePath = utils.path.makeRelative(
utils.path.joinURIs(parentParsed.modulePath,
parsedModuleName.modulePath)
);
}
}
}
// we have the moduleName without the version
// we check this against various configs
var mapName = utils.moduleName.create(parsedModuleName),
refSteal = utils.pkg.config(refPkg),
mappedName;
// The refPkg might have a browser [https://github.com/substack/node-browserify#browser-field] mapping.
// Perform that mapping here.
if(refPkg.browser && (typeof refPkg.browser !== "string") &&
(mapName in refPkg.browser) &&
(!refSteal || !refSteal.ignoreBrowser)) {
mappedName = refPkg.browser[mapName] === false ?
"@empty" : refPkg.browser[mapName];
}
// globalBrowser looks like: {moduleName: aliasName, pgk: aliasingPkg}
var global = loader && loader.globalBrowser &&
loader.globalBrowser[mapName];
if(global) {
mappedName = global.moduleName === false ? "@empty" :
global.moduleName;
}
if(mappedName) {
return utils.moduleName.parse(mappedName, packageName, !!global);
} else {
return parsedModuleName;
}
},
nameAndVersion: function(parsedModuleName){
return parsedModuleName.packageName + "@" + parsedModuleName.version;
}
},
pkg: {
/**
* Returns a package's name. The system config allows one to set this to
* something else.
* @return {String}
*/
name: function(pkg){
var steal = utils.pkg.config(pkg);
return (steal && steal.name) || pkg.name;
},
main: function(pkg) {
var main;
var steal = utils.pkg.config(pkg);
if(steal && steal.main) {
main = steal.main;
} else if(typeof pkg.browser === "string") {
if(utils.path.endsWithSlash(pkg.browser)) {
main = pkg.browser + "index";
} else {
main = pkg.browser;
}
} else if(typeof pkg.jam === "object" && pkg.jam.main) {
main = pkg.jam.main;
} else if(pkg.main) {
main = pkg.main;
} else {
main = "index";
}
return utils.path.removeJS(
utils.path.removeDotSlash(main)
);
},
rootDir: function(pkg, isRoot) {
var root = isRoot ?
utils.path.removePackage( pkg.fileUrl ) :
utils.path.pkgDir(pkg.fileUrl);
var lib = utils.pkg.directoriesLib(pkg);
if(lib) {
root = utils.path.joinURIs(utils.path.addEndingSlash(root), lib);
}
return root;
},
/**
* @function pkg.isRoot
* Determines whether a module is the loader's root module.
* @return {Boolean}
*/
isRoot: function(loader, pkg) {
var root = utils.pkg.getDefault(loader);
return pkg.name === root.name && pkg.version === root.version;
},
getDefault: function(loader) {
return loader.npmPaths.__default;
},
/**
* Returns packageData given a module's name or module's address.
*
* Given a moduleName, it tries to return the package it belongs to.
* If a moduleName isn't provided, but a moduleA
*
* @param {Loader} loader
* @param {String} [moduleName]
* @param {String} [moduleAddress]
* @return {NpmPackage|undefined}
*/
findByModuleNameOrAddress: function(loader, moduleName, moduleAddress) {
if(loader.npm) {
if(moduleName) {
var parsed = utils.moduleName.parse(moduleName);
if(parsed.version && parsed.packageName) {
var name = parsed.package
|
random_line_split
|
|
npm-utils.js
|
we check this against various configs
var mapName = utils.moduleName.create(parsedModuleName),
refSteal = utils.pkg.config(refPkg),
mappedName;
// The refPkg might have a browser [https://github.com/substack/node-browserify#browser-field] mapping.
// Perform that mapping here.
if(refPkg.browser && (typeof refPkg.browser !== "string") &&
(mapName in refPkg.browser) &&
(!refSteal || !refSteal.ignoreBrowser)) {
mappedName = refPkg.browser[mapName] === false ?
"@empty" : refPkg.browser[mapName];
}
// globalBrowser looks like: {moduleName: aliasName, pgk: aliasingPkg}
var global = loader && loader.globalBrowser &&
loader.globalBrowser[mapName];
if(global) {
mappedName = global.moduleName === false ? "@empty" :
global.moduleName;
}
if(mappedName) {
return utils.moduleName.parse(mappedName, packageName, !!global);
} else {
return parsedModuleName;
}
},
nameAndVersion: function(parsedModuleName){
return parsedModuleName.packageName + "@" + parsedModuleName.version;
}
},
pkg: {
/**
* Returns a package's name. The system config allows one to set this to
* something else.
* @return {String}
*/
name: function(pkg){
var steal = utils.pkg.config(pkg);
return (steal && steal.name) || pkg.name;
},
main: function(pkg) {
var main;
var steal = utils.pkg.config(pkg);
if(steal && steal.main) {
main = steal.main;
} else if(typeof pkg.browser === "string") {
if(utils.path.endsWithSlash(pkg.browser)) {
main = pkg.browser + "index";
} else {
main = pkg.browser;
}
} else if(typeof pkg.jam === "object" && pkg.jam.main) {
main = pkg.jam.main;
} else if(pkg.main) {
main = pkg.main;
} else {
main = "index";
}
return utils.path.removeJS(
utils.path.removeDotSlash(main)
);
},
rootDir: function(pkg, isRoot) {
var root = isRoot ?
utils.path.removePackage( pkg.fileUrl ) :
utils.path.pkgDir(pkg.fileUrl);
var lib = utils.pkg.directoriesLib(pkg);
if(lib) {
root = utils.path.joinURIs(utils.path.addEndingSlash(root), lib);
}
return root;
},
/**
* @function pkg.isRoot
* Determines whether a module is the loader's root module.
* @return {Boolean}
*/
isRoot: function(loader, pkg) {
var root = utils.pkg.getDefault(loader);
return pkg.name === root.name && pkg.version === root.version;
},
getDefault: function(loader) {
return loader.npmPaths.__default;
},
/**
* Returns packageData given a module's name or module's address.
*
* Given a moduleName, it tries to return the package it belongs to.
* If a moduleName isn't provided, but a moduleA
*
* @param {Loader} loader
* @param {String} [moduleName]
* @param {String} [moduleAddress]
* @return {NpmPackage|undefined}
*/
findByModuleNameOrAddress: function(loader, moduleName, moduleAddress) {
if(loader.npm) {
if(moduleName) {
var parsed = utils.moduleName.parse(moduleName);
if(parsed.version && parsed.packageName) {
var name = parsed.packageName+"@"+parsed.version;
if(name in loader.npm) {
return loader.npm[name];
}
}
}
if(moduleAddress) {
// Remove the baseURL so that folderAddress only detects
// node_modules that are within the baseURL. Otherwise
// you cannot load a project that is itself within
// node_modules
var startingAddress = utils.relativeURI(loader.baseURL,
moduleAddress);
var packageFolder = utils.pkg.folderAddress(startingAddress);
return packageFolder ? loader.npmPaths[packageFolder] : utils.pkg.getDefault(loader);
} else {
return utils.pkg.getDefault(loader);
}
}
},
folderAddress: function (address){
var nodeModules = "/node_modules/",
nodeModulesIndex = address.lastIndexOf(nodeModules),
nextSlash = address.indexOf("/", nodeModulesIndex+nodeModules.length);
if(nodeModulesIndex >= 0) {
return nextSlash>=0 ? address.substr(0, nextSlash) : address;
}
},
/**
* Finds a dependency by its saved resolutions. This will only be called
* after we've first successful found a package the "hard way" by doing
* semver matching.
*/
findDep: function(loader, refPkg, name){
if(loader.npm && refPkg && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + refPkg.resolutions[name];
var pkg = loader.npm[nameAndVersion];
return pkg;
}
},
/**
* Walks up npmPaths looking for a [name]/package.json. Returns
* the package data it finds.
*
* @param {Loader} loader
* @param {NpmPackage} refPackage
* @param {packgeName} name the package name we are looking for.
*
* @return {undefined|NpmPackage}
*/
findDepWalking: function (loader, refPackage, name) {
if(loader.npm && refPackage && !utils.path.startsWithDotSlash(name)) {
// Todo .. first part of name
var curPackage = utils.path.depPackageDir(refPackage.fileUrl, name);
while(curPackage) {
var pkg = loader.npmPaths[curPackage];
if(pkg) {
return pkg;
}
var parentAddress = utils.path.parentNodeModuleAddress(curPackage);
if(!parentAddress) {
return;
}
curPackage = parentAddress+"/"+name;
}
}
},
findByName: function(loader, name) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
return loader.npm[name];
}
},
findByNameAndVersion: function(loader, name, version) {
if(loader.npm && !utils.path.startsWithDotSlash(name)) {
var nameAndVersion = name + "@" + version;
return loader.npm[nameAndVersion];
}
},
findByUrl: function(loader, url) {
if(loader.npm) {
url = utils.pkg.folderAddress(url);
return loader.npmPaths[url];
}
},
directoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
var lib = steal && steal.directories && steal.directories.lib;
var ignores = [".", "/"], ignore;
if(!lib) return undefined;
while(!!(ignore = ignores.shift())) {
if(lib[0] === ignore) {
lib = lib.substr(1);
}
}
return lib;
},
hasDirectoriesLib: function(pkg) {
var steal = utils.pkg.config(pkg);
return steal && steal.directories && !!steal.directories.lib;
},
findPackageInfo: function(context, pkg){
var pkgInfo = context.pkgInfo;
if(pkgInfo) {
var out;
utils.forEach(pkgInfo, function(p){
if(pkg.name === p.name && pkg.version === p.version) {
out = p;
}
});
return out;
}
},
saveResolution: function(context, refPkg, pkg){
var npmPkg = utils.pkg.findPackageInfo(context, refPkg);
npmPkg.resolutions[pkg.name] = refPkg.resolutions[pkg.name] =
pkg.version;
},
config: function(pkg){
return pkg.steal || pkg.system;
}
},
path: {
makeRelative: function(path){
if( utils.path.isRelative(path) && path.substr(0,1) !== "/" ) {
return path;
} else {
return "./"+path;
}
},
removeJS: function(path) {
return path.replace(/\.js(!|$)/,function(whole, part){return part;});
},
removePackage: function (path){
return path.replace(/\/package\.json.*/,"");
},
addJS: function(path){
// Don't add `.js` for types that need to work without an extension.
if(/\.js(on)?$/.test(path)) {
return path;
} else {
return path+".js";
}
},
isRelative: function(path) {
return path.substr(0,1) === ".";
},
startsWithTildeSlash: function( path ) {
return path.substr(0,2) === "~/";
},
joinURIs: function(base, href) {
function
|
removeDotSegments
|
identifier_name
|
|
utils.py
|
from .products.builders import products
from .responses import response, make_identity, make_error
from .static.builders import codevalues
from .users.builders import users
def fill_cache(cache, values_dict):
"""
Fill a mock cache object with some keys and values.
"""
cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)
def setup_responses(http, response_dict):
"""
Setup a mock http object with some responses to given
URLs. ``response_dict`` should map full URLs (including query string) to
the (response, content) tuple that will be returned (equivalent to the
return value of the httplib2.Http.request method).
"""
url_dict = dict((Url(k), v) for k, v in response_dict.iteritems())
def request(*args, **kwargs):
uri = Url(kwargs["uri"])
try:
return url_dict[uri]
except KeyError:
return response(
make_error(
"Mock got unexpected request URI: %s \n"
" -- Options are %s --" % (uri, response_dict.keys())
),
500)
http.request.side_effect = request
COMMON_RESPONSES = {
"http://fake.base/rest/companies/1?_type=json":
response(companies.one(
resourceIdentity=make_identity(id=1, url="companies/1"))),
"http://fake.base/rest/users?_type=json":
response(users.searchresult({})),
"http://fake.base/rest/users/current?_type=json":
response(users.one()),
"http://fake.base/rest/products?_type=json":
response(products.searchresult({})),
"http://fake.base/rest/environments?_type=json":
response(environments.searchresult({}, {})),
"http://fake.base/staticData/values/TESTCYCLESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNSTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTCASESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNRESULTSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "PASSED", "id": 2},
{"description": "FAILED", "id": 3},
{"description": "BLOCKED", "id": 4},
{"description": "STARTED", "id": 5},
{"description": "INVALIDATED", "id": 6},
)),
"http://fake.base/staticData/values/APPROVALSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "APPROVED", "id": 2},
{"description": "REJECTED", "id": 3},
)),
"http://fake.base/staticData/values/ATTACHMENTTYPE?_type=json":
response(codevalues.array(
{"description": "BRANDING", "id": 1},
{"description": "DESIGN", "id": 2},
{"description": "USERGUIDE", "id": 3},
{"description": "REQUIREMENTS", "id": 4},
{"description": "KNOWNISSUES", "id": 5},
{"description": "SCREENCAPTURE", "id": 6},
{"description": "NDA", "id": 7},
{"description": "UNSPECIFIED", "id": 8},
)),
}
def setup_common_responses(http, response_dict):
"""
A version of ``setup_responses`` intended for end-to-end request-response
testing. Automatically knows how to respond to the StaticCompanyMiddleware
query for the current company, and to static data requests.
"""
new_dict = COMMON_RESPONSES.copy()
new_dict.update(response_dict)
return setup_responses(http, new_dict)
@contextmanager
def
|
():
cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
cache.clear()
patcher = patch("ccui.core.cache.cache", cache)
patcher.start()
yield cache
patcher.stop()
class CachingFunctionalTestMixin(object):
def setUp(self):
self.cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
self.cache.clear()
self.patcher = patch("ccui.core.cache.cache", self.cache)
self.patcher.start()
self.addCleanup(self.patcher.stop)
def creds(email, password=None, cookie=None):
from ccui.users.auth import UserCredentials
from ccui.users.models import User
creds = UserCredentials(email, password=password, cookie=cookie)
creds._user = User(email=email)
creds._user.auth = creds
creds._permission_codes = []
return creds
class AuthTestCase(TestCase):
def creds(self, email, password=None, cookie=None):
return creds(email, password, cookie)
@property
def auth(self):
"""
Since the server responses are mocked, we could just ignore auth when
not testing it specifically, but we include it for all requests to more
closely match real usage.
"""
return self.creds("admin@example.com", cookie="USERTOKEN: authcookie")
class ViewTestCase(AuthTestCase):
factory = RequestFactory()
def setUp(self):
self.rendered = {}
on_template_render = partial(store_rendered_templates, self.rendered)
template_rendered.connect(on_template_render)
self.addCleanup(template_rendered.disconnect, on_template_render)
def setup_responses(self, http, response_dict=None, user=None):
if user is None:
user = self.auth.user
if response_dict is None:
response_dict = {}
else:
response_dict = response_dict.copy()
response_dict.setdefault(
"http://fake.base/rest/users/current?_type=json",
response(
users.one(
email=user.email,
firstName=user.firstName,
lastName=user.lastName,
screenName=user.screenName
)
)
)
setup_common_responses(http, response_dict)
@property
def app(self):
class AuthWSGIHandler(WSGIHandler):
def get_response(self_, request):
request._cached_user = self.auth.user
request._cached_auth = self.auth
return super(AuthWSGIHandler, self_).get_response(request)
return TestApp(AuthWSGIHandler())
class ResourceTestCase(AuthTestCase):
@property
def resource_class(self):
if not hasattr(self, "_resource_class"):
self._resource_class = self.get_resource_class()
return self._resource_class
def get_resource_class(self):
raise NotImplementedError
@property
def resource_list_class(self):
if not hasattr(self, "_resource_list_class"):
self._resource_list_class = self.get_resource_list_class()
return self._resource_list_class
def get_resource_list_class(self):
raise NotImplementedError
def assertSameResource(self, res1, res2):
self.assertEqual(res1._location, res2._location)
def assertSameResourceList(self, list1, list2):
self.assertEqual(
[r._location for r in list1],
[r._location for r in list2],
)
class TestResourceTestCase(ResourceTestCase):
builder = ListBuilder(
"testresource",
"testresources",
"Testresource",
{ "name": "Default name" })
def get_resource_class(self):
from ccui.core.api import RemoteObject, fields
def filter_callable(vals):
return ("callableFilter", [v+"foo" for v in vals])
class TestResource(RemoteObject):
name = fields.Field()
submit_as = fields.Field(api_name="submitAs")
non_field_filters = {
"non_field": "nonField",
"callable": filter_callable,
}
cache = False
def __unicode__(self_):
return u"__unicode__ of %s" % self_.name
return TestResource
def get_resource_list_class(self):
from ccui.core.api import ListObject, fields
class TestResourceList(ListObject):
entryclass = self.resource_class
api_name = "testresources"
default_url = "testresources"
entries = fields.List(fields.Object(self.resource_class))
cache = False
return TestResourceList
class BaseResourceTest(object):
"""
Generic smoke
|
locmem_cache
|
identifier_name
|