language
stringclasses 15
values | src_encoding
stringclasses 34
values | length_bytes
int64 6
7.85M
| score
float64 1.5
5.69
| int_score
int64 2
5
| detected_licenses
listlengths 0
160
| license_type
stringclasses 2
values | text
stringlengths 9
7.85M
|
---|---|---|---|---|---|---|---|
Markdown
|
UTF-8
| 778 | 2.59375 | 3 |
[] |
no_license
|
This repository contains a python script designed to interpret a collection of
texts and create an index based on word frequency for subsequent analysis and
use.
To use this script locally, make sure line 8 contains the IP address of
the machine where MongoDB has been deployed.
To run locally simply run as follows:
$ python InvIndex.py your_text_file.txt [your_text_file2.txt, ...]
To run on a hadoop cluster, use the following line
$ python InvIndex.py -r hadoop hdfs:///<absolute path to data> --output hdfs:///<absolute path to output>
Additionally under the folder site, you will find a simple web application that'll allow you to visualize
and query a local mongodb database for documents.
To run said website use the following line:
$ python2.7 __init__.py
|
Java
|
UTF-8
| 2,389 | 1.695313 | 2 |
[] |
no_license
|
package com.duokan.reader.ui.reading;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.view.ViewGroup.LayoutParams;
import com.duokan.core.app.IFeature;
import com.duokan.core.app.AppContext;
import com.duokan.p024c.C0255g;
import com.duokan.p024c.C0256h;
import com.duokan.reader.ReaderEnv;
import com.duokan.reader.domain.bookshelf.BookContent;
// NOTE(review): decompiled/obfuscated class — all short names (cm, wx, su, cn, co, cp)
// come from the obfuscator. From the resource ids this looks like the option panel for
// comic-reading mode (horizontal / vertical / landscape) — TODO confirm against the app.
public class cm extends wx {
    /* renamed from: a */
    // Reading feature queried from the context; f9941a.am() appears to select the
    // horizontal layout variant — presumably "is horizontal mode"; verify in `su`.
    private final su f9941a = ((su) getContext().queryFeature(su.class));
    /* renamed from: b */
    // Option button: horizontal reading.
    private final View f9942b;
    /* renamed from: c */
    // Option button: vertical reading.
    private final View f9943c;
    /* renamed from: d */
    // Option button: landscape reading (hidden on HD devices, see m13849a()).
    private final View f9944d;
    /* renamed from: e */
    // Flag consulted in onDetachFromStub(); set elsewhere (not visible in this chunk).
    private boolean f9945e = false;

    public cm(IFeature mFeature) {
        super(mFeature);
        // Inflate the horizontal or vertical option layout depending on current mode.
        ViewGroup viewGroup = (ViewGroup) LayoutInflater.from(getContext()).inflate(this.f9941a.am() ? C0256h.reading__comic_option_horizontal_view : C0256h.reading__comic_option_vertical_view, null);
        // MATCH_PARENT width, WRAP_CONTENT height.
        viewGroup.setLayoutParams(new LayoutParams(-1, -2));
        setContentView((View) viewGroup);
        this.f9942b = findViewById(C0255g.reading__comic_option_view__horizontal);
        this.f9942b.setOnClickListener(new cn(this));
        this.f9943c = findViewById(C0255g.reading__comic_option_view__vertical);
        this.f9943c.setOnClickListener(new co(this));
        this.f9944d = findViewById(C0255g.reading__comic_option_view__landscape);
        this.f9944d.setOnClickListener(new cp(this));
        m13849a();
    }

    protected void onDetachFromStub() {
        // When the flag is set, notify the reading feature; the exact semantics of
        // aA()/mo2040a(128, 0) are not visible from this file — TODO confirm.
        if (this.f9945e) {
            this.f9941a.aA();
            ((su) AppContext.getAppContext(getContext()).queryFeature(su.class)).mo2040a(128, 0);
        }
    }

    /* renamed from: a */
    // Initializes button visibility and the initially-selected option.
    private void m13849a() {
        // Landscape option only shown on non-HD devices (visibility 0 == VISIBLE).
        if (!ReaderEnv.get().forHd()) {
            this.f9944d.setVisibility(0);
        }
        // Vertical-only comics hide the horizontal option (8 == GONE).
        if (this.f9941a.mo1992G().m4246r() == BookContent.VERTICAL_COMIC) {
            this.f9942b.setVisibility(8);
        }
        if (!ReaderEnv.get().forHd() && this.f9941a.am()) {
            this.f9944d.setSelected(true);
        } else if (this.f9942b.getVisibility() == 0 && this.f9941a.mo2004U() == PageAnimationMode.HSCROLL) {
            this.f9942b.setSelected(true);
        } else {
            this.f9943c.setSelected(true);
        }
    }
}
|
JavaScript
|
UTF-8
| 487 | 3.359375 | 3 |
[
"MIT"
] |
permissive
|
'use strict';

// Random integer in the inclusive range [min, max].
const random = (min, max) => Math.floor(min + Math.random() * (max - min + 1));

// Counts how many times each distinct element occurs in `arr`.
// Returns a Map of element -> occurrence count.
const analyse = (arr) => {
  const counts = new Map();
  for (const item of arr) {
    counts.set(item, (counts.get(item) || 0) + 1);
  }
  return counts;
};

// Build a sample of 100 random integers in [0, 10] ...
const mass = [];
for (let i = 0; i < 100; i++) {
  mass.push(random(0, 10));
}

// ... and print each [value, count] entry of its frequency table.
let map = analyse(mass);
for (const e of map) {
  console.log(e);
}
|
Python
|
UTF-8
| 2,651 | 3.265625 | 3 |
[
"MIT"
] |
permissive
|
# based on pyotp
import base64
import uhashlib as hashlib
import hmac
class OTP(object):
    """Base class for OTP (one-time password) handlers, modelled on pyotp."""

    def __init__(self, s, digits=6, digest=hashlib.sha1):
        """
        :param s: secret in base32 format
        :type s: str
        :param digits: number of integers in the OTP. Some apps expect this to be 6 digits, others support more.
        :type digits: int
        :param digest: digest function to use in the HMAC (expected to be sha256)
        :type digest: callable
        """
        self.digits = digits
        self.digest = digest
        self.secret = s
        self.byte_secret = self._get_byte_secret()

    def generate_otp(self, input):
        """
        :param input: the HMAC counter value to use as the OTP input.
            Usually either the counter, or the computed integer based on the Unix timestamp
        :type input: int
        """
        if input < 0:
            raise ValueError('input must be positive integer')
        # HMAC the big-endian counter with the shared secret, then apply
        # RFC 4226 dynamic truncation to the resulting digest.
        mac = bytearray(
            hmac.new(self.byte_secret, self.int_to_bytestring(input), self.digest).digest())
        offset = mac[-1] & 0xf
        code = (
            (mac[offset] & 0x7f) << 24
            | (mac[offset + 1] & 0xff) << 16
            | (mac[offset + 2] & 0xff) << 8
            | (mac[offset + 3] & 0xff)
        )
        # Keep the low `digits` decimal digits, left-padded with zeros.
        return str(code % 10 ** self.digits).rjust(self.digits, '0')

    def _get_byte_secret(self):
        # base32 input must be a multiple of 8 characters; restore any '=' padding
        # the caller stripped before decoding.
        remainder = len(self.secret) % 8
        if remainder != 0:
            self.secret += '=' * (8 - remainder)
        return base64.b32decode(self.secret, casefold=True)

    @staticmethod
    def int_to_bytestring(i, padding=8):
        """
        Turns an integer to the OATH specified
        bytestring, which is fed to the HMAC
        along with the secret
        """
        chunks = bytearray()
        while i != 0:
            chunks.append(i & 0xFF)
            i >>= 8
        chunks.reverse()
        return b'\0' * (padding - len(chunks)) + chunks

    def totp(self, for_time, interval=30):
        """
        Accepts a Unix timestamp integer.
        :param for_time: the time to generate an OTP for
        :type for_time: int
        :param interval: the time interval in seconds
            for OTP. This defaults to 30.
        :type interval: int
        :returns: OTP value
        :rtype: str
        """
        return self.generate_otp(for_time // interval)
# TODO
# def verify()
|
JavaScript
|
UTF-8
| 906 | 2.65625 | 3 |
[] |
no_license
|
'use strict';
const assert = require('assert');
module.exports = function(world) {
let browser = world.browser;
world.when('I select no political party', function(done){
browser
.click('label[for="No, I do not want to choose a political party"]')
.then(() => { done(); })
.catch(done);
});
world.then('I will see Yes for my political party choice', function(done) {
browser
.text()
.then((text) => {
assert.ok(text.includes('Yes'), 'Yes not saved in summary');
})
.then(() => { done(); })
.catch(done);
});
world.then('I will see No for my political party choice', function(done) {
browser
.text()
.then((text) => {
assert.ok(text.includes('No, I do not want to choose a political party'), 'No, I do not want to choose a political party not saved in summary');
})
.then(() => { done(); })
.catch(done);
});
};
|
Java
|
UTF-8
| 921 | 2.09375 | 2 |
[
"Apache-2.0"
] |
permissive
|
/*
* Copyright (c) 2020 Yasin Sinan Kayacan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.ykayacan.dataloader;
@FunctionalInterface
public interface CacheKey<K> {

  /**
   * Returns a {@code CacheKey} that uses the input key itself as the cache key.
   *
   * @param <K> the type of the input key
   * @return the identity cache-key function
   */
  static <K> CacheKey<K> identity() {
    return k -> k;
  }

  /**
   * Returns the cache key that is created from the provided input key.
   *
   * @param key the input key
   * @return the cache key
   */
  Object getKey(K key);
}
|
C++
|
UTF-8
| 1,729 | 3.890625 | 4 |
[] |
no_license
|
//自顶向下
//状态转换方程:f(x,y) = triangle[x][y] + min{f(x+1,y) + f(x+1,y+1)}
/*class Solution {
public:
int minimumTotal(vector<vector<int>>& triangle) {
vector<vector<int>> table;
for (int i = 0; i < triangle.size(); i++) {//初始化table空间复杂度为O(n+n-1+n-2+...1)n为深度
table.push_back(vector<int>(triangle[i].size(), INT_MIN));
}
return helper(triangle, table, 0, 0);
}
int helper(vector<vector<int>> &triangle, vector<vector<int>> &table, int x_index, int y_index) {
if (x_index == triangle.size() - 1) {
return table[x_index][y_index] = triangle[x_index][y_index];
}
if (table[x_index][y_index] != INT_MIN) {
return table[x_index][y_index];
} else {
return table[x_index][y_index] = triangle[x_index][y_index] +
std::min(helper(triangle, table, x_index + 1, y_index), helper(triangle, table, x_index + 1, y_index + 1));
}
}
};*/
// Bottom-up DP with O(n) extra space, n = number of rows.
// From the bottom row upward, best[j] holds the cheapest path from row i,
// column j down to the base: best[j] = triangle[i][j] + min(best[j], best[j+1]).
// Each pass shrinks the useful prefix of `best` by one.
class Solution {
public:
    int minimumTotal(vector<vector<int>>& triangle) {
        // Seed with the bottom row's values.
        vector<int> best(triangle.back());
        for (int row = (int)triangle.size() - 2; row >= 0; --row) {   // second-to-last row upward
            for (int col = 0; col < (int)triangle[row].size(); ++col) {
                best[col] = triangle[row][col] + std::min(best[col], best[col + 1]);
            }
        }
        return best[0];
    }
};
|
Shell
|
UTF-8
| 943 | 4 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# init-server.sh — prepare a remote docker host and generate an Ansible
# playbook (deploy/update.yml) that pulls the latest image and restarts the
# app's containers.
#
# Usage:
#   init-server.sh                   # derive the app name from package.json
#   init-server.sh <any> <name>      # use <name> explicitly

# Read the app name straight out of package.json.
# `jq -r` prints the raw string, replacing the fragile quote-stripping sed hack.
NAME=$(jq -r '.name' package.json)
echo "> found app name: $NAME"

# Write deploy/update.yml for the project named by $1.
create_deploy_file() {
    echo -e "---
- hosts: web
  remote_user: george
  tasks:
    - name: Pull new image
      shell: docker pull registry.jorgeadolfo.com/$1:latest
    - name: Stop $1
      shell: cd /var/docker/$1 && docker-compose stop && docker-compose rm -f
      ignore_errors: yes
    - name: Start $1
      shell: cd /var/docker/$1 && docker-compose up -d
      ignore_errors: yes
" > deploy/update.yml
}

if [[ -n "$1" && -n "$2" ]]; then
    # Explicit name given — only regenerate the playbook.
    create_deploy_file "$2"
elif [[ -n "$NAME" ]]; then
    echo "ssh -> to server"
    ssh jorgeadolfo.com "mkdir /var/docker/$NAME"
    echo "[ssh] -> created directory"
    scp docker/docker-compose.yml "jorgeadolfo.com:/var/docker/$NAME/"
    echo "[scp] -> copy compose"
    create_deploy_file "$NAME"
    echo "generated deploy.yml"
else
    echo "please provide a project name, i.e. init-server.sh my-app"
fi
|
Java
|
UTF-8
| 1,321 | 2.96875 | 3 |
[] |
no_license
|
import java.util.HashMap;
import java.util.HashSet;
/**
 * Fixture factory producing the sample arrays, sets, and index used elsewhere
 * in the test/demo code.
 */
public class Creator {

    /** Sample query tokens (a term, a command, required words, an excluded word). */
    static String[] createStrArray() {
        return new String[]{"get", "help", "+illness", "+disease", "-cough"};
    }

    /** Returns a new {@link HashSet} containing only the given number. */
    static HashSet<Integer> createHashSet(int addNumber) {
        return newSet(addNumber);
    }

    static HashSet<Integer> getHashSet1() {
        return newSet(2, 0, -8, -5, -4);
    }

    static HashSet<Integer> getHashSet2() {
        return newSet(-5, 2, -10, -9);
    }

    static HashSet<Integer> getHashSet3() {
        return newSet(-4, -6, -1, 1, -5);
    }

    /** Builds a sample index mapping the words "subject" and "high" to posting sets. */
    static HashMap<String, HashSet<Integer>> getIndex1() {
        HashMap<String, HashSet<Integer>> index = new HashMap<>();
        index.put("subject", getHashSet1());
        index.put("high", getHashSet2());
        return index;
    }

    /** Collects the given values into a fresh {@link HashSet}. */
    private static HashSet<Integer> newSet(int... values) {
        HashSet<Integer> set = new HashSet<>();
        for (int value : values) {
            set.add(value);
        }
        return set;
    }
}
|
Python
|
UTF-8
| 1,241 | 2.890625 | 3 |
[] |
no_license
|
import sys, time
from PyQt5.QtWidgets import QApplication, QDialog, QTreeWidgetItem
from PyQt5 import uic
from os import listdir, path, stat
from mimetypes import MimeTypes
class Dialogo(QDialog):
    """Dialog that lists the contents of a user-supplied directory in a tree widget.

    Expects `treewidget.ui` to define:
      - `boton`: the refresh button,
      - `ruta`: the line edit holding the directory path,
      - `directorio`: the QTreeWidget to populate.
    """

    def __init__(self):
        QDialog.__init__(self)
        uic.loadUi("treewidget.ui", self)
        # Refresh the listing whenever the button is clicked.
        self.boton.clicked.connect(self.getDir)

    def getDir(self):
        """Clear the tree and repopulate it from the directory typed in `ruta`."""
        # Remove all rows from the previous search.
        self.directorio.clear()
        # Path supplied by the user.
        dir = self.ruta.text()
        # Only proceed if it actually is a directory (silently does nothing otherwise).
        if path.isdir(dir):
            # Walk the directory's entries (non-recursive).
            for element in listdir(dir):
                name = element
                # NOTE(review): hard-coded backslash separator — assumes Windows;
                # os.path.join would be portable. TODO confirm target platform.
                pathinfo = dir + "\\" + name
                informacion = stat(pathinfo)
                # Directories get a fixed type label and no size.
                if path.isdir(pathinfo):
                    type = "Carpeta de archivos"
                    size = ""
                else:
                    # guess_type returns (mime, encoding); may yield None for unknown types.
                    mime = MimeTypes()
                    type = mime.guess_type(pathinfo)[0]
                    size = str(informacion.st_size) + " bytes"
                # Last-modification date, human readable.
                date = str(time.ctime(informacion.st_mtime))
                # Columns: name, date, type, size.
                row = [name, date, type, size]
                # Insert the row at the top of the tree.
                self.directorio.insertTopLevelItems(0, [QTreeWidgetItem(self.directorio, row)])
# Entry point: create the Qt application, show the dialog, run the event loop.
app = QApplication(sys.argv)
dialogo = Dialogo()
dialogo.show()
app.exec_()
|
Swift
|
UTF-8
| 850 | 3.21875 | 3 |
[] |
no_license
|
//
// Card.swift
// Project_1
//
// Created by Thaddeus Lim on 23/12/18.
// Copyright © 2018 Thaddeus Lim. All rights reserved.
//
import Foundation
// Model-layer card for a matching game. Identity is a process-wide monotonically
// increasing counter, so every freshly created Card is distinct; equality and
// hashing are both defined purely in terms of that identity.
// Deliberately carries no emoji/appearance — that is a UI concern.
struct Card: Hashable {
    // Next identity to hand out; bumped by getIdentity().
    private static var identityGenerator = 0

    var isFaceUp = false
    var isMatched = false

    // Private: callers compare cards via ==/hashValue, never via the raw id.
    private var identity: Int

    var hashValue: Int {
        return identity
    }

    static func ==(lhs: Card, rhs: Card) -> Bool {
        return lhs.identity == rhs.identity
    }

    init() {
        self.identity = Card.getIdentity()
    }

    private static func getIdentity() -> Int {
        identityGenerator += 1
        return identityGenerator
    }
}
|
C#
|
UTF-8
| 6,763 | 3.1875 | 3 |
[] |
no_license
|
using System;
using System.Collections.Generic;
using System.Text;
namespace Repository
{
using System.Linq;
using DataTransferObjects;
/// <summary>
///
/// </summary>
/// <seealso cref="Repository.IRepository" />
/// <summary>
/// EF-backed repository for customers, balances, and transactions.
/// (Name is historical — the in-memory collections it replaced remain commented out.)
/// </summary>
/// <seealso cref="Repository.IRepository" />
public class InMemoryRepository : IRepository
{
    // EF database context injected by the caller; owns its own lifetime.
    private readonly ApplicationDbContext _db;
    /// <summary>
    /// The customers
    /// </summary>
    // private readonly List<Customer> Customers;
    /// <summary>
    /// The balances
    /// </summary>
    // private readonly Dictionary<Int32, Decimal> Balances;
    /// <summary>
    /// Initializes a new instance of the <see cref="InMemoryRepository"/> class.
    /// </summary>
    public InMemoryRepository(ApplicationDbContext db)
    {
        _db = db;
        // this.Customers = new List<Customer>();
        // this.Customers = _db.Customers.ToList();
        // this.Balances = new Dictionary<Int32, Decimal>();
    }
    /// <summary>
    /// Deletes the customer.
    /// </summary>
    /// <param name="id">The identifier.</param>
    // NOTE(review): unlike SaveCustomer/DepositFunds, this never calls
    // _db.SaveChanges(), so the removal is not persisted until some later save.
    // Also throws if the id does not exist (GetCustomer returns null). Confirm intent.
    public void DeleteCustomer(Int32 id)
    {
        Customer customer = this.GetCustomer(id);
        this._db.Customers.Remove(customer);
    }
    /// <summary>
    /// Deposits the funds.
    /// </summary>
    /// <param name="customerId">The customer identifier.</param>
    /// <param name="funds">The funds.</param>
    /// <exception cref="System.NotImplementedException"></exception>
    public void DepositFunds(Int32 customerId,
        Decimal funds)
    {
        // Add to an existing balance row, or create one on first deposit.
        if (this._db.Balances.Any(b => b.CustomerId == customerId))
        {
            Balance balance = _db.Balances.First(i => i.CustomerId == customerId);
            balance.Funds += funds;
        }
        else
        {
            Balance balance = new Balance();
            balance.CustomerId = customerId;
            balance.Funds = funds;
            _db.Balances.Add(balance);
        }
        _db.SaveChanges();
        // Record the deposit as a "Credit" transaction.
        SaveTransaction("Credit", funds, customerId);
        //if (this.Balances.ContainsKey(customerId))
        //{
        // this.Balances[customerId] += funds;
        //}
        //else
        //{
        // this.Balances.Add(customerId, funds);
        //}
    }
    /// <summary>
    /// Returns all customers as an unmaterialized EF query.
    /// </summary>
    public IEnumerable<Customer> GetAllCustomer()
    {
        return this._db.Customers;
        //throw new NotImplementedException();
    }
    /// <summary>
    /// Gets the available funds.
    /// </summary>
    /// <param name="customerId">The customer identifier.</param>
    /// <returns></returns>
    public Decimal GetAvailableFunds(Int32 customerId)
    {
        // Customers with no balance row are treated as having zero funds.
        if (this._db.Balances.Any(b => b.CustomerId == customerId))
        {
            return this._db.Balances.Where(i => i.CustomerId == customerId)
                .Select(i => i.Funds)
                .SingleOrDefault();
        }
        else
        {
            return 0;
        }
        //return this.Balances.ContainsKey(customerId) ? this.Balances[customerId] : 0;
    }
    /// <summary>
    /// Gets the customer.
    /// </summary>
    /// <param name="id">The identifier.</param>
    /// <returns>The customer, or null when no customer has the given id.</returns>
    public Customer GetCustomer(Int32 id)
    {
        return _db.Customers.FirstOrDefault(c => c.Id == id);
    }
    /// <summary>
    /// Saves the customer (update when the id already exists, insert otherwise).
    /// </summary>
    /// <param name="customer">The customer.</param>
    public void SaveCustomer(Customer customer)
    {
        if (this.DoesCustomerExist(customer.Id))
        {
            // Update the tracked entity's fields in place.
            Customer existingCustomer = this.GetCustomer(customer.Id);
            existingCustomer.IdCard = customer.IdCard;
            existingCustomer.Name = customer.Name;
            existingCustomer.Surname = customer.Surname;
        }
        else
        {
            this._db.Customers.Add(customer);
        }
        //new code
        _db.SaveChanges();
    }
    /// <summary>
    /// Withdraws the funds.
    /// </summary>
    /// <param name="customerId">The customer identifier.</param>
    /// <param name="funds">The funds.</param>
    /// <exception cref="System.NotImplementedException"></exception>
    // NOTE(review): silently does nothing when the customer has no balance row or
    // insufficient funds, and the strict `>` rejects withdrawing the exact balance.
    // Confirm whether `>=` / an error signal was intended.
    public void WithdrawFunds(Int32 customerId,
        Decimal funds)
    {
        if(this._db.Balances.Any(b=>b.CustomerId == customerId))
        {
            Balance balance = _db.Balances.First(i => i.CustomerId == customerId);
            if (balance.Funds > funds)
            {
                balance.Funds -= funds;
                _db.SaveChanges();
                // Record the withdrawal as a "Debit" transaction.
                SaveTransaction("Debit", funds, customerId);
            }
        }
        //if (this.Balances.ContainsKey(customerId))
        //{
        // this.Balances[customerId] -= funds;
        //}
    }
    /// <summary>
    /// Does the customer exist.
    /// </summary>
    /// <param name="id">The identifier.</param>
    /// <returns></returns>
    private Boolean DoesCustomerExist(Int32 id)
    {
        return this._db.Customers.Any(c => c.Id == id);
    }
    /// <summary>
    /// Moves funds between two customers. Not atomic: the withdraw and deposit are
    /// separate saves. Uses the same strict `>` rule as WithdrawFunds, so a transfer
    /// of the full balance is silently skipped.
    /// </summary>
    public void TransferFunds(Int32 customerIdFrom, Int32 customerIdTo, Decimal funds)
    {
        Decimal customerIdFromBalance = GetAvailableFunds(customerIdFrom);
        if(customerIdFromBalance > funds)
        {
            WithdrawFunds(customerIdFrom, funds);
            DepositFunds(customerIdTo, funds);
        }
    }
    /// <summary>
    /// Persists a ledger entry; "Credit" fills the Credit column, anything else Debit.
    /// </summary>
    // NOTE(review): Convert.ToDateTime(DateTime.Now.ToString()) round-trips through a
    // culture-dependent string and drops sub-second precision; DateTime.Now alone
    // would be simpler — confirm nothing relies on the truncation.
    public void SaveTransaction(String status, Decimal amount, Int32 createdBy)
    {
        Transaction transaction = new Transaction();
        transaction.Status = status;
        transaction.CreatedBy = createdBy;
        transaction.CreatedOn = Convert.ToDateTime(DateTime.Now.ToString());
        if (status == "Credit")
        {
            transaction.Credit = amount;
            transaction.Debit = null;
        }
        else
        {
            transaction.Debit = amount;
            transaction.Credit = null;
        }
        _db.Transactions.Add(transaction);
        _db.SaveChanges();
    }
}
}
|
Java
|
ISO-8859-1
| 257 | 2.515625 | 3 |
[] |
no_license
|
package Armas;
/**
 * Cannon weapon behavior (Strategy pattern implementation of ARMAS_WeaponBehavior).
 */
public class ARMAS_CanhaoBehavior implements ARMAS_WeaponBehavior {
    // NOTE(review): this field is never read — getDano() returns a hard-coded 0.5
    // instead. Confirm which value is intended.
    int dano = 30;

    @Override
    public void useWeapon() {
        // NOTE(review): "Canho" looks like mojibake for "Canhão" (file was ISO-8859-1);
        // left byte-identical here since it is a runtime string.
        System.out.println("Ataque com Canho");
    }

    @Override
    public double getDano() {
        return 0.5;
    }
}
|
C++
|
UTF-8
| 6,839 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <queue>
#include "io.hpp"
#include "globals.hpp"
#include "fs_raw.hpp"
#include "fs_fat12.hpp"
#define ARGUMENT_NO_VALUE(arg) "Argument "arg" has no value specified.\n"
/** Global variables */
std::queue<FileArg> g_queue; /* All the input files as given */
char g_outputFile[MAX_PATH]; /* The output file path */
char *g_bootloader; /* The bootloader file */
int g_filesystem; /* The filesystem to use */
/** Process the arguments passed to the application. */
int procArguments(int argc, char **argv)
{
int i;
for (i = 0; i < argc; i++)
{
// 'Quickly seperate arguments from file names
if (argv[i][0] == '-')
{
// Set the bootloader file
if (!strcmp(argv[i], "-b"))
{
if (++i >= argc)
{
printf(ARGUMENT_NO_VALUE("-b"));
continue;
}
// Change the bootloader name to the specified filename
g_bootloader = argv[i];
}
// Change the working directory
if (!strcmp(argv[i], "-d"))
{
if (++i >= argc)
{
printf(ARGUMENT_NO_VALUE("-d"));
continue;
}
// Change the working directory
if (!SetCurrentDirectoryA(argv[i]))
{
return 1;
}
}
// Change the output file
else if (!strcmp(argv[i], "-o"))
{
if (++i >= argc)
{
printf(ARGUMENT_NO_VALUE("-o"));
continue;
}
// Copy the filename into the array; but truncate if too long
strncpy_s(g_outputFile, MAX_PATH, argv[i], _TRUNCATE);
}
// Set the filesystem to raw
else if (!strcmp(argv[i], "-raw"))
{
g_filesystem = FS_RAW;
}
// Set the filesystem to FAT12
else if (!strcmp(argv[i], "-fat12"))
{
g_filesystem = FS_FAT12;
}
}
// Otherwise it is an input file
else
{
char *cur = argv[i];
char *sep = strchr(cur, ':');
// There is no seperator
if (sep == nullptr)
{
g_queue.push(FileArg(ATTRIB_NONE, cur));
}
// Seperate the attributes from the filename
else
{
int attributes = ATTRIB_NONE;
// Loop over all the characters
for (int j = 0; j < (sep-cur); j++)
{
if (cur[j] == 'H')
{
attributes |= ATTRIB_HIDDEN;
}
else if (cur[j] == 'R')
{
attributes |= ATTRIB_READONLY;
}
else if (cur[j] == 'S')
{
attributes |= ATTRIB_SYSTEM;
}
else
{
printf("WARNING: Unknown file attribute '%c'\n", cur[j]);
}
}
g_queue.push(FileArg(attributes, sep+1));
}
}
}
return (i == argc) ? 0 : 1;
}
/** Set the globals to their default values. */
int setDefaults(void)
{
g_bootloader = nullptr;
g_filesystem = FS_FAT12;
return strcpy_s(g_outputFile, MAX_PATH, "default.flp");
}
/** Print a message on how to use the application. */
void printUsage(const char *exeName)
{
// Display standard usage
printf("\nUSAGE: %s <filesystem> <options> inputFiles\n", exeName);
// Display an usage example
printf("\nEXAMPLE: -fat12 %s -d .\\bin -o device.flp -b bootloader.bin RS:kernel.bin p_arkanoid.bin\n", exeName);
#define OPTION(arg, text) \
printf(" %-8s %s\n", arg, text)
// Filesystem
printf("\nFILESYSTEM (DEFAULT: -fat12)\n");
OPTION("-raw", "Use no filesystem");
OPTION("-fat12", "Use the FAT12 filesystem");
// Options list
printf("\nOPTIONS\n");
OPTION("-b", "Set the file to use as the bootloader (default: first input file)");
OPTION("-d", "Change the working directory");
OPTION("-o", "Set the output file name (default: default.flp)");
// Attributes
printf("\nATTRIBUTES (DEFAULT: none are set)\n");
OPTION("H", "Hidden");
OPTION("R", "Read-Only");
OPTION("S", "System");
#undef OPTION
}
/** Initialize all the program variables to the correct values. */
int initialize(int argc, char **argv)
{
if (argc < 3)
{
printUsage(argv[0]);
return -1;
}
// Set the defaults
if (setDefaults())
{
printf("Could set default values.\n");
return -2;
}
// Process the arguments but skip the executable name
if (procArguments(argc - 1, argv + 1))
{
printf("An error occured while processing the passed arguments.\n");
return -3;
}
// If the bootloader was not specifically set take the first file
if (g_bootloader == nullptr)
{
if (g_queue.empty())
{
printf("No bootloader file specified; neither is there a file available to use as a bootloader.\n");
return -4;
}
// Set the bootloader and remove it from the queue
FileArg arg = g_queue.front();
g_bootloader = arg.filename;
g_queue.pop();
}
return 0;
}
/** Application entry point. */
int main(int argc, char **argv)
{
if (initialize(argc, argv))
{
return 0;
}
FILE *oFile = NULL; // output
errno_t err; // Windows error
// Open the output file in write/binary
err = fopen_s(&oFile, g_outputFile, "wb");
if (err)
{
return 0;
}
// Copy the data using the specified filesystem format;
// and give a message on both success and failure.
if (g_filesystem == FS_RAW)
{
if (!Raw_CopyData(oFile))
{
printf("Data has been written successfully!\n");
}
else
{
printf("ERROR: Raw write failed.\n");
}
}
else if (g_filesystem == FS_FAT12)
{
if (!FAT12_CopyData(oFile))
{
printf("Data has been written successfully!\n");
}
else
{
printf("ERROR: FAT12 write failed.\n");
}
}
else
{
printf("ERROR: Filesystem not supported.\n");
}
// Close the file streams
fclose(oFile);
return 0;
}
|
C++
|
UTF-8
| 1,693 | 2.765625 | 3 |
[] |
no_license
|
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef long double ld;
/*
- finds maximum of A*x+B
- double check max coords for int/long long overflow
- set min x query in put function
- add lines with non-descending A coefficient
*/
const int MAXN = 100005;
// Monotone convex-hull-trick container: maintains the upper envelope of lines
// y = A[i]*x + B[i], assuming lines are inserted with non-descending slope.
// NOTE(review): MIN_QUERY and MAX_QUERY are not defined anywhere in this file —
// per the header comment ("set min x query in put function") the user must
// define both before this compiles.
struct FastHull {
    int A[MAXN];   // slopes, non-descending insertion order
    ll B[MAXN];    // intercepts
    ll P[MAXN]; // P is distinct at every point
    int c;         // number of lines currently on the hull
    FastHull(): c(0) {}
    // Maximum of A[i]*x + B[i]: binary-search the breakpoints in P.
    ll query(int x) {
        int pos = upper_bound(P, P + c, x) - P - 1;
        return (ll)A[pos] * x + B[pos];
    }
    // Ceiling division that is correct for negative numerators (q assumed > 0).
    ll divideCeil(ll p, ll q) {
        if (p >= 0) return (p + q - 1) / q;
        return -((-p) / q);
    }
    // Push a new line, popping hull lines it dominates from x = their breakpoint on.
    void insertLine(int a, ll b) {
        // assert a is >= everything in A
        for (; c; --c) {
            if (A[c - 1] == a && B[c - 1] >= b) return;
            ll pt = P[c - 1];
            if (A[c - 1] * pt + B[c - 1] > a * pt + b) break;
        }
        P[c] = c ? divideCeil(B[c - 1] - b, a - A[c - 1]) : MIN_QUERY; // min x query
        if (P[c] > MAX_QUERY) return; // Don't insert this line if it will never be better
        A[c] = a;
        B[c] = b;
        ++c;
    }
};
// Brute-force reference implementation used to validate FastHull:
// keeps every inserted line and scans all of them on each query.
struct SlowHull {
    vector<pair<int, long long>> v;   // (slope, intercept) pairs

    // Record the line y = a*x + b.
    void insertLine(int a, long long b) {
        v.push_back(make_pair(a, b));
    }

    // Evaluate every stored line at x and return the maximum value.
    long long query(long long x) {
        long long best = -1LL << 60;
        for (size_t i = 0; i < v.size(); ++i) {
            best = max(best, v[i].first * x + v[i].second);
        }
        return best;
    }
};
// Randomized cross-check: insert the same 10000 lines (slopes sorted ascending,
// as FastHull requires) into both hulls and assert identical query answers.
int main() {
    FastHull hull1;
    SlowHull hull2;
    vector<int> as;
    // Pre-generate slopes and sort so insertLine's monotone precondition holds.
    for (int i = 0; i < 10000; ++i)
        as.push_back(rand() % int(1e8));
    sort(as.begin(), as.end());
    for (int ii = 0; ii < 10000; ++ii) {
        int b = rand() % int(1e8);
        hull1.insertLine(as[ii], b);
        hull2.insertLine(as[ii], b);
        // Query a random x in [-1e8, 1e8]; both structures must agree.
        int x = rand() % int(2e8 + 1) - int(1e8);
        assert(hull1.query(x) == hull2.query(x));
    }
}
|
Java
|
UTF-8
| 568 | 1.84375 | 2 |
[] |
no_license
|
package com.ht.extra.dao;
import com.ht.extra.pojo.outpdoct.OutpConsultationDetail;
import com.ht.extra.pojo.outpdoct.OutpConsultationDetailKey;
/**
 * MyBatis mapper for the outpatient consultation detail table.
 * All int-returning methods report the number of affected rows.
 */
public interface OutpConsultationDetailMapper {
    /** Deletes the row identified by the composite primary key. */
    int deleteByPrimaryKey(OutpConsultationDetailKey key);
    /** Inserts a full record (all columns). */
    int insert(OutpConsultationDetail record);
    /** Inserts only the non-null fields of the record. */
    int insertSelective(OutpConsultationDetail record);
    /** Loads the row identified by the composite primary key, or null if absent. */
    OutpConsultationDetail selectByPrimaryKey(OutpConsultationDetailKey key);
    /** Updates only the non-null fields of the record, matched by primary key. */
    int updateByPrimaryKeySelective(OutpConsultationDetail record);
    /** Updates all columns of the record, matched by primary key. */
    int updateByPrimaryKey(OutpConsultationDetail record);
}
|
PHP
|
UTF-8
| 422 | 3.3125 | 3 |
[] |
no_license
|
<?php
/**
* Classe Value
*/
/**
 * Classe Value — a single sensor sample: a date plus wrapped temperature and
 * humidity readings.
 */
class Value
{
    public $Date;
    public $Temperature;
    public $Humidite;

    /**
     * @param mixed $Date        sample timestamp (comparable with < / >)
     * @param mixed $Temperature raw temperature, wrapped in a Temp object
     * @param mixed $Humidite    raw humidity, wrapped in a Humid object
     */
    public function __construct($Date, $Temperature, $Humidite)
    {
        $this->Date = $Date;
        $this->Temperature = new Temp($Temperature);
        $this->Humidite = new Humid($Humidite);
    }

    /**
     * Returns true when this sample's date lies strictly inside the
     * [$Start_Datetime, $End_Datetime] window.
     *
     * BUG FIX: the original comparisons were inverted
     * (`Date < start && Date > end`), which can never hold for a normal
     * start <= end window, so In_Time always returned false.
     */
    public function In_Time($Start_Datetime, $End_Datetime)
    {
        return ($this->Date > $Start_Datetime && $this->Date < $End_Datetime);
    }
}
|
Markdown
|
UTF-8
| 981 | 2.53125 | 3 |
[] |
no_license
|
# cabal dependencies
This tool extracts dependencies from Cabal files and prints them as Prolog terms for further processing.
Direct dependencies are labelled "1", and transitive dependencies "2".
## extraction
### using `stack`
> stack build
> stack exec cabal-dependencies-exe ../my-other-project/project.cabal > dependencies.pl
### using `cabal`
> cabal new-build
> cabal new-run cabal-dependencies-exe ../my-other-project/project.cabal > dependencies.pl
## analysis
prints out a list of all dependencies.
> swipl -q -t go depdep.pl > dependencies.csv
prints out a list of modules defined by the cabal files.
> swipl -q -t go2 depdep.pl > modules.csv
## example
analysed the dependencies in a huge project ([Cardano-SL](https://github.com/input-output-hk/cardano-sl)).
this project contains more than 20 modules.
the CSV data have been loaded into a LibreOffice [calc sheet](module_dependencies.ods) for visualisation.

|
C++
|
UTF-8
| 3,591 | 3.796875 | 4 |
[] |
no_license
|
#include <iostream>
using namespace std;
// A binary-search-tree node. The constructor now initializes *all* members:
// the original left key and parent uninitialized, so freshly created nodes
// held garbage until the caller happened to assign them.
class Node
{
public:
    int key;       // value stored in this node
    Node* left;    // left child (keys < key)
    Node* right;   // right child (keys >= key per tree_insert's tie-breaking)
    Node* parent;  // parent node; NULL for the root

    Node()
    {
        this->key = 0;
        this->left = NULL;
        this->right = NULL;
        this->parent = NULL;
    }
};
void preorder_tree_walk( Node* x );
void inorder_tree_walk( Node* x );
void postorder_tree_walk( Node* x );
Node* tree_search(Node* x, Node* k);
Node* tree_minimum( Node* x );
Node* tree_maximum( Node* x);
Node* tree_successor( Node* x );
void tree_insert( Node* &root, Node* z );
void tree_delete( Node* &root, Node* z );
void print_tree( Node* &root );
void demo();
// Entry point: run the BST demonstration.
int main()
{
    demo();
    return 0;
}
// Exercises insert/search/min/max/successor/delete on a small tree and prints
// the results. FIX: `root` was initialized with `new Node()` and immediately
// overwritten with NULL, leaking the allocation; it now starts as NULL.
// (The z1..z5 nodes are intentionally left alive for the demo's lifetime.)
void demo()
{
    Node* root = NULL;
    Node* z1 = new Node();
    Node* z2 = new Node();
    Node* z3 = new Node();
    Node* z4 = new Node();
    Node* z5 = new Node();
    z1->key = 10;
    z2->key = 20;
    z3->key = 40;
    z4->key = 50;
    z5->key = 0;
    cout << "Tree before insertion: " << endl;
    print_tree(root);
    tree_insert( root, z1 );
    tree_insert( root, z2 );
    tree_insert( root, z3 );
    tree_insert( root, z4 );
    cout << "Tree after insertion: " << endl;
    print_tree(root);
    // Search for a present key (z4) and an absent one (z5, not yet inserted).
    Node* temp = tree_search(root, z4);
    cout << "z4 " << " occurs at: " << temp << endl;
    temp = tree_search(root, z5);
    cout << "z5 occurs at: " << temp << endl;
    temp = tree_minimum( root );
    cout << "Minimum for tree is: " << temp->key << endl;
    temp = tree_maximum( root );
    cout << "Maximum for tree is: " << temp->key << endl;
    temp = tree_successor( root );
    cout << "Successor for: " << root->key << endl << "is: " << temp->key << endl;
    tree_insert( root, z5 );
    cout << "Tree after insertion: " << endl;
    print_tree(root);
    tree_delete( root, z3 );
    cout << "Tree after deletion: " << endl;
    print_tree(root);
}
// Pre-order traversal: node, then left subtree, then right subtree.
void preorder_tree_walk( Node* x )
{
    if (x == NULL)
        return;
    cout << x->key << "\t";
    preorder_tree_walk(x->left);
    preorder_tree_walk(x->right);
}

// In-order traversal: left subtree, node, right subtree (sorted order for a BST).
void inorder_tree_walk( Node* x )
{
    if (x == NULL)
        return;
    inorder_tree_walk(x->left);
    cout << x->key << "\t";
    inorder_tree_walk(x->right);
}

// Post-order traversal: left subtree, right subtree, then the node.
void postorder_tree_walk( Node* x )
{
    if (x == NULL)
        return;
    postorder_tree_walk(x->left);
    postorder_tree_walk(x->right);
    cout << x->key << "\t";
}
// Iterative BST lookup for k->key starting at x: descend right for larger
// keys and left for smaller. Returns the matching node, or NULL when the key
// is absent (the descent falls off the tree).
Node* tree_search(Node* x, Node* k)
{
    while (x != NULL && x->key != k->key)
    {
        x = (k->key > x->key) ? x->right : x->left;
    }
    return x;
}
// Smallest key in the subtree rooted at x: the leftmost node.
// Precondition: x is non-NULL.
Node* tree_minimum( Node* x )
{
    for (; x->left != NULL; x = x->left)
    {
    }
    return x;
}

// Largest key in the subtree rooted at x: the rightmost node.
// Precondition: x is non-NULL.
Node* tree_maximum( Node* x)
{
    for (; x->right != NULL; x = x->right)
    {
    }
    return x;
}
// In-order successor of x, or NULL when x holds the largest key.
// FIXES: (1) removed the placeholder `new Node()` that was immediately
// overwritten and leaked; (2) the climb loop tested `y->right == x` *before*
// `y != NULL`, dereferencing NULL when x was the maximum — the checks are now
// in the safe order.
Node* tree_successor( Node* x )
{
    // Case 1: a right subtree exists — the successor is its minimum.
    if( x->right != NULL )
        return tree_minimum(x->right);
    // Case 2: climb until we arrive from a left child (or run out of ancestors).
    Node* y = x->parent;
    while( y != NULL && y->right == x )
    {
        x = y;
        y = y->parent;
    }
    return y;
}
// Standard (unbalanced) BST insertion of node z; duplicates of an existing key
// end up in the left subtree. FIX: the original allocated two placeholder
// nodes with `new Node()` that were immediately overwritten — both leaked on
// every call; they are now plain pointer locals.
void tree_insert( Node* &root, Node* z )
{
    Node* y = NULL;   // trailing parent of the descent
    Node* x = root;
    // Descend to the leaf position where z belongs.
    while( x != NULL )
    {
        y = x;
        if( z->key < x->key )
            x = x->left;
        else
            x = x->right;
    }
    z->parent = y;
    if( y == NULL )
        root = z;          // tree was empty
    else if( z->key > y->key )
        y->right = z;
    else
        y->left = z;
}
// CLRS-style BST deletion of node z. y is the node physically spliced out
// (z itself, or z's successor when z has two children); x is y's only child.
// FIX: removed the two placeholder `new Node()` allocations that leaked on
// every call.
// NOTE(review): the spliced-out node y is unlinked but never freed — ownership
// stays with the caller, matching the original behavior.
void tree_delete( Node* &root, Node* z )
{
    Node* y;
    if( z->left == NULL || z->right == NULL )
        y = z;
    else
        y = tree_successor(z);
    // x is y's single child (possibly NULL).
    Node* x = (y->left != NULL) ? y->left : y->right;
    if( x != NULL )
        x->parent = y->parent;
    // Splice y out of the tree.
    if( y->parent == NULL )
        root = x;
    else if( y == y->parent->left )
        y->parent->left = x;
    else
        y->parent->right = x;
    // When the successor was removed, move its key into z.
    if( y != z )
        z->key = y->key;
}
// Dump the tree's keys in sorted (in-order) order, padded by blank lines.
void print_tree( Node* &x )
{
    cout << endl;
    inorder_tree_walk(x);
    cout << endl;
}
|
C++
|
UTF-8
| 2,680 | 2.78125 | 3 |
[] |
no_license
|
#ifndef CANVAS_H_
#define CANVAS_H_
#include <wx/wxprec.h>
#ifndef WX_PRECOMP
#include <wx/wx.h>
#endif
#include <set>
#include <vector>
#include <string>
class Gesture;
class Canvas;
//Some event.
class CanvasEvent;
wxDECLARE_EVENT(CANVAS_EVENT, CanvasEvent);
// Custom wxWidgets event fired by Canvas to announce gesture lifecycle changes.
// Carries a pointer back to the emitting Canvas so subscribers can query it.
class CanvasEvent: public wxCommandEvent
{
public:
    // self explain event id.
    enum {NEW_GESTURE=1, UPDATE_GESTURE, COMPLETE_GESTURE};
public:
    CanvasEvent(wxEventType commandType = CANVAS_EVENT, int id = 0);
    // You *must* copy here the data to be transported
    CanvasEvent(const CanvasEvent& event);
    // Required for sending with wxPostEvent()
    virtual wxEvent* Clone() const;
    // Fluent setter; returns *this so calls can be chained.
    CanvasEvent& SetCanvas(Canvas *new_canvas);
    Canvas *GetCanvas() const;
private:
    // The canvas that emitted this event (not owned).
    Canvas *canvas;
};
typedef void (wxEvtHandler::*CanvasEventFunction)(CanvasEvent &);
#define CanvasEventHandler(func) wxEVENT_HANDLER_CAST(CanvasEventFunction, func)
#define EVT_CANVAS(id, func) \
wx__DECLARE_EVT1(CANVAS_EVENT, id, CanvasEventHandler(func))
/**
* See gesture for a brief design overview.
*
* Right now Canvas can only draw gesture and text.
* If more things needs to be added in the future, we need to do some engineering.
* Perhaps, in OOP manner, define some kind of interface called Drawable, then implement it.
*
* The Canvas also implement some event, @see CanvasEvent. The intention is to decouple
* the display of the new gesture and anything else, which could be drawing other stuffs and log
* the udpate of new gesture.
*/
// See gesture for a brief design overview.
//
// Right now Canvas can only draw gesture and text.
// If more things needs to be added in the future, we need to do some engineering.
// Perhaps, in OOP manner, define some kind of interface called Drawable, then implement it.
//
// The Canvas also implement some event, @see CanvasEvent. The intention is to decouple
// the display of the new gesture and anything else, which could be drawing other stuffs and log
// the update of new gesture.
class Canvas : public wxPanel {
private:
    // A text label and the canvas position it is drawn at.
    struct Text {
        std::string data;
        float x, y;
        Text(const std::string _d, float _x, float _y) : data(_d), x(_x), y(_y) {
        }
        Text() {}
    };
private:
    typedef std::set<wxEvtHandler *> Subscriptions;
    typedef std::vector<Gesture *> Gestures;
    typedef std::vector<Text> Texts;
public :
    Canvas(wxFrame *parent);
    // Mouse handlers drive the gesture state machine (see MouseState below).
    void OnMouseLeftUp(wxMouseEvent &event);
    void OnMouseLeftDown(wxMouseEvent &event);
    void OnMouseMove(wxMouseEvent &event);
    // The gesture currently being drawn, or NULL when none is in progress.
    Gesture *GetCurrentGesture() const { return cur_gesture; }
    void ClearCurrentGesture() { cur_gesture = 0; }
    /**
     * Subscribe the event published by canvas for client.
     */
    void Subscribe(wxEvtHandler *client);
    void Unsubscribe(wxEvtHandler *client);
    void ClearGesture();
    void DrawGeture(Gesture *g);
    void ClearText();
    void DrawText(std::string data, int x, int y);
private:
    enum MouseState {DOWN, UP};
private:
    void PaintEvent(wxPaintEvent &evt);
    // Forwards `event` to every subscribed handler.
    void Publish(const wxEvent &event);
    DECLARE_EVENT_TABLE()
private:
    MouseState mouse_state;
    // In-progress gesture (not owned by this list until completed — TODO confirm ownership).
    Gesture *cur_gesture;
    Subscriptions subscriptions;
    Gestures gestures;
    Texts texts;
};
#endif //CANVAS_H_
|
Java
|
UTF-8
| 635 | 2.34375 | 2 |
[] |
no_license
|
package entity;
/**
 * Contract for a food item that can be updated, matched against a search
 * keyword, and queried for its pricing/ordering attributes.
 */
public interface Food {
// NOTE(review): "updateDeatail" is a typo for "updateDetail"; kept as-is
// because renaming an interface method would break all implementors/callers.
// Replaces name, per-person price, minimum order and categories in one call.
public void updateDeatail(String name,Double price_per_person,int minimum_order,Catagory catagories[]);
public void updateName(String name);
public void updatePrice_per_person(double pricePerPerson);
public void updateMinimumOrder(int minimum);
public void updateCatagories(Catagory catagories[]);
// Returns true when this food matches the given search keyword.
public boolean isMatch(String keyword);
public int getId();
public String getName();
public double getPrice_per_person();
public int getMinimum_order();
public Catagory[] getCatagories();
// Timestamps exposed as int; units (epoch seconds?) are not visible here -- TODO confirm.
public int getCreate_date();
public int getLast_modified_date();
// Null-object check: true when this instance represents "no food".
public boolean isNil();
}
|
Java
|
UTF-8
| 3,384 | 2.734375 | 3 |
[] |
no_license
|
package by.epam.naumovich.film_ordering.dao;
import java.util.Set;
import by.epam.naumovich.film_ordering.bean.Review;
import by.epam.naumovich.film_ordering.dao.exception.DAOException;
/**
 * Defines methods for implementing in the DAO layer for the Review entity.
 *
 * @author Dmitry Naumovich
 * @version 1.0
 */
public interface IReviewDAO {
/**
 * Adds new review to the data source
 *
 * @param review new review entity
 * @throws DAOException
 */
void addReview(Review review) throws DAOException;
/**
 * Deletes review from the data source by user and film IDs
 *
 * @param userID user ID
 * @param filmID film ID
 * @throws DAOException
 */
void deleteReview(int userID, int filmID) throws DAOException;
/**
 * Returns all reviews that are present in the data source
 *
 * @return a set of reviews
 * @throws DAOException
 */
Set<Review> getAllReviews() throws DAOException;
/**
 * Returns a necessary part of all reviews from the data source
 *
 * @param start start index of necessary reviews part
 * @param amount amount of reviews to be returned
 * @return a part of the set of all reviews
 * @throws DAOException
 */
Set<Review> getAllReviewsPart(int start, int amount) throws DAOException;
/**
 * Searches for reviews in the data source by user ID
 *
 * @param id user ID
 * @return a set of found reviews
 * @throws DAOException
 */
Set<Review> getReviewsByUserId(int id) throws DAOException;
/**
 * Searches for the reviews in the data source by user ID and returns the necessary part of them
 *
 * @param userID user ID
 * @param start start index of necessary part
 * @param amount amount of reviews to be returned
 * @return a set of found reviews
 * @throws DAOException
 */
Set<Review> getReviewsPartByUserId(int userID, int start, int amount) throws DAOException;
/**
 * Searches for reviews in the data source by film ID
 *
 * @param id film ID
 * @return a set of found reviews
 * @throws DAOException
 */
Set<Review> getReviewsByFilmId(int id) throws DAOException;
/**
 * Searches for the reviews in the data source by film ID and returns the necessary part of them
 *
 * @param id film ID
 * @param start start index of necessary part
 * @param amount amount of reviews to be returned
 * @return a set of found reviews
 * @throws DAOException
 */
Set<Review> getReviewsPartByFilmId(int id, int start, int amount) throws DAOException;
/**
 * Searches for review in the data source by user and film IDs
 *
 * @param userID user ID
 * @param filmID film ID
 * @return found review or null if it was not found
 * @throws DAOException
 */
Review getReviewByUserAndFilmId(int userID, int filmID) throws DAOException;
/**
 * Counts the number of all reviews in the data source
 *
 * @return total review amount
 * @throws DAOException
 */
int getNumberOfReviews() throws DAOException;
/**
 * Counts the number of user reviews in the data source
 *
 * @param userID ID of the user whose reviews are counted
 * @return total user reviews amount
 * @throws DAOException
 */
int getNumberOfUserReviews(int userID) throws DAOException;
/**
 * Counts the number of film reviews in the data source
 *
 * @param filmID ID of the film which reviews are counted
 * @return total film reviews amount
 * @throws DAOException
 */
int getNumberOfFilmReviews(int filmID) throws DAOException;
}
|
JavaScript
|
UTF-8
| 3,668 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
// Builds DOM pages from the global `pages` array (each page is a list of
// one-key objects: tag entries become child elements, "layout" entries become
// CSS classes, other keys become event-handler bodies), appends them to
// document.body, then renders the global `layouts` and `themes` objects into
// the first <style> element as CSS rules.
// NOTE(review): `pagecnt`, `tag`, `e`, `page`, `userdefined`, `layout`,
// `properties` and `theme` are undeclared loop variables (implicit globals);
// left untouched here because other code may depend on them leaking.
function compile() {
for (pagecnt of pages) {
let childrens = [], events = {}, layout = [];
// Collect all "layout" entries of this page first; they become classes
// applied to the page element and to every child element.
pagecnt.forEach((item, i) => {
if(Object.keys(item)[0] == "layout") layout.push(pagecnt[i].layout);
});
for (var cnt of pagecnt) {
tag = Object.keys(cnt)[0];
// Split "TAG.cls1.cls2" into tag and class list; shift() drops the tag part.
let clss = tag.match(/^[^\.]+/)?tag.replace(/^[^\.]+/, "").split("."):[];
clss.shift();
clss.unshift(...layout)
// Upper-case keys are element tags (see isTag); everything else except
// "layout"/"comment" is treated as an event-handler source string.
if(isTag(tag)) childrens.push(createAElement(tag, clss, [cnt[tag]]));
else if (tag != "layout" && tag != "comment") events[tag] = cnt[tag];
}
let page = createAElement("DIV", ["pages", layout], childrens);
// Compile the collected handler bodies into functions on the page element.
for (e of Object.keys(events)) page[e] = new Function(events[e]);
if(page.beforeonload) page.beforeonload(localthis=page);
document.body.appendChild(page);
}
// Fire onload for every page after all pages exist in the DOM.
for (page of document.getElementsByClassName("pages"))if(page.onload)page.onload(localthis=page);
let style = document.getElementsByTagName("STYLE")[0];
// Emit `layouts` as CSS: the "tags" group maps selectors directly, any other
// group name becomes a ".<group> <selector>" scoped rule ("0" keys are blanked).
for (userdefined of Object.keys(layouts))
for (layout of Object.keys(layouts[userdefined])) {
style.innerHTML += ((userdefined == "tags")?"":("."+userdefined+" "))+(layout.replace("0",""))+" {\n";
for (properties of Object.keys(layouts[userdefined][layout||0])) style.innerHTML += "\t\t"+properties+": "+layouts[userdefined][layout||0][properties]+";\n";
style.innerHTML += "}\n\n";
}
// Same for `themes`, but scoped under ".pages.page.<group>".
for (userdefined of Object.keys(themes))
for (theme of Object.keys(themes[userdefined])) {
style.innerHTML += ((userdefined == "tags")?"":(".pages.page."+userdefined+" "))+(theme.replace("0",""))+" {\n";
for (properties of Object.keys(themes[userdefined][theme||0])) style.innerHTML += "\t\t"+properties+": "+themes[userdefined][theme||0][properties]+";\n";
style.innerHTML += "}\n\n";
}
}
/**
 * Creates a DOM element with the given classes and children.
 *
 * @param {string} type  Tag name; an optional "@<digit>" marker is stripped and
 *   ".cls" suffixes embedded in the string are treated as extra classes.
 * @param {Array} clss  CSS classes to apply (entries are stringified, so
 *   nested arrays from compile() keep their original comma-joined rendering).
 * @param {Array<string|HTMLElement>} inner  Children: strings are appended as
 *   HTML markup, elements are appended as nodes.
 * @returns {HTMLElement} the newly created element.
 */
function createAElement(type, clss, inner) {
    type = type.replace(/@\d/, "");
    const tclss = type.split(".");
    type = tclss.shift();
    // BUG FIX: the original called clss.concat(tclss) and discarded the return
    // value (Array.prototype.concat does not mutate), so classes embedded in
    // `type` were silently dropped. Merge into a new array instead, leaving the
    // caller's array untouched.
    const allClasses = clss.concat(tclss);
    // BUG FIX: `domelem` (and the loop variable `c` below) were undeclared
    // implicit globals; declare them locally.
    const domelem = document.createElement(type);
    for (const c of allClasses) domelem.className += c + " ";
    // Trim the trailing separator space added by the loop above.
    domelem.className = domelem.className.substr(0, domelem.className.length - 1);
    for (const i of inner)
        if (typeof i == "string") domelem.innerHTML += i;
        else if (i instanceof HTMLElement) domelem.appendChild(i);
    return domelem;
}
// A key denotes an HTML tag (rather than an event name or "layout") when the
// part before the first "." is entirely upper-case.
function isTag(text) {
    const head = String(text).split(".")[0];
    return head === head.toUpperCase();
}
/**
 * Upper-cases the part(s) of `s` selected by `q`.
 *
 * @param {string} s  Input string.
 * @param {number|string|RegExp} q  A number selects the first q characters
 *   (".{q}"), a string is used as a regex source with surrounding slashes
 *   stripped, anything else is handed to the RegExp constructor unchanged.
 * @param {boolean} [g]  When truthy, apply globally instead of to the first match.
 * @returns {string} the transformed string.
 */
function toUpperCase(s, q = 1, g) {
    let source;
    if (typeof q == "number") {
        source = ".{" + q + "}";
    } else if (typeof q == "string") {
        source = q.replace(/^\/|\/$/g, "");
    } else {
        source = q;
    }
    const pattern = new RegExp(source, g ? "g" : "");
    return s.replace(pattern, (match) => match.toUpperCase());
}
// Inverse of compile(): reads page <div>s back out of document.body and
// reconstructs the one-key-object page description (children, layout classes,
// event-handler sources). With all=true every child of <body> is decompiled,
// otherwise only elements carrying the "page" class.
function decompile(all=false) {
let childs = document.body.children, out = [];
for(let i = 0; i < childs.length; i++)
if(all || childs[i].classList.contains("page")) {
let page = [];
// Each child element becomes {"TAG.classes": innerHTML}; the "page"
// class is stripped and the remaining class list is re-joined with dots.
for(let elem of childs[i].children) {
let obj = {};
obj[elem.tagName+elem.classList.toString().replace(/page/g,"").replace(/(?:^| )([^ ])/, ".$1")] = elem.innerHTML;
page.push(obj);
}
// Remaining page-level classes (minus "pages") become the layout entry.
let list = childs[i].classList.value.replace(/pages ?/g,"");
page.push({layout: list})
// Recover handler sources by unwrapping the "function anonymous(){...}"
// text that new Function() produced in compile().
for (let evt of testEvents(childs[i])) {
let obj = {};
obj[evt[0]] = evt[1].toString().replace(/\n?\r?/g,"").replace(/function anonymous\(\) \{(.*?)}/,"$1");
page.push(obj);
}
out.push(page);
}
return out;
}
// Event-handler property names that compile() may attach to a page element.
const evts = [
    "onbeforeload", "onclick", "onchange", "onload", "onmousemove", "onmouseup", "onmousedown", "onmouseenter", "onmouseleave", "onmouseout", "onmousewheel",
]
/**
 * Collects the known event handlers set on a DOM element (or any object).
 *
 * @param {object} dom  Element or plain object to inspect.
 * @returns {Array<[string, Function]>} [name, handler] pairs, in `evts` order,
 *   for every handler that is set (truthy).
 */
function testEvents(dom) {
    // BUG FIX: the loop variable `evt` was an undeclared implicit global; a
    // leftover console.log(dom) debug statement has also been removed.
    const out = [];
    for (const evt of evts) if (dom[evt]) out.push([evt, dom[evt]]);
    return out;
}
|
Swift
|
UTF-8
| 610 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
//
// BookListPresenter.swift
// TheCitadel
//
// Created by Mateusz Popiało on 05/12/2018.
// Copyright © 2018 Mateusz Popiało. All rights reserved.
//
import Foundation
/// Static configuration handed to the view when it is first set up.
struct BookListViewConfig {
var title : String
}
/// Abstraction of the book-list screen (implemented by a view controller).
/// Declared class-bound so a presenter can hold it without retain cycles.
protocol BookListView : class {
/// Applies one-time configuration (e.g. the screen title).
func configureView(using config: BookListViewConfig)
/// Re-renders the list with the given books.
func update(using books: [Book])
/// Shows or hides the central loading indicator.
func setCentralIndicatorVisibility(visible: Bool)
}
/// Presentation logic for the book list: owns the fetch service, drives the
/// attached view, and handles book selection.
protocol BookListPresenter {
var bookService : FetchBooksService { get }
/// Connects the view the presenter will drive.
func attach(view: BookListView)
/// Called when the user picks a book from the list.
func selectBook(book: Book)
/// Starts fetching all books; results are pushed to the attached view.
func fetchAllBooks()
}
Python
|
UTF-8
| 586 | 4.125 | 4 |
[] |
no_license
|
import random

# Draw a random sample of integers and print basic statistics about it:
# the raw sample, min, max, mean, the sorted sample, and the median.
SAMPLE_SIZE = 10  # was hard-coded as the literal 10 in several places below

num = []  # the random sample
for _ in range(SAMPLE_SIZE):
    # randint is inclusive on both ends, so values fall in [0, 100]
    num.append(random.randint(0, 100))
print(num)  # raw sample
print(min(num))  # smallest value
print(max(num))  # largest value
# Mean: derive the divisor from the list length instead of a magic 10.
print(sum(num) / len(num))
num.sort()  # sort in place so positional statistics below are meaningful
print(num)  # sorted sample
# Median of an even-length sorted list: average of the two middle elements.
# (Generalizes the original hard-coded num[5]/num[4] for a 10-element list.)
mid = len(num) // 2
print((num[mid] + num[mid - 1]) / 2)
C#
|
UTF-8
| 1,574 | 3.046875 | 3 |
[] |
no_license
|
using DataAccess.Dao.Criteria;
using DataAccess.Dao.Interfaces;
using DataAccess.Entities;
using System.Collections.Generic;
using System.Linq;
namespace DataAccess.Dao
{
/// <summary>
/// Entity Framework backed data-access object for <see cref="Room"/> records.
/// Each call opens a fresh <see cref="HotelBookingDb"/> context and disposes
/// it before returning.
/// </summary>
public class RoomDao : IRoomDao
{
    /// <summary>Fetches the single room with the given primary key.</summary>
    public Room GetRoomByRoomId(int roomId)
    {
        using (var db = new HotelBookingDb())
        {
            return db.Room.Single(room => room.Id == roomId);
        }
    }

    /// <summary>Fetches the single room with the given name.</summary>
    public Room GetRoomByRoomName(string roomName)
    {
        using (var db = new HotelBookingDb())
        {
            return db.Room.Single(room => room.Name == roomName);
        }
    }

    /// <summary>Fetches every room whose id appears in the given list.</summary>
    public List<Room> GetRoomByRoomsIds(List<int> roomIds)
    {
        using (var db = new HotelBookingDb())
        {
            return db.Room.Where(room => roomIds.Contains(room.Id)).ToList();
        }
    }

    /// <summary>
    /// Fetches rooms large enough for the total party size (adults + children)
    /// described by the search criteria.
    /// </summary>
    public IList<Room> GetRooms(SearchRoomCriteria criteria)
    {
        int partySize = criteria.Adults + criteria.Children;
        using (var db = new HotelBookingDb())
        {
            return db.Room.Where(room => room.MaxPerson >= partySize).ToList();
        }
    }

    /// <summary>Fetches all rooms.</summary>
    public IList<Room> GetAllRooms()
    {
        using (var db = new HotelBookingDb())
        {
            return db.Room.ToList();
        }
    }
}
}
|
C#
|
UTF-8
| 2,031 | 3.546875 | 4 |
[] |
no_license
|
using System;
using System.Collections.Generic;
// Competitive-programming style solver. The n x n distance matrix is encoded
// digit-by-digit across four string arrays (thousands/hundreds/tens/ones, one
// character per cell). worstDistance() decodes it, then searches for the
// maximum tour length the greedy nearest-neighbour heuristic (below) can be
// made to produce by overwriting a single off-diagonal entry with a candidate
// value.
public class GreedyTravelingSalesman {
public int worstDistance(string[] thousands, string[] hundreds, string[] tens, string[] ones) {
int n = thousands.Length;
// Decode the four digit layers into a numeric distance matrix.
int[,] distance = new int[n, n];
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
distance[i, j] = getDistance(thousands[i][j], hundreds[i][j], tens[i][j], ones[i][j]);
}
}
int result = 0;
for (int i = 0; i < n; ++i) {
// Candidate replacement values for row i: the maximum encodable distance
// (9999) plus, for every existing entry, that value and one less than it
// (clamped to at least 1). These are the values that can change which
// neighbour the greedy heuristic picks.
List<int> values = new List<int>();
values.Add(9999);
for (int j = 0; j < n; ++j) {
values.Add(Math.Max(1, distance[i, j]));
values.Add(Math.Max(1, distance[i, j] - 1));
}
for (int j = 0; j < n; ++j) {
if (i != j) {
foreach (int value in values) {
// Temporarily overwrite one entry, evaluate the greedy tour,
// then restore the original value.
int tmp = distance[i, j];
distance[i, j] = value;
result = Math.Max(result, worstDistance(n, distance));
distance[i, j] = tmp;
}
}
}
}
return result;
}
// Reassembles one matrix cell from its four digit characters.
private int getDistance(char thousand, char hundred, char ten, char one) {
return 1000 * (thousand - '0') + 100 * (hundred - '0') + 10 * (ten - '0') + (one - '0');
}
// Runs the greedy nearest-neighbour heuristic from node 0: repeatedly move to
// the closest unvisited node (lowest index wins ties via strict '<') and sum
// the edge lengths. The tour does not return to the start.
private int worstDistance(int n, int[,] distance) {
int result = 0;
bool[] isVisited = new bool[n];
int current = 0;
isVisited[current] = true;
for (int i = 1; i < n; ++i) {
int next = -1;
for (int j = 0; j < n; ++j) {
if (current != j && !isVisited[j] && (next == -1 || distance[current, j] < distance[current, next])) {
next = j;
}
}
isVisited[next] = true;
result += distance[current, next];
current = next;
}
return result;
}
}
Ruby
|
UTF-8
| 1,331 | 4.25 | 4 |
[] |
no_license
|
#06/30/16
#Implement the Floyd's triangle..
#cannot use a ruby method that would solve it in 1 method
#ask for the range: how many rows you want triangle to be.
#
# 1
# 2 3
# 4 5 6
# 7 8 9 10
# 11 12 13 14 15
# going up till number of rows is reached.
# NOTE(review): this script contains two independent solutions; each one
# prompts for and reads its own row count from stdin.
#ask user number how wide triangle base is:
puts "how many rows you want the triangle to have."
#capture user input
rows = gets.chomp.to_i
puts "Your Floyd's Triangle to have #{rows} rows."
# #create array
# arr = (1...92).to_a
# #puts arr
number = 0
#number of rows
x = rows
#number of columns & iterator
y = 1
#build tringle
while x >= y
#take number and keep adding 1 and printing to screen
#number will equal the last value it evaluated to
y.times do
number += 1
print "#{number} "
end
y += 1
#create a new line to create rows
puts " "
#puts "you've iterated this many times: #{y}"
end
#############################
##RAJIV ANSWER##
puts "Enter the number of rows you want to print"
rows_requested = gets
row_count = 1
starting_number = 1
# Prints one triangle row of row_count consecutive integers beginning at
# starting_number, and returns the next unused integer.
def print_number(row_count, starting_number)
row_count.times do |k|
print starting_number.to_s + ' '
starting_number += 1
end
starting_number
end
# Emit rows 1..rows_requested, threading the running counter through
# print_number's return value.
while row_count <= rows_requested.to_i
starting_number = print_number(row_count, starting_number)
puts "\n"
row_count += 1
end
|
Go
|
UTF-8
| 4,399 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
package integration
import (
"bytes"
"os"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"gotest.tools/v3/fs"
"gotest.tools/v3/icmd"
)
// setupDatasourcesGitTest builds a throwaway git repository for datasource
// tests: it creates a temp dir containing a "repo" tree with JSON/text
// fixtures plus a "subdir", runs `git init`/`add`/`commit` on it, and
// registers cleanup on t. It returns the temp dir (the repo lives at
// <dir>/repo).
func setupDatasourcesGitTest(t *testing.T) *fs.Dir {
tmpDir := fs.NewDir(t, "gomplate-inttests",
fs.WithDir("repo",
fs.WithFiles(map[string]string{
"config.json": `{"foo": {"bar": "baz"}}`,
"jsonfile": `{"foo": {"bar": "baz"}}`,
}),
fs.WithDir("subdir",
fs.WithFiles(map[string]string{
"foo.txt": "hello world",
"bar.json": `{"qux": "quux"}`,
}),
),
),
)
t.Cleanup(tmpDir.Remove)
repoPath := tmpDir.Join("repo")
// Initialize the repo and commit every fixture so git datasources can read
// them at HEAD.
icmd.RunCommand("git", "init", repoPath).
Assert(t, icmd.Expected{Out: "Initialized empty Git repository"})
icmd.RunCmd(icmd.Command("git", "add", "config.json"), icmd.Dir(repoPath)).Assert(t, icmd.Expected{})
icmd.RunCmd(icmd.Command("git", "add", "jsonfile"), icmd.Dir(repoPath)).Assert(t, icmd.Expected{})
icmd.RunCmd(icmd.Command("git", "add", "subdir"), icmd.Dir(repoPath)).Assert(t, icmd.Expected{})
icmd.RunCmd(icmd.Command("git", "commit", "-m", "Initial commit"), icmd.Dir(repoPath)).Assert(t, icmd.Expected{})
return tmpDir
}
// startGitDaemon serves the fixture repository from setupDatasourcesGitTest
// over the git protocol via `git daemon` on a free local port. The daemon is
// killed (and its exit asserted) via t.Cleanup. It returns the daemon's
// listen address.
func startGitDaemon(t *testing.T) string {
tmpDir := setupDatasourcesGitTest(t)
pidDir := fs.NewDir(t, "gomplate-inttests-pid")
t.Cleanup(pidDir.Remove)
port, addr := freeport(t)
gitDaemon := icmd.Command("git", "daemon",
"--verbose",
"--port="+strconv.Itoa(port),
"--base-path="+tmpDir.Path(),
"--pid-file="+pidDir.Join("git.pid"),
"--export-all",
tmpDir.Join("repo", ".git"),
)
gitDaemon.Stdin = nil
gitDaemon.Stdout = &bytes.Buffer{}
gitDaemon.Dir = tmpDir.Path()
result := icmd.StartCmd(gitDaemon)
t.Cleanup(func() {
err := result.Cmd.Process.Kill()
require.NoError(t, err)
_, err = result.Cmd.Process.Wait()
require.NoError(t, err)
result.Assert(t, icmd.Expected{ExitCode: 0})
})
// give git time to start
time.Sleep(500 * time.Millisecond)
return addr
}
// TestDatasources_GitFileDatasource exercises git+file:// datasources against
// the local fixture repo, covering the "//" subpath separator in several
// positions (in the URL, in the runtime subpath argument, and the relative
// ".//" form).
func TestDatasources_GitFileDatasource(t *testing.T) {
tmpDir := setupDatasourcesGitTest(t)
u := filepath.ToSlash(tmpDir.Join("repo"))
o, e, err := cmd(t,
"-d", "config=git+file://"+u+"//config.json",
"-i", `{{ (datasource "config").foo.bar }}`,
).run()
assertSuccess(t, o, e, err, "baz")
// subpath beginning with // is an antipattern, but should work for
// backwards compatibility, params from subpath are used
o, e, err = cmd(t,
"-d", "repo=git+file://"+u,
"-i", `{{ (datasource "repo" "//jsonfile?type=application/json" ).foo.bar }}`,
).run()
assertSuccess(t, o, e, err, "baz")
// subpath beginning with // is an antipattern, but should work for
// backwards compatibility
o, e, err = cmd(t,
"-d", "repo=git+file://"+u,
"-i", `{{ (datasource "repo" "//config.json" ).foo.bar }}`,
).run()
assertSuccess(t, o, e, err, "baz")
// subdir in datasource URL, relative subpath
o, e, err = cmd(t,
"-d", "repo=git+file://"+u+"//subdir/",
"-i", `{{ include "repo" "foo.txt" }}`,
).run()
assertSuccess(t, o, e, err, "hello world")
// ds URL ends with /, relative subpath beginning with .// is preferred
o, e, err = cmd(t,
"-d", "repo=git+file://"+u+"/",
"-i", `{{ include "repo" ".//subdir/foo.txt" }}`,
).run()
assertSuccess(t, o, e, err, "hello world")
}
// TestDatasources_GitDatasource reads a datasource over the bare git://
// protocol from a locally started `git daemon` serving the fixture repo.
// Skipped on Windows, where the daemon is not run.
func TestDatasources_GitDatasource(t *testing.T) {
if isWindows {
t.Skip("not going to run git daemon on Windows")
}
addr := startGitDaemon(t)
o, e, err := cmd(t,
"-c", "config=git://"+addr+"/repo//config.json",
"-i", `{{ .config.foo.bar}}`,
).run()
assertSuccess(t, o, e, err, "baz")
}
// TestDatasources_GitHTTPDatasource reads datasources over git+https:// from
// public GitHub fixture repositories (requires network access); the second
// case covers a repo whose default branch is 'main'.
func TestDatasources_GitHTTPDatasource(t *testing.T) {
o, e, err := cmd(t,
"-c", "short=git+https://github.com/git-fixtures/basic//json/short.json",
"-i", `{{ .short.glossary.title}}`,
).run()
assertSuccess(t, o, e, err, "example glossary")
// and one with a default branch of 'main'
o, e, err = cmd(t,
"-c", "data=git+https://github.com/hairyhenderson/git-fixtures.git//small_test.json",
"-i", `{{ .data.foo}}`,
).run()
assertSuccess(t, o, e, err, "bar")
}
// TestDatasources_GitSSHDatasource reads a datasource over git+ssh:// from
// GitHub; skipped unless an SSH agent is available (SSH_AUTH_SOCK set), since
// authentication relies on the agent.
func TestDatasources_GitSSHDatasource(t *testing.T) {
if os.Getenv("SSH_AUTH_SOCK") == "" {
t.Skip("SSH Agent not running")
}
o, e, err := cmd(t,
"-c", "short=git+ssh://git@github.com/git-fixtures/basic//json/short.json",
"-i", `{{ .short.glossary.title}}`,
).run()
assertSuccess(t, o, e, err, "example glossary")
}
|
Markdown
|
UTF-8
| 14,945 | 2.9375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
We will be working with a metagenome sample from an Alaskan Oil Reservoir sequenced in [this study](https://mbio.asm.org/content/7/1/e01669-15).
The sample is named SB1 because it was sampled from the Schrader Bluff Formation.
We will start working with this sample by downloading it.
Then, we will assess its quality using FastQC.
## Finding Sequencing Data
There are many ways to discover and download sequencing data.
In this case, we are using a sample that was detailed in a paper.
When this is the case, it is usually easiest to find this sample by looking at the paper itself.
We can see on Page 9 that the authors have documented the accession numbers of their samples (metagenome-assembled genomes and raw sequences) in the paper:


We can use this information to find a link to use to download the raw sequencing data.
We see that the raw reads are in GenBank under the accession `SRP057267`.
Let's navigate to GenBank and see what these files look like there.
Searching all databases for `SRP057267`, we find the following result: https://www.ncbi.nlm.nih.gov/sra/SRP057267
We see that the raw reads are in the Sequence Read Archive.
The last sample is the one we are interested in: `metagenome SB1 from not soured petroleum reservoir, Schrader bluffer formation, Alaska North Slope`.
Clicking on this link, we see the following description:

If we click on the run link `SRR1976948`, this takes us to the SRA Run Browser.
The Run Browser provides some information about the raw reads, including a estimate of taxonomic composition.
It's worth it to explore the Run Browser, but there are easier ways to download data than using the SRA.
The SRA is mirrored by the [European Nucleotide Archive (ENA)](https://www.ebi.ac.uk/ena).
We can search for our accession number on the ENA as well.
If we click on the result, we see the following page:

## Downloading Sequencing Data
We can click directly on the `FASTQ files (FTP)` to download them to our computer.
Alternative, we can copy the links and use these to download the files to a remote computer like Farm.
Before we download these files, let's set up a directory structure that will help us stay organized.
```
cd ~
mkdir 2020_rotation_project
cd 2020_rotation_project
mkdir raw_data
cd raw_data
```
Then, we can download the data into our `raw_data` directory.
```
wget ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR197/008/SRR1976948/SRR1976948_1.fastq.gz
wget ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR197/008/SRR1976948/SRR1976948_2.fastq.gz
```
## FASTQ format
Although it looks complicated (and it is), we can understand the
[fastq](https://en.wikipedia.org/wiki/FASTQ_format) format with a little decoding. Some rules about the format
include...
|Line|Description|
|----|-----------|
|1|Always begins with '@' and then information about the read|
|2|The actual DNA sequence|
|3|Always begins with a '+' and sometimes the same info in line 1|
|4|Has a string of characters which represent the quality scores; must have same number of characters as line 2|
We can view the first complete read in a fastq file by using `head` to look at the first four lines.
Because the our files are gzipped, we first temporarily decompress them with `zcat`.
```
zcat SRR1976948_2.fastq.gz | head -n 4
```
Using a different SRR accession for example purposes, the first four lines of the file look something like this:
```
@SRR2584863.1 HWI-ST957:244:H73TDADXX:1:1101:4712:2181/1
TTCACATCCTGACCATTCAGTTGAGCAAAATAGTTCTTCAGTGCCTGTTTAACCGAGTCACGCAGGGGTTTTTGGGTTACCTGATCCTGAGAGTTAACGGTAGAAACGGTCAGTACGTCAGAATTTACGCGTTGTTCGAACATAGTTCTG
+
CCCFFFFFGHHHHJIJJJJIJJJIIJJJJIIIJJGFIIIJEDDFEGGJIFHHJIJJDECCGGEGIIJFHFFFACD:BBBDDACCCCAA@@CA@C>C3>@5(8&>C:9?8+89<4(:83825C(:A#########################
```
Line 4 shows the quality for each nucleotide in the read.
Quality is interpreted as the probability of an incorrect base call (e.g. 1 in 10) or, equivalently, the base call accuracy (e.g. 90%).
To make it possible to line up each individual nucleotide with its quality score, the numerical score is converted into a code where each individual character represents the numerical quality score for an individual nucleotide. 'For example, in the line above, the quality score line is:
```
CCCFFFFFGHHHHJIJJJJIJJJIIJJJJIIIJJGFIIIJEDDFEGGJIFHHJIJJDECCGGEGIIJFHFFFACD:BBBDDACCCCAA@@CA@C>C3>@5(8&>C:9?8+89<4(:83825C(:A#########################
```
The numerical value assigned to each of these characters depends on the sequencing platform that generated the reads. The sequencing machine used to generate our data uses the standard Sanger quality PHRED score encoding, using Illumina version 1.8 onwards.
Each character is assigned a quality score between 0 and 41 as shown in the chart below.
```
Quality encoding: !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJ
| | | | |
Quality score: 01........11........21........31........41
```
Each quality score represents the probability that the corresponding nucleotide call is incorrect.
This quality score is logarithmically based, so a quality score of 10 reflects a base call accuracy of 90%, but a quality score of 20 reflects a base call accuracy of 99%.
These probability values are the results from the base calling algorithm and depend on how much signal was captured for the base incorporation.
Looking back at our example read:
```
@SRR2584863.1 HWI-ST957:244:H73TDADXX:1:1101:4712:2181/1
TTCACATCCTGACCATTCAGTTGAGCAAAATAGTTCTTCAGTGCCTGTTTAACCGAGTCACGCAGGGGTTTTTGGGTTACCTGATCCTGAGAGTTAACGGTAGAAACGGTCAGTACGTCAGAATTTACGCGTTGTTCGAACATAGTTCTG
+
CCCFFFFFGHHHHJIJJJJIJJJIIJJJJIIIJJGFIIIJEDDFEGGJIFHHJIJJDECCGGEGIIJFHFFFACD:BBBDDACCCCAA@@CA@C>C3>@5(8&>C:9?8+89<4(:83825C(:A#########################
```
we can now see that there is a range of quality scores, but that the end of the sequence is very poor (`#` = a quality score of 2).
How does the first read in `SRR1976948_2.fastq.gz` compare to this example?
## Assessing Quality with FastQC
In real life, you won't be assessing the quality of your reads by visually inspecting your FASTQ files.
Rather, you'll be using a software program to assess read quality and filter out poor quality reads.
We'll first use a program called [FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) to visualize the quality of our reads.
FastQC has a number of features which can give you a quick impression of any problems your data may have, so you can take these issues into consideration before moving forward with your analyses.
Rather than looking at quality scores for each individual read, FastQC looks at quality collectively across all reads within a sample.
The image below shows one FastQC-generated plot that indicates a very high quality sample:

The x-axis displays the base position in the read, and the y-axis shows quality scores. In this
example, the sample contains reads that are 40 bp long. This is much shorter than the reads we
are working with in our workflow. For each position, there is a box-and-whisker plot showing
the distribution of quality scores for all reads at that position. The horizontal red line
indicates the median quality score and the yellow box shows the 1st to
3rd quartile range. This means that 50% of reads have a quality score that falls within the
range of the yellow box at that position. The whiskers show the absolute range, which covers
the lowest (0th quartile) to highest (4th quartile) values.
For each position in this sample, the quality values do not drop much lower than 32. This
is a high quality score. The plot background is also color-coded to identify good (green),
acceptable (yellow), and bad (red) quality scores.
Now let's take a look at a quality plot on the other end of the spectrum.

Here, we see positions within the read in which the boxes span a much wider range.
Also, quality scores drop quite low into the "bad" range, particularly on the tail end of the reads.
The FastQC tool produces several other diagnostic plots to assess sample quality, in addition to the one plotted above.
## Running FastQC
We will now assess the quality of the reads that we downloaded. First, make sure you're still in the `raw_data` directory
```
cd ~/2020_rotation_project/raw_data
```
Then, use conda to install fastqc. Make sure you activate your rotation environment. (We're using the `mamba` command instead of `conda` because it's faster.)
```
conda activate dib_rotation
mamba install -y fastqc
```
FastQC can accept multiple file names as input, and on both zipped and unzipped files, so we can use the \*.fastq* wildcard to run FastQC on all of the FASTQ files in this directory.
```
fastqc *.fastq*
```
The FastQC program has created several new files within our directory.
For each input FASTQ file, FastQC has created a `.zip` file and a
`.html` file. The `.zip` file extension indicates that this is
actually a compressed set of multiple output files. We'll be working
with these output files soon. The `.html` file is a stable webpage
displaying the summary report for each of our samples.
## Transferring data from Farm to your computer
To transfer a file from a remote server to our own machines, we will use `scp`.
To learn more about `scp`, see the bottom of [this tutorial](https://datacarpentry.org/shell-genomics/05-writing-scripts/).
We've currently been running commands on a terminal open on `farm`.
In order to transfer to our own computer, we want a terminal open on our local computer.
There are two ways to do this: open a second terminal tab/window, or close tmux and exit farm. Let's make a new window.
Once you've opened a second terminal window, you need to make sure you know what file system each window is pointing to.
If you're on farm (the original window), the prompt should say `(dib_rotation) USERNAME@<SRUN NODE>`. Your new window should
just have `USERNAME@<YOUR COMPUTER NAME>`. There will be no `(dib_rotation)` unless you've created and activated that conda
environment on your local computer as well.
Now, from the terminal open to your local computer, copy over the fastqc HTML files using `scp`, a "secure copy" program.
```
scp -P 2022 -i /path/to/key/file username@farm.cse.ucdavis.edu:~/2020_rotation_project/raw_data/*.html ./
```
> - -P 2022 tells scp to use the 2022 port on farm, which is our data transfer port
> - replace /path/to/key/file with the path to your ssh keygen file, created during setup
> - both `cp` and `scp` commands use the format: `cp <FILE_TO_TRANSFER> <DESTINATION>`.
This command will copy the files down to the directory that you're sitting in on your home computer. If you're on a windows machine,
you may need to copy it from your linux partition over to your windows partition (perhaps most easily accessible in a downloads folder
or your desktop).
Once the file is on your local computer, double click on it and it will open in your browser.
You can now explore the FastQC output.
## Decoding the FastQC Output
We've now looked at quite a few "Per base sequence quality" FastQC graphs, but there are nine other graphs that we haven't talked about!
Below we have provided a brief overview of interpretations for each of these plots.
For more information, please see the FastQC documentation [here](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/).
+ [**Per tile sequence quality**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/12%20Per%20Tile%20Sequence%20Quality.html): the machines that perform sequencing are divided into tiles. This plot displays patterns in base quality along these tiles. Consistently low scores are often found around the edges, but hot spots can also occur in the middle if an air bubble was introduced at some point during the run.
+ [**Per sequence quality scores**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/3%20Per%20Sequence%20Quality%20Scores.html): a density plot of quality for all reads at all positions. This plot shows what quality scores are most common.
+ [**Per base sequence content**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/4%20Per%20Base%20Sequence%20Content.html): plots the proportion of each base position over all of the reads. Typically, we expect to see each base roughly 25% of the time at each position, but this often fails at the beginning or end of the read due to quality or adapter content.
+ [**Per sequence GC content**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/5%20Per%20Sequence%20GC%20Content.html): a density plot of average GC content in each of the reads.
+ [**Per base N content**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/6%20Per%20Base%20N%20Content.html): the percent of times that 'N' occurs at a position in all reads. If there is an increase at a particular position, this might indicate that something went wrong during sequencing.
+ [**Sequence Length Distribution**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/7%20Sequence%20Length%20Distribution.html): the distribution of sequence lengths of all reads in the file. If the data is raw, there is often one sharp peak; however, if the reads have been trimmed, there may be a distribution of shorter lengths.
+ [**Sequence Duplication Levels**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/8%20Duplicate%20Sequences.html): A distribution of duplicated sequences. In sequencing, we expect most reads to only occur once. If some sequences are occurring more than once, it might indicate enrichment bias (e.g. from PCR). If the samples are high coverage (or RNA-seq or amplicon), this might not be true.
+ [**Overrepresented sequences**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/9%20Overrepresented%20Sequences.html): A list of sequences that occur more frequently than would be expected by chance.
+ [**Adapter Content**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/10%20Adapter%20Content.html): a graph indicating where adapter sequences occur in the reads.
+ [**K-mer Content**](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/11%20Kmer%20Content.html): a graph showing any sequences which may show a positional bias within the reads.
## Extra Info
if you ever need to download >10 accessions from the SRA, the `sra-toolkit` is a great tool to do this with!
However, we find `sra-toolkit` cumbersome when only a couple accessions need to be downloaded.
|
Markdown
|
UTF-8
| 7,343 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
---
layout: post
title: "Application of Infrastructure Development Problem (Long Chu Style) to Indonesia’s Toll Road"
categories: [blog]
tags: [Economics, English]
---
This article originally appeared [here on my medium blog](https://medium.com/@imedkrisna/application-of-infrastructure-development-problem-long-chu-style-to-indonesias-toll-road-7ccdc68ee817). I remake it in this blog because here I can use $$\LaTeX$$!! yay!! I don't change anything, so if you've read this on my medium domain then skip this.
I’ve been ranting a lot about stuff in this blog. This time, let me share what I have learned from my study at ANU. Particularly the [master’s microeconomics course, IDEC8064][1]. Long Chu is the teacher’s name. There is this one problem called “Infrastructure Development Problem”, which is extremely relevant to lots of developing countries’ problem of public provision of services. This time I will present Indonesia’s current case: Jakarta Outer Ring Road (JORR) increased tariff.
Most Jakartans must know this, that JORR have changed their tariff to a one-off payment of 15,000 IDR (Indonesian currency) every entry (for car. Other heavier vehicles face scarier tariff). This leads to increased cost for people who travel a short distance, but reduced cost for long distance travelers. This leads to a question “how much we should charge for toll service”, especially after taking Long Chu’s Final Exam a couple of days ago. If you also took long’s final exam, think of this as the answer to Question 3… xD
So I try to do some google, and found this good paper by [Hermawan et al (2013)][2] talks about this very issue. They use Stated Preference (SP) as the means to find elasticity on Demand. Basically, do a survey and asks the respondents “Would you still use the toll road if the price of using it increases by x%?”. Their results of SP is not very straight forward, so i will just pick one of them which says that the elasticity of demand is -0.72. Seems reasonable enough.
Next, we need to have capacity. Unlike water provision in Long’s example, capacity of toll roads can be a big debate. Hermawan et al (2013) uses Highway Capacity Manual (2010) to tackle this situation. According to the manual (table 1 in Hermawan et al), we can calculate a (debatable) capacity of a toll road with something called Volume to Capacity Ratio
$$ VCR = \frac{Q}{C} \label{1}$$
This ratio says if $$\frac{Q}{C}=1$$, then it meets its capacity. at $$\frac{Q}{C}=1$$, we will see 2000 vehicles per hour, with the speed of less than equal to 30 mph (around 48 kph). This measure makes me laugh. JORR is definitely well over its capacity.
By the way, the standard of having a good provision of toll road is at $$ VCR=0.6–0.8 $$, which have the speed at around 55 mph (88 kph) with 1500-ish vehicles per hour. I laughed even harder.
Anyway, i tried to find the exact capacity of JORR only to no avail. So I had to find some method to calculate the capacity. I got this other paper by [Li & Laurence (2015)][3] which discuss about estimating capacity. They managed summarize a paper which says that breakdown capacity occurs when the flow of vehicle is 2289 vehicles per hour (they stated that this number maybe very stochastic so we need to be careful, but i have no other lead so). Judging from their definition of “breakdown”, JORR may be worse than breakdown, but let’s use this number as maximum capacity.
We then can calculate the demand for toll road:
$$ 2289=Ad*P^{-0.72} $$
Let’s calculate just the average tariff for car at around 10,000 IDR. If we plug that to P, we will get Ad= 1,736,384.07 (too many numbers, right? This is why i hate working with IDR). So we can get the full demand function of JORR:
$$ Q(P)=1,736,384.07*P^{-0.72} $$
Next we find the Dead Weight Loss (DWL) of having such tariff. To do this, we need to find the willingness to pay to get the normal, non-breakdown level of capacity. Let’s use $$ VCR=0.77 $$, then we will get $$ Q=1550 $$. Now estimate $$P$$ with $$ Q=1550 $$,
$$ 1550=1,736,384.07*P^{-0.72} $$
$$ P= 17,185.35329 $$ -> let’s say the proper Toll price to get $$ VCR=0.77 $$, one ideal ratio, is 17,200 IDR. This is the willingness to pay value for JORR user to get an ideal flow of Toll Road.
We can calculate the DWL using integration, but since i’m lazy, we can just try to approximate it using triangle area.
$$ DWL=(0.5)*(2289–1550)*(17,200–10,000) $$
$$ DWL = 2{,}660{,}400 \ \frac{IDR}{Hour} $$
That’s… a lot of IDR
We need to be (very very) careful with my parameterisation, though. The parameters are very roughly determined. For a proper analysis to be conducted, one needs to calibrate a better current price. It’s also better if we have exact flow of JORR, instead of by guessing it. Moreover, we need to come up with what level of flow of vehicle which leads to “optimum service” for a toll road. My use of 1550 is hardly justifiable.
We can, however, try to guess some stuff.
Firstly, we can guess the size of DWL. I use the price 10,000. Of course this is wrong, but we can say that the DWL increases as the current price decreases. If the current price is less than I declare, then the DWL can actually be bigger. Also, goes down DWL as we put higher “flow of vehicle” leads to “optimum service”. This doesn’t have to follow my choice of 1550 vehicle/hr.
Secondly, We can also say that to close the DWL up, we need to crank up the price. According to this exercise, we need to crank up the price to up to 17,200 to close DWL to 0. Currently, Indonesian body of toll road service increase it to 15,000, which is not enough according to my calculation. However, this calculation is based on SP, which may not show the true value of the elasticity of demand. This increased price can be a very good opportunity to study the true value of elasticity of demand of the JORR.
Lastly, increase capacity would be nice policy prescription. With this big DWL, I guess any kind of capacity increase would be worth it. But man, we’re talkin Jakarta here. Where else should we build the damn toll road? But well, we are running of option here. According to infrastructure development problem, it’s either pricing up the cost of using the road, or increase the capacity. Both seems problematic. LoL.
References:
- IDEC8064 Masters Microeconomics by Long Chu.
- Hermawan, R, Frazila, RB, Awang, A & Jihanny J 2013, ‘Hubungan antara variasi tarif tol dengan pendapatan dan tingkat pelayanan’, Jurnal Teknik Sipil, vol. 20, №1.
- Li, Z & Laurence, R 2015, ‘An analysis of four methodologies for estimating highway capacity from ITS data’, Civil Engineering Faculty Publication, Vol. 70.
[1]: https://programsandcourses.anu.edu.au/course/IDEC8064 "IDEC8064 Masters Microeconomics by Long Chu."
[2]: http://journals.itb.ac.id/index.php/jts/article/view/2841 "Hermawan, R, Frazila, RB, Awang, A & Jihanny J 2013, ‘Hubungan antara variasi tarif tol dengan pendapatan dan tingkat pelayanan’, Jurnal Teknik Sipil, vol. 20, №1."
[3]: https://www.researchgate.net/publication/277905943_An_analysis_of_four_methodologies_for_estimating_highway_capacity_from_ITS_data "Li, Z & Laurence, R 2015, ‘An analysis of four methodologies for estimating highway capacity from ITS data’, Civil Engineering Faculty Publication, Vol. 70."
|
Java
|
UTF-8
| 329 | 1.914063 | 2 |
[] |
no_license
|
package cn.day20.test1;
public class TestClient {
public static void main(String[] args) {
//发送端
DemoClient6 client6 = new DemoClient6(8888);
new Thread(client6).start();
//接收端
// DemoSocket6 socket6 = new DemoSocket6(8889);
// new Thread(socket6).start();;
}
}
|
PHP
|
UTF-8
| 1,980 | 2.671875 | 3 |
[
"MIT"
] |
permissive
|
<?php
/**
* This file is part of the TangoMan package.
*
* Copyright (c) 2020 "Matthias Morin" <mat@tangoman.io>
*
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
declare(strict_types=1);
namespace App\Entity\Traits;
use App\Entity\Clip;
use App\Entity\Episode;
use Doctrine\Common\Collections\Collection;
use Doctrine\ORM\Mapping as ORM;
use Symfony\Component\Serializer\Annotation\Groups;
/**
 * Trait EpisodeHasClips
 *
 * Implements the INVERSE side of a OneToMany bidirectional relationship
 * between Episode and Clip.
 *
 * Contract (unchanged from the original):
 * 1. `Clip` must declare an `$episode` property with `ManyToOne` and `inversedBy="items"` annotation.
 * 2. `Clip` must expose a public linkEpisode(Episode $episode) method.
 * 3. Form types must set `'by_reference' => false,` so `add`/`remove` methods are used.
 * 4. The entity constructor must initialize the collection:
 *    $this->items = new ArrayCollection();
 * 5. Requires the use statement:
 *    use Doctrine\Common\Collections\ArrayCollection;
 *
 * @author "Matthias Morin" <mat@tangoman.io>
 */
trait EpisodeHasClips
{
    /**
     * Clips that belong to this episode.
     *
     * @var Collection<Clip>|null
     *
     * @ORM\OneToMany(targetEntity="App\Entity\Clip", mappedBy="partOfEpisode", cascade={"persist"})
     * @ORM\OrderBy({"id"="ASC"})
     * @Groups({"export", "write:episode", "read:episode"})
     */
    private $clips;

    /**
     * Attaches a clip and wires both sides of the association.
     */
    public function addClip(Clip $clip): void
    {
        /** @var Episode $this */
        $clip->linkEpisode($this);
        $this->linkClip($clip);
    }

    /**
     * Detaches a clip and unwires both sides of the association.
     */
    public function removeClip(Clip $clip): void
    {
        $this->unlinkClip($clip);
        $clip->unlinkEpisode();
    }

    public function getClips(): Collection
    {
        return $this->clips;
    }

    /**
     * Low-level link: only mutates this (inverse) side.
     */
    public function linkClip(Clip $clip): void
    {
        $this->clips->add($clip);
    }

    /**
     * Low-level unlink: only mutates this (inverse) side.
     */
    public function unlinkClip(Clip $clip): void
    {
        $this->clips->removeElement($clip);
    }
}
|
C#
|
UTF-8
| 9,817 | 2.71875 | 3 |
[
"Apache-2.0"
] |
permissive
|
/**
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Xml;
namespace Google.Maps.Demos.Zoinkies
{
/// <summary>
/// This class provides access to world data through helper functions.
/// </summary>
public class WorldService
{
// Singleton pattern implementation
private static WorldService _instance;
public static WorldService GetInstance()
{
if (_instance == null)
{
_instance = new WorldService();
}
return _instance;
}
private WorldService()
{
IsInitialized = false;
}
/// <summary>
/// Indicates if player data has been set (at least once).
/// </summary>
public bool IsInitialized { get; set; }
/// <summary>
/// Indicates if world data has been modified in the client and needs to sync with the server.
/// </summary>
public bool DataHasChanged { get; set; }
/// <summary>
/// Keeps a reference to the world data.
/// World data can only be changed through the init function.
/// </summary>
private WorldData _data;
/// <summary>
/// Initializes World Data.
/// </summary>
/// <param name="data">A new trusted world data</param>
public void Init(WorldData data)
{
if (data == null)
{
this._data = new WorldData();
}
else
{
this._data = data;
}
IsInitialized = true;
DataHasChanged = false;
}
/// <summary>
/// Returns spawn locations ids.
/// </summary>
/// <returns>A list of location ids</returns>
/// <exception cref="Exception">Exception if world data is not initialized</exception>
public IEnumerable<string> GetSpawnLocationsIds()
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
return _data.locations.Keys;
}
/// <summary>
/// Returns the spawn location identified by the given id.
/// </summary>
/// <param name="locationId">The location id</param>
/// <returns>A Spawn location</returns>
/// <exception cref="Exception">Exception if id is invalid</exception>
public SpawnLocation GetSpawnLocation(string locationId)
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
if (string.IsNullOrEmpty(locationId))
{
throw new System.Exception(
"Invalid id found while trying to get a spawn location!");
}
if (!_data.locations.ContainsKey(locationId))
{
throw new System.Exception("Missing Spawn location with Id " + locationId +
" from world data!");
}
return _data.locations[locationId];
}
/// <summary>
/// Indicates if the location identified by the given id is respawning.
/// This function will update the location status (in the client) based on local time
/// and respawn information.
/// Note that this information will be overriden after the next server sync.
/// </summary>
/// <param name="locationId">The location id</param>
/// <returns>A boolean to indicate if the location is respawning.</returns>
/// <exception cref="Exception">Exception if id is invalid</exception>
public bool IsRespawning(string locationId)
{
if (string.IsNullOrEmpty(locationId))
{
throw new System.Exception("Invalid Id!");
}
// Init the spawn location respawn time and active flag (to mimic what the server will do)
// Prevents constant syncs - the server is the final authority
SpawnLocation location = GetSpawnLocation(locationId);
if (location.respawnTime != null)
{
DateTime t = DateTime.Parse(location.respawnTime);
if (t <= DateTime.Now)
{
location.respawnTime = null;
location.active = true;
}
else
{
location.active = false;
}
}
else
{
location.active = true;
}
return !location.active;
}
/// <summary>
/// Starts respawn at the location identified by the given id, only if the location
/// definition indicates that this it is respawnable.
/// Note that this information will be overriden after the next server sync.
/// </summary>
/// <param name="locationId">The location id</param>
/// <exception cref="Exception">Exception if id is invalid</exception>
public void StartRespawn(string locationId)
{
if (string.IsNullOrEmpty(locationId))
{
throw new System.Exception("Invalid Location Id!");
}
// Init the spawn location respawn time and active flag (to mimic what the server will do)
// Prevents constant syncs - the server is the final authority
SpawnLocation location = GetInstance().GetSpawnLocation(locationId);
ReferenceItem ri = ReferenceService.GetInstance().GetItem(location.objectTypeId);
if (location != null && ri != null && ri.respawnDuration != null)
{
location.active = false;
TimeSpan ts = XmlConvert.ToTimeSpan(ri.respawnDuration);
DateTime dt = DateTime.Now.Add(ts);
location.respawnTime = dt.ToString("O");
DataHasChanged = true;
}
}
/// <summary>
/// Returns all locations currently respawning.
/// </summary>
/// <returns>A list of Spawn Locations</returns>
/// <exception cref="Exception">Exception if world data is not initialized</exception>
public IEnumerable<SpawnLocation> GetAllRespawningLocations()
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
return _data.locations.Values.Where(s => s.respawnTime != null);
}
/// <summary>
/// Returns all towers locations.
/// </summary>
/// <returns>A list of Spawn Locations</returns>
/// <exception cref="Exception">Exception if world data is not initialized</exception>
public IEnumerable<SpawnLocation> GetTowers()
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
return _data.locations.Values.Where(s => s.objectTypeId == GameConstants.TOWER);
}
/// <summary>
/// Returns all minion locations.
/// </summary>
/// <returns>A list of Spawn Locations</returns>
/// <exception cref="Exception">Exception if world data is not initialized</exception>
public IEnumerable<SpawnLocation> GetMinions()
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
return _data.locations.Values.Where(s => s.objectTypeId == GameConstants.MINION);
}
/// <summary>
/// Returns all chests locations.
/// </summary>
/// <returns>A list of Spawn Locations</returns>
/// <exception cref="Exception">Exception if world data is not initialized</exception>
public IEnumerable<SpawnLocation> GetChests()
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
return _data.locations.Values.Where(s => s.objectTypeId == GameConstants.CHEST);
}
/// <summary>
/// Returns all energy stations locations.
/// </summary>
/// <returns>A list of Spawn Locations</returns>
/// <exception cref="Exception">Exception if world data is not initialized</exception>
public IEnumerable<SpawnLocation> GetEnergyStations()
{
if (_data == null)
{
throw new System.Exception("World data not initialized!");
}
return _data.locations.Values.Where(
s => s.objectTypeId == GameConstants.ENERGY_STATION);
}
}
}
|
Java
|
UTF-8
| 9,984 | 1.84375 | 2 |
[] |
no_license
|
package ua.com.vg.scanervg.activities;
import android.content.DialogInterface;
import android.content.Intent;
import android.support.v7.app.AlertDialog;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.support.v7.widget.DividerItemDecoration;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.View;
import android.widget.Button;
import android.widget.ImageButton;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import com.google.zxing.integration.android.IntentIntegrator;
import com.google.zxing.integration.android.IntentResult;
import java.util.ArrayList;
import java.util.List;
import ua.com.vg.scanervg.R;
import ua.com.vg.scanervg.adapters.InventContentRVAdapter;
import ua.com.vg.scanervg.async.DocumentLoader;
import ua.com.vg.scanervg.async.EntityLoader;
import ua.com.vg.scanervg.async.WarehouseLoader;
import ua.com.vg.scanervg.documents.Document;
import ua.com.vg.scanervg.model.Agent;
import ua.com.vg.scanervg.model.Entity;
import ua.com.vg.scanervg.utils.DocumentsKind;
import ua.com.vg.scanervg.utils.ScanKind;
/**
 * Screen for creating or editing an inventory ("inventarization") document.
 * Rows are added by scanning barcodes; quantities are edited via
 * {@code DocContentEdit}; the document is persisted with {@code Document#save()}.
 */
public class InventarizationActivity extends AppCompatActivity implements InventContentRVAdapter.ItemClickListener{

    // Inventory document being edited by this screen.
    private Document document;
    // Document id passed via the launching intent (0 = brand new document).
    private int docID;
    ProgressBar inventProgressBar;
    TextView lbSubdivInventarization;
    // Request code used when launching DocContentEdit for a content row.
    private final int EDIT_CONTENT_CODE = 1234;
    // Adapter position of the row currently being edited (-1 = none).
    private int selectedPosition = -1;
    private InventContentRVAdapter inventContentRVAdapter;
    private RecyclerView inventarizationContents;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_inventarization);

        inventProgressBar = (ProgressBar) findViewById(R.id.inventProgressbar);
        lbSubdivInventarization = (TextView) findViewById(R.id.lbSubdivInventarization);
        inventarizationContents = (RecyclerView) findViewById(R.id.inventarizationContents);

        Intent intent = getIntent();
        docID = intent.getIntExtra("DOCID", 0);
        if (docID == 0) {
            // No id supplied: start a brand new inventory document.
            document = new Document(DocumentsKind.Inventorization);
        } else {
            document = getDocumentByID(docID);
            if (document == null) {
                // FIX: the original read document.getAgentTo() BEFORE its
                // null check, crashing when loading failed. Fall back to an
                // empty document so the rest of the activity works.
                document = new Document(DocumentsKind.Inventorization);
            } else {
                document.setDocumentsKind(DocumentsKind.Inventorization);
                Agent agentTo = document.getAgentTo();
                if (agentTo != null) {
                    lbSubdivInventarization.setText(agentTo.getName());
                }
            }
        }

        Button btnInventSelectSubdiv = (Button) findViewById(R.id.btnInventSelectSubdiv);
        btnInventSelectSubdiv.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                selectSubdiv();
            }
        });

        ImageButton btnScanInvent = (ImageButton) findViewById(R.id.btnScanInvent);
        btnScanInvent.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                scan();
            }
        });

        ImageButton btnSaveInventarization = (ImageButton) findViewById(R.id.btnSaveInventarization);
        btnSaveInventarization.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                inventProgressBar.setVisibility(ProgressBar.VISIBLE);
                try {
                    // save() returns 1 on success; close the screen then.
                    if (document.save() == 1) {
                        finish();
                    }
                } catch (Exception e) {
                    Toast.makeText(InventarizationActivity.this, e.getMessage(), Toast.LENGTH_LONG).show();
                }
            }
        });

        // List of document rows.
        inventContentRVAdapter = new InventContentRVAdapter(this, document.getContentList());
        inventContentRVAdapter.setClickListener(this);
        inventarizationContents.setLayoutManager(new LinearLayoutManager(this));
        inventarizationContents.addItemDecoration(new DividerItemDecoration(this, DividerItemDecoration.VERTICAL));
        inventarizationContents.setAdapter(inventContentRVAdapter);
    }

    /**
     * Launches the ZXing barcode scanner (portrait, 1D codes only).
     * The result is delivered to {@link #onActivityResult}.
     */
    private void scan() {
        IntentIntegrator integrator = new IntentIntegrator(InventarizationActivity.this);
        integrator.setDesiredBarcodeFormats(IntentIntegrator.ONE_D_CODE_TYPES);
        integrator.setPrompt("Наведите камеру на код");
        integrator.setCameraId(0);
        integrator.setOrientationLocked(true);
        integrator.setBeepEnabled(true);
        integrator.setBarcodeImageEnabled(false);
        integrator.setCaptureActivity(CaptureActivityPortrait.class);
        integrator.initiateScan();
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (requestCode == EDIT_CONTENT_CODE) {
            if (resultCode == RESULT_FIRST_USER) {
                // Row was deleted in the editor.
                inventContentRVAdapter.removeItem(selectedPosition);
                selectedPosition = -1;
                return;
            }
            if (data != null && selectedPosition > -1) {
                // Quantity edited: update the selected row in place.
                double qty = Double.valueOf(data.getStringExtra("QTY"));
                inventContentRVAdapter.getItem(selectedPosition).setQty(qty);
                inventContentRVAdapter.notifyDataSetChanged();
                return;
            }
        }

        // Otherwise try to interpret the result as a barcode scan.
        IntentResult result = IntentIntegrator.parseActivityResult(requestCode, resultCode, data);
        if (result != null) {
            if (result.getContents() != null) {
                EntityLoader entityLoader = new EntityLoader(inventProgressBar, InventarizationActivity.this, ScanKind.scanMakedEntity);
                List<Entity> entities = new ArrayList<>();
                try {
                    entityLoader.execute(result.getContents());
                    entities = entityLoader.get();
                } catch (Exception e) {
                    Toast.makeText(InventarizationActivity.this, e.getMessage(), Toast.LENGTH_LONG).show();
                }
                if (entities.size() > 0) {
                    if (entities.size() == 1) {
                        // Single match: add it directly (deduplicated).
                        // (The dead `new Entity(0,"","")` placeholder from the
                        // original has been removed.)
                        Entity entity = entities.get(0);
                        if (entity.getEntid() > 0) {
                            document.addDistinctRow(entity, 1);
                            inventContentRVAdapter.notifyDataSetChanged();
                        }
                    } else {
                        // Several matches: let the user pick one.
                        selectEntityFromDialog(entities);
                    }
                } else {
                    Toast.makeText(this, R.string.msgEntityNotFound, Toast.LENGTH_SHORT).show();
                }
            }
        } else {
            super.onActivityResult(requestCode, resultCode, data);
        }
    }

    /**
     * Shows a chooser dialog when a scanned code matched several entities;
     * the picked entity is appended to the document.
     */
    public void selectEntityFromDialog(final List<Entity> entities) {
        List<String> names = new ArrayList<>();
        for (Entity entity : entities) {
            names.add(entity.getEntname());
        }
        AlertDialog.Builder builder = new AlertDialog.Builder(InventarizationActivity.this);
        builder.setTitle(R.string.captionDialogSelectEntity);
        builder.setItems(names.toArray(new String[names.size()]), new DialogInterface.OnClickListener() {
            @Override
            public void onClick(DialogInterface dialog, int which) {
                // NOTE(review): this path uses addRow() while the single-match
                // scan path uses addDistinctRow() — confirm whether duplicate
                // rows are intended here.
                document.addRow(entities.get(which), 1);
                inventContentRVAdapter.notifyDataSetChanged();
                dialog.dismiss();
            }
        });
        AlertDialog dialog = builder.create();
        dialog.show();
    }

    /**
     * Loads the warehouse list and lets the user pick the subdivision
     * (receiving agent) for this document.
     */
    public void selectSubdiv() {
        List<Agent> tmpWarehouses = null;
        try {
            WarehouseLoader warehouseLoader = new WarehouseLoader(inventProgressBar, InventarizationActivity.this);
            warehouseLoader.execute();
            tmpWarehouses = warehouseLoader.get();
        } catch (Exception e) {
            Toast.makeText(InventarizationActivity.this, e.getMessage(), Toast.LENGTH_LONG).show();
        }
        if (tmpWarehouses != null) {
            final List<Agent> wareHoses = tmpWarehouses;
            List<String> names = new ArrayList<>();
            for (Agent agent : wareHoses) {
                names.add(agent.getName());
            }
            AlertDialog.Builder builder = new AlertDialog.Builder(InventarizationActivity.this);
            builder.setTitle(R.string.captionDialogSelectSubDiv);
            builder.setItems(names.toArray(new String[names.size()]), new DialogInterface.OnClickListener() {
                @Override
                public void onClick(DialogInterface dialog, int which) {
                    Agent selectedAgent = wareHoses.get(which);
                    document.setAgentTo(selectedAgent);
                    lbSubdivInventarization.setText(selectedAgent.getName());
                    dialog.dismiss();
                }
            });
            AlertDialog dialog = builder.create();
            dialog.show();
        }
    }

    /**
     * Loads a document by id via {@code DocumentLoader}.
     * May return null when the loader fails; callers must handle that.
     */
    public Document getDocumentByID(int docID) {
        Document result = new Document();
        try {
            DocumentLoader documentLoader = new DocumentLoader(inventProgressBar, InventarizationActivity.this);
            documentLoader.execute(docID);
            result = documentLoader.get();
        } catch (Exception e) {
            Toast.makeText(InventarizationActivity.this, e.getMessage(), Toast.LENGTH_LONG).show();
        }
        return result;
    }

    /**
     * Row tapped: open the quantity editor for that row.
     */
    @Override
    public void onItemClick(View view, int position) {
        selectedPosition = position;
        Intent intent = new Intent(InventarizationActivity.this, DocContentEdit.class);
        intent.putExtra("QTY", inventContentRVAdapter.getItem(position).getQty());
        intent.putExtra("ENTNAME", inventContentRVAdapter.getItem(position).getEntName());
        startActivityForResult(intent, EDIT_CONTENT_CODE);
    }
}
|
C#
|
UTF-8
| 6,030 | 2.5625 | 3 |
[] |
no_license
|
using System;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using CommunicationNetwork;
using System.Net.Sockets;
using System.Threading;
using Components;
using System.Text;
using System.Collections.Generic;
using System.Linq;
using CommunicationXML;
using System.Threading.Tasks;
namespace UnitTests.CommunicationNetwork
{
[TestClass]
public class ListenerTest
{
delegate bool memcmp(byte[] d1, byte[] d2);
[TestMethod]
public void TestStartStop() {
NetworkListener.ConnectionHandler ch = (d, cc) => { cc.Send(d); };
byte[] data = new Register(NodeType.ComputationalNode, 1, new List<string>() { "test1", "test2" }).GetXmlData();
byte[] bytes = null;
NetworkClient nc = null;
NetworkListener nl = null;
NetworkClient[] _nc = null;
Thread[] _t = null;
Thread t = null;
bool?[] _state = null;
const int port = 22222;
memcmp mcmp = (d1, d2) => { if (d1.Length != d2.Length) return false;
for (int i = 0; i < d1.Length; ++i)
if (d1[i] != d2[i])
return false;
return true;
};
// *** Valid port: Listener is started (one client) ***
nl = new NetworkListener(port, ch);
Assert.IsNotNull(nl);
t = new Thread(nl.Start);
Assert.IsNotNull(t);
t.Start();
nc = new NetworkClient("localhost", port);
bytes = nc.Work(data);
Assert.IsNotNull(bytes);
Assert.IsTrue(mcmp(bytes, data));
// cleaning
nl.Stop();
Assert.IsFalse(nl.IsRunning());
t.Abort();
t.Join();
// *** Invalid port: Listener is trying to start ***
nl = new NetworkListener(-1, ch);
Assert.IsNotNull(nl);
nl.Start();
Assert.IsFalse(nl.IsRunning());
// *** Valid port: Listener is started (few clients) ***
nl = new NetworkListener(port, ch);
Assert.IsNotNull(nl);
t = new Thread(nl.Start);
Assert.IsNotNull(t);
t.Start();
// few clients connection simulation
const int clients = 5;
_nc = new NetworkClient[clients];
_t = new Thread[clients];
_state = new bool?[clients];
Assert.IsNotNull(_nc);
Assert.IsNotNull(_t);
Assert.IsNotNull(_state);
/*
for (int j = 0; j < clients; ++j) {
_nc[j] = new NetworkClient("localhost", port);
_t[j] = new Thread(() => {
byte[] _b = _nc[j].Work(data);
if (_b == null) {
_state[j] = false;
return;
}
//Assert.IsTrue(mcmp(_b, data));
_state[j] = mcmp(_b, data);
});
_t[j].Start();
}
*/
_nc[0] = new NetworkClient("localhost", port);
_t[0] = new Thread(() =>
{
byte[] _b = _nc[0].Work(data);
if (_b == null)
{
_state[0] = false;
return;
}
//Assert.IsTrue(mcmp(_b, data));
_state[0] = mcmp(_b, data);
});
_t[0].Start();
_nc[1] = new NetworkClient("localhost", port);
_t[1] = new Thread(() =>
{
byte[] _b = _nc[1].Work(data);
if (_b == null)
{
_state[1] = false;
return;
}
//Assert.IsTrue(mcmp(_b, data));
_state[1] = mcmp(_b, data);
});
_t[1].Start();
_nc[2] = new NetworkClient("localhost", port);
_t[2] = new Thread(() =>
{
byte[] _b = _nc[2].Work(data);
if (_b == null)
{
_state[2] = false;
return;
}
//Assert.IsTrue(mcmp(_b, data));
_state[2] = mcmp(_b, data);
});
_t[2].Start();
_nc[3] = new NetworkClient("localhost", port);
_t[3] = new Thread(() =>
{
byte[] _b = _nc[3].Work(data);
if (_b == null)
{
_state[3] = false;
return;
}
//Assert.IsTrue(mcmp(_b, data));
_state[3] = mcmp(_b, data);
});
_t[3].Start();
_nc[4] = new NetworkClient("localhost", port);
_t[4] = new Thread(() =>
{
byte[] _b = _nc[4].Work(data);
if (_b == null)
{
_state[4] = false;
return;
}
//Assert.IsTrue(mcmp(_b, data));
_state[4] = mcmp(_b, data);
});
_t[4].Start();
// czekanie na wątki klientów
for (int k = 0; k < clients; ++k) {
_t[k].Join();
Assert.IsTrue((bool)_state[k]);
}
// cleaning
nl.Stop();
t.Abort();
t.Join();
}
}
}
|
C#
|
UTF-8
| 2,015 | 2.671875 | 3 |
[] |
no_license
|
using SFML.Graphics;
using SFML.System;
using SFML.Window;
namespace Tetris
{
public class GameData
{
public RenderWindow Window { get; }
public StateManager State { get; }
public AssetManager Asset { get; }
public InputManager Input { get; }
public GameData()
{
// Create Main Window
Window = new RenderWindow(new VideoMode(Constants.WindowWidth, Constants.WindowHeight), Constants.WindowTitle, Styles.Close | Styles.Titlebar);
Window.SetFramerateLimit(Constants.Framerate);
Window.SetView(new View(new Vector2f(Window.Size.X / 2, Window.Size.Y / 2), new Vector2f(Window.Size.X, Window.Size.Y)));
// Create State Manager
State = new StateManager();
// Create Asset Manager
Asset = new AssetManager();
// Create Input Manager
Input = new InputManager();
}
}
public class TetrisGame
{
private GameData _data;
private Clock _clock;
public TetrisGame()
{
_data = new GameData();
// Initial State
_data.State.AddState(new MenuState(), false);
// Setup Event Handlers
_data.Window.Closed += (sender, e) => _data.Input.OnClosed(sender, e);
_data.Window.KeyPressed += (sender, e) => _data.Input.HandleKeyPressed(sender, e, _data);
}
public void Run()
{
_clock = new Clock();
while (_data.Window.IsOpen)
{
// Process State Changes
_data.State.ProcessStateChanges(_data);
// Handle Events
_data.Window.DispatchEvents();
// Update
_data.State.GetActiveState().Update(_data);
// Draw
_data.State.GetActiveState().Draw(_data);
}
}
}
}
|
C#
|
UTF-8
| 4,020 | 2.765625 | 3 |
[] |
no_license
|
using System.Collections.Generic;
using System.Data.Entity;
using System.Data.Entity.Infrastructure;
using System.Linq;
using System.Linq.Expressions;
using System.Threading;
using System.Threading.Tasks;
using NSubstitute;
namespace Homework.Business.Tests.TestingUtils
{
    /// <summary>
    /// Helpers for mocking Entity Framework 6 <see cref="DbSet{T}"/> with
    /// NSubstitute so that both sync and async LINQ queries work in tests.
    /// </summary>
    internal static class NSubstituteUtils
    {
        /// <summary>
        /// Creates a substitute <see cref="DbSet{T}"/> optionally backed by
        /// the given in-memory <paramref name="data"/>.
        /// </summary>
        /// <param name="data">Rows to expose through the set; when null, only the bare substitute is returned.</param>
        internal static DbSet<T> CreateMockDbSet<T>(IEnumerable<T> data = null)
            where T : class
        {
            // The substitute must also implement IQueryable<T> and
            // IDbAsyncEnumerable<T> for EF's query pipeline to accept it.
            var mockSet = Substitute.For<DbSet<T>, IQueryable<T>, IDbAsyncEnumerable<T>>();

            if (data != null)
            {
                var queryable = data.AsQueryable();

                // setup all IQueryable and IDbAsyncEnumerable methods using what you have from "data"
                // the setup below is a bit different from the test above
                ((IDbAsyncEnumerable<T>)mockSet).GetAsyncEnumerator()
                    .Returns(new TestDbAsyncEnumerator<T>(queryable.GetEnumerator()));
                // Route sync LINQ through an async-capable provider wrapper.
                ((IQueryable<T>)mockSet).Provider.Returns(new TestDbAsyncQueryProvider<T>(queryable.Provider));
                ((IQueryable<T>)mockSet).Expression.Returns(queryable.Expression);
                ((IQueryable<T>)mockSet).ElementType.Returns(queryable.ElementType);
                // NOTE(review): this returns the SAME enumerator instance on
                // every call, so the set can only be enumerated once per
                // test — confirm this is acceptable for the consuming tests.
                ((IQueryable<T>)mockSet).GetEnumerator().Returns(queryable.GetEnumerator());
            }

            return mockSet;
        }
    }
internal class TestDbAsyncQueryProvider<TEntity> : IDbAsyncQueryProvider
{
private readonly IQueryProvider _inner;
internal TestDbAsyncQueryProvider(IQueryProvider inner)
{
_inner = inner;
}
public IQueryable CreateQuery(Expression expression)
{
return new TestDbAsyncEnumerable<TEntity>(expression);
}
public IQueryable<TElement> CreateQuery<TElement>(Expression expression)
{
return new TestDbAsyncEnumerable<TElement>(expression);
}
public object Execute(Expression expression)
{
return _inner.Execute(expression);
}
public TResult Execute<TResult>(Expression expression)
{
return _inner.Execute<TResult>(expression);
}
public Task<object> ExecuteAsync(Expression expression, CancellationToken cancellationToken)
{
return Task.FromResult(Execute(expression));
}
public Task<TResult> ExecuteAsync<TResult>(Expression expression, CancellationToken cancellationToken)
{
return Task.FromResult(Execute<TResult>(expression));
}
}
internal class TestDbAsyncEnumerable<T> : EnumerableQuery<T>, IDbAsyncEnumerable<T>, IQueryable<T>
{
public TestDbAsyncEnumerable(IEnumerable<T> enumerable)
: base(enumerable)
{ }
public TestDbAsyncEnumerable(Expression expression)
: base(expression)
{ }
public IDbAsyncEnumerator<T> GetAsyncEnumerator()
{
return new TestDbAsyncEnumerator<T>(this.AsEnumerable().GetEnumerator());
}
IDbAsyncEnumerator IDbAsyncEnumerable.GetAsyncEnumerator()
{
return GetAsyncEnumerator();
}
IQueryProvider IQueryable.Provider
{
get { return new TestDbAsyncQueryProvider<T>(this); }
}
}
internal class TestDbAsyncEnumerator<T> : IDbAsyncEnumerator<T>
{
private readonly IEnumerator<T> _inner;
public TestDbAsyncEnumerator(IEnumerator<T> inner)
{
_inner = inner;
}
public void Dispose()
{
_inner.Dispose();
}
public Task<bool> MoveNextAsync(CancellationToken cancellationToken)
{
return Task.FromResult(_inner.MoveNext());
}
public T Current
{
get { return _inner.Current; }
}
object IDbAsyncEnumerator.Current
{
get { return Current; }
}
}
}
|
PHP
|
UTF-8
| 2,653 | 2.765625 | 3 |
[] |
no_license
|
<?php
// required HTTP headers (currently disabled)
/*header("Access-Control-Allow-Origin: *");
header("Access-Control-Allow-Headers: access");
header("Access-Control-Allow-Methods: GET");
header("Access-Control-Allow-Credentials: true");
header("Content-Type: application/json");*/

// include the database-connection file and the file with the Users object
include_once './config/database.php';
include_once './objects/users.php';

// get a database connection
$database = new Database();
$db = $database->getConnection();

// prepare the object
$user = new Users($db);

// set the ID of the record to read (abort with an empty response if no id was passed)
$user->id = isset($_GET['id']) ? $_GET['id'] : die();

// read the user record
$stmt = $user->readOne();
$num = $stmt->rowCount();

if ($num>0) {
    // collected user rows
    $user_arr=array();
    // NOTE(review): $user_arr_2 is never used below (leftover from the
    // commented-out review-aggregation code) — confirm before removing.
    $user_arr_2=array();

    // fetch each returned row
    while ($row = $stmt->fetch(PDO::FETCH_ASSOC)){
        // turn the row's columns into local variables ($first_name, $last_name, $email)
        extract($row);
        $user_item=array(
            "user_first_name" => $first_name,
            "user_last_name" => $last_name,
            "user_email" => $email,
        );
        array_push($user_arr, $user_item);
    }

    /* $c=0;
    $i=$product_arr[$c]['id'];
    foreach ($product_arr as $key => $v) {
    if ($i==$v['id']) {
    $v['reviews'][$c]['autor'] = $v['autor'];
    $v['reviews'][$c]['review'] = $v['review'];
    $b=$v['reviews'][$c]['rating'] = $v['rating'];
    $a=$v;
    unset($a['autor'], $a['review']);
    $i++;
    $c++;
    }
    else {
    $a['reviews'][$c]['autor'] = $v['autor'];
    $a['reviews'][$c]['review'] = $v['review'];
    $b+=$a['reviews'][$c]['rating'] = $v['rating'];
    $c++;
    }
    }
    array_push($product_arr_2, $a);*/

    // response code - 200 OK
    http_response_code(200);
    // NOTE(review): the JSON output lines are commented out, so a found user
    // currently yields an empty 200 response body — confirm this is intended.
    //$product_arr_2[0]['rating']=$b/$c;
    //echo json_encode($product_arr_2);
}
else {
    // response code - 404 Not Found
    http_response_code(404);
    // tell the caller the record does not exist
    echo json_encode(array("message" => " not exist."), JSON_UNESCAPED_UNICODE);
}
|
PHP
|
UTF-8
| 858 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
<?php
/**
 * Author: Adrian Szuszkiewicz <me@imper.info>
 * Github: https://github.com/imper86
 * Date: 02.10.2019
 * Time: 12:38
 */

namespace Imper86\AllegroApiBundle\Model;

use GuzzleHttp\Psr7\Response as BaseResponse;
use Psr\Http\Message\ResponseInterface as BaseResponseInterface;

/**
 * PSR-7 response wrapper that adds raw-body and JSON-to-array accessors.
 */
class Response extends BaseResponse implements ResponseInterface
{
    /**
     * Copies the state of an arbitrary PSR-7 response into this wrapper.
     */
    public function __construct(BaseResponseInterface $response)
    {
        $status = $response->getStatusCode();
        $headers = $response->getHeaders();
        $body = $response->getBody();
        $protocol = $response->getProtocolVersion();
        $reason = $response->getReasonPhrase();

        parent::__construct($status, $headers, $body, $protocol, $reason);
    }

    /**
     * Returns the response body as a plain string.
     */
    public function getRawBody(): ?string
    {
        $body = $this->getBody();

        return (string)$body;
    }

    /**
     * Decodes the body as JSON into an associative array (null on failure).
     */
    public function toArray(): ?array
    {
        $raw = $this->getRawBody();

        return json_decode($raw, true);
    }
}
|
C#
|
UTF-8
| 1,989 | 2.625 | 3 |
[] |
no_license
|
using System;
using System.Threading.Tasks;

namespace ColorsWin.Process.NamedPipe
{
    /// <summary>
    /// Inter-process message transport built on named pipes. Depending on the
    /// <c>read</c> constructor flag it acts either as a listening server
    /// (raising <see cref="AcceptMessage"/>) or as a sending client.
    /// </summary>
    internal class NamedPipeMessage : IProcessMessage
    {
        /// <summary>
        /// Whether a reply must be received before the next message may be sent.
        /// </summary>
        public static bool Wait { get; set; } = false;

        /// <summary>
        /// When <see cref="Wait"/> is true, this marker is sent back as the reply.
        /// </summary>
        public static string ReplyMessageFlat = "&&Reply&&";

        /// <summary>
        /// Logical process/pipe name this instance communicates on.
        /// </summary>
        public string ProcessKey { get; set; } = "eventWaitName";

        // Tag appended to the pipe name to avoid clashes with unrelated pipes.
        private const string ProcessKeyTag = "_NamedPipe_ColorsWin";

        private NamedPipeClient client;        // only initialised in write (client) mode
        private NamedPipeListenServer server;  // only initialised in read (server) mode

        /// <summary>Raised on the server side whenever a message arrives.</summary>
        public event Action<string> AcceptMessage;

        public NamedPipeMessage(string processName, bool read)
        {
            this.ProcessKey = processName;
            Init(read);
        }

        /// <summary>Forwards an incoming message to <see cref="AcceptMessage"/> subscribers.</summary>
        public void OnAcceptMessage(string message)
        {
            if (AcceptMessage != null)
            {
                AcceptMessage(message);
            }
        }

        /// <summary>
        /// Not supported by the named-pipe transport (messages are pushed via
        /// <see cref="AcceptMessage"/>); always returns null.
        /// </summary>
        public string ReadMessage()
        {
            return null;
        }

        /// <summary>
        /// Not supported by the named-pipe transport; always returns null.
        /// </summary>
        public string WaitOneForMessage()
        {
            return null;
        }

        /// <summary>
        /// Sends a message through the pipe client.
        /// NOTE(review): throws NullReferenceException when this instance was
        /// created in read mode (client stays null) — confirm callers only
        /// invoke this on write-mode instances.
        /// </summary>
        public bool SendMessage(string message)
        {
            return client.SendMessage(message);
        }

        // Creates the listening server (read mode) or the sending client (write mode).
        internal void Init(bool read)
        {
            if (read)
            {
                server = new NamedPipeListenServer(ProcessKeyTag + ProcessKey, OnAcceptMessage);
                // Listen on a background task so construction does not block.
                Task.Factory.StartNew(() =>
                {
                    server.Run();
                });
            }
            else
            {
                client = new NamedPipeClient(".", ProcessKeyTag + ProcessKey);
            }
        }
    }
}
|
PHP
|
UTF-8
| 1,408 | 3.171875 | 3 |
[] |
no_license
|
<?php

namespace xes;

/**
 * Parses "metamarkdown" documents: a "key: value" metadata header and a
 * Markdown body, separated by a line containing "=-=".
 */
class Metamarkdown {
    private $parsedown;
    private $separator = "\n\n=-=\n\n";

    public function __construct() {
        $this->parsedown = new \Parsedown();
    }

    /**
     * Renders Markdown text to HTML via Parsedown.
     */
    public function text($text) {
        return $this->parsedown->text($text);
    }

    /**
     * Converts a raw metadata value into a typed PHP value:
     * "true"/"false" and "yes"/"no" become booleans, numeric strings become
     * integers, anything else is returned unchanged.
     */
    public function metaVal($text) {
        // Fixed: the original used bitwise "|" where logical "||" was meant
        // (same result here only by accident of operator precedence).
        if ($text == 'true' || $text == 'false') {
            return ($text == 'true');
        }
        if ($text == 'yes' || $text == 'no') {
            return ($text == 'yes');
        }
        if (is_numeric($text)) {
            return (int) $text;
        }
        return $text;
    }

    /**
     * Parses the metadata header into an associative array. Values containing
     * ", " become arrays of typed values.
     */
    public function metadata($text) {
        $ret = array();
        foreach (explode("\n", $text) as $line) {
            // Limit 2 so values that themselves contain ": " are kept whole
            // (the original truncated them at the second separator).
            $colonSplit = explode(": ", $line, 2);
            if (count($colonSplit) < 2) {
                continue; // skip empty or malformed lines instead of raising notices
            }
            $tagName = $colonSplit[0];
            $values = $colonSplit[1];
            $CSVs = explode(", ", $values);
            if (count($CSVs) == 1) {
                $ret[$tagName] = $this->metaVal($CSVs[0]);
            } else {
                $ret[$tagName] = array();
                foreach ($CSVs as $CSV) {
                    $ret[$tagName][] = $this->metaVal($CSV);
                }
            }
        }
        return $ret;
    }

    /**
     * Splits a document into metadata and rendered content.
     *
     * With both by-reference output arguments supplied, fills them and returns
     * nothing; otherwise returns the metadata array with the rendered body
     * under the 'content' key (original behaviour preserved).
     */
    public function read($text, &$metadataOut = false, &$textOut = false) {
        $split = explode($this->separator, $text);
        // Guard against documents without a separator line.
        $body = isset($split[1]) ? $split[1] : '';
        if ($textOut !== false && $metadataOut !== false) {
            $metadataOut = $this->metadata($split[0]);
            $textOut = $this->text($body);
        } else {
            $ret = $this->metadata($split[0]);
            $ret['content'] = $this->text($body);
            return $ret;
        }
    }
}
?>
|
JavaScript
|
UTF-8
| 3,103 | 3.03125 | 3 |
[] |
no_license
|
// Cached DOM references for the overlay / pop-up machinery.
let overlay, popUp, popClose;
let eduPort, skillPort, achivePort,
popEducation, popSkills, popAchivements;
// Which panel is currently open (1 = education, 2 = skills, 3 = achievements).
let closeFlag;
// Portfolio tile element ids (currently unused).
let portIds = ["education", "skills", "achivements"];
// Reusable inline-style fragments (several are currently unused).
let dBlock = "display: block;", dNone = "display: none;";
let vVis = "visibility: visible;", vHid = "visibility: hidden;";
let height = "height: ", px = "px;";

// Wire up all click handlers once the page has loaded.
window.addEventListener('load', function (){
    initElements();
    eduPort.addEventListener("click", function(){
        openPopUp(1);
    }, false);
    skillPort.addEventListener("click", function(){
        openPopUp(2);
    }, false);
    achivePort.addEventListener("click", function(){
        openPopUp(3);
    }, false);
    popClose.addEventListener("click", closePopUp, false);
    overlay.addEventListener("click", closePopUp, false);
});

// Resize handling (height alignment is currently disabled).
window.addEventListener('resize', function (){
    //alignBitHeight();
});
// Resolves and caches every DOM node used by the pop-up logic.
function initElements () {
    initTiles();
    initPopUp();
    initPopElements();
}

// Overlay, pop-up container and its close button.
function initPopUp (){
    popClose = elementById("pop_close");
    popUp = elementById("pop_up");
    overlay = elementById("scr_overlay");
}

// Clickable portfolio tiles.
function initTiles (){
    achivePort = elementById("achivements");
    skillPort = elementById("skills");
    eduPort = elementById("education");
}

// The three pop-up content panels.
function initPopElements (){
    popAchivements = elementById("pop_achievements");
    popSkills = elementById("pop_skills");
    popEducation = elementById("pop_education");
}
// Shows the overlay and the pop-up container, reveals the panel matching
// `option` (1 = education, 2 = skills, 3 = achievements) and records which
// panel is open so closePopUp can hide it again.
function openPopUp (option){
    toggleClass(overlay, "show", true);
    toggleClass(popUp, "show", true);
    var panel = null;
    if (option === 1) {
        panel = popEducation;
    } else if (option === 2) {
        panel = popSkills;
    } else if (option === 3) {
        panel = popAchivements;
    }
    if (panel !== null) {
        toggleClass(panel, "pop-invisible", false);
        toggleClass(panel, "pop-visible", true);
        closeFlag = option;
    }
}
// Hides whichever panel is currently open (tracked in closeFlag),
// then hides the overlay and the pop-up container.
function closePopUp (){
    switch (closeFlag) {
        case 1:
            toggleClass(popEducation, "pop-invisible", true);
            // Fixed: was toggleClass(popEducation, "pop-visible", true), which
            // TOGGLED the class (re-adding it on a repeated call) instead of
            // removing it like cases 2 and 3 do.
            toggleClass(popEducation, "pop-visible", false);
            break;
        case 2:
            toggleClass(popSkills, "pop-invisible", true);
            toggleClass(popSkills, "pop-visible", false);
            break;
        case 3:
            toggleClass(popAchivements, "pop-invisible", true);
            toggleClass(popAchivements, "pop-visible", false);
            break;
        default:
            break;
    }
    toggleClass(overlay, "show", false);
    toggleClass(popUp, "show", false);
}
// Align about height: on desktop-width (> 992px) viewports, makes the right
// column's height match the left column's rendered height. Currently only
// referenced from the commented-out resize handler.
function alignBitHeight (){
    if (window.innerWidth > 992) {
        elementById("right_bit").style.height = elementById("left_bit").offsetHeight + "px";
    }
}
// When `toggle` is truthy, flips `className` on the element;
// otherwise removes it unconditionally.
function toggleClass(element, className, toggle){
    var classes = element.classList;
    toggle ? classes.toggle(className) : classes.remove(className);
}
// Shorthand for document.getElementById.
function elementById (id){
    var node = document.getElementById(id);
    return node;
}
|
Markdown
|
UTF-8
| 2,333 | 2.546875 | 3 |
[] |
no_license
|
# 10979F - Module 2
- [PowerShell ISE](#powershell-ise)
- [PowerShell Module](#powershell-module)
- [Azure 가상 머신 포털](#azure-가상-머신-포털)
----
- PowerShell은 Azure portal, Visual Studio Tools에서 관리 가능하다.
- Window8.1 부터 PowerShell이 기본으로 설치되어 있다.
- PowerShell은 명령프롬프트(cmd)와 다르다.
- PowerShell의 명령어는 '동사-명사'로 이루어져 있다.
- 예 )
- get-command : PowerShell의 모든 명령어를 출력시켜라.
- get-command > powershell_command.txt : get-command 수행 결과를 powershell_command.txt에 저장하라.
- get-process : 작업관리자의 process 정보를 출력하라.
- Cmdlet : PowerShell 명령어를 뜻함
- 구글에 'powershell 명령어이름' 검색하면 MicrosoftDocs에서 세부적인 설명을 보여준다.
- PowerShell 명령어 실행

...


- cf ) 명령 프롬프트의 명령어를 그대로 사용할 수 있지만 명령어 옵션을 띄어써야 한다.

## PowerShell ISE
- Power Shell 명령어를 스크립트화 시킬 수 있다.
- F5 : 전체실행
- Block + F8 : Block한 부분만 실행
## PowerShell Module
- PowerShell 명령어 들의 집합
- 사용하기 위해서 Module를 등록시켜주어야 그 module과 관련된 명령어가 메모리에 저장되어 사용할 수 있게 된다. (import-Module 사용하기원하는Module)
- Cloud Shell은 이미 설치되어 있기 때문에 Module을 따로 설치할 필요가 없다.
- 해당 Module에서 사용할 수 있는 함수 보는 방법

## Azure 가상 머신 포털
https://docs.microsoft.com/ko-kr/azure/virtual-machines/windows/

- 포털
- GUI
- PowerShell
- Windows
- TXT
- CloudShell에서 그대로 사용가능
- CLI
- 모든 운영체제에서 사용가능
- TXT
- CloudShell에서 그대로 사용가능
=> CloudShell을 사용하면 Module을 설치할 필요가 없으므로 CloudShell 사용을 권함
|
Markdown
|
UTF-8
| 2,110 | 3.90625 | 4 |
[] |
no_license
|
# Item 55: Return optionals judiciously
### Optional
sample code
```java
public static <E extends Comparable<E>> E max(Collection<E> c){
if (c.isEmpty()){
throw new IllegalArgumentException("Empty collection");
}
E result = null;
for(E e: c){
if (result == null || e.compareTo(result) > 0){
result = Objects.requireNonNull(e);
}
}
return result;
}
```
##### Optional version<br>
> 1st
```java
public static <E extends Comparable<E>> Optional<E> max(Collection<E> c){
if (c.isEmpty()){
return Optional.empty();
}
E result = null;
for(E e: c){
if (result == null || e.compareTo(result) > 0){
result = Objects.requireNonNull(e);
}
}
return Optional.of(result);
}
```
> 2nd
```java
public static <E extends Comparable<E>> Optional<E> max(Collection<E> c){
return c.stream().max(Comparator.naturalOrder());
}
```
> Never return a null value from an ``Optional``-returning method.
<hr>
Example
> Set default value
> ``String lastWordInLexicon = max(word).orElse("No words ....");``<br>
> Set throw exception
> ``String lastWordInLexicon = max(word).orElseThrow(NullPointerException::new);``<br>
* orElseGet
* filter
* map
* flatMap
* ifPresent
* isPresent
* get
<hr>
> 1. Container types, including collections, maps, streams, arrays<br>
> <b>but should not be the optional</b>
> 2. Declare a method to return Optional<T>. When the clients will have to perform special processing if no result is returned.
> 3. Never return an optional of a boxed primitive type.<br>
> <b> Boolean / Byte / Character / Short / Float</b>
> 4. Never appropriate to use an optional as a key, value, or element in a collection or array.
<hr>
### To summarize
> If you find yourself writing a method that can't always return a value, and you believe
it is important that users of the method consider this possibility every time they call it,
then you should probably return an optional.<br>
> <b>For performance-critical methods, returning null or throwing an exception may be better.</b>
|
Python
|
UTF-8
| 945 | 4.15625 | 4 |
[] |
no_license
|
# Estimate the total (uninvested) savings accumulated between a starting age
# and retirement, given monthly salary, monthly living cost and yearly hike
# rates for both.
#
# Scenario defaults: start at 30 with a 50k monthly salary and 30k monthly
# living cost, retire at 50, 10% yearly salary hike, 8% yearly living-cost hike.


def calculateSavings(age_start: int, age_retire: int, init_salary: int, init_liv: int, sal_hike: float, liv_hike: float):
    """Return the total amount saved over the working years.

    The first year uses the initial monthly salary and living cost; for each
    subsequent year both figures grow by their respective hike rates before
    that year's savings (12 monthly differences) are accumulated.
    """
    total = (init_salary - init_liv) * 12  # first working year
    salary, living = init_salary, init_liv
    for _ in range(1, age_retire - age_start):
        living = living + living * liv_hike    # yearly living-cost hike
        salary = salary + salary * sal_hike    # yearly salary hike
        total += (salary - living) * 12
    return total


if __name__ == "__main__":
    print(calculateSavings(age_start=30, age_retire=50, init_salary=50_000, init_liv=30_000, sal_hike=0.1, liv_hike=0.08))
|
C#
|
UTF-8
| 509 | 2.59375 | 3 |
[] |
no_license
|
using FluentAssertions;
using HDC.PowerAnalysis.Security;
using NUnit.Framework;

namespace PowerAnalysis.Domains.UnitTests.Security
{
    /// <summary>
    /// Unit tests for <see cref="Company"/>.
    /// </summary>
    [TestFixture]
    public class CompanyTests
    {
        /// <summary>
        /// A newly constructed company has a null Id and keeps the supplied description.
        /// </summary>
        [Test]
        public void Constructor_Correctly_Initializes_Object_State()
        {
            // Arrange
            const string description = "companyDescription";

            // Act
            var sut = new Company(description);

            // Assert
            sut.ShouldBeEquivalentTo(new
            {
                Id = (string)null,
                Description = description
            });
        }
    }
}
|
Python
|
UTF-8
| 5,497 | 2.765625 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# spider_page: parsing of rental-listing detail pages (sh.lianjia.com)
from ..cheat.random_proxies import CheatRequests
from bs4 import BeautifulSoup
import re
from util.common.logger import use_logger, base_info


@use_logger(level="err")
def page_err(msg):
    # Error-level log sink; the @use_logger decorator supplies the actual
    # logging behaviour, so the body is intentionally empty.
    pass


# TODO: extra request parameters will later be obtained from the cheat module
headers = {
    "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36"
}

# Enumeration of listing-amenity tag ids -> Chinese labels:
# 1 bed, 2 TV, 3 fridge, 4 washing machine, 5 air conditioning, 6 heating,
# 7 broadband, 9 natural gas, 10 water heater, 11 wardrobe, 12 desk & chairs,
# 13 microwave.
house_tag_detail = {
    "1":"床",
    "2":"电视",
    "3":"冰箱",
    "4":"洗衣机",
    "5":"空调",
    "6":"暖气",
    "7":"宽带",
    "9":"天然气",
    "10":"热水器",
    "11":"衣柜",
    "12":"桌椅",
    "13":"微波炉"
}
def get_house_info(page_text):
    '''Extract the required fields from one listing detail page.

    Returns a list in the fixed order
    [house_id, house_type, sale_date, basic_info, tags, features],
    or an empty list when the page cannot be parsed.
    '''
    soup = BeautifulSoup(page_text, "lxml")
    house_info = list()
    # listing number (house id)
    try:
        house_id_soup = soup.findChild("span",{"class":"houseNum"})
    except AttributeError:
        page_err("BS解析错误%s"%page_text)
        return house_info
    house_id_compile = "([\dSH]+)"
    house_id = re.findall(house_id_compile, str(house_id_soup))
    if len(house_id) == 0:
        # no recognisable id on the page -> give up on this listing
        return house_info
    else:
        house_id = house_id[0]
    house_info.append(house_id)
    # layout type and publication date: both live in <p class="lf"> elements
    # shaped like "<i>label</i>value</p>"
    lable_compile = re.compile("<i>(.+)</i>")
    result_compile = re.compile("</i>(.+)</p>")
    house_type_new = None
    sale_date_new = None
    house_type_new_soups = soup.findChildren("p",{"class":"lf"})
    for house_type_new_soup in house_type_new_soups:
        lable = re.findall(lable_compile, str(house_type_new_soup))[0]
        result = re.findall(result_compile, str(house_type_new_soup))[0]
        if lable.strip() == "房屋户型:":
            house_type_new = result
        if lable.strip() == "时间:":
            sale_date_new = result
    house_info.append(house_type_new)
    house_info.append(sale_date_new)
    # basic attributes: collect all non-ASCII runs and re-join them
    basic_info_soup = soup.findChild("div",{"class":"introContent"}).findChild("div",{"class":"content"})
    basic_info_compile = "([^\x00-\xff]+)"
    basic_info = re.findall(basic_info_compile, str(basic_info_soup))
    basic_info = ",".join(basic_info)
    basic_info = basic_info.replace(":,",":")
    house_info.append(basic_info)
    # listing amenity tags
    house_tag_soups = soup.findChild("div",{"class":"zf-tag"}).findChildren("li")
    house_tag_list = list()
    house_tag_compile = "<li class=\"(.+)\">"
    for house_tag_soup in house_tag_soups:
        house_tag = re.findall(house_tag_compile, str(house_tag_soup))[0]
        if house_tag.find("tags") != -1:
            # slice out the numeric tag id; assumes the class is shaped like
            # "<3 chars><id>tags<2 chars>" — TODO confirm against live markup
            house_tag = house_tag[3:-5]
            house_tag_list.append(house_tag_detail[house_tag])
    house_info.append(",".join(house_tag_list))
    # listing feature description (CJK characters, digits and colons)
    house_feature_soup = soup.findChild("div",{"class":"featureContent"})
    house_feature_compile = "([\u4e00-\u9fa5\d:]+)"
    house_feature = re.findall(house_feature_compile, str(house_feature_soup))
    house_feature = ",".join(house_feature)
    house_feature = house_feature.replace(":,",":")
    house_info.append(house_feature)
    return house_info
def get_house_infos(house_id_list):
    '''Fetch the detail pages for a list of house ids and return the parsed rows.'''
    url_template = "https://sh.lianjia.com/zufang/{house_id}.html"
    url_list = list()
    house_infos = list()
    # build the list of URLs to request
    for house_id in house_id_list:
        url_list.append(url_template.format(house_id=house_id))
    req = CheatRequests([url_list])
    # NOTE(review): get_cheat_all_content is accessed without parentheses —
    # presumably a property on CheatRequests; confirm.
    contents = req.get_cheat_all_content
    for page_texts in contents:
        for page_text in page_texts:
            house_info = get_house_info(str(page_text[0].decode('utf-8')))
            if len(house_info) == 0:
                # log unparseable pages and keep going
                page_err("[IndexError]更新页面详情数据错误\tpage_text:%s"%str(page_text[0].decode('utf-8')).replace("\n",""))
                continue
            house_infos.append(house_info)
    return house_infos
def create_house_info_db(start=0,num=80):
    '''Read batches of house ids from the database and write parsed detail data back.'''
    from module.database import LJDBController
    lj_db = LJDBController()
    house_id_lists = lj_db.get_house_ids(start=start, num=num)
    for house_id_list in house_id_lists:
        house_id_list_req = list()
        for house_id in house_id_list:
            # each DB row is a tuple whose first element is the house id
            house_id_list_req.append(house_id[0])
        s = get_house_infos(house_id_list_req)
        lj_db.update_house_info(s)
    # NOTE(review): no parentheses — this only references `close` and does not
    # call it, unless LJDBController.close is a property; confirm.
    lj_db.close
def create_house_info_redis(num=20):
    '''Re-crawl listings cached in Redis that are queued for a retry.'''
    from module.database import LJDBController
    from ..redis import LJRedisController
    base_info("开始从Redis内获取房源编号发起请求")
    rds = LJRedisController()
    lj_db = LJDBController()
    # move newly failed pages into the retry queue, then take a batch
    rds.failed_page_insert()
    house_list = rds.failed_page_get(num)
    for house_ids in house_list:
        house_id_list_req = list()
        for house_id in house_ids:
            # each Redis entry is a tuple; index 1 holds the house id
            house_id_list_req.append(house_id[1])
        s = get_house_infos(house_id_list_req)
        lj_db.update_house_info(s)
        # drop successfully re-crawled entries from the retry queue
        rds.success_page_del(house_ids)
    # NOTE(review): no parentheses on either line — `close` is referenced, not
    # called, unless it is a property on these controllers; confirm.
    lj_db.close
    rds.close


if __name__ == "__main__":
    create_house_info_db(num=20)
    # print(get_house_infos(["107100000682","107002262926","107001043986","SH0003283827"]))
|
C
|
UTF-8
| 1,015 | 3.203125 | 3 |
[] |
no_license
|
/* 3. A client sends the server a string; the server returns the reversed
 * string (characters from the end to the beginning). */
#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>

int main() {
    int c;                      /* client socket descriptor */
    struct sockaddr_in server;  /* server address: 127.0.0.1:1235 */

    c = socket(AF_INET, SOCK_STREAM, 0);
    if (c < 0) {
        printf("Eroare la crearea socketului cslient\n");
        return 1;
    }
    memset(&server, 0, sizeof(server));
    server.sin_port = htons(1235);
    server.sin_family = AF_INET;
    server.sin_addr.s_addr = inet_addr("127.0.0.1");
    if (connect(c, (struct sockaddr *) &server, sizeof(server)) < 0) {
        printf("Eroare la conectarea la server\n");
        return 1;
    }
    size_t a;     /* string length, sent ahead of the payload */
    char b[100];
    printf("str: ");
    /* NOTE(review): the trailing '\n' kept by fgets becomes part of the
       string that gets reversed — confirm this is intended. */
    fgets(b, sizeof(b), stdin);
    a = strlen(b);
    /* NOTE(review): htonl() converts only 32 bits, yet sizeof(a) bytes
       (8 on 64-bit platforms) are transmitted; the peer must read a size_t
       with matching width/endianness for this protocol to work — confirm the
       server side does the same. */
    a = htonl(a);
    send(c, &a, sizeof(a), 0);
    a = ntohl(a);
    send(c, b, a, 0);            /* send the string (without the NUL) */
    memset(b, 0, a);
    recv(c, b, a, MSG_WAITALL);  /* read back exactly `a` reversed bytes */
    printf("%s\n", b);
    close(c);
}
|
C++
|
UTF-8
| 330 | 2.609375 | 3 |
[] |
no_license
|
#include <automaton.h>

#include <iostream>
#include <string>

// Reads one whitespace-delimited regular expression from `in`, builds its
// automaton, applies inversed() and minimized(), and writes the result to `out`.
void automaton_by_inv_regex(std::istream& in, std::ostream& out) {
    std::string pattern;
    in >> pattern;

    Automaton machine(pattern);
    out << machine.inversed().minimized() << std::endl;
}

int main() {
    automaton_by_inv_regex(std::cin, std::cout);
    return 0;
}
|
Java
|
UTF-8
| 993 | 2.671875 | 3 |
[
"Apache-2.0"
] |
permissive
|
package com.varunest.loader.particles;

import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.Path;
import android.view.View;

/**
 * Particle rendered as a filled circle centred in the view, with a radius of
 * half the view width.
 */
public class Circle extends ParticleView {

    /** Last known view dimensions, captured in {@link #onSizeChanged}. */
    private int width;
    private int height;
    private Paint circlePaint;

    public Circle(Context context) {
        super(context);
        setUpPaint();
    }

    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        width = w;
        height = h;
    }

    /** Creates the anti-aliased fill paint used for drawing. */
    private void setUpPaint() {
        circlePaint = new Paint(Paint.ANTI_ALIAS_FLAG);
        circlePaint.setStrokeCap(Paint.Cap.ROUND);
        circlePaint.setStyle(Paint.Style.FILL);
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        canvas.drawCircle(width / 2, height / 2, width / 2, circlePaint);
    }

    /** Updates the fill colour and requests a redraw. */
    public void setPaintColor(int paintColor) {
        circlePaint.setColor(paintColor);
        invalidate();
    }
}
|
Markdown
|
UTF-8
| 2,850 | 2.578125 | 3 |
[] |
no_license
|
## Wakanda Paypal Module ##
Here are some new changes from a fork from github ! challenge accepted !
This is a module that implements the Paypal REST API for Wakanda, it's still very basic, but it will allow you to handle payment easily.
If you want to use this module, it's highly recommended that's you don't use it directly in your waPage, but build another module on top of it.
for now it only implements paypal payments, I'm working on other parts too like ExpressCheckout.
### Getting Started ###
In order to start using the paypal module:
1. create another JS Module in Wakanda (i.e: payments.js)
```javascript
var module = require('wak-paypal');
var paypalModule = new module.Paypal();
var payments = {};
payments.create = function(params)
{
var response = paypalModule.payments.create(params.intent, params.payer, params.transactions, params.redirect_urls);
//you can format the response first, check for errors etc
return response;
}
for( var element in payments )
{
exports[element] = payments[element];
}
```
2. Setup your credentials in paypal/config.js (you can use sandbox to get test credentials)
```javascript
config.client_id = "YOUR_CLIENT_ID";
config.secret = "YOUR_SECRET_KEY";
```
3. Set payment.js you've created as RPC Service and Include it in your page.
4. in your page, you can add a form that capture credit card details (first name, last name, card number, cvv2, expire date(year & month), type (visa, mastercard, discover, amex))
5. in your page script add a code that handles the payment, it would like this:
```javascript
Submit.click = function Submit_click (event)
{
var credit_card = {};
credit_card.type = $$('type').getValue();
credit_card.number = $$('number').getValue();
credit_card.cvv2 = $$('cvv2').getValue();
credit_card.expire_month = $$('expire_month').getValue();
credit_card.expire_year = $$('expire_year').getValue();
credit_card.first_name = $$('first_name').getValue();
credit_card.last_name = $$('last_name').getValue();
var payer = {};
payer.payment_method = "credit_card";
payer.funding_instruments = [{"credit_card": credit_card}];
var transactions = [
{
"amount": {
"total": "7.47",
"currency": "USD"
},
"description": "This is the payment transaction description."
}
];
//optional
var redirect_urls = {return_url: "www.wakanda.org", cancel_url:"www.wakanda.org"}
    // create the payment asynchronously via the RPC service
var response = payment.createAsync({
onSuccess:function(data)
{
alert(data.state);
},
params: [{
intent: "sale",
payer: payer,
transactions: transactions,
redirect_urls: redirect_urls
}]
});
};
```
6. Checkout the details about the fields in [Paypal Developer Center](https://developer.paypal.com/webapps/developer/docs/api/)
|
Markdown
|
UTF-8
| 1,039 | 2.65625 | 3 |
[
"Apache-2.0"
] |
permissive
|
End to End test uses Ansible playbooks to accomplish the following:
* Grab and build the latest gcloud-maven-plugin code from Github
* Grab and install the latest released gcloud sdk
* Deploy a sample application via 'mvn gcloud:deploy'
#### Prerequisites
The test assumes that you have:
* GCP project
* gcloud SDK installed locally, and pointing to your GCP project
* You are logged in via gcloud (gcloud auth login)
> Note, that if the test runs from one of the VMs in your GCP project, these two steps would be configured for you automatically.
* VM named 'deploy-vm' based on Ubuntu image with wide enough scopes (see scripts/create_vm.sh)
* Pair of SSH keys to be able to ssh to 'deploy-vm' (Public key should be registered in Compute Engine metadata with the user name used by ansible)
#### Ansible roles
* ansible-playbook -i hosts --list-tasks site.yml
* 'gce' play will obtain the IP address of your 'deploy-vm' and will populate dynamic host group 'compute'
* 'compute' play will perform the end to end test on 'deploy-vm'
|
Java
|
SHIFT_JIS
| 2,170 | 2.625 | 3 |
[
"Apache-2.0"
] |
permissive
|
package jp.co.se.android.recipe.chapter04;

import android.annotation.TargetApi;
import android.app.Notification;
import android.app.Service;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Build;
import android.os.IBinder;
import android.widget.Toast;

/**
 * Sample foreground Service: shows an ongoing notification, performs a
 * 3-second background task per start command, then stops itself.
 * (Original comments were Shift_JIS mojibake; rewritten in English.)
 */
public class Ch0409Service extends Service {
    /** Notification id used for the foreground notification. */
    private static final int NOTIFY_ID = 1;

    @Override
    public void onCreate() {
        super.onCreate();
        // Promote the service to the foreground as soon as it is created.
        showNotification();
    }

    /** Start-only service; binding is not supported. */
    @Override
    public IBinder onBind(Intent intent) {
        return null;
    }

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        // Simulated background work: sleep 3 seconds, then stop the service.
        AsyncTask<Void, Void, Void> asyncTask = new AsyncTask<Void, Void, Void>() {
            @Override
            protected Void doInBackground(Void... params) {
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                }
                return null;
            }

            @Override
            protected void onPostExecute(Void result) {
                Toast.makeText(getBaseContext(), "Task end.",
                        Toast.LENGTH_SHORT).show();
                stopSelf();
            }
        };
        asyncTask.execute();
        Toast.makeText(getBaseContext(), "Task start.", Toast.LENGTH_SHORT)
                .show();
        // START_STICKY: recreate the service if the system kills it.
        return Service.START_STICKY;
    }

    /**
     * Shows a Notification and puts this Service into the foreground.
     */
    @TargetApi(Build.VERSION_CODES.JELLY_BEAN)
    @SuppressWarnings("deprecation")
    private void showNotification() {
        Notification.Builder builder = new Notification.Builder(
                getApplicationContext()).setContentTitle("Running Service.")
                .setContentText("Ch0409").setSmallIcon(R.drawable.ic_launcher)
                .setOngoing(true);
        final Notification notification;
        // Notification.Builder#build() exists only on JELLY_BEAN and later.
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
            notification = builder.build();
        } else {
            notification = builder.getNotification();
        }
        startForeground(NOTIFY_ID, notification);
    }
}
|
Markdown
|
UTF-8
| 869 | 2.75 | 3 |
[] |
no_license
|
# Create Application
In this IoT solution, create the rental car company’s application that will monitor vehicle health.
## POST: CREATE APPLICATION
Create an application inside IoT solution. A unique applicationId for the application is returned.
SAMPLE REQUEST
```
{
"name": [{
"lang": "en_us",
"text": "Car Health Monitor App"
}],
"description": [{
"lang": "en_us",
"text": "An external partner application to monitor faults in car."
}]
}
```
SAMPLE RESPONSE
```
{
"id": "65eb9268-b0d1-4f75-a855-71eac716a351",
"version": "0",
"creation": 1441377362856,
"name": [{
"lang": "en_us",
"text": "Car Health Monitor App"
}],
"description": [{
"lang": "en_us",
"text": "An external partner application to monitor faults in car."
}]
}
```
|
Markdown
|
UTF-8
| 2,206 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
## Working Group Updates
- Recurring meeting slot (poll)
- Finalized to Thu 10AM-11AM CST
- Tide issue triage
- Initial triage on 2019-02-22 10AM - 11AM CST
- Once initial triage is done, recurring triage will occur during the weekly meetings
- Status updates
- tide
- @Aaron T submitted a new PR refactoring the internals of tide to use the new http-service crate
- for http-service, the goals are:
- to provide an interface that is not tied in to any particular framework or server
- to work with futures 0.3 directly
- to avoid generics as much as possible, to keep the type system aspects relatively easy
- WG structure
- `rustasync` will be the umbrella org for the rust asynchronous ecosystem with two major focuses
- One would be the async foundations which overlaps with the compiler team and works to add async features to the languages
- The other will be async ecosystem, which focuses on the broader async ecosystem
- Quoting @yoshua w :
> being about networking, the scope of the async ecosystem WG would also include bindings to platforms, reactors, thread pools, and traits to bind them together
## Office Hours
- Web WG is growing: Can we list the core areas of the “new” WG?
- What is the most urgent area where help is needed? Tide? Core-Crates?
- What other ways are there to help out except PRs?
- What’s the timeline for the first version of Tide and how many people are currently working on it?
- //Maybe an offtopic question: How would you position Rust/Tide vs. NodeJS and Go. What are the arguments you would tell a company/shop to use Rust/Tide instead of Node or Go?
- Get a stable core framework up and running from which point we can start semver versioning.
- The triage meeting happening tomorrow and the triage which will happen during the weekly meeting would be a great entry point for onboarding contributors.
- Mentorship
- Are there any plans for providing mentorship to new contributors?
- Similar to the previous question, the triage meetings would be a great point to start the onboarding of new contributors.
- How would this be structured?
|
Markdown
|
UTF-8
| 4,023 | 3.140625 | 3 |
[
"MIT"
] |
permissive
|
# Simulation Documentation
 

## Important Note
This component's target data parser can be configured by setting the PARSER_URL environment variable, which defaults to http://parser:6000
This component's deployer deletion endpoint can be configured by setting the DEPLOYER_DELETE_URL environment variable, which defaults to http://deployer:7000/simulations/
### Format necessary for each file format supported ###
**Note:** For all datasets, non-numeric values are not accepted, and
because of that, any parameters that require it should be converted
to numeric values.
- .npz \
Each dataset file should be a map stored by using numpy.save(),
where there should be two key-value pairs, the first one should be
a list of features of the dataset and second should be a list of labels
corresponding, it's required that the user remembers which names were
used as keys but there isnt any requirement on what these names should be \
Example: \
*train_dt.npz*
```json
{
'train_feature' : [feature list]
'train_label' : [label list]
}
```
- .csv \
Each dataset file should be a CSV table where each column corresponds
to one of the input parameters of the neural network, with the exception
of one which should correspond to the label.Taking all of this into account
each row of non-label collumns should form a input feature vector to be fed
into the algorithim.
The name determined of the label column should be taken into account for,
but there aren't any requirements on what that name should be. \
Example: \
*train_dt.csv*
```json
f1 | f2 | label
58 20 1
12 60 0
```
- .arff \
In a similar manner to the .csv file, it is also composed of a table where each column
corresponds to one of the input parameters of the neural network, with the exception
of one which should correspond to the label, corresponding to what should be the
@DATA section of the files, besides that it should also include a header section
describing each column attribute by its name and type.
The name determined of the label column should be taken into account for,
but there aren't any requirements on what that name should be. \
Example:
```json
@RELATION EXAMPLE
@ATTRIBUTE label NUMERIC
@ATTRIBUTE f1 NUMERIC
@ATTRIBUTE f2 NUMERIC
@ATTRIBUTE f3 NUMERIC
@DATA
1,500,100,200
2,100,700,150
0,200,400,250
```
- .json \
Each dataset file should contain a map with a number
of key-value pairs corresponding to each one of the inputs that would
constitute a matrix of feature vectors, and a key-value pair corresponding
to the label vector. The key name of the label vector should be known by
the user, but there aren't any requirements on what that name should be. \
Example:
```json
{
"x1" : [134,214,3123],
"x2" : [1586,245,3123],
"label" : [0,4,5],
}
```
- pandas (.zip and .pickle) \
Each dataset file should be gotten by using the pandas.to_pickle() function
when converting a pandas dataframe, by using the compression='zip' argument
value or outputing to a .zip file instead of a pickle one it's possible to
obtain a compressed version of the pickled dataframe file.
The user should take into account the name of the column in the dataframe
object that corresponds to the label vector; there aren't any requirements
on what that name should be. \
Example: \
*Pandas Dataframe Instance - train_df*
```json
f1 | f2 | label
58 20 1
12 60 0
```
Example of conversion, both compressed and uncompressed: \
pandas.to_pickle(train_df, "./dataset_train.pickle")
pandas.to_pickle(train_df, "./dataset_train.zip")
|
JavaScript
|
UTF-8
| 4,261 | 2.5625 | 3 |
[] |
no_license
|
function configurations(config, models) {
var config = config;
var models = models;
}
bcrypt = config.bcrypt;
/**
 * Build a plain target-descriptor object from the given user fields.
 * Used to carry redirect/prefill data through the login flow.
 */
function targetOBJ(passedEmail, passedFirstname, passedLastname, target) {
    const descriptor = {
        email: passedEmail,
        firstName: passedFirstname,
        lastName: passedLastname,
        target: target,
    };
    return descriptor;
}
/**
 * Normalize a target descriptor before it is interpolated into templates
 * or query strings.
 *
 * - If the descriptor itself is missing, return an all-null placeholder.
 * - Otherwise replace any undefined field with an empty string so later
 *   string concatenation never produces the literal text "undefined".
 */
function checkTargetOBJ(targetOBJ) {
    if (typeof targetOBJ === 'undefined') {
        return { email: null, firstName: null, lastName: null, target: null };
    }
    // BUG FIX: the original compared each field to the *string* 'undefined'
    // and discarded the ternary results, so nothing was ever normalized.
    if (typeof targetOBJ.email === 'undefined') { targetOBJ.email = ''; }
    if (typeof targetOBJ.firstName === 'undefined') { targetOBJ.firstName = ''; }
    if (typeof targetOBJ.lastName === 'undefined') { targetOBJ.lastName = ''; }
    if (typeof targetOBJ.target === 'undefined') { targetOBJ.target = ''; }
    return targetOBJ;
}
/**
 * Attempt to log a user on.
 *
 * Resolves with either:
 *  - { attempt: true, string: <redirect url> } on success, or
 *  - { template, parameters } describing the login page to re-render on
 *    failure (unknown user or wrong password — same generic message for
 *    both, so we do not leak which one failed).
 *
 * Never rejects: all outcomes are resolved so callers can render directly.
 */
async function logOn(username, password, targetOBJ) {
    targetOBJ = checkTargetOBJ(targetOBJ);
    const errorToClient = {
        template: 'login/login',
        parameters: {
            message: 'username, password, or both, failed. Please try again',
            email: targetOBJ.email,
            firstName: targetOBJ.firstName,
            lastName: targetOBJ.lastName,
            target: targetOBJ.target,
        },
    };
    return new Promise(async (resolve, reject) => {
        // Check if the username exists; getuser() returns false when it does
        // not, otherwise the stored hash and user role.
        const user = await models.login.getuser(username);
        if (user === false) {
            resolve(errorToClient);
            return;
        }
        bcrypt.compare(password, user.storedhash, function (bcryptError, bcryptResponse) {
            if (bcryptResponse) {
                const response = { attempt: true };
                // BUG FIX: the original tested the undeclared global `target`
                // (always undefined), so the redirect-with-prefill branch
                // below was unreachable.
                const target = targetOBJ.target;
                if (typeof target === 'undefined' || target === null) {
                    response.string = '/';
                } else {
                    response.string = target + '?email=' + targetOBJ.email +
                        '&firstName=' + targetOBJ.firstName +
                        '&lastName=' + targetOBJ.lastName;
                }
                resolve(response);
            } else {
                // Wrong password: same generic error as an unknown user.
                resolve(errorToClient);
            }
        }); // end bcrypt.compare
    }); // close promise
} // end logOn
/**
 * Register a new user and, on success, log them straight on.
 *
 * Resolves with either the logOn() result, or a { template, parameters }
 * object describing the page to re-render when hashing fails or the
 * username/email is already taken. Never rejects.
 */
async function register(username, password, email, targetOBJ) {
    targetOBJ = checkTargetOBJ(targetOBJ);
    return new Promise(async (resolve, reject) => {
        const userrole = 'admin'; // HARD CODED FOR NOW
        // Need to add data validation
        bcrypt.hash(password, 10, async function (err, hash) {
            // Callback fires only once the hash is ready, so nothing is
            // stored in the database until everything is good to go.
            if (err) {
                console.log(err);
                resolve({
                    template: 'register',
                    parameters: { message: 'password failed to hash', email: targetOBJ.email, firstName: targetOBJ.firstName, lastName: targetOBJ.lastName, target: targetOBJ.target },
                });
                return;
            }
            try {
                // BUG FIX: the original read `user` inside a .then() callback
                // before it had ever been assigned; await the result directly.
                const user = await models.login.registeruser(username, email, hash, userrole);
                if (user && user.username === username) {
                    resolve(logOn(username, password, targetOBJ));
                } else {
                    // BUG FIX: `targetOB.target` was a typo that threw a
                    // ReferenceError on this path.
                    resolve({
                        template: 'login/login',
                        parameters: { message: 'username, password, or both, failed. Please try again', email: targetOBJ.email, firstName: targetOBJ.firstName, lastName: targetOBJ.lastName, target: targetOBJ.target },
                    });
                }
            } catch (dbErr) {
                // Unique-constraint (or other DB) failure: offer the form again.
                console.log(dbErr);
                resolve({
                    template: 'login/register',
                    parameters: { message: 'username or email unavailable', email: targetOBJ.email, firstName: targetOBJ.firstName, lastName: targetOBJ.lastName, target: targetOBJ.target },
                });
            }
        }); // end bcrypt.hash
    }); // close promise
} // end register
// Public surface of the login module.
const login = {
    configurations: configurations,
    targetOBJ: targetOBJ,
    logOn: logOn,
    register: register,
};
module.exports = login;
|
Java
|
UTF-8
| 402 | 2.359375 | 2 |
[] |
no_license
|
package edu.ycp.cs320.lab02.model;
/**
 * Join-table row linking one author to one project
 * (many-to-many relationship between authors and projects).
 */
public class ProjectsAuthors {
	/** Primary key of the linked author. */
	private int authorID;
	/** Primary key of the linked project. */
	private int projectID;

	public ProjectsAuthors() {
	}

	public void setAuthorID(int authorID) {
		this.authorID = authorID;
	}

	public int getAuthorID() {
		return authorID;
	}

	// Parameter renamed from the misleading "string" (it is an int id).
	public void setProjectID(int projectID) {
		this.projectID = projectID;
	}

	public int getProjectID() {
		return projectID;
	}
}
|
Shell
|
UTF-8
| 137 | 2.796875 | 3 |
[] |
no_license
|
# Rename the .png files in ../demo so their names match the local .png files,
# pairing the i-th demo file with the i-th local file in glob order.
fox_files=(*.png)
demo_files=(../demo/*.png)
# BUG FIX: the original condition was i <= count, which ran one iteration past
# the last element and produced a stray `mv` error on an empty name. Quoting
# the expansions also keeps filenames containing spaces intact.
for ((i = 0; i < ${#fox_files[@]}; i++)); do
    mv "${demo_files[i]}" "../demo/${fox_files[i]}"
done
|
Markdown
|
UTF-8
| 2,340 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
---
###########
### SEO ###
###########
# The title of the page, displayed by the browser on the title of the window.
# Ideally this is the same as the name of the event.
title: 'Blockchain Week Afterparty @Sofs bar'
# Description for this event.
description: Afterparty @Sofs bar by Cercle du Coin
#####################
### EVENT DETAILS ###
#####################
# The name of the event you're creating.
# Ideally this is the same as the title.
name: 'Blockchain Week Afterparty @sofs bar'
# There _needs_ to be one hyphen before each paragraph.
# Linebreaks are ignored, but they _must_ start with two spaces.
# Indentation is crucial:
# Two spaces before the hyphen, four spaces before the text. _No_ tabs allowed.
# Add or remove paragraphs as needed, but remember the hyphen before each entry.
synopsis:
-
Après les conférences retrouvons nous au Sofs bar, qui accepte les paiements en Bitcoin et Ether. Certains membre du cercle du coin seront présent et ravi de vous accueillir. Pinte de bière en happy hour jusqu'a 21h — 4€
-
--
-
After the conferences, let's meet at Sofs bar, which accepts payments in Bitcoin and Ether. Some members of the local circle will be present and delighted to welcome you. Pint of beer in happy hour until 9pm — 4€
# The date should be in the format year-month-day (ISO 8601).
# Example: 2018-02-28
date: 2019-03-05
# The date when the event ends. Can be left empty or set to the same day the
# event starts.
endDate: 2019-03-05
# Set the time in 24 hours format, surrounded by quotes.
# _Only_ the starting time!
# Example: '18:00'
time: '18:30'
# Time when the event ends. Can be left empty.
endTime: '23:00'
# The URL where to aquire the tickets. Can be left empty.
tickets:
# If the entrance is free, set zero (0) as the price, or leave it empty.
# _Don't_ write the currency symbol (Euro symbol will be used).
price: 0
# The name of the venue where the event will be held. Can be left empty.
venue: Sof's Bar
# The address to link to a Google map. Please test the address on Google Maps.
address: 43 rue Saint Sauveur, 75003 Paris
# The category of the event.
category: drink
#################
### SPEAKERS ####
#################
### DON'T MAKE CHANGES BELOW THIS LINE! ###
---
<!-- ### DON'T MAKE CHANGES BELOW THIS LINE! ### -->
<Event-Content/>
|
Ruby
|
UTF-8
| 120 | 2.796875 | 3 |
[] |
no_license
|
# Thin facade over the Money gem's monkey-patched Numeric#to_money:
# converts an amount from one currency into another.
class Bank
  # Convert +amount+ from currency +from+ into currency +to+,
  # returning a Money instance denominated in +to+.
  def self.exchange(amount, from, to)
    amount.to_money(from).exchange_to(to)
  end
end
|
Markdown
|
UTF-8
| 1,312 | 2.984375 | 3 |
[] |
no_license
|
## 前置
1. 配置数据库地址(resources/application.yml)
2. 创建**repositories**(与数据库操作相关)、**service**(业务代码)、**controller**(相当于视图函数)
## repository 层
1. 模块外部的类不能直接引入,所以要在 manager 模块的 build.gradle 中添加 **entity** 模块的依赖(manager/build.gradle)
2. 继承`JpaRepository`接口,拥有简单操作数据库的能力
3. 继承`JpaSpecificationExecutor`接口,拥有复杂操作数据库的能力
## service 服务层
1. 服务类前边要加入`@Service`注解
2. 注入 repository 实例(添加`Autowired`注解)
3. 数据校验
4. 设置默认值
5. 调用`repository.save`方法保存产品
## controller 层
1. 添加注`RestController`、`RequestMapping`注解(RequestMapping 可以添加 URL 路径)
2. 自动注入服务实例`@Autowired`
3. 视图函数添加`@RequestMapping(value = "", method = xx)`
4. 视图函数参数接收一个产品对象`(@RequestBody Product product)`
## 分页查询
翻页查询返回的是一个**Page**类
分页查询方法接收多个参数(即查询条件),这些查询参数都是可选的
创建 specification 对象(里面组合了查询条件),将之传给 repository.findAll
|
PHP
|
UTF-8
| 4,266 | 2.890625 | 3 |
[] |
no_license
|
<?php
/* Create the Linear model -by Kaustubh B at March 28 2019 */
include_once ("Neuron.php");
//ini_set('memory_limit', '-1');
/**
 * A minimal multi-layer feed-forward model built from Neuron objects
 * (see Neuron.php). Layers are appended with add_layer(); train() and
 * predict() walk the layers in order, feeding later layers the previous
 * layer's outputs.
 */
class Linear_Model
{
    /** Layers of Neuron objects; effectively layer x neuron (a 3D structure with weights). */
    public $model=array();
    //this mat is a 3D matrix ...

    /**
     * Append a layer of $neuron_count neurons to the model.
     *
     * @param mixed  $x_json         training samples handed to each Neuron constructor
     * @param int    $default_weight initial weight for every neuron
     * @param int    $neuron_count   number of neurons in this layer
     * @param string $activation     activation function name, e.g. 'sigmoid'
     */
    function add_layer($x_json,$default_weight=0,$neuron_count=1,$activation='sigmoid'){
        $layer=array();
        $ctr=0;
        for($i=0;$i<$neuron_count;$i++)
        {
            // Each neuron is told its index within the layer via $ctr.
            array_push($layer,new Neuron($x_json,$default_weight,$ctr,$activation));
            $ctr++;
        }
        array_push($this->model,$layer);
    }

    /**
     * Train every layer for $steps iterations over the sample set.
     * $ctr selects which training sample each iteration works on.
     *
     * @param mixed $x_json feature samples (objects with an ->input list)
     * @param mixed $y_json expected outputs (objects with an ->output value)
     * @param int   $steps  number of training iterations
     */
    public function train($x_json,$y_json,$steps){
        //steps ctr epochs will come here and we take the corrosponding index steps..
        for($step=0;$step<$steps;$step++){
            $ctr=$step;
            for ($i=0;$i<sizeof($this->model);$i++){
                $layer=$this->model[$i];
                if($i==0){
                    //input layer neurons will take the x_json as input ...
                    $arr=array();
                    $obj=new stdClass();
                    $obj->input=array();
                    for($j=0;$j<sizeof($layer);$j++){
                        // Input-layer neurons learn for 60 epochs per step.
                        array_push($obj->input,$layer[$j]->learn($x_json,$y_json,60,$ctr));
                    }
                    array_push($arr,$obj);
                }else{
                    // NOTE(review): with $ctr = $step this condition is never
                    // true ($step < $step); confirm the intended index logic.
                    if($step<$ctr){
                        $ctr=$step-$ctr;
                    }
                    $obj2=$arr;
                    // NOTE(review): $obj is created as an *array* here but then
                    // used with object syntax ($obj->input); the input-layer
                    // branch uses stdClass instead — confirm this mismatch.
                    $obj=array();
                    $obj->input=array();
                    for($j=0;$j<sizeof($layer);$j++){
                        $neuron=$layer[$j];
                        //debug input and output json ... remaining things are okay
                        // var_dump($obj2);
                        // echo "----------";
                        // var_dump($y_json[$ctr]);
                        // echo "---------->>";
                        // var_dump($ctr);
                        // NOTE(review): $arrX is built but never used — dead code?
                        $arrX=array();
                        array_push($arrX,$y_json[$ctr]);
                        // Later layers learn for 10 epochs per step.
                        array_push($obj->input,$neuron->learn($x_json, $y_json,10,$ctr));
                    }
                }
            }
        }
    }

    /**
     * Forward-propagate $x_json through all layers.
     *
     * @param mixed $x_json feature samples (objects with an ->input list)
     * @return array list of stdClass objects whose ->input holds the final layer's outputs
     */
    public function predict($x_json){
        //steps ctr epochs will come here and we take the corrosponding index steps..
        for ($i=0;$i<sizeof($this->model);$i++){
            $layer=$this->model[$i];
            if($i==0){
                //input layer neurons will take the x_json as input ...
                $arr=array();
                $obj=new stdClass();
                $obj->input=array();
                for($j=0;$j<sizeof($layer);$j++){
                    array_push($obj->input,$layer[$j]->predict($x_json));
                }
                array_push($arr,$obj);
            }else{
                // Later layers consume the previous layer's outputs ($obj2).
                $obj2=$arr;
                $arr=array();
                $obj=new stdClass();
                $obj->input=array();
                for($j=0;$j<sizeof($layer);$j++){
                    $neuron=$layer[$j];
                    //debug input and output json ... remaining things are okay
                    array_push($obj->input,$neuron->predict($obj2));
                }
                array_push($arr,$obj);
            }
        }
        return $arr;
    }
}
// ---- Demo / smoke test -------------------------------------------------
// Training samples: two binary features per sample.
$x_json=json_decode( '[
{"input": [0,0]},
{"input": [1,0]},
{"input": [0,0]},
{"input": [0,1]},
{"input": [0,0]},
{"input": [1,1]}
]');
// Expected outputs: 1 whenever either input bit is set (OR-like labels).
$y_json=json_decode('[
{"output": 0},
{"output": 1},
{"output": 0},
{"output": 1},
{"output": 0},
{"output": 1}
]');
// Single-sample inputs used for the predictions below.
$mat=json_decode('[
{"input": [0]}
]');
$mat1=json_decode('[
{"input": [1]}
]');
$mat2=json_decode('[
{"input": [1]}
]');
$x1_json=json_decode( '[
{"input": [0,0]}
]');
$x2_json=json_decode( '[
{"input": [0,0]}
]');
// Build a 2-neuron sigmoid layer followed by a 1-neuron sigmoid output layer.
$L1=new Linear_Model();
$L1->add_layer($x_json,0,2,'sigmoid');
$L1->add_layer($x2_json,0,1,'sigmoid');
//Pass train data set...
$L1->train($x_json,$y_json,20);
// Run three predictions and dump the raw layer outputs.
$check=$L1->predict($mat);
var_dump($check);
$check=$L1->predict($mat1);
var_dump($check);
$check=$L1->predict($mat2);
var_dump($check);
/*
$check=$L1->predict($mat);
if($check[0][1][0]>0.500){
echo("Yes");
}else{
echo("No");
}
die($check[0][1][0]);
*/
|
Markdown
|
UTF-8
| 5,857 | 3.09375 | 3 |
[
"MIT"
] |
permissive
|
# Development Strategy
> `Acme-Web-Design`
This website will be developed for the Acme Web Design Company as a company homepage.
write a short description of your project:
- who would want to use it?
- why would they want to use it?
## Wireframe
<!-- include a wireframe for your project in this repository, and display it here -->
<!-- wireframe.cc is a good site for getting started with wireframes -->

## 0. Set-Up
__A User can see my initial repository and live demo__
### Repo
- Generate from Template
- Write initial, basic README
- Turn on GitHub Pages
## 1. User Story: Header
__As a user I want to see company logo and a navigation menu at the top of the page.__
### Repo
1. This user story was developed on a branch named `1-header`.
2. It was merged to `master` when the feature was finished.
### HTML
- A header element with company logo.
- A nav element which contains a list of navigation menu items.
### CSS
- Layout styling for the header.
- Setting up background color.
- Positioning company logo and navigation items.
- Setting up text size and color.
## 2. User Story: Homepage Showcase Section
__As a user I want to see a section on homepage which contains a title and a short explanation.__
### Repo
1. This user story developed on a branch named `2-homepage-showcase-section`.
2. It was merged to master when the feature was finished.
### HTML
- A section element.
- A div element containing title and description paragraph.
- An h1 element with page title.
- A p element with brief info.
### CSS
- Layout styling for the section element.
- Layout styling fot the container div.
- Setting up background image.
- Text styling for h1 and p elements.
## 3. User Story: Homepage Newsletter Section
__As a user I want to see a form to be able to subscribe for the company newsletter.__
### Repo
1. This user story developed on a branch named `3-homepage-newsletter-section`.
2. It was merged to master when the feature was finished.
### HTML
- A section element.
- A div element containing CTA message and newsletter form.
- An h1 element for CTA.
- A form element with email input and button.
### CSS
- Layout styling for section element.
- Layout styling for container div.
- Setting up background color.
- Text styling for h1.
- Styling of form elements.
## 4. User Story: Homepage Boxes Section
__As a user I want to see services provided by the company and short description for each one on the homepage.__
### Repo
1. This user story developed on a branch named `4-homepage-boxes-section`.
2. It was merged to master when the feature was finished.
### HTML
- A section element.
- A container div to keep all boxes.
- Three divs with class of box to contain images, headings and descriptions of each service.
- img, h1 and p tags for each service.
### CSS
- Layout styling for section element.
- Layout styling for divs with class of box.
- Styling for images.
## 5. User Story: Footer
__As a user I want to see footer with copyright information.__
### Repo
1. This user story developed on a branch named `5-footer`.
2. It was merged to master when the feature was finished.
### HTML
- A footer element.
- A p tag with basic copyright info.
### CSS
- Layout styling for footer element.
- Setting up background color and text styling.
## 6. User Story: About Page
__As a user I want to see an about page in which I can find information about the company__
### Repo
1. This user story developed on a branch named `6-about-page`.
2. It was merged to master when the feature was finished.
### HTML
- A section element and a container div for the main content.
- An article element containing an h1 tag and p tag.
- An aside element containing an h1 and p tag for additional info.
### CSS
- Layout styling for section element.
- Layout styling for article and aside elements.
## 7. User Story: Services Page, Services and Prices
__As a user I want to see a list of services and prices.__
### Repo
1. This user story developed on a branch named `7-services-page-price-list`.
2. It was merged to master when the feature was finished.
### HTML
- A section element and a container div.
- An article element containing an ul tag and one li tag for each service.
- h1 tags for each service name and p tags for description and pricing info.
### CSS
- Layout styling for section element.
- Styling for price list items.
## 8. User Story: Services Page Quote Form
__As a user I want to see a form on services page so I can ask for a quote.__
### Repo
1. This user story developed on a branch named `8-services-page-quote-form`.
2. It was merged to master when the feature was finished.
### HTML
- An aside element to contain quote form.
- A form tag containing input areas for name and email and a text area for the explanation of the services needed.
### CSS
- Layout styling for aside and form elements.
- Styling for input tags.
## 9. User Story: Media Queries
__As a user I want to see a responsive website that displays well on different devices and screen sizes__
### Repo
1. This user story developed on a branch named `9-media-queries`.
2. It was merged to master when the feature was finished.
### CSS
- Adding media queries for responsive display.
## 10. User Story: Final Touches and Fixes
__Final touches and bug fixes__
### Repo
1. This user story developed on a branch named `10-final-touches`.
2. It was merged to master when the feature was finished.
----
## Finishing Touches
- Write final, complete README:
- [makeareadme.com](https://www.makeareadme.com/)
- [bulldogjob](https://bulldogjob.com/news/449-how-to-write-a-good-readme-for-your-github-project)
- [meakaakka](https://medium.com/@meakaakka/a-beginners-guide-to-writing-a-kickass-readme-7ac01da88ab3)
- Validate code to check for any last mistakes
|
Markdown
|
UTF-8
| 3,641 | 2.625 | 3 |
[] |
no_license
|
### 一名被控性骚扰的美国主教辞职
------------------------
<div class="published">
<span class="date" title="中国时间">
<time datetime="2018-09-14T06:56:44+08:00">
2018年9月14日 06:56
</time>
</span>
</div>
<br/>
<div class="wsw">
<span class="dateline">
华盛顿 —
</span>
<p>
罗马天主教教宗方济各在即将开始与美国天主教会领袖讨论美国教会存在大范围性侵的指称之际,接受了一名被控性骚扰成年人的美国主教的辞职。
</p>
<p>
就在美国天主教会高层领袖们即将与教宗方济各坐下来讨论性侵丑闻之前,西维吉尼亚州的主教迈克尔·布兰斯菲尔德辞职。他将面临梵蒂冈的调查。越演越烈的性侵丑闻震撼了好几个国家的教会。
</p>
<p>
星期四,智利当局突击搜查了四个教区,当局正在对神职人员被控性侵未成年人以及主教们掩盖性侵问题的指称进行调查,突击搜查是这项调查的一部分。当地媒体显示,调查人员收缴了文件之后离开教堂。
</p>
<p>
在罗马,美国教会领导人、枢机主教丹尼尔·迪纳尔多说,他和其他天主教领袖“向教宗方济各汇报了我们的局面---邪恶的性侵如何割伤耶稣的圣体。”
</p>
<p>
迪纳尔多说,教宗方济各“发自内心深处地倾听。这是一次长时间、有成果的良好交流。”但是他没有说美国教会领袖可能会采取什么步骤来解决性侵指称。
</p>
<p>
身为美国西南部德克萨斯州主教的迪纳尔多说,“在我们努力医治伤口之际,(教会领袖)一道祈求上帝赐予怜悯和力量。我们盼望着继续一道积极思索,找出接下来最为有效的步骤。”
</p>
<p>
在布兰斯菲尔德辞职的几个星期前,方济各解除了华盛顿总主教西奥多·麦卡里克的职务。媒体曝光说,麦卡里克曾经在1970年代性侵一名祭坛男孩,并性侵其他修道学生和年轻的神职人员。梵蒂冈的批评者说,方济各多年来就知道麦卡里克的事情,但没有当他面质问过性侵指称。
</p>
<p>
前美国驻梵蒂冈大使、总主教卡洛·玛丽亚·维加诺发表公开信说,教宗方济各迟迟不对麦克卡里采取行动,为此应该辞职。
</p>
<p>
就在枢机主教迪纳尔多与教宗方济各会面之际,有两人指控迪纳尔多在他领导的加尔维斯顿-休斯顿大主教区没有采取充分措施制止一名神父的性侵行为。这名神父本星期因性侵指称被逮捕。
</p>
<p>
其中一名指控者说,迪纳尔多向她保证会禁止那名神父接触任何未成年人,但她后来发现,他被调往112公里外的另一座教堂。这两名指责迪纳尔多无作为的人正在配合警方调查。
</p>
<p>
教宗方济各召集世界各地的主教在明年2月出席一次峰会,讨论针对神职人员和其他教会领袖的性侵指称以及教会领导人经常掩盖问题而不是披露真相并向司法当局报告的证据。
</p>
<p>
在美国东部的宾夕法尼亚州,一个大陪审团最近判定,在70年的时间里,有30多个教区的神职人员性侵了至少1千人。其他几个州的检察官也已立案展开类似调查。
</p>
</div>
原文链接:https://www.voachinese.com/a/us-bishop-resigned-charges-sex-abuse-20180913/4570821.html
------------------------
#### [禁闻聚合首页](https://github.com/gfw-breaker/banned-news/blob/master/README.md) | [一键翻墙软件](https://github.com/gfw-breaker/nogfw/blob/master/README.md)
|
Java
|
UTF-8
| 798 | 2.375 | 2 |
[] |
no_license
|
/*
* Created on 16.02.2007
*
* To change the template for this generated file go to
* Window>Preferences>Java>Code Generation>Code and Comments
*/
package ontologizer.dotwriter;
import ontologizer.go.TermID;
/**
 * Default (no-op) implementation of {@link IDotAttributesProvider}: both
 * hooks return {@code null}, i.e. contribute no extra dot attributes.
 * Subclasses override only the hook(s) they need when writing a dot graph.
 *
 * @author Sebastian Bauer
 */
public class AbstractDotAttributesProvider implements IDotAttributesProvider
{
	/**
	 * Returns the dot attributes for the given term.
	 *
	 * @param id the term to style
	 * @return dot attribute string, or {@code null} for none (the default)
	 */
	@Override
	public String getDotNodeAttributes(TermID id)
	{
		return null;
	}

	/**
	 * Returns the dot attributes for the given edge.
	 *
	 * @param id1 one endpoint of the edge
	 * @param id2 the other endpoint of the edge
	 * @return dot attribute string, or {@code null} for none (the default)
	 */
	@Override
	public String getDotEdgeAttributes(TermID id1, TermID id2)
	{
		return null;
	}
}
|
Python
|
UTF-8
| 1,875 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
import datetime

from whoosh.fields import TEXT
from whoosh.analysis import StemmingAnalyzer
from sqlalchemy.engine import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import Session
from sqlalchemy.schema import Column
from sqlalchemy.types import DateTime, Integer, Text, UnicodeText
from whooshsql.core import IndexSubscriber

# Demo: wire a Whoosh full-text index to a SQLAlchemy model via whooshsql,
# insert two posts, then run plain, score-ordered, and fuzzy searches.

# On memory sql lite
engine = create_engine('sqlite:///:memory:', echo=True)
session = Session(bind=engine)
Base = declarative_base()


# create a table with a more complex setup
class Post(Base):
    __tablename__ = 'post'

    # Fields Whoosh should index: title is boosted 2x over body, and both
    # use a stemming analyzer so e.g. "cities" matches "city".
    __searchable__ = {'title': TEXT(stored=True, field_boost=2.0, analyzer=StemmingAnalyzer()),
                      'body': TEXT(stored=True, field_boost=1.0, analyzer=StemmingAnalyzer())}

    id = Column(Integer, primary_key=True)
    title = Column(Text)
    body = Column(UnicodeText)
    # NOTE(review): utcnow() is *called* here, so every row shares the
    # import-time timestamp; `default=datetime.datetime.utcnow` (no parens)
    # is likely what was intended — confirm before relying on `created`.
    created = Column(DateTime, default=datetime.datetime.utcnow())

    def __repr__(self):
        return '{0}(title={1})'.format(self.__class__.__name__,
                                       self.title)


Base.metadata.create_all(engine)

# Subscribe the model so inserts/updates are mirrored into the Whoosh index
# stored under ./index.
index_subscriber = IndexSubscriber(session=session, whoosh_base_path='index')
index_subscriber.subscribe(Post)

p1 = Post(title='love barcelona', body='it is the best city in the world even before madrid!')
p2 = Post(title='love madrid', body='it is the second best city in the world after barcelona!')
session.add_all([p1, p2])
session.commit()

# normal search, this does not keep whoosh score
Post.whoosh.search('barcelona').all()
Post.whoosh.search('madrid').all()

# ordered result based on whoosh score
results = Post.whoosh.search_all_ordered('madrid')
results = Post.whoosh.search_all_ordered('barcelona')

# Fuzzy matching: "~2" allows up to two edits, so the typo still matches.
from whoosh.qparser import FuzzyTermPlugin
Post.whoosh.search_all_ordered('baarcelonaa~2', plugin=FuzzyTermPlugin())
|
C#
|
UTF-8
| 5,140 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Routing;
using Npgsql;
using Serilog;
using TourService.Entities;
using TourService.Extensions;
namespace TourService.Repository
{
public class LogRepository : ILogRepository
{
    // Shared connection injected by DI; each method opens and closes it
    // around its own command, so it must always be closed before returning.
    private readonly NpgsqlConnection _connection;
    private readonly ILogger _logger = Log.ForContext<LogRepository>();

    public LogRepository(NpgsqlConnection connection)
    {
        _connection = connection;
    }

    /// <summary>
    /// Loads every Log row belonging to the given route id.
    /// Returns an empty list (and logs the error) on failure.
    /// </summary>
    public async Task<List<LogEntity>> GetAllForRoute(int id)
    {
        try
        {
            await _connection.OpenAsync();
            var list = new List<LogEntity>();
            var sql = "SELECT * FROM Log WHERE route_id=@id";
            using var cmd = new NpgsqlCommand(sql, _connection);
            cmd.Parameters.AddWithValue("id", id);
            var reader = await cmd.ExecuteReaderAsync();
            while (reader.Read())
            {
                list.Add(reader.ToLogEntity());
            }
            await _connection.CloseAsync();
            return list;
        }
        catch (Exception e)
        {
            _logger.Error(e.Message);
            await _connection.CloseAsync();
            return new List<LogEntity>();
        }
    }

    /// <summary>Loads every Log row. Returns an empty list (and logs) on failure.</summary>
    public async Task<List<LogEntity>> GetAll()
    {
        try
        {
            await _connection.OpenAsync();
            var list = new List<LogEntity>();
            var sql = "SELECT * FROM Log";
            using var cmd = new NpgsqlCommand(sql, _connection);
            var reader = await cmd.ExecuteReaderAsync();
            while (reader.Read())
            {
                list.Add(reader.ToLogEntity());
            }
            await _connection.CloseAsync();
            return list;
        }
        catch (Exception e)
        {
            _logger.Error(e.Message);
            await _connection.CloseAsync();
            return new List<LogEntity>();
        }
    }

    /// <summary>
    /// Inserts (entity.Id <= 0) or upserts (existing id) a Log row and
    /// returns the row's id, or -1 on failure.
    /// </summary>
    public async Task<int> UpSert(LogEntity entity)
    {
        try
        {
            await _connection.OpenAsync();
            var sqlInsert =
                "INSERT INTO Log VALUES(DEFAULT,@StartDate,@EndDate,@Origin,@Destination,@Distance,@Rating,@Note,@MovementMode,@Mood,@BPM,@route_id) RETURNING id";
            var sqlUpdate =
                "INSERT INTO Log VALUES(@id,@StartDate,@EndDate,@Origin,@Destination,@Distance,@Rating,@Note,@MovementMode,@Mood,@BPM,@route_id) ON CONFLICT (id) DO UPDATE SET (startdate,enddate,origin,destination,distance,rating,note,movementmode,mood,bpm,route_id) = (@StartDate,@EndDate,@Origin,@Destination,@Distance,@Rating,@Note,@MovementMode,@Mood,@BPM,@route_id) RETURNING id";
            // Dispose the command deterministically (the original leaked it).
            using var cmd = entity.Id <= 0
                ? new NpgsqlCommand(sqlInsert, _connection)
                : new NpgsqlCommand(sqlUpdate, _connection);
            // Date and time-of-day are combined into a single timestamp.
            var startdate = entity.StartDate + entity.StartTime;
            var enddate = entity.EndDate + entity.EndTime;
            cmd.Parameters.AddWithValue("id", entity.Id);
            cmd.Parameters.AddWithValue("startdate", startdate);
            cmd.Parameters.AddWithValue("enddate", enddate);
            cmd.Parameters.AddWithValue("origin", entity.Origin);
            cmd.Parameters.AddWithValue("destination", entity.Destination);
            cmd.Parameters.AddWithValue("distance", entity.Distance);
            cmd.Parameters.AddWithValue("rating", entity.Rating);
            cmd.Parameters.AddWithValue("note", entity.Note ?? "");
            cmd.Parameters.AddWithValue("movementmode", (int) entity.MovementMode);
            cmd.Parameters.AddWithValue("mood", (int) entity.Mood);
            cmd.Parameters.AddWithValue("bpm", entity.BPM);
            cmd.Parameters.AddWithValue("route_id", entity.Route_id);
            var newId = (int) await cmd.ExecuteScalarAsync();
            // BUG FIX: the original returned while the shared connection was
            // still open, so the next repository call's OpenAsync() failed.
            await _connection.CloseAsync();
            return newId;
        }
        catch (Exception e)
        {
            _logger.Error(e.Message);
            await _connection.CloseAsync();
            return -1;
        }
    }

    /// <summary>Deletes the Log row with the given id; returns false on failure.</summary>
    public async Task<bool> Delete(int id)
    {
        try
        {
            await _connection.OpenAsync();
            var sql = "DELETE FROM Log where id=@id";
            using var cmd = new NpgsqlCommand(sql, _connection);
            cmd.Parameters.AddWithValue("id", id);
            // BUG FIX: use the async overload instead of blocking the thread
            // with the synchronous ExecuteNonQuery().
            await cmd.ExecuteNonQueryAsync();
            await _connection.CloseAsync();
            return true;
        }
        catch (Exception e)
        {
            _logger.Error(e.Message);
            await _connection.CloseAsync();
            return false;
        }
    }
}
}
|
C
|
UTF-8
| 494 | 3.125 | 3 |
[] |
no_license
|
#include <stdio.h>
#include <stdlib.h>
/* Entry point: currently an empty stub; leer() below is the only real code
 * in this file and has no visible caller here. */
int main()
{
}
/*
 * Read the file `nombre`, echoing each character to stdout one per line
 * (as the original did), and return its contents as a newly allocated,
 * NUL-terminated string of at most `espacio` - 1 characters. The caller
 * owns and must free() the returned buffer. Returns NULL when `espacio`
 * is 0, allocation fails, or the file cannot be opened.
 *
 * Fixes over the original: proper prototypes instead of implicit-int K&R
 * parameters; `caracter` is an int so the EOF comparison is reliable
 * (a plain char may be unsigned, making `!= EOF` always true); `ret` is
 * freed on the fopen() failure path instead of leaking; and the function
 * actually returns a value (it previously fell off the end of a char*
 * function, which is undefined behavior).
 */
char *leer(const char *nombre, size_t espacio)
{
    FILE *archivo;
    int caracter;            /* int, not char: fgetc() must be able to signal EOF */
    size_t usados = 0;
    char *ret;

    if (espacio == 0)        /* no room even for the terminating NUL */
        return NULL;
    ret = malloc(espacio);
    if (!ret)
        return NULL;
    archivo = fopen(nombre, "r");
    if (archivo == NULL)
    {
        free(ret);           /* the original leaked `ret` on this path */
        return NULL;
    }
    while ((caracter = fgetc(archivo)) != EOF)
    {
        printf("%c\n", caracter);
        if (usados + 1 < espacio)
            ret[usados++] = (char) caracter;
    }
    ret[usados] = '\0';
    fclose(archivo);
    return ret;
}
|
TypeScript
|
UTF-8
| 23,148 | 2.5625 | 3 |
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
/*!
* Copyright (c) Microsoft Corporation and contributors. All rights reserved.
* Licensed under the MIT License.
*/
import { ISequencedClient, ISequencedDocumentMessage } from "@fluidframework/protocol-definitions";
import { strict as assert } from "assert";
import { Quorum } from "../quorum";
describe("Quorum", () => {
let quorum: Quorum;
beforeEach(() => {
let clientSequenceNumber = 0;
quorum = new Quorum([], [], [], (key, value) => ++clientSequenceNumber);
});
describe("Proposal", () => {
it("Local proposal", async () => {
let resolved = false;
let evented = false;
let acceptanceState = "too early";
const proposalKey = "hello";
const proposalValue = "world";
const proposalSequenceNumber = 53;
const tooEarlyMessage = {
minimumSequenceNumber: 37,
sequenceNumber: 73,
} as ISequencedDocumentMessage;
const justRightMessage = {
minimumSequenceNumber: 64,
sequenceNumber: 79,
} as ISequencedDocumentMessage;
// Observe eventing. We expect a single event, with the correct values, to fire at the right time.
quorum.on(
"approveProposal",
(
sequenceNumber: number,
key: string,
value: any,
approvalSequenceNumber: number,
) => {
assert.strictEqual(evented, false, "Double event");
evented = true;
assert.strictEqual(
sequenceNumber,
proposalSequenceNumber,
"Unexpected proposal sequenceNumber",
);
assert.strictEqual(key, proposalKey, "Unexpected proposal key");
assert.strictEqual(value, proposalValue, "Unexpected proposal value");
assert.strictEqual(
approvalSequenceNumber,
justRightMessage.sequenceNumber,
"Approved on wrong sequence number",
);
},
);
// Proposal generates a promise that will resolve once the proposal is accepted
// This happens by advancing the msn past the sequence number of the proposal.
const proposalP = quorum.propose(proposalKey, proposalValue).then(() => {
resolved = true;
assert.strictEqual(
acceptanceState,
"just right",
".propose() promise resolved at wrong time",
);
});
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 1",
);
// Client sequence number will be 1 for this first proposal.
// The info must match the proposal we sent above.
quorum.addProposal(proposalKey, proposalValue, proposalSequenceNumber, true, 1);
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 2",
);
// This message does nothing since the msn is higher than the sequence number of the proposal.
quorum.updateMinimumSequenceNumber(tooEarlyMessage);
assert.strictEqual(evented, false, "Should not have evented yet 1");
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 3",
);
// Wait to see if the proposal promise resolved.
await Promise.resolve().then(() => {});
assert.strictEqual(evented, false, "Should not have evented yet 2");
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 4",
);
acceptanceState = "just right";
// This message accepts the proposal since the msn is higher than the sequence number of the proposal.
quorum.updateMinimumSequenceNumber(justRightMessage);
assert.strictEqual(evented, true, "Should have evented");
assert.strictEqual(
quorum.get(proposalKey),
proposalValue,
"Should have the proposal value",
);
// Wait to see if the proposal promise resolved.
await Promise.resolve().then(() => {});
// Due to the composition of Quorum -> QuorumProposals, we require one more microtask deferral to resolve.
await Promise.resolve().then(() => {});
// Acceptance should have happened before the above await resolves.
acceptanceState = "too late";
assert(resolved, ".propose() promise should have resolved");
await assert.doesNotReject(proposalP);
});
it("Remote proposal", async () => {
let evented = false;
const proposalKey = "hello";
const proposalValue = "world";
const proposalSequenceNumber = 53;
const tooEarlyMessage = {
minimumSequenceNumber: 37,
sequenceNumber: 73,
} as ISequencedDocumentMessage;
const justRightMessage = {
minimumSequenceNumber: 64,
sequenceNumber: 79,
} as ISequencedDocumentMessage;
// Observe eventing. We expect a single event, with the correct values, to fire at the right time.
quorum.on(
"approveProposal",
(
sequenceNumber: number,
key: string,
value: any,
approvalSequenceNumber: number,
) => {
assert.strictEqual(evented, false, "Double event");
evented = true;
assert.strictEqual(
sequenceNumber,
proposalSequenceNumber,
"Unexpected proposal sequenceNumber",
);
assert.strictEqual(key, proposalKey, "Unexpected proposal key");
assert.strictEqual(value, proposalValue, "Unexpected proposal value");
assert.strictEqual(
approvalSequenceNumber,
justRightMessage.sequenceNumber,
"Approved on wrong sequence number",
);
},
);
// Client sequence number shouldn't matter for remote proposals.
quorum.addProposal(proposalKey, proposalValue, proposalSequenceNumber, false, -5);
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 1",
);
// This message does nothing since the msn is higher than the sequence number of the proposal.
quorum.updateMinimumSequenceNumber(tooEarlyMessage);
assert.strictEqual(evented, false, "Should not have evented yet 1");
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 2",
);
// Wait to see if any async stuff is waiting (shouldn't be).
await Promise.resolve().then(() => {});
assert.strictEqual(evented, false, "Should not have evented yet 2");
assert.strictEqual(
quorum.get(proposalKey),
undefined,
"Should not have the proposal value yet 3",
);
// This message accepts the proposal since the msn is higher than the sequence number of the proposal.
quorum.updateMinimumSequenceNumber(justRightMessage);
assert.strictEqual(evented, true, "Should have evented");
assert.strictEqual(
quorum.get(proposalKey),
proposalValue,
"Should have the proposal value",
);
// Wait to see if any async stuff is waiting (shouldn't be).
await Promise.resolve().then(() => {});
});
// Verifies remote-overwrite semantics: a remote proposal for the same key,
// sequenced after a pending local proposal, supersedes it.  The local
// propose() promise still resolves when the local proposal is approved, and
// quorum.get() transitions undefined -> local value -> remote value as the
// MSN advances past each proposal.
it("Remote client overwrite", async () => {
    let resolved = false;
    let rejected = false;
    const proposalKey = "hello";
    const localProposalValue = "world";
    const remoteProposalValue = "mars";
    const localProposalSequenceNumber = 53;
    const remoteProposalSequenceNumber = 68;
    const approveLocalProposalMessage = {
        minimumSequenceNumber: 64,
        sequenceNumber: 79,
    } as ISequencedDocumentMessage;
    const approveRemoteProposalMessage = {
        minimumSequenceNumber: 72,
        sequenceNumber: 84,
    } as ISequencedDocumentMessage;
    // This test is going to have a remote proposal overwrite the local proposal before the local proposal
    // is approved. The promise will still resolve and the value will reflect the local proposal in the
    // window between the approval of the local proposal and the remote proposal.
    const proposalP = quorum
        .propose(proposalKey, localProposalValue)
        .then(() => {
            resolved = true;
        })
        .catch(() => {
            rejected = true;
        });
    quorum.addProposal(
        proposalKey,
        localProposalValue,
        localProposalSequenceNumber,
        true,
        1,
    );
    // Client sequence number shouldn't matter for remote proposals.
    quorum.addProposal(
        proposalKey,
        remoteProposalValue,
        remoteProposalSequenceNumber,
        false,
        -5,
    );
    // Wait to see if the proposal promise settled.
    await Promise.resolve().then(() => {});
    // Due to the composition of Quorum -> QuorumProposals, we require one more microtask deferral to resolve.
    await Promise.resolve().then(() => {});
    // Stage 1: nothing approved yet — promise pending, no value visible.
    assert.strictEqual(resolved, false, "Stage 1, Resolved");
    assert.strictEqual(rejected, false, "Stage 1, Rejected");
    assert.strictEqual(quorum.get(proposalKey), undefined, "Stage 1, Value");
    quorum.updateMinimumSequenceNumber(approveLocalProposalMessage);
    // Wait to see if the proposal promise settled.
    await Promise.resolve().then(() => {});
    // Due to the composition of Quorum -> QuorumProposals, we require one more microtask deferral to resolve.
    await Promise.resolve().then(() => {});
    // Stage 2: local proposal approved — promise resolved, local value visible.
    assert.strictEqual(resolved, true, "Stage 2, Resolved");
    assert.strictEqual(rejected, false, "Stage 2, Rejected");
    assert.strictEqual(quorum.get(proposalKey), localProposalValue, "Stage 2, Value");
    quorum.updateMinimumSequenceNumber(approveRemoteProposalMessage);
    // Wait to see if the proposal promise settled.
    await Promise.resolve().then(() => {});
    // Due to the composition of Quorum -> QuorumProposals, we require one more microtask deferral to resolve.
    await Promise.resolve().then(() => {});
    // Stage 3: remote proposal approved — its value overwrites the local one.
    assert.strictEqual(resolved, true, "Stage 3, Resolved")
    assert.strictEqual(rejected, false, "Stage 3, Rejected");
    assert.strictEqual(quorum.get(proposalKey), remoteProposalValue, "Stage 3, Value");
    // Backstop to ensure the promise is settled.
    await proposalP;
});
// Covers the propose() promise lifecycle across a disconnect/reconnect cycle:
// proposals ack'd before reconnection survive (and later resolve on approval),
// while proposals never ack'd are rejected when the client reconnects.
describe("Disconnected handling", () => {
    it("Settling propose() promise after disconnect/reconnect", async () => {
        const proposal1 = {
            key: "one",
            value: "uno",
            sequenceNumber: 53,
            resolved: false,
            rejected: false,
        };
        const proposal2 = {
            key: "two",
            value: "dos",
            sequenceNumber: 68,
            resolved: false,
            rejected: false,
        };
        const proposal3 = {
            key: "three",
            value: "tres",
            sequenceNumber: 92,
            resolved: false,
            rejected: false,
        };
        const messageApproving1 = {
            minimumSequenceNumber: 61,
            sequenceNumber: 64,
        } as ISequencedDocumentMessage;
        const messageApproving2 = {
            minimumSequenceNumber: 77,
            sequenceNumber: 82,
        } as ISequencedDocumentMessage;
        // Proposal 3 shouldn't actually get approved, but we will test that.
        const messageApproving3 = {
            minimumSequenceNumber: 98,
            sequenceNumber: 107,
        } as ISequencedDocumentMessage;
        // Testing three scenarios:
        // - Proposal 1 will be ack'd and approved before reconnection
        // - Proposal 2 will be ack'd before reconnection, and then approved after reconnection
        // - Proposal 3 will not be ack'd before reconnection, and so should reject.
        const proposal1P = quorum
            .propose(proposal1.key, proposal1.value)
            .then(() => {
                proposal1.resolved = true;
            })
            .catch(() => {
                proposal1.rejected = true;
            });
        const proposal2P = quorum
            .propose(proposal2.key, proposal2.value)
            .then(() => {
                proposal2.resolved = true;
            })
            .catch(() => {
                proposal2.rejected = true;
            });
        const proposal3P = quorum
            .propose(proposal3.key, proposal3.value)
            .then(() => {
                proposal3.resolved = true;
            })
            .catch(() => {
                proposal3.rejected = true;
            });
        quorum.setConnectionState(false);
        // Wait to make sure the proposal promises have not settled from the disconnect.
        await Promise.resolve().then(() => {});
        // Due to the composition of Quorum -> QuorumProposals,
        // we require one more microtask deferral to resolve.
        await Promise.resolve().then(() => {});
        // Stage 1: disconnected — nothing should settle yet.
        assert.strictEqual(proposal1.resolved, false, "Stage 1, Prop 1, Resolved");
        assert.strictEqual(proposal1.rejected, false, "Stage 1, Prop 1, Rejected");
        assert.strictEqual(proposal2.resolved, false, "Stage 1, Prop 2, Resolved");
        assert.strictEqual(proposal2.rejected, false, "Stage 1, Prop 2, Rejected");
        assert.strictEqual(proposal3.resolved, false, "Stage 1, Prop 3, Resolved");
        assert.strictEqual(proposal3.rejected, false, "Stage 1, Prop 3, Rejected");
        // Now we're simulating "connecting" state, where we will see the ack's for proposals 1 and 2
        // And also we'll advance the MSN past proposal 1
        quorum.addProposal(
            proposal1.key,
            proposal1.value,
            proposal1.sequenceNumber,
            true,
            1,
        );
        quorum.updateMinimumSequenceNumber(messageApproving1);
        quorum.addProposal(
            proposal2.key,
            proposal2.value,
            proposal2.sequenceNumber,
            true,
            2,
        );
        // Now we'll simulate the transition to connected state
        quorum.setConnectionState(true);
        // Wait to make sure the proposal promises have settled in the manner we expect.
        await Promise.resolve().then(() => {});
        // Due to the composition of Quorum -> QuorumProposals,
        // we require one more microtask deferral to resolve.
        await Promise.resolve().then(() => {});
        // Stage 2: reconnected — prop 1 approved, prop 2 still pending, prop 3 rejected.
        assert.strictEqual(proposal1.resolved, true, "Stage 2, Prop 1, Resolved");
        assert.strictEqual(proposal1.rejected, false, "Stage 2, Prop 1, Rejected");
        assert.strictEqual(proposal2.resolved, false, "Stage 2, Prop 2, Resolved");
        assert.strictEqual(proposal2.rejected, false, "Stage 2, Prop 2, Rejected");
        assert.strictEqual(proposal3.resolved, false, "Stage 2, Prop 3, Resolved");
        assert.strictEqual(proposal3.rejected, true, "Stage 2, Prop 3, Rejected");
        // Verify the quorum holds the data we expect.
        assert.strictEqual(quorum.get(proposal1.key), proposal1.value, "Value 1 missing");
        assert.strictEqual(quorum.get(proposal2.key), undefined, "Unexpected value 2");
        assert.strictEqual(quorum.get(proposal3.key), undefined, "Unexpected value 3");
        // Now advance the MSN past proposal 2
        quorum.updateMinimumSequenceNumber(messageApproving2);
        // Wait to make sure the proposal promises have settled in the manner we expect.
        await Promise.resolve().then(() => {});
        // Due to the composition of Quorum -> QuorumProposals,
        // we require one more microtask deferral to resolve.
        await Promise.resolve().then(() => {});
        // Stage 3: prop 2 now approved as well.
        assert.strictEqual(proposal1.resolved, true, "Stage 3, Prop 1, Resolved");
        assert.strictEqual(proposal1.rejected, false, "Stage 3, Prop 1, Rejected");
        assert.strictEqual(proposal2.resolved, true, "Stage 3, Prop 2, Resolved");
        assert.strictEqual(proposal2.rejected, false, "Stage 3, Prop 2, Rejected");
        assert.strictEqual(proposal3.resolved, false, "Stage 3, Prop 3, Resolved");
        assert.strictEqual(proposal3.rejected, true, "Stage 3, Prop 3, Rejected");
        // Verify the quorum holds the data we expect.
        assert.strictEqual(quorum.get(proposal1.key), proposal1.value, "Value 1 missing");
        assert.strictEqual(quorum.get(proposal2.key), proposal2.value, "Value 2 missing");
        assert.strictEqual(quorum.get(proposal3.key), undefined, "Unexpected value 3");
        // Now advance the MSN past proposal 3 (this should have no real effect)
        quorum.updateMinimumSequenceNumber(messageApproving3);
        // Wait to make sure the proposal promises have settled in the manner we expect.
        await Promise.resolve().then(() => {});
        // Due to the composition of Quorum -> QuorumProposals,
        // we require one more microtask deferral to resolve.
        await Promise.resolve().then(() => {});
        // Stage 4: unchanged — the rejected proposal 3 never becomes a value.
        assert.strictEqual(proposal1.resolved, true, "Stage 4, Prop 1, Resolved");
        assert.strictEqual(proposal1.rejected, false, "Stage 4, Prop 1, Rejected");
        assert.strictEqual(proposal2.resolved, true, "Stage 4, Prop 2, Resolved");
        assert.strictEqual(proposal2.rejected, false, "Stage 4, Prop 2, Rejected");
        assert.strictEqual(proposal3.resolved, false, "Stage 4, Prop 3, Resolved");
        assert.strictEqual(proposal3.rejected, true, "Stage 4, Prop 3, Rejected");
        // Verify the quorum holds the data we expect.
        assert.strictEqual(quorum.get(proposal1.key), proposal1.value, "Value 1 missing");
        assert.strictEqual(quorum.get(proposal2.key), proposal2.value, "Value 2 missing");
        assert.strictEqual(quorum.get(proposal3.key), undefined, "Unexpected value 3");
        // Backstop to ensure the promises are settled.
        await Promise.all([proposal1P, proposal2P, proposal3P]);
    });
});
// Verifies quorum.snapshot() is a stable point-in-time copy: once taken, later
// approvals and new proposals must not mutate it.
// NOTE: the describe callback was previously `async`; Mocha requires describe
// callbacks to be synchronous (any returned promise is ignored), so the
// `async` keyword has been removed.  The inner `it` remains async.
describe("Snapshot", () => {
    it("Produces the expected stable snapshot", async () => {
        const proposal1 = {
            key: "one",
            value: "uno",
            sequenceNumber: 53,
            resolved: false,
            rejected: false,
        };
        const proposal2 = {
            key: "two",
            value: "dos",
            sequenceNumber: 68,
            resolved: false,
            rejected: false,
        };
        const proposal3 = {
            key: "three",
            value: "tres",
            sequenceNumber: 92,
            resolved: false,
            rejected: false,
        };
        const messageApproving1 = {
            minimumSequenceNumber: 61,
            sequenceNumber: 64,
        } as ISequencedDocumentMessage;
        const messageApproving2 = {
            minimumSequenceNumber: 77,
            sequenceNumber: 82,
        } as ISequencedDocumentMessage;
        const messageApproving3 = {
            minimumSequenceNumber: 98,
            sequenceNumber: 107,
        } as ISequencedDocumentMessage;
        // In this test, we'll take the snapshot after proposal 1 has been accepted but not proposal 2
        const proposal1P = quorum
            .propose(proposal1.key, proposal1.value)
            .then(() => {
                proposal1.resolved = true;
            })
            .catch(() => {
                proposal1.rejected = true;
            });
        const proposal2P = quorum
            .propose(proposal2.key, proposal2.value)
            .then(() => {
                proposal2.resolved = true;
            })
            .catch(() => {
                proposal2.rejected = true;
            });
        const proposal3P = quorum
            .propose(proposal3.key, proposal3.value)
            .then(() => {
                proposal3.resolved = true;
            })
            .catch(() => {
                proposal3.rejected = true;
            });
        quorum.addProposal(
            proposal1.key,
            proposal1.value,
            proposal1.sequenceNumber,
            true,
            1,
        );
        quorum.addProposal(
            proposal2.key,
            proposal2.value,
            proposal2.sequenceNumber,
            true,
            2,
        );
        quorum.updateMinimumSequenceNumber(messageApproving1);
        const snapshot = quorum.snapshot();
        // Asserts the snapshot still reflects the moment it was taken:
        // exactly one pending proposal ("dos") and one accepted value ("uno").
        const verifyExpectedSnapshot = () => {
            assert.strictEqual(
                snapshot.proposals.length,
                1,
                "Should be exactly one proposal in the snapshot",
            );
            assert.strictEqual(
                snapshot.values.length,
                1,
                "Should be exactly one value in the snapshot",
            );
            assert.strictEqual(
                snapshot.proposals[0][1].value,
                "dos",
                "Proposed value should be 'dos'",
            );
            assert.strictEqual(
                snapshot.values[0][1].value,
                "uno",
                "Accepted value should be 'uno'",
            );
        };
        // Verify initial state of snapshot
        verifyExpectedSnapshot();
        // The snapshot we took should never change after we take it
        quorum.updateMinimumSequenceNumber(messageApproving2);
        verifyExpectedSnapshot();
        quorum.addProposal(
            proposal3.key,
            proposal3.value,
            proposal3.sequenceNumber,
            true,
            3,
        );
        verifyExpectedSnapshot();
        quorum.updateMinimumSequenceNumber(messageApproving3);
        verifyExpectedSnapshot();
        // Backstop to ensure the promises are settled.
        await Promise.all([proposal1P, proposal2P, proposal3P]);
    });
});
});
// Membership tracking: addMember/removeMember must update getMembers()/
// getMember() and fire "addMember"/"removeMember" events with the exact
// clientId/details that were registered.
describe("Members", () => {
    it("Add/remove members", () => {
        // Casting details because the contents don't really matter for this test.
        const client1Info = {
            clientId: "client1",
            details: "details1" as any as ISequencedClient,
        };
        const client2Info = {
            clientId: "client2",
            details: "details2" as any as ISequencedClient,
        };
        const unexpected = {
            clientId: "unexpectedId",
            details: "unexpectedDetails" as any as ISequencedClient,
        };
        // The event handlers below compare against these; pointing them at
        // `unexpected` makes any surprise event fail the assertions.
        let expectedAdd = unexpected;
        let expectedRemove = unexpected;
        let addCount = 0;
        let removeCount = 0;
        quorum.on("addMember", (clientId: string, details: ISequencedClient) => {
            assert.strictEqual(clientId, expectedAdd.clientId, "Unexpected client id added");
            assert.strictEqual(details, expectedAdd.details, "Unexpected client details added");
            addCount++;
        });
        quorum.on("removeMember", (clientId: string) => {
            assert.strictEqual(clientId, expectedRemove.clientId);
            removeCount++;
        });
        assert.strictEqual(quorum.getMembers().size, 0, "Should have no members to start");
        expectedAdd = client1Info;
        quorum.addMember(client1Info.clientId, client1Info.details);
        assert.strictEqual(addCount, 1, "Failed to event for add");
        assert.strictEqual(quorum.getMembers().size, 1, "Should have 1 member after add");
        assert.strictEqual(
            quorum.getMember(client1Info.clientId),
            client1Info.details,
            "Expecting client 1",
        );
        assert.strictEqual(
            quorum.getMember(client2Info.clientId),
            undefined,
            "Not expecting client 2",
        );
        expectedAdd = client2Info;
        quorum.addMember(client2Info.clientId, client2Info.details);
        assert.strictEqual(addCount, 2, "Failed to event for add");
        assert.strictEqual(
            quorum.getMembers().size,
            2,
            "Should have 2 members after second add",
        );
        assert.strictEqual(
            quorum.getMember(client1Info.clientId),
            client1Info.details,
            "Expecting client 1",
        );
        assert.strictEqual(
            quorum.getMember(client2Info.clientId),
            client2Info.details,
            "Expecting client 2",
        );
        expectedAdd = unexpected;
        expectedRemove = client1Info;
        quorum.removeMember(client1Info.clientId);
        assert.strictEqual(removeCount, 1, "Failed to event for remove");
        assert.strictEqual(quorum.getMembers().size, 1, "Should have 1 member after remove");
        assert.strictEqual(
            quorum.getMember(client1Info.clientId),
            undefined,
            "Not expecting client 1",
        );
        assert.strictEqual(
            quorum.getMember(client2Info.clientId),
            client2Info.details,
            "Expecting client 2",
        );
    });
});
// Verifies member snapshots are stable after being taken.
// NOTE: the describe callback was previously `async`; Mocha requires describe
// callbacks to be synchronous (any returned promise is ignored), so the
// `async` keyword has been removed.  The inner `it` is synchronous anyway.
describe("Snapshot", () => {
    it("Produces the expected stable snapshot", () => {
        // Casting details because the contents don't really matter for this test.
        const client1Info = {
            clientId: "client1",
            details: "details1" as any as ISequencedClient,
        };
        const client2Info = {
            clientId: "client2",
            details: "details2" as any as ISequencedClient,
        };
        quorum.addMember(client1Info.clientId, client1Info.details);
        const snapshot = quorum.snapshot();
        // Asserts the snapshot still reflects the moment it was taken:
        // exactly one member, and that member is client 1.
        const verifyExpectedSnapshot = () => {
            assert.strictEqual(
                snapshot.members.length,
                1,
                "Should be exactly 1 member in the snapshot",
            );
            assert.strictEqual(
                snapshot.members[0][0],
                client1Info.clientId,
                "Expecting client 1",
            );
        };
        // Verify initial state of snapshot
        verifyExpectedSnapshot();
        // The snapshot we took should never change after we take it
        quorum.addMember(client2Info.clientId, client2Info.details);
        verifyExpectedSnapshot();
        quorum.removeMember(client1Info.clientId);
        verifyExpectedSnapshot();
    });
});
});
|
Ruby
|
UTF-8
| 1,301 | 2.828125 | 3 |
[] |
no_license
|
# Test double for the serial firmware.  Each write() of a "!<cmd>:<param>\r\n"
# request pushes the firmware's reply onto an internal stream; readline() pops
# the most recently pushed reply (LIFO, matching the original implementation).
#
# Protocol simulated:
#   !a: / !l: / !t:  echo "<X>:<param>" for 16-bit params, else "<X>:NACK"
#   !c:              8-bit param; negative -> bare "NACK", overflow -> "C:NACK"
#   !g               bare acknowledge "G"
#   anything else    bare "NACK"
class MockFirmware
  # 16-bit commands share identical handling: request prefix => reply prefix.
  WORD_COMMANDS = { '!a:' => 'A', '!l:' => 'L', '!t:' => 'T' }.freeze

  def initialize
    @output_stream = []
  end

  # Mimics File/SerialPort.open but returns a shared singleton; the file
  # name and mode are accepted only for interface compatibility.
  def self.open(file, mode)
    @@mock ||= new
  end

  # Returns (and removes) the most recent queued reply.
  def readline
    @output_stream.pop
  end

  # Parses one command line and queues the firmware's reply.
  def write(command)
    entry = WORD_COMMANDS.find { |req, _| command.start_with?(req) }
    if entry
      reply = entry[1]
      number = get_parameter(command)
      if number.to_i >= 2**16
        @output_stream << "#{reply}:NACK\r\n"
      else
        @output_stream << "#{reply}:#{number}\r\n"
      end
    elsif command.start_with?('!c:')
      number = get_parameter(command)
      if number.to_i < 0
        @output_stream << "NACK\r\n"
      elsif number.to_i >= 2**8
        @output_stream << "C:NACK\r\n"
      else
        @output_stream << "C:#{number}\r\n"
      end
    elsif command =~ /^!g\W$/
      @output_stream << "G\r\n"
    else
      @output_stream << "NACK\r\n"
    end
  end

  # No-ops kept for IO-object interface compatibility.
  def close; end

  def flush; end

  # Extracts the text between the last ':' and the trailing CRLF.
  def get_parameter(command)
    command.gsub(/.*:(.*)\r\n/, '\1')
  end
end
|
C
|
UTF-8
| 576 | 3.765625 | 4 |
[] |
no_license
|
#include "holberton.h"
#include <stdio.h>
#include <stdlib.h>
/**
 * str_concat - concatenates two strings into a newly allocated buffer.
 * @s1: first string (treated as "" when NULL)
 * @s2: second string (treated as "" when NULL)
 *
 * The original wrote the final terminator at index len1 + len2 + 1, one byte
 * past the malloc'd block (heap buffer overflow); the terminator now lands
 * at index len1 + len2, the last byte of the allocation.
 *
 * Return: pointer to the new string, or NULL when malloc fails.
 */
char *str_concat(char *s1, char *s2)
{
	char *array;
	unsigned int len1 = 0, len2 = 0, k;

	if (s1 == NULL)
		s1 = "";
	if (s2 == NULL)
		s2 = "";
	while (s1[len1] != '\0')
		len1++;
	while (s2[len2] != '\0')
		len2++;
	array = malloc((len1 + len2 + 1) * sizeof(char));
	if (array == NULL)
		return (NULL);
	for (k = 0; k < len1; k++)
		array[k] = s1[k];
	for (k = 0; k < len2; k++)
		array[len1 + k] = s2[k];
	array[len1 + len2] = '\0'; /* terminator stays inside the allocation */
	return (array);
}
|
Markdown
|
UTF-8
| 1,118 | 2.953125 | 3 |
[] |
no_license
|
Single Page Portfolio Website
====================
This repository is a replica of my portfolio website. My current portfolio website is based off of Wordpress, and eventually would like to have it hosted without Wordpress. This Single Page website example is something I will be considering when taking my website off of Wordpress, due to its simplicity.
Using Jade and SASS (compiling to HTML and CSS), Gulp, and Javascript, I have created a near duplicate of my website.
Instructions on running this file
=====================
1. Download, and unzip to desktop
2. Open Terminal (or any program in which you will be able to run a local server)
3. Type cd Desktop (this will bring you to your Desktop)
4. Type cd singlepage-portfolio (this will bring you into the folder)
5. Run npm install (you will be required to type your password, password will not appear by default)
6. If you get errors, run sudo npm install
7. Once node is installed, run node server.js
8. Your server should now be running! Navigate to localhost:3000 in any browser
Thank you for checking this out!
=================================
|
Java
|
UTF-8
| 1,139 | 2.515625 | 3 |
[] |
no_license
|
package com.example.demo.web.api;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.Arrays;
import java.util.List;
@RestController
@RequestMapping("/city")
public class CityController {
@PreAuthorize("hasRole('ROLE_ADMIN')")
@GetMapping
public List<City> getCities(){
City moscow = new City(1, "Moscow");
City stpetersburg = new City(2, "St. Petersburg");
City belgorod = new City(2, "Belgorod");
return Arrays.asList(moscow,stpetersburg,belgorod);
}
@PreAuthorize("permitAll()")
@GetMapping("/default")
public City getDefaultCity(){
City city = new City(1,"Default city");
return city;
}
@Data
@NoArgsConstructor
@AllArgsConstructor
class City {
private Integer id;
private String name;
}
}
|
JavaScript
|
UTF-8
| 2,483 | 3 | 3 |
[] |
no_license
|
/**
 * Phaser player ship: handles movement input, the trailing rocket-flame
 * animation, and firing bullets from a pooled group.
 *
 * Depends on the global `Nakama.game` Phaser.Game instance.
 */
class PlayerController {
    /**
     * @param {number} x - initial world x position.
     * @param {number} y - initial world y position.
     * @param {string} spriteName - frame name within the 'assets' atlas.
     * @param {string} animateSprite - sprite key for the rocket-flame animation.
     * @param {Phaser.Group} bullets - pooled bullet group shared with the scene.
     * @param {object} configs - tuning values: playerSpeed, rocketSpeed,
     *     fireButton, cursors, audioShoot.
     */
    constructor(x, y, spriteName, animateSprite, bullets, configs) {
        // Player
        this.positionX = x;
        this.positionY = y;
        this.sprite = Nakama.game.add.sprite(x, y, 'assets', spriteName);
        this.sprite.anchor.setTo(0.5, 0.5);
        Nakama.game.physics.enable(this.sprite, Phaser.Physics.ARCADE);
        this.sprite.body.collideWorldBounds = true;
        // Configs
        this.configs = configs;
        // Rocket (flame) sprite trails the ship; anchor y of -0.5 offsets it
        // below the ship's center.
        this.spriteRocket = Nakama.game.add.sprite(x, y, animateSprite);
        this.spriteRocket.animations.add('run');
        this.spriteRocket.animations.play('run', this.configs.rocketSpeed, true);
        this.spriteRocket.anchor = new Phaser.Point(0.5, -0.5);
        // And some controls to play the game with
        this.fireButton = this.configs.fireButton;
        this.cursors = this.configs.cursors;
        // Timestamp before which firing is suppressed (rate limiting).
        this.bulletTime = 0;
        // Bullets
        this.bullets = bullets;
        // Audio
        this.audioShoot = Nakama.game.add.audio(this.configs.audioShoot);
    }

    /** Per-frame update: apply movement input, sync rocket, handle firing. */
    update() {
        // Reset the player, then check for movement keys
        this.sprite.body.velocity.setTo(0, 0);
        if (this.cursors.left.isDown)
        {
            this.sprite.body.velocity.x = -this.configs.playerSpeed;
        }
        else if (this.cursors.right.isDown)
        {
            this.sprite.body.velocity.x = this.configs.playerSpeed;
        }
        if (this.cursors.up.isDown)
        {
            this.sprite.body.velocity.y = -this.configs.playerSpeed;
        }
        else if (this.cursors.down.isDown)
        {
            this.sprite.body.velocity.y = this.configs.playerSpeed;
        }
        // Update position player's rocket
        this.spriteRocket.position = this.sprite.position;
        // Firing?
        if (this.fireButton.isDown)
        {
            this.fireBullet(this.bullets);
        }
    }

    /**
     * Fires one bullet from the pool (if available and off cooldown).
     * Bullet speed (-400) and cooldown (200 ms) are hard-coded here.
     * @param {Phaser.Group} bullets - bullet pool to draw from.
     */
    fireBullet(bullets) {
        // To avoid them being allowed to fire too fast we set a time limit
        if (Nakama.game.time.now > this.bulletTime)
        {
            // Grab the first bullet we can from the pool
            var bullet = bullets.getFirstExists(false);
            if (bullet)
            {
                // Play audio shooter
                this.audioShoot.play();
                // And fire it
                bullet.reset(this.sprite.x, this.sprite.y + 8);
                bullet.body.velocity.y = -400;
                this.bulletTime = Nakama.game.time.now + 200;
            }
        }
    }

    /** Restores the ship to its spawn position and revives both sprites. */
    reset() {
        this.sprite.position.x = this.positionX;
        this.sprite.position.y = this.positionY;
        this.bulletTime = 0;
        this.sprite.revive();
        this.spriteRocket.revive();
    }
}
|
Java
|
UTF-8
| 858 | 1.960938 | 2 |
[] |
no_license
|
package ru.yandex.tests;
import org.junit.Test;
import ru.yandex.core.UserInfo;
import ru.yandex.pages.MainPage;
import ru.yandex.qatools.allure.annotations.Title;
import static ru.yandex.core.UserAssistant.getTestUser;
/**
 * Created by kai on 07.02.2017.
 *
 * UI flow test: authenticates a test user on Yandex mail, navigates to
 * the passport page via the mail profile menu, verifies the profile data
 * displayed there, and logs the user out.
 */
@Title("Тесты на функциональность passport.yandex")
public class PassportTest extends TestBase {

    @Title("Проверка Логина на странице passport. Переход на passport через почту")
    @Test
    public void CheckPassportInfo(){
        UserInfo user = getTestUser();
        // Fluent page-object chain: each step returns the next page object.
        new MainPage().
                openMainPage().
                authUser(user).
                clickMailProfile().
                clickProfilePassport().
                checkPassportInfo(user).
                logoutUserFromPassport(user);
    }
}
|
PHP
|
UTF-8
| 964 | 2.59375 | 3 |
[] |
no_license
|
<?php
namespace masterix21\FatturaElettronica\Models\Body\DatiBeniServizi;
use masterix21\FatturaElettronica\Base\Model;
/**
 * Class DatiRiepilogo
 *
 * VAT summary node ("DatiRiepilogo") of the Italian electronic invoice body.
 *
 * @property float AliquotaIVA
 * @property string Natura
 * @property float SpeseAccessorie
 * @property float Arrotondamento
 * @property float ImponibileImporto
 * @property float Imposta
 * @property string EsigibilitaIVA
 * @property string RiferimentoNormativo
 * @package masterix21\FatturaElettronica\Models\Body\DatiBeniServizi\DettaglioLinee
 */
class DatiRiepilogo extends Model
{
    /** Property names exposed by this node. */
    protected $properties = [
        'AliquotaIVA',
        'Natura',
        'SpeseAccessorie',
        'Arrotondamento',
        'ImponibileImporto',
        'Imposta',
        'EsigibilitaIVA',
        'RiferimentoNormativo'
    ];

    /** Monetary/percentage fields coerced to float on assignment. */
    protected $casts = [
        'AliquotaIVA' => 'float',
        'SpeseAccessorie' => 'float',
        'Arrotondamento' => 'float',
        'ImponibileImporto' => 'float',
        'Imposta' => 'float',
    ];

    // Note: the previous constructor only called parent::__construct();
    // omitting it lets PHP invoke the parent constructor automatically,
    // which is behaviorally identical.
}
|
PHP
|
UTF-8
| 718 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
<?php
/**
* Created by PhpStorm.
* User: Igorut
* Date: 01.10.2017
* Time: 21:47
*/
namespace App\Buttons;
/**
 * Adds a form-driven staff lookup to its host class.
 *
 * The host class must provide getPersonal($count): array.
 */
trait ResponseButton
{
    /**
     * If the "get_staff" button was submitted with a non-empty staff count,
     * echoes the count and returns that many entries from getPersonal();
     * otherwise echoes a prompt and returns an empty array.
     *
     * Side effects: reads $_POST directly and echoes to the output buffer.
     *
     * @return array
     */
    public function checkGetResponse(): array
    {
        if (isset($_POST['get_staff']) && !empty($_POST['number_of_staff'])) {
            echo 'Выведено: ' . $_POST['number_of_staff'];
            return $this->getPersonal($_POST['number_of_staff']);
        }

        echo 'Введите сколько сотрудников необходимо вывести. <br>';
        return [];
    }
}
|
Markdown
|
UTF-8
| 2,520 | 2.84375 | 3 |
[] |
no_license
|
ECE281_Lab4
===========
##Lab4
#### ALU Simulation pictures
###### first 4 operations simulated

###### second 4 operations simulated

#### ALU Sim analysis
In order to make sure that my program worked, I went through and made sure the result was correct for each of the 8 functions. The top row of boxes is the function. The second row of boxes is the Data input. The third row of boxes is the Accumulator input. The final row of boxes is the Result output. Function "000" is AND. The inputs were correctly anded together for that function. Function "001" is NEG. The inputs were correctly put into 2's complement for that function. Function "010" is NOT. The inputs were correctly notted for that function. Function "011" is ROR. The inputs were correctly rotated right one space for that function. Function "100" is OR. The inputs were correctly ored together for that function. Function "101" is IN. The input was correctly stored into the result for that function. Function "110" is ADD. The inputs were correctly added together for this function. Function "111" is LD. All inputs were correctly loaded into the result for this function. After determining that my simulation was correct, it was determined that my program was correct.
#### ALU Sim debugging
The only real problem I had with the ALU sim portion of the lab was the ROR function. I could not figure out how to rotate right for the life of me. So I googled bit rotation and found out there was a built-in rotate_right function in the program that I could use. After that, my program was good.
#### Datapath Simulation pictures

#### Datapath Debugging
Was able to do this part of the lab fairly easily. The biggest problem was that the first time I ran through the simulation, my AeqZero and AlessZero were 'U" instead of '1' or '0'. I found out that the problem was in the instantiation of my ALU in the datapath program. I had it hooked up to the incorrect wires. Once I fixed the wiring, my simulation worked correctly, showing that the program I wrote was correct. I confirmed that my simulation was correct by comparing it piece by piece to the picture provided in the lab.
|
Java
|
UTF-8
| 2,304 | 2.15625 | 2 |
[] |
no_license
|
package com.psulccomsci.civilservicereviewer;
import android.content.pm.ActivityInfo;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import android.os.Bundle;
import android.widget.RadioGroup;
import android.widget.TextView;
import android.widget.Toast;
import androidx.appcompat.app.AppCompatActivity;
import java.util.ArrayList;
/**
 * Loads the six stored scores from the local SQLite database on creation
 * and exposes them (plus shared answer/compare lists) to other screens
 * through static accessors.
 */
public class scoreMain extends AppCompatActivity{

    SQLiteDatabase db;
    ScoreDBHelper scoreDBHelper;
    Cursor scoreCursor;
    String score, com; // `com` is unused here but kept for compatibility
    int i=1;

    // NOTE(review): static mutable collections shared across activity
    // instances; listscore is appended every time onCreate runs, so
    // re-creating this activity would duplicate entries — confirm intended.
    private static String[] list = new String[30];
    private static ArrayList<String> data = new ArrayList<String>(5);
    private static ArrayList<String> listscore = new ArrayList<String>(5);
    private static ArrayList<String> compare = new ArrayList<String>(5);
    private static ArrayList<Integer> selectedList = new ArrayList<Integer>(5);

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_LOCKED);
        super.onCreate(savedInstanceState);

        scoreDBHelper = new ScoreDBHelper(this);
        db = scoreDBHelper.getWritableDatabase();

        // Fetch scores with ids 1..6.  (The original issued an extra,
        // identical query for id 1 before this loop and discarded the
        // result; that redundant query has been removed.)
        for (i = 1; i < 7; i++) {
            scoreCursor = db.rawQuery(" SELECT "+ scoreDBHelper.col_2+" FROM "+ scoreDBHelper.table_scores + " where " +scoreDBHelper.scoreID
                    +" = "+i,null);
            scoreCursor.moveToFirst();
            score = scoreCursor.getString(0);
            scoreCursor.close(); // close each cursor instead of leaking it
            listscore.add(score);
        }
    }

    /** @return scores loaded from the database (ids 1..6, in order). */
    public static ArrayList<String> listscore()
    {
        return listscore;
    }

    /** @return shared answer list populated elsewhere. */
    public static ArrayList<String> list()
    {
        return data;
    }

    /** @return shared comparison-result list populated elsewhere. */
    public static ArrayList<String> result()
    {
        return compare;
    }

    @Override
    public void onBackPressed() {
        // Back navigation is disabled on this screen.
        Toast.makeText(getApplicationContext(),"Action not allowed",Toast.LENGTH_SHORT).show();
    }

    /** @return indices of selections made elsewhere. */
    public static ArrayList<Integer> selectedlist(){
        return selectedList;
    }
}
|
Java
|
WINDOWS-1252
| 1,187 | 2.34375 | 2 |
[] |
no_license
|
package controllers;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.swing.JFrame;
import javax.swing.JOptionPane;
import view.Loginwindow;
import view.teacherwindow;
/**
 * Authenticates a teacher against the remote MySQL database.  On a
 * credential match it opens the teacher window; otherwise it shows an
 * error dialog and returns to the login window.
 *
 * All JDBC resources are now closed via try-with-resources (the original
 * leaked the Connection, PreparedStatement and ResultSet).
 *
 * SECURITY NOTE(review): database credentials are hard-coded in the JDBC
 * URL and passwords are compared in plain text — should be externalized.
 */
public class teacherLogin {
    /**
     * @param user   teacher login name entered in the UI
     * @param passwd teacher password entered in the UI
     * @throws ClassNotFoundException if the MySQL driver is absent
     * @throws SQLException on any database error
     */
    public teacherLogin(String user, String passwd) throws ClassNotFoundException, SQLException {
        JFrame frame = new JFrame();
        Class.forName("com.mysql.jdbc.Driver");
        String url = "jdbc:mysql://182.61.5.233/choose_course?user=root&password=6369&characterEncoding=utf-8";
        String sql = "select tea_user,tea_passwd from teacher where tea_user=? and tea_passwd=?";
        try (Connection con = DriverManager.getConnection(url);
             PreparedStatement ptmt = con.prepareStatement(sql)) {
            ptmt.setString(1, user);
            ptmt.setString(2, passwd);
            try (ResultSet rs = ptmt.executeQuery()) {
                if (rs.next()) {
                    // Credentials matched: open the teacher window.
                    new teacherwindow(user);
                    frame.dispose();
                } else {
                    // No match: show the (mojibake) error text and re-show login.
                    JOptionPane.showMessageDialog(frame,"\nµ¼");
                    new Loginwindow();
                }
            }
        }
    }
}
|
PHP
|
UTF-8
| 1,027 | 2.640625 | 3 |
[] |
no_license
|
<?php
session_start();
// Thin wrapper around the built-in Exception class; exists only so the
// connection code can throw/catch a project-specific type.
class Exceptions extends Exception {
    public function __construct($exception) {
        parent::__construct($exception);
    }
}
/**
 * Minimal database helper using the legacy ext/mysql API.
 *
 * Bug fix: the original constructor assigned the UNDEFINED local variables
 * $connect/$select to $this->con/$this->sel, while the declared properties
 * were named $connect/$select — the properties are now named to match the
 * code that uses them and initialised explicitly.
 *
 * NOTE(review): the mysql_* extension is deprecated (removed in PHP 7);
 * migrating to mysqli/PDO is recommended but out of scope for this fix.
 */
class DB {
    /** @var resource|false|null MySQL link resource. */
    private $con;
    /** @var bool|null Result of mysql_select_db(). */
    private $sel;

    function __construct() {
        $this->con = null;
        $this->sel = null;
    }

    /**
     * Opens the connection and selects the database; on failure stores an
     * error message in $_SESSION['conn'] and redirects back to the form.
     */
    function connection_database() {
        define('DB_HOST', 'localhost');
        define('DB_USER', 'root'); // 'root', 'pr20'
        define('DB_PASS', ''); // '', 'pr20'
        define('DB_NAME', 'pr20'); // 'pr20', 'db_users'
        try {
            $this->con = mysql_connect(DB_HOST, DB_USER, DB_PASS);
            if (!$this->con) throw new Exceptions('Server error.');
            $this->sel = mysql_select_db(DB_NAME);
            mysql_query("SET NAMES `CP1251`");
            if (!$this->sel) throw new Exceptions('Server error.');
        } catch (Exceptions $ex) {
            $str = "<STYLE>.error { color: #FF0000; }</STYLE>";
            $str .= $ex->getMessage();
            $_SESSION['conn'] = $str;
            header("Location: ../form.php");
        }
    }
}
// Bootstrap: construct the helper and open the DB connection on every request.
$db = new DB();
$db->connection_database();
?>
|
C++
|
UTF-8
| 1,284 | 3.171875 | 3 |
[] |
no_license
|
#include<bits/stdc++.h>
using namespace std;
/* Philosopher states and the two pseudo-condition outcomes stored in self[]. */
#define hungry 100
#define eating 200
#define thinking 300
#define wait 400
#define signal 500

/* Seat indices; philosopher1 == 0, ..., philosopher5 == 4. */
enum {philosopher1, philosopher2, philosopher3, philosopher4, philosopher5};

/*
 * Dining-philosophers "monitor" (single-threaded simulation).
 * state[i] tracks each philosopher; self[i] records whether the last
 * Pickup() for i ended in a wait or a signal.
 *
 * Fix: the member functions previously had no return type, which is
 * ill-formed in standard C++ — they are now declared void.
 */
class monitor
{
	int state[5];
	int self[5];

public:
	/* Philosopher i becomes hungry and tries to start eating. */
	void Pickup(int i)
	{
		state[i] = hungry;
		std::cout << "philosopher " << i << " is hungry " << std::endl;
		test(i);
		if (state[i] != eating)
			self[i] = wait;
	}

	/* Philosopher i stops eating; both neighbours get a chance to eat. */
	void Putdown(int i)
	{
		state[i] = thinking;
		test((i + 1) % 5);
		test((i + 4) % 5);
	}

	/* Lets i eat iff i is hungry and neither neighbour is eating. */
	void test(int i)
	{
		if (state[(i + 1) % 5] != eating
		    && state[(i + 4) % 5] != eating
		    && state[i] == hungry)
		{
			std::cout << " philosopher " << (i + 1) % 5 << " and philosopher " << (i + 4) % 5 << " is not eating" << std::endl;
			state[i] = eating;
			std::cout << "philosopher " << i << " is eating" << std::endl;
			self[i] = signal;
		}
	}

	/* Puts every philosopher into the thinking state. */
	void init()
	{
		for (int i = 0; i <= 4; i++)
		{
			std::cout << "philosopher " << i << " is thinking " << std::endl;
			state[i] = thinking;
		}
	}
};
/* Demo driver: start all philosophers thinking, then have the first
 * philosopher (philosopher1 == index 0) attempt to pick up forks. */
int main()
{
	monitor m;
	m.init();
	m.Pickup(philosopher1);
	return 0;
}
|
C#
|
UTF-8
| 520 | 3.828125 | 4 |
[] |
no_license
|
using System;

/// <summary>
/// Reads an integer from the console and reports whether it is prime.
/// Bug fix: the original computed maxdivider as (int)Math.Sqrt(divider)
/// (i.e. sqrt(2) == 1), so the trial-division loop never executed and
/// EVERY input was reported prime; the bound is now Math.Sqrt(n).
/// </summary>
class IsPrime
{
    static void Main()
    {
        Console.Write("Enter value for n : ");
        int n = int.Parse(Console.ReadLine());
        Console.WriteLine("Is {0} primer? : {1}", n, CheckPrime(n));
    }

    // Trial division up to sqrt(n); numbers below 2 are not prime.
    private static bool CheckPrime(int n)
    {
        if (n < 2)
        {
            return false;
        }
        int maxDivider = (int)Math.Sqrt(n);
        for (int divider = 2; divider <= maxDivider; divider++)
        {
            if (n % divider == 0)
            {
                return false;
            }
        }
        return true;
    }
}
|
Python
|
UTF-8
| 1,779 | 4.59375 | 5 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 13:29:01 2021

Class on Strings

Tutorial/practice script: string concatenation, printing, operations,
slicing, console input, and if/elif/else exercises.  Several bare
expressions below are REPL-style examples with no effect when run as a
script; they are intentional.

@author: Maxi
"""
# Starting with strings
hi = "Hello there"
greetings = "hello"
name = "Eric"
print(hi+" "+name)

# Print string Example: print() with commas adds separators; '+' requires
# explicit str() conversion first.
x = 1
print(x)
x_str = str(x)
print("my fav num is", x, ".", "x =", x)
print("my fav num is " + x_str + ". " + "x = " + x_str) # removes space between each element.

# Operations with strings - concatenate strings
# (These bare expressions only display results in a REPL.)
"ab"+"cd" # concatenate
3*"eric" # sucesive concatenation
len("eric") # length
"eric"[1] # indexing
"eric"[1:3] # slicing

str1 = 'hello'
str2 = ','
str3 = 'world'
str4 = str1 + str3
str4[:-1]
str4[::-1]
#The three numbers in a string slice are
#Start (inclusive) - defaults to start of string
#End (exclusive) - defaults to end of string
#Step - defaults to 1
#When you see [ : : -1 ], the empty spaces mean that the default values are used. So the slice here is the whole string, start to end, with a step of -1, so it counts backwards.
#"helloworld" becomes "dlrowolleh".

# input/output: input() always returns a string; cast to int for numbers.
text = input("Type anything... ")
print(5*text)
num = int(input("Type a number... "))
print(5*num)

# Exercises from strings: basic conditionals.
if 6 > 7:
    print("Yep")
#------
if 6 > 7:
    print("Yep")
else:
    print("Nope")
#----
# String comparison is case-sensitive, so this hits the elif branch.
var = 'Panda'
if var == "panda":
    print("Cute!")
elif var == "Panda":
    print("Regal!")
else:
    print("Ugly...")
#
# Note: branches are checked in order, so 120 prints "Hot" even though
# the "REALLY HOT!" condition (temp > 100) is also true.
temp = 120
if temp > 85:
    print("Hot")
elif temp > 100:
    print("REALLY HOT!")
elif temp > 60:
    print("Comfortable")
else:
    print("Cold")
#
temp = 50
if temp > 85:
    print("Hot")
elif temp > 100:
    print("REALLY HOT!")
elif temp > 60:
    print("Comfortable")
else:
    print("Cold")
|
Java
|
UTF-8
| 2,706 | 2.78125 | 3 |
[] |
no_license
|
package game;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.sql.Connection;
import javax.swing.JButton;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JPasswordField;
import javax.swing.JTextField;
import com.DBConnection;
/**
 * Swing login window for the Break Ball game.
 *
 * <p>{@code main} opens a database connection for the score table, builds the
 * login form, and installs an instance of this class as the login button's
 * {@link ActionListener}. {@link #actionPerformed} looks up the entered
 * credentials and, on success, hides the login frame and opens the game
 * window.
 */
public class Main implements ActionListener {

    private static JLabel userLabel;
    private static JLabel passwordLabel;
    private static JTextField userText;
    private static JPasswordField passwordText;
    private static JButton button;
    // Sentinel meaning "no user entered yet"; overwritten on login attempts.
    private static String username = "Empty";
    private static String password;
    private static JFrame login;
    private static ScoreDAO s;

    public static void main(String[] args) throws Exception {
        // NOTE(review): database credentials are hard-coded in source control.
        // They should be loaded from configuration or the environment, and the
        // exposed password should be rotated.
        final String url = "game-database.c5vzyypaaxx4.us-east-1.rds.amazonaws.com";
        final String user = "admin";
        final String password = "Nintendo9182!#";
        Connection conn = null;
        try {
            conn = DBConnection.getConnection(url, user, password);
            System.out.println("Connection Works");
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("Connection Failed");
        }
        // NOTE(review): if the connection failed above, conn is still null here
        // and is handed to ScoreDAO unchecked — confirm ScoreDAO tolerates it.
        s = new ScoreDAO(conn);

        // Build the login form with absolute positioning (null layout).
        login = new JFrame();
        JPanel panel = new JPanel();
        login.add(panel);
        panel.setLayout(null);

        userLabel = new JLabel("Username");
        userLabel.setBounds(10, 20, 80, 25);
        panel.add(userLabel);
        userText = new JTextField(20);
        userText.setBounds(100, 20, 165, 25);
        panel.add(userText);

        passwordLabel = new JLabel("Password");
        passwordLabel.setBounds(10, 50, 80, 25);
        panel.add(passwordLabel);
        passwordText = new JPasswordField();
        passwordText.setBounds(100, 50, 165, 25);
        panel.add(passwordText);

        button = new JButton("Login");
        button.setBounds(10, 80, 80, 25);
        button.addActionListener(new Main());
        panel.add(button);

        login.setBounds(10, 10, 710, 610);
        login.setTitle("Login Page");
        login.setResizable(true);
        login.setVisible(true);
        login.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    }

    /**
     * Handles the login button: reads the entered credentials, queries the
     * score table, and on a non-sentinel username swaps to the game window.
     */
    @Override
    public void actionPerformed(ActionEvent arg0) {
        username = userText.getText();
        // getText() on JPasswordField is deprecated; getPassword() is the
        // supported accessor and yields the same characters.
        password = String.valueOf(passwordText.getPassword());
        try {
            s.select(username, password);
        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
        // BUG FIX: the original used reference comparison (username != "Empty"),
        // which is always true for strings returned by getText(); equals()
        // compares the actual characters.
        if (!"Empty".equals(username)) {
            login.setVisible(false);
            JFrame obj = new JFrame();
            Gameplay game = new Gameplay();
            obj.setBounds(10, 10, 710, 610);
            obj.setTitle("Break Ball");
            obj.setResizable(true);
            obj.setVisible(true);
            obj.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            obj.add(game);
        }
    }
}
|
C++
|
UTF-8
| 376 | 2.890625 | 3 |
[] |
no_license
|
#include "Student.h"
#define FILE_OUT_NAME "out.XML"
int main(int argc, char const* argv[])
{
std::ofstream fileout;
fileout.open(FILE_OUT_NAME);
Student a, b;
std::cout << "Enter Student A:\n";
a.input();
std::cout << "Enter Student B:\n";
b.input();
if (a.hasGreaterMark(b))
fileout << a;
else
fileout << b;
fileout.close();
return 0;
}
|
Python
|
UTF-8
| 107,867 | 2.578125 | 3 |
[] |
no_license
|
'''PredNet in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Conv2d(nn.Module):
    """Weight-tied 3x3 convolution used for both the feedforward pass
    (conv2d, optionally followed by 2x max-pool) and the feedback pass
    (optional 2x bilinear upsample followed by conv_transpose2d with the
    SAME weight tensor), so feedback approximately inverts feedforward.

    Args:
        inchan:  number of input channels of the feedforward direction.
        outchan: number of output channels of the feedforward direction.
        sample:  when True, feedforward downsamples by 2 and feedback
                 upsamples by 2.
    """

    def __init__(self, inchan, outchan, sample=False):
        super().__init__()
        self.kernel_size = 3
        # FIX: use the in-place, non-deprecated nn.init.xavier_normal_ on a
        # torch.empty tensor instead of the deprecated nn.init.xavier_normal
        # applied to the legacy uninitialized torch.Tensor(...) constructor.
        # The resulting parameter is statistically identical.
        self.weights = nn.Parameter(
            nn.init.xavier_normal_(
                torch.empty(outchan, inchan, self.kernel_size, self.kernel_size)),
            requires_grad=True)
        self.sample = sample
        if self.sample:
            self.Downsample = nn.MaxPool2d(kernel_size=2, stride=2)
            # align_corners=False is the framework default for bilinear mode;
            # stating it explicitly silences the deprecation warning without
            # changing behavior.
            self.Upsample = nn.Upsample(scale_factor=2, mode='bilinear',
                                        align_corners=False)

    def forward(self, x, feedforward=True):
        """Apply the layer in the requested direction.

        feedforward=True:  y = pool?(conv(x))   — spatial size preserved by
                           padding=1, then halved if self.sample.
        feedforward=False: y = conv_T(upsample?(x)) — the transposed conv
                           with the shared weights maps back to `inchan`
                           channels at the original spatial size.
        """
        if feedforward:
            x = F.conv2d(x, self.weights, stride=1, padding=1)
            if self.sample:
                x = self.Downsample(x)
        else:
            if self.sample:
                x = self.Upsample(x)
            x = F.conv_transpose2d(x, self.weights, stride=1, padding=1)
        return x
class PredictiveTied(nn.Module):
    """Predictive-coding style CNN with weight-tied feedforward/feedback
    convolutions (see Conv2d in this file).

    Each layer state xr[i] is repeatedly updated from a bottom-up error term
    (input minus the feedback reconstruction of the layer below) scaled by
    b0, and blended with the top-down prediction P[i] with weight a0.
    `cls` is the number of recurrent update sweeps; `layer_sel` picks which
    layer's states/errors are recorded; when `record` is True, forward
    returns (logits, error_list) instead of just logits.

    NOTE(review): the cls <= nlays path unrolls t = 1 explicitly, so it
    appears to assume cls >= 2 — confirm callers never pass cls < 2.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False):
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        # Feedforward layers (weights shared between conv and conv-transpose).
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate: a0 = top-down blend weight, b0 = error-correction gain.
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.1) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+0.1) for i in range(self.nlays)])
        # Linear layer (classifier head on the top layer's pooled features).
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel

    def forward(self, x):
        # Feedforward: xr holds the running state of each instantiated layer.
        xr = [F.relu(self.conv[0](x))]
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls <= self.nlays:
            # Fewer sweeps than layers: grow the stack one layer per step
            # while updating only the layers already instantiated.
            #stage 1
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            # Layer 0 is corrected by the reconstruction error against the input.
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                # Blend bottom-up correction with the top-down prediction P[0].
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                # Topmost instantiated layer: bottom-up correction only.
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](xr[t-2]-P[t-2])*b0)
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](xr[i-1]-P[i-1])*b0) + a0*P[i])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: keep growing, but only update a sliding window of
            # the top `cls` layers.
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](xr[i-1]-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](xr[t-2]-P[t-2])*b0)
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: all layers built; the update window shrinks toward
            # the top of the network.
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](xr[i-1]-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](xr[-2]-P[-2])*b0)
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > self.nlays -self.cls:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # More sweeps than layers: build the whole stack (stage 1),
            # update all layers for the surplus sweeps (stage 2), then
            # shrink the update window toward the top (stage 3).
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](xr[t-2]-P[t-2])*b0)
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](xr[i-1]-P[i-1])*b0) + a0*P[i])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](xr[-2]-P[-2])*b0)
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](xr[i-1]-P[i-1])*b0) + a0*P[i])
                state_list.append(xr[self.layer_sel])
                error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](xr[i-1]-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](xr[-2]-P[-2])*b0)
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool over the top layer, then linear head.
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, error_list
        else:
            return out#, state_list
class PredictiveTied2(nn.Module):
    """Variant of PredictiveTied in which each layer's bottom-up correction
    uses the PREVIOUS value of the layer below (kept in `old`/`tmp`) rather
    than its just-updated value, making the sweep synchronous.

    When `record` is True, forward returns (logits, state_list) — note this
    differs from PredictiveTied, which returns error_list.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False):
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        # Feedforward layers (weight-tied conv / conv-transpose pairs).
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.1) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+0.1) for i in range(self.nlays)])
        # Linear layer
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel

    def forward(self, x):
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i]: top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls <= self.nlays:
            #stage 1
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                # `old` holds the pre-update state of the layer below.
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                # NOTE(review): this b0 is overwritten by the loop below when
                # t >= 3, so the xr[t-1] update after the loop then uses
                # self.b0[t-2] expanded to xr[t-2]'s shape instead of
                # self.b0[t-1] — looks like a stale-variable bug; confirm.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: sliding window of the top `cls` layers.
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks toward the top of the network.
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                # NOTE(review): PredictiveTied3 uses
                # xr[self.nlays-self.cls+t+1] at the matching point; the
                # missing "+t" here may be an off-by-t bug — confirm intent.
                old = xr[self.nlays-self.cls+1]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > self.nlays -self.cls:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                # NOTE(review): same stale-b0 pattern as the branch above —
                # overwritten by the loop when t >= 3; confirm.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                # NOTE(review): this b0 is also overwritten by the loop below
                # before the xr[-1] update uses it; confirm.
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool of the top layer, then linear head.
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
class PredictiveTied_penalty(nn.Module):
    """Variant of PredictiveTied2 that adds a learned decay/penalty term c0:
    each state update subtracts c0*xr[i] (or scales by (1-c0)), shrinking
    activations toward zero every sweep. Default rates a=b=c=0.01.

    When `record` is True, forward returns (logits, state_list).
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False):
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        # Feedforward layers (weight-tied conv / conv-transpose pairs).
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate: a0 top-down blend, b0 error gain, c0 state decay.
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.01) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+0.01) for i in range(self.nlays)])
        self.c0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+0.01) for i in range(self.nlays)])
        # Linear layer
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel

    def forward(self, x):
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i]: top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls <= self.nlays:
            #stage 1
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            c0 = F.relu(self.c0[0]).expand_as(xr[0])
            xr[0] = F.relu((1-c0)*xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                c0 = F.relu(self.c0[0]).expand_as(xr[0])
                # `old` holds the pre-update state of the layer below.
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0] -c0*xr[0])
                # NOTE(review): this assignment is dead — b0 is reassigned
                # inside the loop and again right after it; confirm it can
                # be removed.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    c0 = F.relu(self.c0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i] -c0*xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                c0 = F.relu(self.c0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu((1-c0)*xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: sliding window of the top `cls` layers.
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    c0 = F.relu(self.c0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]-c0*xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                c0 = F.relu(self.c0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu((1-c0)*xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks toward the top of the network.
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                # NOTE(review): PredictiveTied3 uses ...-self.cls+t+1 here;
                # the missing "+t" may be an off-by-t bug — confirm intent.
                old = xr[self.nlays-self.cls+1]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    c0 = F.relu(self.c0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]-c0*xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                c0 = F.relu(self.c0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu((1-c0)*xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > self.nlays -self.cls:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            # NOTE(review): unlike the branch above, no c0 decay is applied
            # to this first layer-0 update — confirm whether intentional.
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                c0 = F.relu(self.c0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0]-c0*xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    c0 = F.relu(self.c0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]-c0*xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                c0 = F.relu(self.c0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu((1-c0)*xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                # NOTE(review): no c0 decay on layer 0 in this stage either —
                # confirm whether intentional.
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    c0 = F.relu(self.c0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]-c0*xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                c0 = F.relu(self.c0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu((1-c0)*xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    c0 = F.relu(self.c0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]-c0*xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                c0 = F.relu(self.c0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu((1-c0)*xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool of the top layer, then linear head.
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
class PredictiveTied3(nn.Module):
    """Variant of PredictiveTied2 with (a) scalar update rates a0/b0 (shape
    (1,1,1,1), shared across channels) instead of per-channel vectors, and
    (b) LINEAR state updates — no F.relu around the update equations, only
    on the initial feedforward activations.

    When `record` is True, forward returns (logits, error_list).
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False):
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        # Feedforward layers (weight-tied conv / conv-transpose pairs).
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate: scalar per layer, broadcast over channels and space.
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,1,1,1)+0.1) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,1,1,1)+0.1) for i in range(self.nlays)])
        # Linear layer
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel

    def forward(self, x):
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i]: top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls <= self.nlays:
            #stage 1
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                # `old` holds the pre-update state of the layer below.
                old = xr[0]
                xr[0] = (1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0]
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = (1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = xr[t-1]+self.conv[t-1](old-P[t-2])*b0
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: sliding window of the top `cls` layers.
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = (1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = xr[t-1]+self.conv[t-1](old-P[t-2])*b0
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks toward the top of the network.
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[self.nlays-self.cls+t+1]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = (1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > self.nlays -self.cls:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = (1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0]
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = (1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = xr[t-1]+self.conv[t-1](old-P[t-2])*b0
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = (1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0]
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = (1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = (1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool of the top layer, then linear head.
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, error_list
        else:
            return out#, state_list
class PredictiveTiedLayerNorm(nn.Module):
    """Weight-tied predictive-coding CNN with LayerNorm applied after each
    recurrent state update.

    Each Conv2d is reused both bottom-up (feedforward) and top-down
    (``feedforward=False``) to form predictions P[i] of layer i from layer
    i+1.  The forward pass runs a staged relaxation schedule over ``cls``
    "circles" (extra input presentations).

    NOTE(review): the nn.LayerNorm modules are constructed inside forward()
    on every call, so when ``affine=True`` their scale/shift parameters are
    re-initialized each call and never trained; the hard-coded ``.cuda()``
    also pins execution to GPU.  Documented only — left as-is.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False, affine = True, a = 0.5, b = 0.5):
        # cls: number of relaxation "circles"; layer_sel: which layer's state /
        # prediction error to record; a, b: initial update rates.
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        self.affine = affine
        self.a = a
        self.b = b
        # Feedforward layers (tied weights; also used top-down via feedforward=False)
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        # a0: mixing rate toward the top-down prediction; b0: error-correction gain.
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+self.a) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+self.b) for i in range(self.nlays)])
        # Linear layer
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel
    def forward(self, x):
        """Run the staged predictive-coding relaxation and classify.

        Returns logits, or (logits, error_list) when ``self.record`` is set.
        """
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls == 1:
            # Pure feedforward pass: no relaxation at all.
            for i in range(1,self.nlays):
                xr.append(F.relu(self.conv[i](xr[-1])))
            out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
            out = out.view(out.size(0), -1)
            #torch.nn.Dropout()
            out = self.linear(out)
            return out#, state_list
        if self.cls <= self.nlays:
            #stage 1: grow the stack one layer per step while relaxing the layers built so far
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            # print(xr[0].size())
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            # print(xr[0].size())
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                # Bottom layer: mix error-corrected state with the top-down prediction.
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                # print(xr[0].size())
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] =m(xr[0])
                # print(xr[0].size())
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    # tmp/old carry the *previous* state of the layer below,
                    # so each update sees pre-update inputs.
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                # Topmost relaxed layer has no prediction from above: error term only.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: stack keeps growing; a sliding window of cls layers is relaxed
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: the window shrinks from below as relaxation finishes
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[self.nlays-self.cls+1+t]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    # print(xr[i].size())
                    # print(tmp.size())
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > self.nlays -self.cls:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # cls > nlays: every layer stays in the relaxation window; stage 2
            # runs the extra (cls - nlays) full sweeps.
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: extra whole-stack sweeps
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks from below
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool -> linear head
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
class PredictiveTiedInstanceNorm(nn.Module):
    """Weight-tied predictive-coding CNN that instance-normalizes each
    recurrent state update.

    Fixes relative to the previous revision:
      * the constructor's ``affine`` flag was silently ignored — every
        InstanceNorm2d in forward() hard-coded ``affine=True``.  The flag is
        now honored.  This is numerically identical for fresh modules, since
        an affine InstanceNorm initializes weight=1 / bias=0 (identity).
      * stage 3 read ``old = xr[self.nlays-self.cls+1]`` with a missing
        ``+t``, inconsistent with its own loop start
        (``self.nlays-self.cls+2+t``) and with the sibling
        PredictiveTiedLayerNorm implementation.  Corrected.

    NOTE(review): InstanceNorm2d modules are still built inside forward()
    each call (so affine parameters are never trained) and ``.cuda()`` pins
    the model to GPU — kept as-is to preserve the file's design.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False, affine = False):
        # cls: number of relaxation "circles"; layer_sel: layer whose state /
        # prediction error is recorded when record=True.
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        self.affine = affine
        # Feedforward layers (tied weights; reused top-down via feedforward=False)
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rates: a0 mixes in the top-down prediction, b0 scales the
        # bottom-up error correction (both rectified before use).
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.1) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+0.1) for i in range(self.nlays)])
        # Linear classifier head
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel
    def forward(self, x):
        """Run the staged predictive-coding relaxation and classify.

        Returns logits, or (logits, state_list) when ``self.record`` is set.
        """
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        m = nn.InstanceNorm2d(xr[0].size()[1], affine=self.affine).cuda()
        xr[0] = m(xr[0])
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls <= self.nlays:
            #stage 1: grow the stack while relaxing the layers built so far
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            m = nn.InstanceNorm2d(xr[0].size()[1], affine=self.affine).cuda()
            xr[0] = m(xr[0])
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.InstanceNorm2d(xr[0].size()[1], affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    # tmp/old keep the pre-update state of the layer below.
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.InstanceNorm2d(xr[i].size()[1], affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                # Topmost relaxed layer: no prediction from above, error term only.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.InstanceNorm2d(xr[t-1].size()[1], affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: sliding window of cls layers is relaxed as the stack grows
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.InstanceNorm2d(xr[i].size()[1], affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.InstanceNorm2d(xr[t-1].size()[1], affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks from below as relaxation finishes
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                # FIX: was xr[self.nlays-self.cls+1] (missing +t); must track the
                # window bottom, matching the loop start below.
                old = xr[self.nlays-self.cls+1+t]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.InstanceNorm2d(xr[i].size()[1], affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.InstanceNorm2d(xr[-1].size()[1], affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > self.nlays -self.cls:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # cls > nlays: all layers stay in the window; stage 2 runs the
            # extra (cls - nlays) full sweeps.  NOTE(review): this branch does
            # not apply InstanceNorm — kept as in the original.
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool -> linear head
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
class PredictiveTiedKL(nn.Module):
    """Weight-tied predictive-coding CNN whose relaxation step uses a
    KL-divergence-style (log-ratio) error instead of a linear error.

    This class deliberately has no ``a0`` mixing parameters (they were
    removed from ``__init__``).  Fixes relative to the previous revision:
      * the ``else`` branch (cls > nlays) still used the old linear-update
        template that references the undefined ``a0`` / ``self.a0`` — any
        call with cls > nlays raised at runtime.  That branch now uses the
        same KL-form updates as the main branch.
      * stage 3 of the main branch read ``old = xr[self.nlays-self.cls+1]``
        with a missing ``+t``; corrected to match the loop start and the
        sibling classes.

    NOTE(review): the KL updates divide by ``x``, ``tmp`` and ``xr[i]``,
    which can contain zeros after ReLU — assumed to be handled upstream
    (e.g. strictly positive activations); verify against the training code.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False):
        # cls: number of relaxation "circles"; layer_sel: layer whose state /
        # prediction error is recorded when record=True.
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        # Feedforward layers (tied weights; reused top-down via feedforward=False)
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate: only the error-correction gain b0 (no a0 in the KL variant).
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+0.1) for i in range(self.nlays)])
        # Linear classifier head
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel
    def forward(self, x):
        """Run the staged KL-form relaxation and classify.

        Returns logits, or (logits, state_list) when ``self.record`` is set.
        """
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls <= self.nlays:
            #stage 1: grow the stack while relaxing the layers built so far
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]-b0*self.conv[0](torch.log(self.conv[0](xr[0],feedforward=False)/x+1.0)))
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu(xr[0]-b0*self.conv[0](torch.log(self.conv[0](xr[0],feedforward=False)/x+1.0)))
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    # tmp/old keep the pre-update state of the layer below.
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu(xr[i]-b0*self.conv[i](torch.log(self.conv[i](xr[i],feedforward=False)/tmp+1.0))+ b0*self.conv[i+1](xr[i+1],feedforward=False)/xr[i])
                # Topmost relaxed layer: no top-down term.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]-b0*self.conv[t-1](torch.log(self.conv[t-1](xr[t-1],feedforward=False)/old+1.0)))
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: sliding window of cls layers is relaxed as the stack grows
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu(xr[i]-b0*self.conv[i](torch.log(self.conv[i](xr[i],feedforward=False)/tmp+1.0))+ b0*self.conv[i+1](xr[i+1],feedforward=False)/xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]-b0*self.conv[t-1](torch.log(self.conv[t-1](xr[t-1],feedforward=False)/old+1.0)))
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks from below as relaxation finishes
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                # FIX: was xr[self.nlays-self.cls+1] (missing +t); must track the
                # window bottom, matching the loop start below.
                old = xr[self.nlays-self.cls+1+t]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu(xr[i]-b0*self.conv[i](torch.log(self.conv[i](xr[i],feedforward=False)/tmp+1.0))+ b0*self.conv[i+1](xr[i+1],feedforward=False)/xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]-b0*self.conv[-1](torch.log(self.conv[-1](xr[-1],feedforward=False)/old+1.0)))
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > self.nlays -self.cls:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # cls > nlays: all layers stay in the window; stage 2 runs the
            # extra (cls - nlays) full sweeps.  FIX: this branch previously
            # used the linear-update template with the undefined a0/self.a0
            # and crashed; it now mirrors the KL-form updates above.
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]-b0*self.conv[0](torch.log(self.conv[0](xr[0],feedforward=False)/x+1.0)))
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu(xr[0]-b0*self.conv[0](torch.log(self.conv[0](xr[0],feedforward=False)/x+1.0)))
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu(xr[i]-b0*self.conv[i](torch.log(self.conv[i](xr[i],feedforward=False)/tmp+1.0))+ b0*self.conv[i+1](xr[i+1],feedforward=False)/xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]-b0*self.conv[t-1](torch.log(self.conv[t-1](xr[t-1],feedforward=False)/old+1.0)))
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: extra whole-stack sweeps
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu(xr[0]-b0*self.conv[0](torch.log(self.conv[0](xr[0],feedforward=False)/x+1.0)))
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu(xr[i]-b0*self.conv[i](torch.log(self.conv[i](xr[i],feedforward=False)/tmp+1.0))+ b0*self.conv[i+1](xr[i+1],feedforward=False)/xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]-b0*self.conv[-1](torch.log(self.conv[-1](xr[-1],feedforward=False)/old+1.0)))
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3: window shrinks from below
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu(xr[i]-b0*self.conv[i](torch.log(self.conv[i](xr[i],feedforward=False)/tmp+1.0))+ b0*self.conv[i+1](xr[i+1],feedforward=False)/xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]-b0*self.conv[-1](torch.log(self.conv[-1](xr[-1],feedforward=False)/old+1.0)))
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier: global average pool -> linear head
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
class PredictiveTied_LN_Multi(nn.Module):
    """Weight-tied predictive-coding CNN with LayerNorm and *multiple*
    readouts: the classifier is applied after each late relaxation step and
    the logits are returned as a list.

    Fix relative to the previous revision: ``out`` was only initialized
    inside the ``cls <= nlays`` branch, so calling forward() with
    cls > nlays raised UnboundLocalError at ``return out`` and never
    produced readouts.  ``out`` is now initialized up front and the
    cls > nlays branch appends readouts mirroring the main branch (one
    after the full-stack sweeps, one per shrink step; each scaled by the
    number of readouts, ``nlays``, as the main branch scales by ``cls``).

    NOTE(review): LayerNorm modules are built inside forward() each call
    (affine parameters never train) and ``.cuda()`` pins the model to GPU —
    kept as-is.  When cls == 1 a single logits tensor (not a list) is
    returned, preserving the original contract.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False, affine = True, a = 0.1, b = 0.1):
        # cls: number of relaxation "circles"; layer_sel: layer whose state /
        # prediction error is recorded; a, b: initial update rates.
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        self.affine = affine
        # Feedforward layers (tied weights; reused top-down via feedforward=False)
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rates: a0 mixes in the top-down prediction, b0 scales the
        # bottom-up error correction (both rectified before use).
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+a) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+b) for i in range(self.nlays)])
        # Linear classifier head (shared across all readouts)
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel
    def forward(self, x):
        """Run the staged relaxation; return a list of logits (one per
        readout), or (logits_list, error_list) when ``self.record`` is set.
        With cls == 1 a single logits tensor is returned instead.
        """
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
        xr[-1] = m(xr[-1])
        state_list=[]
        error_list=[]
        # FIX: initialize the readout list unconditionally so every branch
        # (including cls > nlays) can append to and return it.
        out = []
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction of layer i from layer i+1.
        P = [None for i in range(self.nlays)]
        if self.cls == 1:
            # Pure feedforward pass: single readout tensor.
            for i in range(1,self.nlays):
                xr.append(F.relu(self.conv[i](xr[-1])))
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
            out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
            out = out.view(out.size(0), -1)
            #torch.nn.Dropout()
            out = self.linear(out)
            return out#, state_list
        if self.cls <= self.nlays:
            #stage 1: grow the stack while relaxing the layers built so far
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
            xr[-1] = m(xr[-1])
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] =m(xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    # tmp/old keep the pre-update state of the layer below.
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: sliding window of cls layers is relaxed as the stack grows
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t-self.cls, t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            # First readout (top layer state after the growth stages).
            tmp = xr[-1]/self.cls;
            tmp = F.avg_pool2d(tmp, tmp.size(-1))
            tmp = tmp.view(tmp.size(0), -1)
            #torch.nn.Dropout()
            out.append(self.linear(tmp))
            #stage 3: window shrinks from below; one readout per step
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[self.nlays-self.cls+1+t]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                tmp = xr[-1]/self.cls;
                tmp = F.avg_pool2d(tmp, tmp.size(-1))
                tmp = tmp.view(tmp.size(0), -1)
                #torch.nn.Dropout()
                out.append(self.linear(tmp))
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > self.nlays -self.cls:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # cls > nlays: all layers stay in the window; stage 2 runs the
            # extra (cls - nlays) full sweeps.  This branch now produces
            # nlays readouts (1 after stage 2, nlays-1 in stage 3).
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < 1:
                    error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel < t:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2: extra whole-stack sweeps
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            # First readout (FIX: this branch previously never filled `out`).
            tmp = xr[-1]/self.nlays
            tmp = F.avg_pool2d(tmp, tmp.size(-1))
            tmp = tmp.view(tmp.size(0), -1)
            out.append(self.linear(tmp))
            #stage 3: window shrinks from below; one readout per step
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                tmp = xr[-1]/self.nlays
                tmp = F.avg_pool2d(tmp, tmp.size(-1))
                tmp = tmp.view(tmp.size(0), -1)
                out.append(self.linear(tmp))
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                    if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                        error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        if self.record:
            return out, error_list
        else:
            return out#, state_list
class PredictiveTied_LN(nn.Module):
    """Predictive-coding CNN with tied feedforward/feedback convolutions and LayerNorm.

    The network runs ``cls`` recurrent "cycles". Each layer state ``xr[i]`` is
    iteratively corrected by a bottom-up prediction-error term (weighted by the
    learned rate ``b0``) and a top-down prediction term (weighted by ``a0``).
    Top-down predictions ``P[i]`` are produced by running the next layer's conv
    in feedback mode (``self.conv[i+1](..., feedforward=False)`` — assumes the
    project-local ``Conv2d`` supports a transposed/feedback pass; TODO confirm).

    NOTE(review): a fresh ``nn.LayerNorm`` is instantiated at every use inside
    ``forward`` and moved with ``.cuda()``. Its (optional) affine parameters are
    therefore never registered with the module and never trained, and CPU-only
    inputs will fail — confirm this is intentional.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False, affine = True, a = 0.1, b = 0.1):
        """Build the tied conv stack, per-layer update rates and the classifier head.

        :param num_classes: size of the final linear layer's output.
        :param cls: number of recurrent cycles (1 = plain feedforward pass).
        :param layer_sel: index of the layer whose states/errors are recorded.
        :param record: if True, ``forward`` also returns the recorded state list.
        :param affine: passed to every ad-hoc LayerNorm created in ``forward``.
        :param a: initial value of the top-down update rates ``a0``.
        :param b: initial value of the bottom-up update rates ``b0``.
        """
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        self.affine = affine
        self.a = a
        self.b = b
        # Feedforward layers
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+self.a) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+self.b) for i in range(self.nlays)])
        # Linear layer
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel
    def forward(self, x):
        """Run the staged predictive-coding schedule and classify.

        Returns ``out`` (logits), or ``(out, state_list)`` when ``self.record``
        is set. ``error_list`` is collected but not returned here — note the
        asymmetry with the record branch; TODO confirm which list is intended.
        """
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
        xr[0] =m(xr[0])
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction for layer i within one cycle.
        P = [None for i in range(self.nlays)]
        if self.cls == 1:
            # cls == 1: no recurrent correction — plain feedforward network.
            for i in range(1,self.nlays):
                xr.append(F.relu(self.conv[i](xr[-1])))
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] =m(xr[-1])
            out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
            out = out.view(out.size(0), -1)
            #torch.nn.Dropout()
            out = self.linear(out)
            return out#, state_list
        if self.cls <= self.nlays:
            # Pipelined schedule: stage 1 builds up the stack while correcting,
            # stage 2 slides a cls-wide correction window upward, stage 3 drains it.
            #stage 1
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
            xr[-1] =m(xr[-1])
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            # print(xr[0].size())
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            # print(xr[0].size())
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] =m(xr[-1])
                # Layer 0 is corrected against the raw input x.
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                # print(xr[0].size())
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] =m(xr[0])
                # print(xr[0].size())
                # Middle layers use the *previous* state of the layer below
                # (tmp/old bookkeeping) so all updates within a cycle are parallel.
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                # Topmost corrected layer has no top-down term yet.
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[self.nlays-self.cls+1+t]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    # print(xr[i].size())
                    # print(tmp.size())
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # cls > nlays: the stack is fully built first, then every layer is
            # corrected on each remaining cycle.
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
            xr[-1] = m(xr[-1])
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                # NOTE(review): state is recorded unconditionally here, unlike the
                # guarded appends in the other stages — confirm intent.
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
class PredictiveTied_GN(nn.Module):
    """GroupNorm variant of ``PredictiveTied_LN``.

    Same staged predictive-coding schedule, but each layer is normalised with a
    registered ``nn.GroupNorm`` module (``self.GN``) instead of an ad-hoc
    LayerNorm — so, unlike the LN variant, the norm parameters here are part of
    the module and trainable.

    NOTE(review): the ``else`` branch (``cls > self.nlays``) still creates
    ad-hoc ``nn.LayerNorm(...).cuda()`` instances and skips normalising the
    freshly appended feedforward activations — this looks like a copy-paste
    from ``PredictiveTied_LN``; confirm whether it should use ``self.GN``.
    """
    def __init__(self, num_classes=10, cls=3,layer_sel =0, record = False, affine = True, a = 0.1, b = 0.1, num_GN = 1):
        """Build the tied conv stack, update rates, GroupNorms and classifier.

        :param num_GN: number of groups for every ``nn.GroupNorm`` layer.
        (Remaining parameters are as in ``PredictiveTied_LN``; ``affine`` is
        only used by the LayerNorms created in the ``else`` branch.)
        """
        super().__init__()
        ics = [3, 64, 64, 128, 128, 256, 256, 256] # input chanels
        ocs = [64, 64, 128, 128, 256, 256, 256, 256] # output chanels
        sps = [False, False, True, False, True, False, False, False] # downsample flag
        self.cls = cls # circles means number of additional inputs
        self.nlays = len(ics)
        self.record = record
        self.affine = affine
        self.a = a
        self.b = b
        # Feedforward layers
        self.conv = nn.ModuleList([Conv2d(ics[i],ocs[i],sample=sps[i]) for i in range(self.nlays)])
        # Update rate
        #self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+0.5) for i in range(1,self.nlays)])
        #self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+1.0) for i in range(self.nlays)])
        self.a0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ics[i],1,1)+self.a) for i in range(1,self.nlays)])
        self.b0 = nn.ParameterList([nn.Parameter(torch.zeros(1,ocs[i],1,1)+self.b) for i in range(self.nlays)])
        # One registered GroupNorm per layer (trainable, unlike the LN variant).
        self.GN = nn.ModuleList([nn.GroupNorm(num_GN,ocs[i]) for i in range(self.nlays)])
        # Linear layer
        self.linear = nn.Linear(ocs[-1], num_classes)
        self.layer_sel = layer_sel
    def forward(self, x):
        """Run the staged predictive-coding schedule and classify.

        Returns ``out`` (logits), or ``(out, state_list)`` when ``self.record``
        is set; ``error_list`` is collected but not returned.
        """
        # Feedforward
        xr = [F.relu(self.conv[0](x))]
        xr[0] = self.GN[0](xr[0])
        state_list=[]
        error_list=[]
        if self.layer_sel < 1:
            state_list.append(xr[self.layer_sel])
        # P[i] caches the top-down prediction for layer i within one cycle.
        P = [None for i in range(self.nlays)]
        if self.cls == 1:
            # cls == 1: plain feedforward network, no recurrent correction.
            for i in range(1,self.nlays):
                xr.append(F.relu(self.conv[i](xr[-1])))
                xr[-1] =self.GN[i](xr[-1])
            out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
            out = out.view(out.size(0), -1)
            #torch.nn.Dropout()
            out = self.linear(out)
            return out#, state_list
        if self.cls <= self.nlays:
            # Pipelined schedule: stage 1 builds the stack while correcting,
            # stage 2 slides a cls-wide window upward, stage 3 drains it.
            #stage 1
            #t = 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            xr[-1] = self.GN[1](xr[-1])
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = self.GN[0](F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0))
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            #after t>1
            for t in range(2,self.cls):
                for i in range(t-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                xr.append(F.relu(self.conv[t](xr[-1])))
                xr[-1] = self.GN[t](xr[-1])
                # Layer 0 is corrected against the raw input x.
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = self.GN[0](F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0]))
                # Middle layers use the previous state of the layer below
                # (tmp/old bookkeeping) so updates within a cycle are parallel.
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = self.GN[i](F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]))
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = self.GN[t-1](F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0))
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls,self.nlays):
                xr.append(self.GN[t](F.relu(self.conv[t](xr[-1]))))
                for i in range(t-self.cls, t):
                    # print('len:',len(P))
                    # print(i)
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t-self.cls]
                for i in range(t-self.cls+1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = self.GN[i](F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]))
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = self.GN[t-1](F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0))
                if self.layer_sel < t+1 and self.layer_sel > t-self.cls:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t and self.layer_sel > t-self.cls-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.cls-1):
                for i in range(self.nlays-self.cls+t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[self.nlays-self.cls+1+t]
                for i in range(self.nlays-self.cls+2+t,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    # print(xr[i].size())
                    # print(tmp.size())
                    xr[i] = self.GN[i](F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i]))
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = self.GN[-1](F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0))
                if self.layer_sel > self.nlays-self.cls+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > self.nlays -self.cls:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        else:
            # NOTE(review): this whole branch normalises with ad-hoc LayerNorm
            # (and sometimes not at all after xr.append) instead of self.GN —
            # likely copied from PredictiveTied_LN; confirm before relying on it.
            #stage 1
            xr.append(F.relu(self.conv[1](xr[-1])))
            b0 = F.relu(self.b0[0]).expand_as(xr[0])
            xr[0] = F.relu(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0)
            m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
            xr[0] = m(xr[0])
            if self.layer_sel < 2:
                state_list.append(xr[self.layer_sel])
            if self.layer_sel < 1:
                error_list.append(xr[self.layer_sel]-self.conv[self.layer_sel+1](xr[self.layer_sel+1],feedforward=False))
            for t in range(2,self.nlays):
                xr.append(F.relu(self.conv[t](xr[-1])))
                for i in range(t):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,t-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[t-1]).expand_as(xr[t-1])
                xr[t-1] = F.relu(xr[t-1]+self.conv[t-1](old-P[t-2])*b0)
                m = nn.LayerNorm(xr[t-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[t-1] = m(xr[t-1])
                if self.layer_sel < t+1:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel < t:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 2
            for t in range(self.cls-self.nlays):
                for i in range(self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                b0 = F.relu(self.b0[0]).expand_as(xr[0])
                a0 = F.relu(self.a0[0]).expand_as(xr[0])
                old = xr[0]
                xr[0] = F.relu((1-a0)*(xr[0]+self.conv[0](x-self.conv[0](xr[0],feedforward=False))*b0) + a0*P[0])
                m = nn.LayerNorm(xr[0].size()[1:],elementwise_affine=self.affine).cuda()
                xr[0] = m(xr[0])
                for i in range(1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                # NOTE(review): unconditional append, unlike the guarded ones above.
                state_list.append(xr[self.layer_sel])
                if self.layer_sel < self.nlays - 1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
            #stage 3
            for t in range(self.nlays-1):
                for i in range(t,self.nlays-1):
                    P[i] = self.conv[i+1](xr[i+1],feedforward=False)
                old = xr[t]
                for i in range(t+1,self.nlays-1):
                    b0 = F.relu(self.b0[i]).expand_as(xr[i])
                    a0 = F.relu(self.a0[i]).expand_as(xr[i])
                    tmp = old
                    old = xr[i]
                    xr[i] = F.relu((1-a0)*(xr[i]+self.conv[i](tmp-P[i-1])*b0) + a0*P[i])
                    m = nn.LayerNorm(xr[i].size()[1:],elementwise_affine=self.affine).cuda()
                    xr[i] = m(xr[i])
                b0 = F.relu(self.b0[-1]).expand_as(xr[-1])
                xr[-1] = F.relu(xr[-1]+self.conv[self.nlays-1](old-P[-2])*b0)
                m = nn.LayerNorm(xr[-1].size()[1:],elementwise_affine=self.affine).cuda()
                xr[-1] = m(xr[-1])
                if self.layer_sel > t:
                    state_list.append(xr[self.layer_sel])
                if self.layer_sel > t-1 and self.layer_sel < self.nlays-1:
                    error_list.append(xr[self.layer_sel]-P[self.layer_sel])
        # classifier
        out = F.avg_pool2d(xr[-1], xr[-1].size(-1))
        out = out.view(out.size(0), -1)
        #torch.nn.Dropout()
        out = self.linear(out)
        if self.record:
            return out, state_list
        else:
            return out#, state_list
|
C++
|
WINDOWS-1252
| 337 | 2.71875 | 3 |
[] |
no_license
|
#include ".h"
int main()
{
TemplateLinkedList<char>* IntLinkedList = new TemplateLinkedList<char>;
IntLinkedList->Apend('a');
IntLinkedList->Apend('b');
IntLinkedList->Apend('c');
IntLinkedList->Apend('d');
IntLinkedList->Apend('e');
IntLinkedList->Apend('f');
IntLinkedList->Apend('g');
IntLinkedList->PrintNode();
}
|
C++
|
UTF-8
| 218 | 3.546875 | 4 |
[] |
no_license
|
#include<stdio.h>
int main(void)
{
    int arr[] = { 1, 2, 3, 4, 5 };
    int *ptr = arr;

    /* Bump the first element in place: arr becomes {2, 2, 3, 4, 5}. */
    ++*ptr;
    printf("%d", *ptr); /* prints 2 */

    /* Step two elements forward so ptr now points at arr[2]. */
    ptr += 2;
    printf("%d", *ptr); /* prints 3 */

    return 0;
}
|
PHP
|
UTF-8
| 667 | 3.15625 | 3 |
[] |
no_license
|
<?php
class ContaPoupanca extends Conta {
    // Savings-account anniversary date (specific to ContaPoupanca).
    public $Aniversario;

    // Forward the shared account fields to the parent constructor and keep
    // the anniversary date locally.
    function __construct($Agencia, $Codigo, $DataDeCricao, $Titular, $Senha, $Saldo, $Aniversario){
        parent::__construct($Agencia, $Codigo, $DataDeCricao, $Titular, $Senha, $Saldo);
        $this->Aniversario = $Aniversario;
    }

    // Overridden withdrawal: refuse when the balance is insufficient,
    // otherwise delegate the actual debit to the parent implementation.
    function Retirar($valor) {
        if($this->Saldo < $valor) {
            echo "Retirada não permitida";
            return false;
        }
        parent::Retirar($valor);
        return true;
    }
}
|
C#
|
UTF-8
| 3,427 | 2.921875 | 3 |
[] |
no_license
|
using System;
using System.Collections.Generic;
using System.Linq.Expressions;
namespace Nicodem.Lexer
{
    /// <summary>
    /// Deterministic finite automaton over symbol type <typeparamref name="T"/>,
    /// built lazily from a regular expression: each DFA state corresponds to one
    /// derivative of the original regex (via RegEx.Derivative / DerivChanges).
    /// </summary>
    public class RegExDfa<T> : AbstractDfa<DFAState<T>, T> where T : IComparable<T>, IEquatable<T>
    {
        // Marker value assigned to accepting states of this automaton.
        private readonly uint accepting;

        // Memoises derivative -> state so equal derivatives share one DFA state;
        // registering before recursing also terminates construction on cycles.
        private readonly SortedDictionary<RegEx<T>, DFAState<T>> dictionaryOfDfaStates =
            new SortedDictionary<RegEx<T>, DFAState<T>>();

        // Shared non-accepting sink state that loops to itself on the minimal
        // symbol; created on first demand in CalculateDfaState.
        private DFAState<T> deadState;
        private readonly DFAState<T> _start;

        /// <summary>
        /// Reads the static MinValue member (field or property) of
        /// <typeparamref name="TSymbol"/> via a compiled expression tree.
        /// Throws <see cref="ArgumentException"/> when neither exists.
        /// </summary>
        public static TSymbol MinSymbol<TSymbol>()
        {
            if(typeof(TSymbol).GetField("MinValue") != null) {
                return Expression.Lambda<Func<TSymbol>>(Expression.Field(null, typeof(TSymbol), "MinValue")).Compile()();
            } else if(typeof(TSymbol).GetProperty("MinValue") != null) {
                return Expression.Lambda<Func<TSymbol>>(Expression.Property(null, typeof(TSymbol), "MinValue")).Compile()();
            }
            throw new ArgumentException(String.Format("There is no implemented static field MinValue in {0}", typeof(TSymbol)));
        }

        /// <summary>Start state of the automaton.</summary>
        public override DFAState<T> Start
        {
            get { return _start; }
        }
        /// <summary>
        /// Create RegExDfa with start state
        /// </summary>
        /// <param name="start">Start state</param>
        public RegExDfa(DFAState<T> start)
        {
            _start = start;
        }
        /// <summary>
        /// Create RegExDfa from RegEx
        /// </summary>
        /// <param name="regEx">Regular Expression from which is made automata</param>
        /// <param name="acceptingStateMarker">Number for accepting states</param>
        public RegExDfa(RegEx<T> regEx, uint acceptingStateMarker)
        {
            accepting = acceptingStateMarker;
            _start = CalculateDfaState(regEx);
        }

        // Recursively materialises (and memoises) the DFA state for the given
        // regex derivative.
        private DFAState<T> CalculateDfaState(RegEx<T> regEx)
        {
            if (dictionaryOfDfaStates.ContainsKey(regEx))
                return dictionaryOfDfaStates[regEx]; //return this state
            var new_state = new DFAState<T>();
            // Register before recursing so cyclic derivatives resolve to this state.
            dictionaryOfDfaStates.Add(regEx, new_state);
            var listOfTransitions = new List<KeyValuePair<T, DFAState<T>>>();
            foreach (var c in regEx.DerivChanges())
            {
                listOfTransitions.Add(new KeyValuePair<T, DFAState<T>>(c, CalculateDfaState(regEx.Derivative(c))));
            }
            // A state accepts iff its regex matches the empty word.
            if (regEx.HasEpsilon())
                new_state._accepting = accepting;
            else
                new_state._accepting = 0;
            // The transition table must start at the minimal symbol; when it does
            // not, route the uncovered leading range to the dead (sink) state and
            // sanity-check that keys are strictly increasing.
            if (listOfTransitions.Count == 0 || !listOfTransitions[0].Key.Equals(MinSymbol<T>()))
            {
                if (deadState == null)
                {
                    deadState = new DFAState<T>();
                    deadState._accepting = 0;
                    deadState._transitions = new KeyValuePair<T, DFAState<T>>[] { new KeyValuePair<T, DFAState<T>>(MinSymbol<T>(), deadState) };
                }
                listOfTransitions.Insert(0, new KeyValuePair<T, DFAState<T>>(MinSymbol<T>(), deadState));
                for (int i = 1; i < listOfTransitions.Count; i++)
                    if (listOfTransitions[i - 1].Key.CompareTo(listOfTransitions[i].Key) >= 0)
                        throw new Exception("Inpropper order");
            }
            new_state._transitions = listOfTransitions.ToArray();
            return new_state;
        }
    }
}
|
Java
|
UTF-8
| 982 | 2.078125 | 2 |
[] |
no_license
|
package com.springboot.racemanage.service.serviceImpl;
import com.springboot.racemanage.service.TermService;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
import com.springboot.racemanage.po.Term;
import com.springboot.racemanage.dao.TermDao;
import org.springframework.transaction.annotation.Transactional;
@Service
@Transactional
public class TermServiceImpl implements TermService{
    /** DAO providing persistence operations for {@link Term} entities. */
    @Resource
    private TermDao termDao;

    // NOTE(review): only findFirstByStatusOrderByTerm carries @Override —
    // confirm whether the insert/update methods are declared on TermService
    // and should be annotated as well.

    /** Inserts the given term with all columns; delegates to {@link TermDao#insert}. */
    public int insert(Term pojo){
        return termDao.insert(pojo);
    }
    /** Inserts only the non-null fields of the given term. */
    public int insertSelective(Term pojo){
        return termDao.insertSelective(pojo);
    }
    /** Batch-inserts the given terms. */
    public int insertList(List<Term> pojos){
        return termDao.insertList(pojos);
    }
    /** Updates an existing term. */
    public int update(Term pojo){
        return termDao.update(pojo);
    }
    /** Returns the first term with the given status, ordered by term. */
    @Override
    public Term findFirstByStatusOrderByTerm(Integer status) {
        return termDao.findFirstByStatusOrderByTerm(status);
    }
}
|
Shell
|
UTF-8
| 712 | 2.828125 | 3 |
[] |
no_license
|
# --- Bash history / HSTR setup (intended to be sourced from ~/.bashrc) ---
# HSTR (Ctrl+R enhancer) configuration
export HH_CONFIG=hicolor
# append new history items to .bash_history
shopt -s histappend
# leading space hides commands from history
export HISTCONTROL=ignorespace
# increase history file size (default is 500)
export HISTFILESIZE=10000
# increase history size (default is 500)
export HISTSIZE=${HISTFILESIZE}
# mem/file sync
export PROMPT_COMMAND="history -a; history -n; ${PROMPT_COMMAND}"
# if this is interactive shell, then bind hh to Ctrl-r (for Vi mode check doc)
if [[ $- =~ .*i.* ]]; then bind '"\C-r": "\C-a hh -- \C-j"'; fi
# if this is interactive shell, then bind 'kill last command' to Ctrl-x k
if [[ $- =~ .*i.* ]]; then bind '"\C-xk": "\C-a hh -k \C-j"'; fi
|
Java
|
UTF-8
| 473 | 2.140625 | 2 |
[] |
no_license
|
package com.walmart.ticketservice.utils;
import org.junit.Assert;
import org.junit.Test;
public class TicketServiceUtilsTest {
@Test
public void testTestUniqueID() {
int holdID1 = TicketServiceUtils.getHoldID();
int holdID2 = TicketServiceUtils.getHoldID();
Assert.assertNotNull(holdID1);
Assert.assertNotNull(holdID2);
Assert.assertNotEquals(holdID1, holdID2);
Assert.assertEquals(holdID2-holdID1, 1);
}
}
|
Python
|
UTF-8
| 1,511 | 3.109375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
'''
Task 7.3a: parse the switch CAM table dump in CAM_table.txt and print
"VLAN  MAC  interface" rows sorted numerically by VLAN, e.g.:

10   01ab.c5d0.70d0 Gi0/8
100  01bb.c580.7000 Gi0/1
1000 0a4b.c380.7d00 Gi0/9
'''
# Cleanup vs. the original: removed the unused `ignore_string` flag and the
# commented-out debug prints; skip non-data lines with `continue`.
entries = []
with open('CAM_table.txt', 'r') as src:
    for line in src:
        fields = line.split()
        try:
            # Data rows start with a numeric VLAN id; header/separator lines
            # raise ValueError (non-numeric) or IndexError (empty) and are skipped.
            fields[0] = int(fields[0])
            entries.append(fields)
        except (ValueError, IndexError):
            continue

# With the VLAN stored as int in column 0, a plain sort orders rows numerically.
entries.sort()
for row in entries:
    # row layout: [vlan, mac, entry_type, interface] -- column 2 is not printed.
    print('{:<4} {} {}'.format(row[0], row[1], row[3]))
|
PHP
|
UTF-8
| 2,557 | 2.734375 | 3 |
[] |
no_license
|
<?php
/**
 * Creates and renders user posts backed by a mysqli connection.
 */
class Post {
    private $user_ob; // User object for the logged-in user
    private $con;     // mysqli connection handle

    public function __construct($con, $user) {
        $this->con = $con;
        $this->user_ob = new User($con, $user);
    }

    /**
     * Stores a new post targeted at $user_to (or "none" when posting on the
     * author's own profile) and increments the author's post counter.
     */
    public function submitPost($body, $user_to) {
        $body = strip_tags($body);
        $body = mysqli_real_escape_string($this->con,$body);
        $check_empty = preg_replace('/\s+/',' ',$body);
        if($check_empty != "") {
            // current date and time
            $date_added = date("Y-m-d H:i:s");
            // get username
            $added_by = $this->user_ob->getUserName();
            // If the user posts on their own profile, store "none" as target.
            // BUGFIX: the original used '==' (a no-op comparison) instead of '='.
            if($user_to == $added_by) {
                $user_to = "none";
            }
            // TODO(security): $added_by/$user_to/$num_post are interpolated
            // directly into SQL; migrate these queries to prepared statements.
            $sql = "INSERT INTO posts VALUES('','$body','$added_by','$user_to','$date_added','no','no','0')";
            $query = mysqli_query($this->con,$sql);
            $returned_id = mysqli_insert_id($this->con);
            //insert notification
            // update post count
            $num_post = $this->user_ob->getNumPosts();
            $num_post++;
            $update_query = mysqli_query($this->con, "UPDATE usergeneral SET Posts = '$num_post' WHERE U_name = '$added_by'");
        }
    }

    /**
     * Renders all non-deleted posts, newest first, skipping authors whose
     * accounts are closed.
     */
    public function loadPostsFriends() {
        $str = "";
        $data = mysqli_query($this->con, "SELECT * FROM posts WHERE deleted = 'no' ORDER BY id DESC");
        // BUGFIX: the original called mysqli_fetch_array() once before the loop,
        // silently discarding the newest post.
        while($row = mysqli_fetch_array($data)) {
            $id = $row['id'];
            $body = $row['body'];
            $added_by = $row['added_by'];
            $date_time = $row['date_added'];
            // Prepare the "posted to" link, empty when not posted to a user.
            if($row['user_to'] == "none") {
                $user_to = "";
            }
            else {
                $user_to_obj = new User($this->con, $row['user_to']);
                $user_to_name = $user_to_obj->getFirstAndLastName();
                $user_to = "<a href=' " . $row['user_to'] ." '> ". $user_to_name. "</a>";
            }
            // Skip posts whose author has closed their account.
            $added_by_obj = new User($this->con, $added_by);
            if($added_by_obj->isClosed()) {
                continue;
            }
            $user_details_query = mysqli_query($this->con, "SELECT Name, Last_name, Photo FROM usergeneral WHERE U_name = '$added_by' ");
            $user_row = mysqli_fetch_array($user_details_query);
            $first_name = $user_row['Name'];
            $last_name = $user_row['Last_name'];
            $f_l_name = $first_name. ' '.$last_name;
            $profile_pic = $user_row['Photo'];
            $card = new Card($profile_pic, $added_by, $f_l_name, $date_time, $body, $user_to);
            $str = $card->getCard();
            echo $str;
        }
    }
}
?>
|
C++
|
UTF-8
| 315 | 2.8125 | 3 |
[] |
no_license
|
/*
 * Returns true when `nums` contains at least one repeated value.
 *
 * Rewrite rationale: the original allocated a 65536-bucket table of std::set
 * and bucketed by `(nums[i] > 0 ? nums[i] : -nums[i]) % 65536`, which invokes
 * signed-overflow UB for INT_MIN. A single hash set is simpler, average O(n),
 * and overflow-free.
 */
class Solution {
public:
    bool containsDuplicate(vector<int>& nums) {
        std::unordered_set<int> seen;
        seen.reserve(nums.size()); // avoid rehashing for known input size
        for (int value : nums) {
            // insert().second is false when the value was already present
            if (!seen.insert(value).second) {
                return true;
            }
        }
        return false;
    }
};
|
Java
|
UTF-8
| 13,653 | 2.15625 | 2 |
[] |
no_license
|
package de.uka.ipd.sdq.beagle.core.judge;
import static de.uka.ipd.sdq.beagle.core.testutil.ExceptionThrownMatcher.throwsException;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.BDDMockito.given;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.powermock.api.mockito.PowerMockito.mockStatic;
import de.uka.ipd.sdq.beagle.core.Blackboard;
import de.uka.ipd.sdq.beagle.core.ExternalCallParameter;
import de.uka.ipd.sdq.beagle.core.MeasurableSeffElement;
import de.uka.ipd.sdq.beagle.core.ResourceDemandingInternalAction;
import de.uka.ipd.sdq.beagle.core.SeffBranch;
import de.uka.ipd.sdq.beagle.core.SeffLoop;
import de.uka.ipd.sdq.beagle.core.evaluableexpressions.ConstantExpression;
import de.uka.ipd.sdq.beagle.core.evaluableexpressions.EvaluableExpression;
import de.uka.ipd.sdq.beagle.core.testutil.factories.BlackboardFactory;
import de.uka.ipd.sdq.beagle.core.testutil.factories.EvaluableExpressionFactory;
import org.hamcrest.Matcher;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.stubbing.Answer;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.rule.PowerMockRule;
import java.util.Set;
/**
* Tests for {@link FinalJudge}.
*
* @author Joshua Gleitze
*/
@PrepareForTest(FinalJudge.class)
public class FinalJudgeTest {
/**
* A factory for {@link Blackboard}s to easily obtain new instances from.
*/
private static final BlackboardFactory BLACKBOARD_FACTORY = new BlackboardFactory();
/**
* A {@link EvaluableExpression} factory to easily obtain new instances from.
*/
private static final EvaluableExpressionFactory EVALUABLE_EXPRESSION_FACTORY = new EvaluableExpressionFactory();
/**
* Matcher for the fact that the judge ends the analysis.
*/
private static final Matcher<Boolean> ENDS_ANALYSIS = is(true);
/**
* Matcher for the fact that the judge does not end the analysis.
*/
private static final Matcher<Boolean> CONTINUES_ANALYSIS = is(false);
/**
* Rule loading PowerMock (to mock static methods).
*/
@Rule
public PowerMockRule loadPowerMock = new PowerMockRule();
/**
* The Final Judge under test.
*/
private FinalJudge testedJudge;
/**
* The blackboard that the {@link #testedJudge} operates on.
*/
private Blackboard testBlackboard;
/**
* All seff elements on {@link #testBlackboard}.
*/
private Set<MeasurableSeffElement> allSeffElements;
/**
* A mocked fitness function that it set on {@link #testBlackboard}.
*/
private EvaluableExpressionFitnessFunction mockFitnessFunction;
	/**
	 * Creates default instances for the test methods.
	 */
	@Before
	public void createObjects() {
		this.testedJudge = new FinalJudge();
		// Mock the fitness function so each test can script the grades it needs.
		this.mockFitnessFunction = mock(EvaluableExpressionFitnessFunction.class);
		this.testBlackboard = BLACKBOARD_FACTORY.getWithFewElements();
		this.testBlackboard = BLACKBOARD_FACTORY.setFitnessFunction(this.testBlackboard, this.mockFitnessFunction);
		this.allSeffElements = BLACKBOARD_FACTORY.getAllSeffElements(this.testBlackboard);
	}
	/**
	 * Asserts that working on a not-initialised Final Judge gracefully throws an
	 * {@link IllegalStateException}. Asserts that initialisation is statelessly.
	 */
	@Test
	public void initialisation() {
		// judging before init() must fail fast
		assertThat(() -> this.testedJudge.judge(this.testBlackboard), throwsException(IllegalStateException.class))
;
		this.testedJudge.init(this.testBlackboard);
		this.testedJudge.judge(this.testBlackboard);
		// a fresh, never-init()ed instance must also be able to judge afterwards
		new FinalJudge().judge(this.testBlackboard);
	}
	/**
	 * Asserts that the judge throws a {@linkplain NullPointerException} if the passed
	 * blackboard is {@code null}.
	 */
	@Test
	public void detectsNull() {
		assertThat(() -> new FinalJudge().init(null), throwsException(NullPointerException.class));
		this.testedJudge.init(this.testBlackboard);
		// judge(null) must be rejected even on a properly initialised judge
		assertThat(() -> this.testedJudge.judge(null), throwsException(NullPointerException.class));
	}
	/**
	 * Asserts that the final judge ends the analysis if it ran for too long. To not
	 * impose any restriction that might be chosen with much consideration, this tests
	 * only that the analysis was aborted after 5 days.
	 */
	@Test
	public void endsWhenRunningTooLong() {
		final int daysToWait = 5;
		final EvaluableExpression testExpression = EVALUABLE_EXPRESSION_FACTORY.getOne();
		final SeffBranch seffElement = this.testBlackboard.getAllSeffBranches().iterator().next();
		// “go ahead in time” by mocking {@link System#currentTimeMillis}.
		mockStatic(System.class);
		// init() happens at mocked time 0 ms …
		given(System.currentTimeMillis()).willReturn(0L);
		given(this.mockFitnessFunction.gradeFor(any(SeffBranch.class), any(), any())).willReturn(3d);
		this.testedJudge.init(this.testBlackboard);
		// … then the clock jumps 5 days ahead before judging.
		given(System.currentTimeMillis()).willReturn(daysToWait * 24L * 60L * 60L * 1000L);
		this.testBlackboard.addProposedExpressionFor(seffElement, testExpression);
		assertThat("The final judge should end the analysis if it lasts too long",
			this.testedJudge.judge(this.testBlackboard), ENDS_ANALYSIS);
		assertThat("The test for running to long should be performed statelessly",
			new FinalJudge().judge(this.testBlackboard), ENDS_ANALYSIS);
		assertThat("Must propose the fittest expression", this.testBlackboard.getFinalExpressionFor(seffElement),
			is(testExpression));
	}
	/**
	 * Asserts that the final judge does not end the analysis while there is still a lot
	 * of improvement.
	 */
	@Test
	public void doesNotEndWhileThereIsImprovement() {
		final int numberOfIterations = 100;
		final double startValue = FinalJudge.MAX_CONSIDERED_FITNESS_VALUE;
		this.testedJudge.init(this.testBlackboard);
		// each expression that’s new for an element will have half of the previous
		// fitness value.
		// (the proposed ConstantExpression's value doubles as the iteration counter
		// used by the mocked grade: startValue * 2^-i)
		final Answer<Double> answerWithBigChange =
			(info) -> startValue * Math.pow(2, -info.getArgumentAt(1, ConstantExpression.class).getValue());
		given(this.mockFitnessFunction.gradeFor(any(SeffBranch.class), any(), any())).will(answerWithBigChange);
		given(this.mockFitnessFunction.gradeFor(any(SeffLoop.class), any(), any())).will(answerWithBigChange);
		given(this.mockFitnessFunction.gradeFor(any(ExternalCallParameter.class), any(), any()))
			.will(answerWithBigChange);
		given(this.mockFitnessFunction.gradeFor(any(ResourceDemandingInternalAction.class), any(), any()))
			.will(answerWithBigChange);
		for (int i = 0; i <= numberOfIterations / this.allSeffElements.size(); i++) {
			for (final MeasurableSeffElement seffElement : this.allSeffElements) {
				this.testBlackboard.addProposedExpressionFor(seffElement, ConstantExpression.forValue(i));
				assertThat(
					String.format("The final judge must not end the analysis while there’s still great improvement "
						+ "(stopped after %d iterations)", i * this.allSeffElements.size()),
					this.testedJudge.judge(this.testBlackboard), CONTINUES_ANALYSIS);
				assertThat("Must always select the best expression",
					this.testBlackboard.getFinalExpressionFor(seffElement), is(ConstantExpression.forValue(i)));
			}
		}
	}
/**
 * Asserts that the analysis is ended if the proposed expression’s fitness value
 * didn’t sufficiently decrease in the last iterations. To not impose any restriction
 * that might be chosen with much consideration, this tests only that the analysis was
 * ended after 500 iterations.
 */
@Test
public void endsIfTooLittleImprovement() {
    final double startValue = 100d;
    this.testedJudge.init(this.testBlackboard);
    // Each expression that’s new for an element is graded with 99.9% of the previous
    // fitness value: an improvement, but a negligible one.
    final Answer<Double> answerWithLittleChange =
        (info) -> startValue * Math.pow(0.999, info.getArgumentAt(1, ConstantExpression.class).getValue());
    given(this.mockFitnessFunction.gradeFor(any(SeffBranch.class), any(), any())).will(answerWithLittleChange);
    given(this.mockFitnessFunction.gradeFor(any(SeffLoop.class), any(), any())).will(answerWithLittleChange);
    given(this.mockFitnessFunction.gradeFor(any(ExternalCallParameter.class), any(), any()))
        .will(answerWithLittleChange);
    given(this.mockFitnessFunction.gradeFor(any(ResourceDemandingInternalAction.class), any(), any()))
        .will(answerWithLittleChange);
    // the value the final judge returned the last time it was called
    boolean decision = false;
    // the iteration in which the judge ended the analysis (if it did before 500)
    int iteration;
    for (iteration = 0; iteration < 500; iteration++) {
        for (final MeasurableSeffElement seffElement : this.allSeffElements) {
            this.testBlackboard.addProposedExpressionFor(seffElement, ConstantExpression.forValue(iteration));
        }
        decision = this.testedJudge.judge(this.testBlackboard);
        if (decision) {
            break;
        }
    }
    assertThat("The final judge must end the analysis if the results do not improve significantly", decision,
        is(ENDS_ANALYSIS));
    for (final MeasurableSeffElement seffElement : this.allSeffElements) {
        assertThat("Must propose the best expression even if stopping the analysis",
            this.testBlackboard.getFinalExpressionFor(seffElement), is(ConstantExpression.forValue(iteration)));
    }

    // Second run: fresh fixture objects and a brand-new FinalJudge instance for every
    // single call, to check that the “too little improvement” criterion is stateless.
    this.createObjects();
    new FinalJudge().init(this.testBlackboard);
    // the value the final judge returned the last time it was called
    decision = false;
    for (iteration = 0; iteration < 500; iteration++) {
        for (final MeasurableSeffElement seffElement : this.allSeffElements) {
            this.testBlackboard.addProposedExpressionFor(seffElement, ConstantExpression.forValue(iteration));
        }
        decision = new FinalJudge().judge(this.testBlackboard);
        if (decision) {
            break;
        }
    }
    assertThat("Ending the analysis must be done statelessly", decision, is(ENDS_ANALYSIS));
    for (final MeasurableSeffElement seffElement : this.allSeffElements) {
        assertThat("Must select the best expression", this.testBlackboard.getFinalExpressionFor(seffElement),
            is(ConstantExpression.forValue(iteration)));
    }
}
/**
 * Asserts that the analysis is ended if only perfect expressions are on the
 * blackboard. All proposals are graded {@code 0d} by the mocked fitness function,
 * so there is nothing left to improve; the judge must still select a final
 * expression for every element.
 */
@Test
public void endsIfEverythingIsPerfect() {
    this.testBlackboard =
        BLACKBOARD_FACTORY.setFitnessFunction(BLACKBOARD_FACTORY.getFull(), this.mockFitnessFunction);
    this.testedJudge.init(this.testBlackboard);

    // Grade every proposed expression as perfect, regardless of its element type.
    given(this.mockFitnessFunction.gradeFor(any(SeffBranch.class), any(), any())).willReturn(0d);
    given(this.mockFitnessFunction.gradeFor(any(SeffLoop.class), any(), any())).willReturn(0d);
    given(this.mockFitnessFunction.gradeFor(any(ExternalCallParameter.class), any(), any())).willReturn(0d);
    given(this.mockFitnessFunction.gradeFor(any(ResourceDemandingInternalAction.class), any(), any()))
        .willReturn(0d);

    assertThat("The final judge should end the analysis if everything is perfect",
        this.testedJudge.judge(this.testBlackboard), ENDS_ANALYSIS);
    // Fixed: local variable was previously misspelled as "seffElemet".
    for (final MeasurableSeffElement seffElement : this.allSeffElements) {
        assertThat("Must select the best expression", this.testBlackboard.getFinalExpressionFor(seffElement),
            is(not(nullValue())));
    }
}
/**
 * Asserts that the final judge picks, for each element, the fittest of all
 * expressions proposed for it.
 */
@Test
public void selectsFittestExpression() {
    this.testedJudge.init(this.testBlackboard);
    final ResourceDemandingInternalAction[] rdias =
        this.testBlackboard.getAllRdias().toArray(new ResourceDemandingInternalAction[0]);
    final EvaluableExpression[] proposals = EVALUABLE_EXPRESSION_FACTORY.getAll();

    // One proposal for the first RDIA: it must be selected, and only for that RDIA.
    this.testBlackboard.addProposedExpressionFor(rdias[0], proposals[0]);
    given(this.mockFitnessFunction.gradeFor(eq(rdias[0]), eq(proposals[0]), any())).willReturn(12d);
    this.testedJudge.judge(this.testBlackboard);
    assertThat("The final judge should select the fittest expression",
        this.testBlackboard.getFinalExpressionFor(rdias[0]), is(proposals[0]));
    assertThat("The final judge should not ‘cross-select’ expressions",
        this.testBlackboard.getFinalExpressionFor(rdias[1]), is(nullValue()));

    // A proposal for the second RDIA must not disturb the first RDIA's selection.
    this.testBlackboard.addProposedExpressionFor(rdias[1], proposals[1]);
    given(this.mockFitnessFunction.gradeFor(eq(rdias[1]), eq(proposals[1]), any())).willReturn(24d);
    this.testedJudge.judge(this.testBlackboard);
    assertThat("The final judge should select the fittest expression",
        this.testBlackboard.getFinalExpressionFor(rdias[0]), is(proposals[0]));
    assertThat("The final judge should select the fittest expression",
        this.testBlackboard.getFinalExpressionFor(rdias[1]), is(proposals[1]));

    // With several proposals, the one graded lowest (6d here) must win.
    this.testBlackboard.addProposedExpressionFor(rdias[0], proposals[2]);
    this.testBlackboard.addProposedExpressionFor(rdias[0], proposals[3]);
    this.testBlackboard.addProposedExpressionFor(rdias[0], proposals[4]);
    given(this.mockFitnessFunction.gradeFor(eq(rdias[0]), eq(proposals[2]), any())).willReturn(48d);
    given(this.mockFitnessFunction.gradeFor(eq(rdias[0]), eq(proposals[3]), any())).willReturn(6d);
    given(this.mockFitnessFunction.gradeFor(eq(rdias[0]), eq(proposals[4]), any())).willReturn(12d);
    this.testedJudge.judge(this.testBlackboard);
    assertThat("The final judge should select the fittest expression",
        this.testBlackboard.getFinalExpressionFor(rdias[0]), is(proposals[3]));
    assertThat("The final judge should select the fittest expression",
        this.testBlackboard.getFinalExpressionFor(rdias[1]), is(proposals[1]));
}
}