language (stringclasses, 15 values) | src_encoding (stringclasses, 34 values) | length_bytes (int64, 6 to 7.85M) | score (float64, 1.5 to 5.69) | int_score (int64, 2 to 5) | detected_licenses (list, lengths 0 to 160) | license_type (stringclasses, 2 values) | text (stringlengths, 9 to 7.85M) |
---|---|---|---|---|---|---|---|
Python
|
UTF-8
| 2,413 | 3.328125 | 3 |
[] |
no_license
|
'''
@Author : NONO SAHA Cyrille
Generate a sequence of numbers in [0,1] using the logistic evolution function
'''
from ROOT import *
import array
import matplotlib.pyplot as plt
import numpy as np
def vonNeumann(x):
    return (2/np.pi)*np.arcsin(np.sqrt(x))

def evolution(lamda, x):
    return 4*lamda*x*(1-x)

def main():
    lamda = 1
    x = 0.7
    N = 1000000
    list_x = array.array('d')
    list_y = array.array('d')
    i = 0
    while i < N:
        list_x.append(x)
        y = evolution(lamda, x)
        list_y.append(y)
        x = y
        i = i + 1
    for i in range(10):
        print("(", list_x[i], list_y[i], ")")
    print(len(list_x), len(list_y))
    #canvas for drawing
    c = TCanvas("c", "c", 200, 200)
    c.Divide(1, 2)
    #make initial histogram of the distances
    h1 = TH1F("hdata", "Logistic On 1D", 100, 0, 1)
    for val in list_x:
        h1.Fill(val)
    #draw the data histogram
    c.cd(1)
    h1.SetLineColor(2)
    h1.SetMinimum(0)
    h1.Draw("same")
    #Remove the statistic box on histograms
    h1.SetStats(kFALSE)
    #Transform the logistic equation applying the von Neumann function
    list_yn = (2/np.pi)*np.arcsin(np.sqrt(list_x))
    h2 = TH1F("Neumann", "Neumann on 1D", 100, 0, 1)
    for yn in list_yn:
        h2.Fill(yn)
    h2.SetLineColor(3)
    h2.SetStats(kFALSE)
    h2.SetMinimum(0)
    h2.Draw("same")
    #2D plotting histogram
    h2D = TH2F("2DH", "Logistic 2D", 100, 0, 1, 100, 0, 1)
    h2DV = TH2F("2DHV", "Logistic 2D", 100, 0, 1, 100, 0, 1)
    for i in range(len(list_y)):
        h2D.Fill(list_x[i], list_y[i])
        h2DV.Fill(vonNeumann(list_x[i]), vonNeumann(list_y[i]))
    # label the axes
    h2D.GetXaxis().SetTitle('x axis label')
    h2D.GetYaxis().SetTitle('y axis label')
    h2DV.SetLineColor(3)
    h2D.SetStats(kFALSE)
    c.cd(2)
    h2D.Draw("colz")
    h2DV.Draw("same")
    c.SaveAs("logistic-histogram.eps")

if __name__ == "__main__":
    main()
|
Python
|
UTF-8
| 156 | 3.53125 | 4 |
[] |
no_license
|
#paste code here
choice = int(input("what number would you like to make a times table for?"))
x = 0
while x < 15:
    x = x + 1
    ans = x * choice
    print(ans)
|
Markdown
|
UTF-8
| 12,117 | 2.75 | 3 |
[] |
no_license
|
> 现在十年过去了,这儿终于长成了一个绿树成荫的地方。有一天下很大的雨,经过池塘的时候,看到一只狸猫趴在池塘中间的荷叶上。
###成荫
####作者/周嘉宁
几个月前的晚上,我和F一起在外面吃饭,忘记是为了庆祝什么事情,喝得都有些醉醺醺的。我们打车回家,一起从车里走出来,在小区里走了一段路。冬天还没有来,但是天已经很冷。这是一个巨型小区,有朋友第一次来做客的话,我都会约他们在轻轨站见面,然后陪他们走回来,否则很容易便会在这儿迷路。走在路上,常常有出租车司机从车里探出脑袋来问出口往哪里走,还有新上任的外卖小弟气急败坏在楼宇之间穿行。小区分成好几期,一期最旧,住着大多是本地人,F的父母也住在那里。而到了我们租住的四期,则变得鱼龙混杂。我们的隔壁是一间群租房,是在其他楼层见过的对面饭馆的职工宿舍,夏天的夜晚,女孩们裸着上半身坐在双层床下面打牌。半夜还在电梯里见过异装癖三兄弟,喷着香水,戴着假发,踩着高跟鞋。不过我从来没有担心过安全的问题,哪怕是偶尔凌晨回家,都觉得没有问题。大概是因为楼底下便是全家超市,便想起在朋友圈里,姑娘说,有一个男孩从外地开车来找她,她想要躲开他,又不好意思直说,便寻求大家的帮助。有朋友回复说,让他去全家呀。全家就是你家。突然间想起这么个笑话似的事情时,我和F正并肩走过全家超市,我们进去买了一桶农夫山泉,一个打火机和一罐口香糖。要蓝莓口味还是西柚口味的?我们站在货架前讨论了一番。“真是太奇怪了,我们竟然住在一起。”F看了我一眼说。“是的。完全没想到有一天竟然会和你一起回家。”“而且是在三十岁的时候!”哦,实际上都已经不止三十岁了,只不过过了三十岁以后,便一直觉得自己是三十岁,也蛮奇怪的。然后再想想,我和F,认识了有二十一年,不由又吓了一跳。告诉朋友们的时候,大家都觉得不可思议。天哪,这应该是二十多岁的时候做的事才对。到了三十岁再和自己最好的朋友住在一起,你们嫌自己人生的漏洞还不够大吗?告诉父母的时候,他们的反应也很消极。你们这么做,大概暂时都不会再找得到新男朋友了。你们现在的当务之急应该是恋爱啊。这样逃避现实不能解决问题。其实我们也没有什么需要逃避的现实。刚刚决定要住在一起的时候,彼此的确都遇见了一些事情。虽然说在当下有些伤心,焦虑,不安,但放在漫漫人生中看,也实在算不上什么了不起的事情。只不过是生活状态突然遭到了变故,打破了旧的秩序,因此而有些措手不及而已。之前我们也都有过同居的经验,但都是和男友。亲密关系下的同居和两个朋友住在一起毕竟是有非常大的不同。尽管我们从小相识,与彼此父母的关系都很好,节假日会去对方家里吃饭,甚至彼此的父母都见过我们的前男友。但是真的要住在一起──我们却都变得非常犹豫,认真地讨论过很多次。“没什么可担心的,我们都那么了解对方。”“没错,我们也都不是什么计较的人。”“如果感觉有什么不舒服,就立刻说出来。”“也可以带男朋友来家里过夜。不是男朋友的话,就不要带回来。”“哪怕再谈恋爱也不会同居了,与房东签一年或者两年的合同也都没有问题。”嗯,基本就是这样,我们也算是彼此鼓着劲,搬到了一起。房间在二十二楼,朝西南,有很大的空荡荡的客厅。厨房和卫生间非常宽敞,电器都是新的,家具也都是房东从宜家买的,北面挨着铁轨,在客厅里有时可以听到火车的声音,在卧室里则听不到。是我租过的所有房子里最好的了,因为觉得太大了,在客厅里养条狗都可以。这儿挨着轻轨站,F上班非常方便,而且两个人平摊房租的话,算下来真的也没多少钱。而且我们从小便是在苏州河流域长大的,因为小学五年级便认识了,之后的初中和高中都是在苏州河旁边的那个中学里一起念的,同学们的家也都是在那儿附近。现在的小区,便是依着河造的,从有些房子的窗口,可以直接看到端午节时,苏州河上的龙舟大赛。对这片区域过分熟悉了,因此找房子的过程其实非常简单,只不过是看了两天的房而已,就做了决定。这儿离市中心并不远,天气好的时候,走上四十分钟,便也走到了淮海路,瑜伽教室在那里附近,就当是练习前的暖身训练好了。要是开车的话,出门右转就上了南北高架,左转则是内环。当然我最喜欢的路线还是半夜沿着苏州河开回来,虽然会绕一点点路,但是全程没有红绿灯,也几乎没有其他车辆,一路畅通无阻地乱开,然后转个弯,过一座桥,突然巨型小区就呈现在了眼前。很多年以前的男朋友,把这个地方叫做恶魔城。因为它太突兀,太密集,偶尔来做客的朋友说,这儿住了多少人,有十万吗?我们后来真的算了算,起码得有五万吧。起初F和我想要搞一个暖屋派对,把共同的朋友们都请来,后来扳指算算,共同的朋友实在是很多,家里却只有四张椅子,就因为这个原因而迟迟拖着没有行动。而且搬完家以后,两个人突然都变得很忙。F每天早晨七点多起床,坐一个小时十分钟的地铁去上班,中间还需要转一趟车。至于我,年前一直在做一本翻译的校对,到后来简直是机械化的劳作,非常痛苦。年后终于结束了翻译的事情以后,又紧接着开始为已经写了一年多的长篇小说做收尾,外加还坚持着一些身体训练,每天虽然说都在家里,忙起来的时候就工作时间也超过了八个小时。虽然说住在了一起,但是并没有像电视剧或者电影里面描述的那样,每天都一起做很多事情。事实上,就连一起吃晚饭的次数,一个星期大概也不会超过三次。平时多数是我做晚饭,吃的都是些非常随便的东西。大部分时候是做一个汤,炒一个或者两个蔬菜。有时候懒散起来炒一碗年糕就好了。我们不是会被日常生活所累的人,但倒也不至于凑合,在家吃饭的话,虽然简单,但也吃得认认真真的。有时候一边吃一边感慨说,“我们实在是太健康了。”当然,生活习惯什么的也并不是都那么健康。那会儿,还是夏末,晚饭以后我们最喜欢做的事情就是打开窗,一起站在窗边抽一根烟。窗户正对着花园,花园里每晚都用高音喇叭播放着广场舞的音乐,有时候我们会不由自主地也哼哼起来。后来天气凉了,这个冬天仿佛非常漫长,广场舞已经暂停了很长一段时间。倒是有点令人怀念的。想起很久以前,起码是五年前了,我还在北京,F还在田纳西州。我们每天都隔着时差挂在msn上面聊天,有时是十三个小时的冬令时,有时是十四个小时的夏令时。那时我们都已经渐渐适应了在异乡的生活,因为有大把的空闲时间,又要对抗孤独症的发作,所以都同时开始学起做饭来。那还是博客年代,我们建了一个博客叫假开心──用来记录我们做的各种菜。起了个这样的名字,却真的是认真在讲做菜的事情。这个嘛,基本就是我和F之间的感情交流方式。那段刚刚住在一起的时间,我们其实都受到了不一样的创伤,但是都没有在彼此面前表现出过脆弱。哪怕是把家里面的存酒都喝完了,其实也都是讲着笑话喝完的。现在家里的窗台上摆了两排各种红酒瓶,像是勋章似的。那批酒喝完以后,没再买新的,于是也没有继续在家里喝酒的习惯。春节的时候,F去了斯里兰卡旅行,我一个人住了大概十几天的时间。这中间又经历过一些情绪的波折。网络上依然有人在转我2008年写的《一个人住第三年》,之后其实也谈恋爱的,也和男友同居过的,但是真的都断断续续保持着一个人住的状态。却在和F住在一起以后,对独居产生了畏惧。我们其实很少交谈,但是大部分的时候她的存在本身就是最好的陪伴。她在春节的最后一天才回家,接着第二天就上班了。我们迅速恢复了日常生活。我们最喜欢的早餐是星巴克的牛肉芝士可颂和另外一个面包房的杏仁牛角。一般来说我做饭的话,她会洗碗。有一回,我们做了两个汤当晚饭,然后都吃完了。家里不能缺少的东西是益达口香糖。我过去常买农夫山泉的瓶装水,在F的纠正下改成了桶装水。我们几乎都不买任何屋子里的装饰品,对于生活的要求非常简单,从来也都没有买过花。不管带回来什么食物,我们都是共享的。那天我做完晚饭,她回到家里,放下包,一边脱鞋,一边忍不住大笑着说:“告诉你一件世界上最好笑,最不可思议的事情。”“什么,快说。”“我被开除了!”新年上班的第一天,她被老板以粗暴的不告知原因的方式开除了。那是她回国以后的第一份工作,第三年。呃,那之后我们开始了一段真正的面面相觑的日子。我们每天从睁开眼睛起就被困在这间大屋子里面。因为是两个人,于是不得不认真地对待三餐。白天我们在各自的房间里,等到饭点的时候便出现在厨房,交谈两句,一起做饭。我们不拥有其他女孩那种建立在撒娇和倾诉上的感情,我们的感情,怎么说呢,有点像是男人间的方式。更独立,更可信赖。我们一起去参加中学同学的聚会,没有告诉其他人F遭遇的事情。有一对夫妇要去美国生孩子,于是大家聊起稍微沉重点的话题。F在饭桌上认真地追问大家的人生态度,结果几乎要弄得不欢而散。“我们是怎么长成了和他们都不一样的人呢?”“明明小的时候吃一样的学校食堂,听一样的音乐,也看一样的书。”“我们的父母也都是一样的人。”“也不知道是哪里出了问题,在哪个拐点上突然就拐上了另外一条路。”第二个面面相觑的星期,我们坚持一日三餐到第三天,终于觉得彻底受不了了,
午饭吃了好味的方便面,晚饭叫了肯德基的全家桶。现在的小区其实我很多年前也住过,距现在足足有十年了吧。那会儿我和当时的男朋友决定从浦东搬到浦西。房子是我选的,房租是2300块,对当时的我们来说已经是一笔巨款。那间屋子挨着苏州河,我们在那儿住了两年。现在这样的一间房子已经要被租到5500块左右了。嗯。现在想来,那是我最后一次和其他人一起搬家。我们在浦东那个乱糟糟的破屋子里面整理东西,打包,然后和朋友们一起坐着卡车呼啸过南浦大桥。真正的意气风发,觉得攻占全上海都不在话下。当时这个小区才造到第三期,也就是说我们现在租住的这块地方还是一个大工地。绿色植物都是刚刚移植过来的,病怏怏的小树苗,物业也是各种不完善,因此男友咒骂说真是一个恶魔城。后来我们在这儿度过了两个春天,最喜欢的就是初夏绣球花盛开的日子。现在十年过去了,这儿终于长成了一个绿树成荫的地方。有一天下很大的雨,经过池塘的时候,看到一只狸猫趴在池塘中间的荷叶上。(本文选自《鲤·一间不属于自己的房间》)
|
Markdown
|
UTF-8
| 1,285 | 2.671875 | 3 |
[
"MIT"
] |
permissive
|
# Unit testing usage notes
- [@vue/cli-plugin-unit-mocha](https://cli.vuejs.org/core-plugins/unit-mocha.html#injected-commands)
- [chai](http://chaijs.com/)
## Commonly used methods in test cases
> `should` and `expect` are the BDD (Behavior-Driven Development) style interfaces, while `assert` is the TDD (Test-Driven Development) style interface
### Connectors (language chains)
- to
- be
- been
- is
- that
- which
- and
- has
- have
- with
- at
- of
- same
- but
- does
- still
### Assertion flags
- .not
- .deep
- nested
- own
- ordered
- any
- all
- a
- include
- ok
- true
- false
### assert
- [assert](https://www.chaijs.com/api/assert/)
## Test case demos
- should
```javascript
chai.should()
foo.should.be.a('string')
foo.should.equal('bar')
foo.should.have.lengthOf(3)
tea.should.have.property('flavors').with.lengthOf(3)
```
- expect
```javascript
var expect = chai.expect
expect(foo).to.be.a('string')
expect(foo).to.equal('bar')
expect(foo).to.have.lengthOf(3)
expect(tea).to.have.property('flavors').with.lengthOf(3)
```
- Assert
```javascript
var assert = chai.assert
assert.typeOf(foo, 'string')
assert.equal(foo, 'bar')
assert.lengthOf(foo, 3)
assert.property(tea, 'flavors')
assert.lengthOf(tea.flavors, 3)
```
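- flags (a small sketch illustrating the connector and flag lists above; `foo` is the same placeholder used in the demos, and the object literals are made up for illustration)
```typescript
const expect = chai.expect
// .not negates, .deep compares structurally
expect(foo).to.not.equal('baz')
expect({ tea: 'chai' }).to.deep.equal({ tea: 'chai' })
// .nested and .own qualify property assertions
expect({ tea: { flavors: 3 } }).to.have.nested.property('tea.flavors')
expect({ tea: 'chai' }).to.have.own.property('tea')
// .ordered, .any and .all qualify members/keys assertions
expect([1, 2]).to.have.ordered.members([1, 2])
expect({ a: 1, b: 2 }).to.have.any.keys('a', 'x')
expect({ a: 1, b: 2 }).to.have.all.keys('a', 'b')
// include, ok, true and false
expect('foobar').to.include('foo')
expect(1).to.be.ok
expect(foo === 'bar').to.be.true
```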
|
C++
|
UTF-8
| 4,240 | 3.046875 | 3 |
[] |
no_license
|
#include "card.h"
Card::Card()
{
}
int Card::getId()
{
return this->id;
}
QString Card::getCardName()
{
return this->card_name;
}
void Card::setCardName(QString& card_name)
{
this->card_name = card_name;
}
QVector<ColorCard> Card::getColors()
{
return this->colors;
}
void Card::setColors(QVector<ColorCard>& colors)
{
this->colors = colors;
}
void Card::addColors(ColorCard& colorCard)
{
this->colors.push_back(colorCard);
}
int Card::removeColors(ColorCard& colorCard)
{
int pos = this->colors.indexOf(colorCard);
if(pos>=0)
{
this->colors.remove(pos);
}
return (pos >=0);
}
void Card::clearColors()
{
this->colors.clear();
}
int Card::getBlueManaCost()
{
return this->blue_mana_cost;
}
int Card::getBlackManaCost()
{
return this->black_mana_cost;
}
int Card::getWhiteManaCost()
{
return this->white_mana_cost;
}
int Card::getRedManaCost()
{
return this->red_mana_cost;
}
int Card::getGreenManaCost()
{
return this->green_mana_cost;
}
int Card::getUnColorManaCost()
{
return this->uncolor_mana_cost;
}
int Card::getXManaCost()
{
return this->nb_x_mana_cost;
}
int Card::getConvertedManaCost()
{
return this->converted_mana_cost;
}
void Card::setBlueManaCost(int blue_mana_cost)
{
this->blue_mana_cost = blue_mana_cost;
}
void Card::setBlackManaCost(int black_mana_cost)
{
this->black_mana_cost = black_mana_cost;
}
void Card::setWhiteManaCost(int white_mana_cost)
{
this->white_mana_cost = white_mana_cost;
}
void Card::setRedManaCost(int red_mana_cost)
{
this->red_mana_cost = red_mana_cost;
}
void Card::setGreenManaCost(int green_mana_cost)
{
this->green_mana_cost = green_mana_cost;
}
void Card::setUnColorManaCost(int uncolor_mana_cost)
{
this->uncolor_mana_cost = uncolor_mana_cost;
}
void Card::setXManaCost(int nb_x_mana_cost)
{
this->nb_x_mana_cost = nb_x_mana_cost;
}
void Card::setConvertedManaCost(int converted_mana_cost)
{
this->converted_mana_cost = converted_mana_cost;
}
Type Card::getType()
{
return this->type;
}
void Card::setType(Type& type)
{
this->type = type;
}
QString Card::getText()
{
return this->text;
}
void Card::setText(QString& text)
{
this->text = text;
}
QString Card::getFlavorText()
{
return this->flavor_text;
}
void Card::setFlavorText(QString& flavor_text)
{
this->flavor_text = flavor_text;
}
Edition Card::getEdition()
{
return this->edition;
}
void Card::setEdition(Edition& edition)
{
this->edition = edition;
}
QString Card::getArtist()
{
return this->artist;
}
void Card::setArtist(QString& artist)
{
this->artist = artist;
}
Rarity Card::getRarity()
{
return this->rarity;
}
void Card::setRarity(Rarity& rarity)
{
this->rarity = rarity;
}
bool Card::getLegendary()
{
return this->legendary;
}
void Card::setLegendary(bool legendary)
{
this->legendary = legendary;
}
bool Card::getWorld()
{
return this->world;
}
void Card::setWorld(bool world)
{
this->world = world;
}
bool Card::getTribal()
{
return this->tribal;
}
void Card::setTribal(bool tribal)
{
this->tribal = tribal;
}
bool Card::getSnow()
{
return this->snow;
}
void Card::setSnow(bool snow)
{
this->snow = snow;
}
QString Card::getImage()
{
return this->image;
}
void Card::setImage(QString& image)
{
this->image = image;
}
QString Card::getCodeCard()
{
return this->codeCard;
}
void Card::setCodeCard(QString& codeCard)
{
this->codeCard = codeCard;
}
QVector<QString> Card::getSubtype()
{
return this->subtype;
}
void Card::setSubtype(QVector<QString>& subtype)
{
this->subtype = subtype;
}
void Card::addSubtype(QString& subtype)
{
this->subtype.push_back(subtype);
}
int Card::removeSubtype(QString& subtype)
{
int pos = this->subtype.indexOf(subtype);
if(pos>=0)
{
this->subtype.remove(pos);
}
return (pos >=0);
}
void Card::clearSubtype()
{
this->subtype.clear();
}
std::ostream& operator<<( std::ostream& flux, Card& c)
{
//flux << QObject::tr("code : ").toStdString() << m.getCode().toStdString() << QObject::tr(" valeur : ").toStdString() << m.getText().toStdString() << endl;
return flux;
}
|
Markdown
|
UTF-8
| 3,462 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
# PoliCSS
PoliCSS (pronounced Po-lis) will Enforce a CSS methodology, Suppress individual coding styles, Control specificity and Oppress team members.
## What is it?
PoliCSS is a CSS methodology and a PostCSS based linter designed to enforce it.
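As a rough illustration (this is a sketch, not PoliCSS's actual code: the plugin name and messages are made up, and the PostCSS 8 visitor API is assumed), a custom check for two of the rules listed further down could look like this:
```typescript
import type { Plugin } from 'postcss'

// warn on selectors the methodology forbids:
// ids are too specific, bare div/span are too generic
const policssSelectors = (): Plugin => ({
  postcssPlugin: 'policss-selectors',
  Rule(rule, { result }) {
    if (rule.selector.includes('#')) {
      rule.warn(result, 'id selectors are too specific')
    }
    if (/\b(div|span)\b/.test(rule.selector)) {
      rule.warn(result, 'div and span selectors are too generic')
    }
  }
})

export default policssSelectors
```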
## Benefits
* Easier maintenance
* Reduce Bugs
* Portable between projects
## Key Concepts
* Use semantically named classes/modules
* Reduce and __Control__ specificity. Keep it as low as possible, without selectors ending up with equal specificity
* Well-defined structure to selectors
* Order of selectors should not make a difference
* Module should not care about parent selectors
* 9 types of selector:
1. single html tags e.g. `p` - defined in `base` folder
2. nested html tags. Can only be used for tags that could have different parent tags e.g. `ul li` - defined in `base` folder. Limited to `ul/ol li`, `thead/tbody/tfoot tr/th/td`
3. microformat selectors - TODO
4. module selector. single class name e.g. `.foo`. Must have same name as module file i.e. `modules/foo.css`
5. module sub-tag. Used to target a specific tag inside the module e.g. `.foo h3` or `article.foo`. Cannot be parent element of module
6. module sub-component. Single class name, prefixed with module name e.g. `.foo-bar` for `bar sub-component`
7. nested module. Used to alter the css of a child module e.g. `.foo .baz` or `.foo .baz a` where `.baz a` has already been defined in `.baz` module
8. status. Must be prefixed with `is-` or `has-`. These are classes that are likely to be added/removed to an element at runtime. Typical selectors would be `.foo.is-active` or `.foo.has-children`
9. Media query. TODO
## how it works
[PostCSS](https://github.com/postcss/postcss)
[cssnext](http://cssnext.io/)
[Stylelint](http://stylelint.io/)
## Folder Structure
`index.css` includes all files in specific folder order. Files within folders imported alphabetically
to reduce chances of order specificity bugs
1. vendor - any 3rd party css files to import e.g. normalise.css
2. variables - all variables are defined here
3. mixins - mixins defined here
4. base - styles for html tags. no `class` or `id` selectors. `status` selectors allowed
5. microformats - (Optional). Standardized class names and markup patterns
6. modules - Most of the development
## Rules
* Formatting rules - provided by stylelint
* Methodology rules - provided by stylelint
* Methodology rules - custom stylelint plugins
* Methodology rules - Unenforceable
### Enforceable Rules
1. no `div` in selectors - too generic
1. no `span` in selectors - too generic
1. no __id__ selectors - too specific
1. no generic class/module names e.g. `.container, .row, .column`
1. no floating status selectors - must be attached to an element
1. Only allow genuine tag names
1. don't use tag names for class names
1. widths should be % or rem - no px units etc
1. variables used for colors etc
1. module names must match filename
1. filenames should be included alphabetically
1. mixins at top of properties
1. no block elements inside inline elements
### Unenforceable Rules
May be able to provide warnings
1. no __presentational__ class/module names e.g. `.red-border`
1. no __media query__ restricted class/module names e.g. `.col-md-3`
1. don't use `i` tags for icons
## FAQs
1. Empty tags are OK. These should be removed by PostCSS plugins
## Other considerations
1. MaintainableCSS http://maintainablecss.com/ - very good!
1. modernizr
1. css-modules on github
|
C#
|
UTF-8
| 1,442 | 2.984375 | 3 |
[] |
no_license
|
using Gamayun.Infrastucture.Entities;
using System;
using System.Linq;
using System.Collections.Generic;
using System.Text;
namespace Gamayun.Infrastucture.Command.Admin
{
public class CreateSemesterCommandHandler : ICommandHandler<CreateSemesterCommandHandler.Command>
{
private readonly GamayunDbContext _dbContext;
public CreateSemesterCommandHandler(GamayunDbContext dbContext)
{
_dbContext = dbContext;
}
public ICommandResult Handle(Command command)
{
if(string.IsNullOrWhiteSpace(command.Major) || command.Major.Length < 3)
{
return CommandResult.Failed("Major has to have minimum length of 3 letters");
}
if(command.FinishedOn == null || command.FinishedOn.Value.Date <= DateTime.Now)
{
return CommandResult.Failed("Finished date must be in future");
}
var semester = new Semester {
CreatedOn = DateTime.Now,
Major = command.Major,
FinishedOn = command.FinishedOn.Value,
};
_dbContext.Semesters.Add(semester);
_dbContext.SaveChanges();
return CommandResult.Success();
}
public class Command : ICommand
{
public string Major { get; set; }
public DateTime? FinishedOn { get; set; }
}
}
}
|
Java
|
UTF-8
| 942 | 2.296875 | 2 |
[] |
no_license
|
package code.maths.litteraladv;
import code.maths.litteralcom.StrTypes;
import code.util.StringMap;
import code.util.core.IndexConstants;
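/**
 * Leaf operation that resolves a variable name against the evaluation context:
 * when the name is bound in the StringMap its value becomes this node's result,
 * otherwise an error is recorded at the variable's offset in the expression.
 */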
public final class VariableMaOperation extends LeafMaOperation {
private final String varName;
private final int varOffset;
public VariableMaOperation(int _indexInEl, int _indexChild, MethodMaOperation _m, MaOperationsSequence _op) {
super(_indexInEl, _indexChild, _m);
varOffset = _op.getOffset();
varName = StrTypes.value(_op.getParts(),IndexConstants.FIRST_INDEX).trim();
}
@Override
void calculate(StringMap<MaStruct> _conf, MaError _error, MaDelimiters _del) {
MaStruct val_ = MaNullStruct.def(_conf.getVal(varName));
if (val_ != MaNullStruct.NULL_VALUE) {
setStruct(val_);
return;
}
_error.setOffset(getIndexExp()+varOffset);
}
String getVarName() {
return varName;
}
}
|
Python
|
UTF-8
| 155 | 3.640625 | 4 |
[] |
no_license
|
import itertools

# itertools.product is finite; it is count/cycle/repeat that produce infinite
# iterators, so a plain for loop over the product is safe here
for guess in itertools.product("ABCD", "xy"):
    guess = ''.join(guess)
    print(guess)
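# Expected output, one per line: Ax, Ay, Bx, By, Cx, Cy, Dx, Dy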
|
TypeScript
|
UTF-8
| 1,579 | 2.671875 | 3 |
[
"MIT"
] |
permissive
|
import { matches } from '../src/matches'
import { expect } from 'chai'
import { simpleExpressions } from './simpleExpressions'
import { compoundExpressions } from './compoundExpression'
describe('evaluate', () => {
it('should handle primitives', () => {
expect(matches(2, 2), 'Two numbers should be the same').to.be.true
expect(matches('yellow', 'red'), 'these strings dont match').to.be.false
expect(matches({ $eq: 1 }, 1), 'A primitive should be matched').to.be.true
expect(matches({ $gt: 1 }, 2), 'gt should work on primitives').to.be.true
expect(matches({ $ne: 5 }, 5)).to.be.false
expect(matches({ $ne: 6 }, 5)).to.be.true
})
it('should handle simple expressions', () => {
for (const test of simpleExpressions) {
expect(matches(test[1], test[0])).to.equal(test[2], JSON.stringify({ expression: test[1], record: test[0] }, null, 2))
}
})
it('should handle compound expressions', () => {
for (const test of compoundExpressions) {
expect(matches(test[1], test[0])).to.equal(test[2], JSON.stringify({ expression: test[1], record: test[0] }, null, 2))
}
})
it('should disallow "$and" and "$or" at the same level', () => {
expect(() => {
return matches({
$and: [],
$or: []
}, { any: 'wow' })
}).to.throw()
})
it('should require explicit "$not" usage', () => {
expect(() => {
return matches({
$not: 1
}, 1)
}).to.throw()
expect(() => {
return matches({
car: {
$not: 0
}
}, {})
}).to.throw()
})
})
|
PHP
|
UTF-8
| 5,941 | 2.9375 | 3 |
[] |
no_license
|
<?php
//Filedata is the variable that Flex sends with the file to upload
$doc = $_FILES['doc'];
// Folder where the file will be saved
$_UP['pasta'] = 'upload';
// Maximum file size (in bytes)
$_UP['tamanho'] = 1024 * 1024 * 2; // 2 MB
// Array with the PHP upload error types
$_UP['erros'][0] = 'Não houve erro';
$_UP['erros'][1] = 'O arquivo no upload é maior do que o limite do PHP';
$_UP['erros'][2] = 'O arquivo ultrapassa o limite de tamanho especifiado no HTML';
$_UP['erros'][3] = 'O upload do arquivo foi feito parcialmente';
$_UP['erros'][4] = 'Não foi feito o upload do arquivo';
// Check whether there was an upload error. If so, show the error message
if ($_FILES['doc']['error'] != 0)
{
die("Não foi possível fazer o upload, erro:<br />" .
$_UP['erros'][$_FILES['doc']['error']]);
exit;
}
// Check the size of the uploaded file
if ($_UP['tamanho'] < $_FILES['doc']['size'])
{
echo "O arquivo enviado é muito grande, envie arquivos de até 2Mb.";
exit; // stop here, otherwise $nome_final would be undefined below
}
// The file passed every check, time to try moving it into the folder
else
{
$nome_final = $_FILES['doc']['name'];
}
if (move_uploaded_file($_FILES['doc']['tmp_name'], $_UP['pasta'] . '/' . $nome_final))
{
echo "Seu arquivo foi inserido com sucesso!";
}
else
{
echo utf8_encode('Não foi possível enviar este arquivo, tente novamente');
}
//Add the uploaded file to the zip archive
$za = new ZipArchive;
$za->open($_UP['pasta'] . '/arquivos_zip.zip', ZipArchive::CREATE);
$za->addFile(realpath($_UP['pasta'] . '/' . $nome_final), $nome_final);
$za->close();
// Database connection parameters
$servername = "localhost";
$username = "cefet";
$password = "cefet123";
try
{
$conn = new PDO("mysql:host=$servername;dbname=docs", $username, $password);
//echo 'ok';
}
catch(PDOException $e)
{
echo "Connection failed: " . $e->getMessage();
echo 'nao';
exit;
}
//echo $_FILE['doc']['name'];
// Insert the document into the database
$sql = "INSERT INTO documento(caminho, nome) VALUES(:caminho, :nome)";
$stmt = $conn->prepare( $sql );
$caminho = realpath($_UP['pasta'] . '/' . $nome_final);
$stmt->bindParam(':caminho', $caminho);
$stmt->bindParam(':nome', $nome_final);
$result = $stmt->execute();
if($result == FALSE)
{
echo 'NO';
var_dump( $stmt->errorInfo() );
exit;
}
$insertid = $conn->lastInsertId();
//Before extracting the file's parameters we need to know its type.
//The variable $caminho holds the full path to the file.
//Initialize the variable $extension as txt
$extension = "txt";
//Set the maximum time to wait.
set_time_limit(7200);
//Initialize the variables that will store the bytes
$byte1 = "";
$byte2 = "";
$byte3 = "";
$byte4 = "";
//Check whether the file exists
if(file_exists($caminho) and !empty($caminho))
{
//Open the file in binary read mode (rb)
$fp = fopen($caminho, "rb");
//Read the first byte
$byte1 = sprintf("%02X",ord(fgetc($fp)));echo $byte1;
//Read the second byte
$byte2 = sprintf("%02X",ord(fgetc($fp)));echo $byte2;
//Read the third byte
$byte3 = sprintf("%02X",ord(fgetc($fp)));//echo $byte3;
//Read the fourth byte
$byte4 = sprintf("%02X",ord(fgetc($fp)));//echo $byte4;
//Close the file
fclose($fp);
}
echo $byte1;
echo $byte2;
//Now check the signature bytes
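//0x50 0x4B is "PK", the ZIP signature; OpenDocument files (.odt) are ZIP containers holding content.xml, which is why they are treated as "bin" below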
if ($byte1=='50' && $byte2=='4B')
{
echo "Arquivo binario";
$extension = "bin";
}
echo $extension;
//Now continue extracting the parameters, for both the binary and the text case
if($extension == "txt")
{
$fileContents = file_get_contents($caminho);
if ($fileContents === false)
{
echo 'Erro ao ler o arquivo!';
}
$parametros = array();
$palavras = str_word_count($fileContents,1, "{}<>");
echo 'ok 127';
foreach($palavras as $palavra)
{
if(preg_match('/{{([^}]*)}}/', $palavra, $matches))
{
echo 'entrou';
array_unshift($parametros, $matches[1]);
print_r($matches);
}
}
}
if($extension == "bin")
{
$temp = tempnam('.', 'TMP_');
copy($caminho, $temp);
$zip = new ZipArchive;
$dataFile = "content.xml";
//echo $caminho;
if ($zip->open($temp))
{
$fileContents = $zip->getFromName($dataFile);
//echo '<br>';
//$zip->deleteName($dataFile);
$palavras = str_word_count($fileContents,1, "{}<>");
$parametros = array();
foreach($palavras as $palavra)
{
//echo $palavra . '<br>';
//print_r ($palavras);
echo $palavra;
if(preg_match('/{(<[^>]+>)*({([^}]*)})(<[^>]+>)*}/', $palavra, $matches))
{
echo 'entrou';
print_r($matches);
//print_r ($palavras);
array_unshift($parametros, $matches[3]);
}
}
$zip->close();
echo 'ok 117';
}
else
{
echo 'failed';
}
}
$sql1 = "INSERT INTO parametro(nome, doc) VALUES(:nome, :id)";
$stmt1 = $conn->prepare( $sql1 );
//print_r ($insertid);
foreach($parametros as $item)
{
$stmt1->bindParam(':id', $insertid);
$stmt1->bindParam(':nome', $item);
$stmt1->execute();
}
header("Location: criar.php?id=$insertid");
?>
|
Java
|
UTF-8
| 866 | 2 | 2 |
[] |
no_license
|
package com.apirest.efi.models.services;
import java.util.List;
import com.apirest.efi.models.dao.IndicadorGrupoDao;
import com.apirest.efi.models.entity.IndicadorGrupo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
public class IndicadorGrupoService {
@Autowired
private IndicadorGrupoDao indicadorGrupoDao;
public List<IndicadorGrupo> findAll() {
return indicadorGrupoDao.findAll();
}
public List<IndicadorGrupo> findByEstado(Integer estado) {
return indicadorGrupoDao.findByEstado(estado);
}
public IndicadorGrupo findById(Integer id) {
return indicadorGrupoDao.findById(id).orElse(null);
}
public IndicadorGrupo save(IndicadorGrupo indicadorGrupo) {
return indicadorGrupoDao.save(indicadorGrupo);
}
}
|
C++
|
UTF-8
| 1,041 | 2.515625 | 3 |
[] |
no_license
|
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <stdio.h>
#include <stdlib.h>
#include "Semaforo.h"
union uni
{
    int val = 0;              //Value for the SETVAL command
    struct semid_ds *buf;     //For the IPC_SET commands
    unsigned short *array;    //For the GETALL and SETALL commands
    struct seminfo *__buf;    //For the IPC_INFO command
};
//Initialization of the union and the struct
uni u;
struct sembuf b; //Used by Wait and Signal
Semaforo::Semaforo(int valor)
{
    this->id = semget(0xB67751, 1, IPC_CREAT|0600);
    //Check whether the call failed
    if (id == -1)
    {
        perror("Semaforo::Semaforo");
        exit(1);
    }
    u.val = valor; //Incoming parameter (0)
    int aux = semctl(id, 0, SETVAL, u);
}
Semaforo::~Semaforo()
{
    semctl(id, 0, IPC_RMID);
}
int Semaforo::Wait()
{
    b.sem_num = 0;
    b.sem_op = -1;
    b.sem_flg = 0;
    int aux = semop(id, &b, 1);
    return aux; //return the semop result so callers can detect errors
}
int Semaforo::Signal()
{
    b.sem_num = 0;
    b.sem_op = +1;
    b.sem_flg = 0;
    int aux = semop(id, &b, 1);
    return aux; //return the semop result so callers can detect errors
}
|
Python
|
UTF-8
| 979 | 3.984375 | 4 |
[] |
no_license
|
print('%d'%(12345)) # prints "12345"; with no width set, the integer is shown as-is
print('%8d'%(12345)) # prints "ΔΔΔ12345"; width 8, padded with spaces (right-aligned)
print('%-8d'%(-12345)) # prints "-12345ΔΔ"; left-aligned, padded with spaces
print('%08d'%(12345)) # prints "00012345"; width 8, padded with zeros
print('%3d'%(-12345)) # prints "-12345"; width 3, shown in full when the width is too small
print('%c'%('A')) # prints the character "A"
print('%4c'%('A')) # prints "ΔΔΔA"; width 4, padded with spaces
print('%c'%(65)) # prints "A"; 65 is the ASCII code for "A"
print('%s'%('ABCDE')) # prints the string "ABCDE"
print('%8s'%('ABCDE')) # prints "ΔΔΔABCDE"; width 8, padded with spaces (right-aligned)
print('%3s'%('ABCDE')) # prints "ABCDE"; shown in full when the width is too small
print('%6.2s'%('ABCDE')) # prints "ΔΔΔΔAB"; width 6, showing only 2 characters
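# Note: the same formatting is available with f-strings, e.g. f'{12345:8d}' also gives 'ΔΔΔ12345'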
|
Java
|
UTF-8
| 8,408 | 2.5 | 2 |
[] |
no_license
|
package com.example.erika_000.devmanager;
import android.content.ContentValues;
import android.content.Intent;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.os.Bundle;
import android.support.v7.app.ActionBarActivity;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.Toast;
public class Gestion5Activity extends ActionBarActivity {
private EditText txtCodigo;
private EditText txtNombre;
private TextView txtResultado;
private Button btnInsertar;
private Button btnActualizar;
private Button btnEliminar;
private Button btnConsultar;
private SQLiteDatabase db;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_gestion5);
//Get references to the controls
txtCodigo = (EditText)findViewById(R.id.txtReg);
txtNombre = (EditText)findViewById(R.id.txtVal);
txtResultado = (TextView)findViewById(R.id.txtResultado);
btnInsertar = (Button)findViewById(R.id.btnInsertar);
btnActualizar = (Button)findViewById(R.id.btnActualizar);
btnEliminar = (Button)findViewById(R.id.btnEliminar);
btnConsultar = (Button)findViewById(R.id.btnConsultar);
//Open the 'DBUsuarios' database in write mode
UsuariosSQLiteHelper usdbh =
new UsuariosSQLiteHelper(this, "DBUsuarios", null, 1);
db = usdbh.getWritableDatabase();
btnInsertar.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
//Get the values from the text fields
String cod = txtCodigo.getText().toString();
String nom = txtNombre.getText().toString();
//Alternative 1: execSQL() method
//String sql = "INSERT INTO Usuarios (codigo,nombre) VALUES ('" + cod + "','" + nom + "') ";
//db.execSQL(sql);
//Alternative 2: insert() method
ContentValues nuevoRegistro = new ContentValues();
nuevoRegistro.put("codigo", cod);
nuevoRegistro.put("nombre", nom);
db.insert("Usuarios", null, nuevoRegistro);
}
});
btnActualizar.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
//Get the values from the text fields
String cod = txtCodigo.getText().toString();
String nom = txtNombre.getText().toString();
//Alternative 1: execSQL() method
//String sql = "UPDATE Usuarios SET nombre='" + nom + "' WHERE codigo=" + cod;
//db.execSQL(sql);
//Alternative 2: update() method
ContentValues valores = new ContentValues();
valores.put("nombre", nom);
db.update("Usuarios", valores, "codigo=" + cod, null);
}
});
btnEliminar.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
//Get the values from the text fields
String cod = txtCodigo.getText().toString();
//Alternative 1: execSQL() method
//String sql = "DELETE FROM Usuarios WHERE codigo=" + cod;
//db.execSQL(sql);
//Alternative 2: delete() method
db.delete("Usuarios", "codigo=" + cod, null);
}
});
btnConsultar.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
//Alternative 1: rawQuery() method
Cursor c = db.rawQuery("SELECT codigo, nombre FROM Usuarios", null);
//Alternative 2: query() method
//String[] campos = new String[] {"codigo", "nombre"};
//Cursor c = db.query("Usuarios", campos, null, null, null, null, null);
//Iterate over the results to show them on screen
txtResultado.setText("");
if (c.moveToFirst()) {
//Iterate over the cursor until there are no more rows
do {
String cod = c.getString(0);
String nom = c.getString(1);
txtResultado.append(" " + cod + " - " + nom + "\n");
} while(c.moveToNext());
}
}
});
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.menu_main, menu);
return true;
}
/**When a menu item is selected, show a message depending on the id returned (see line 28);
* there will be many case branches*/
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.menu_inicio:
Toast.makeText(getApplicationContext(), "INICIO", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abre;
abre = new Intent(Gestion5Activity.this, MainActivity.class);
startActivity(abre);
return true;
case R.id.menu_devtest:
Toast.makeText(getApplicationContext(), "DEVTEST", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abreDev;
abreDev = new Intent(Gestion5Activity.this, DevTest.class);
startActivity(abreDev);
return true;
case R.id.menu_lessons:
Toast.makeText(getApplicationContext(), "LESSONS", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abreLessons;
abreLessons = new Intent(Gestion5Activity.this, Lessons2Activity.class);
startActivity(abreLessons);
return true;
case R.id.menu_profile:
Toast.makeText(getApplicationContext(), "PERFILES", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abreProf;
abreProf = new Intent(Gestion5Activity.this, Prof3Activity.class);
startActivity(abreProf);
return true;
case R.id.menu_git: /**This id refers to the id of the menu item, in this case search*/
Toast.makeText(getApplicationContext(), "ONCODE", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abreGit;
abreGit = new Intent(Gestion5Activity.this, OnCode4Activity.class);
startActivity(abreGit);
return true;
case R.id.menu_gestion:
Toast.makeText(getApplicationContext(), "GESTIÓN", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abreGestion;
abreGestion = new Intent(Gestion5Activity.this, Gestion5Activity.class);
startActivity(abreGestion);
return true;
case R.id.menu_blog:
Toast.makeText(getApplicationContext(), "GESTIÓN", Toast.LENGTH_SHORT).show();
//launch the next activity
Intent abreBlog;
abreBlog = new Intent(Gestion5Activity.this, Blog6Activity.class);
startActivity(abreBlog);
return true;
case R.id.action_settings:
Toast.makeText(getApplicationContext(), "SETTINGS", Toast.LENGTH_SHORT).show();
//TODO create customization options
// //launch the next activity
// Intent abreBlog;
// abreBlog= new Intent(MainActivity.this, Blog6Activity.class);
// startActivity(abreBlog);
return true;
default:
return super.onOptionsItemSelected(item);
}
}
}
|
Markdown
|
UTF-8
| 15,789 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
# mesa
[](https://www.npmjs.org/package/mesa)
[](https://travis-ci.org/snd/mesa/branches)
[](http://codecov.io/github/snd/mesa?branch=master)
[](https://david-dm.org/snd/mesa)
> simply elegant sql for nodejs
**this documentation targets the upcoming `mesa@1.0.0` release
currently in alpha and available on npm as `mesa@1.0.0-alpha.*`.**
**it's already used in production, is extremely useful, well tested
and quite stable !**
**mesa is a moving target. we are using it in production and it
grows and changes with the challenges it helps us solve.**
**`mesa@1.0.0` will be released when it's done !**
this documentation does not yet represent everything that is possible with mesa.
feel free to [look at the code](src/mesa.coffee). it's just around 600 lines.
[click here for documentation and code of `mesa@0.7.1` which will see no further development.](https://github.com/snd/mesa/tree/0.7.1)
## install
install latest:
```
npm install --save mesa
```
mesa needs [node-postgres](https://github.com/brianc/node-postgres):
```
npm install --save pg
```
require both:
``` js
var mesa = require('mesa');
var pg = require('pg');
```
## connections
let's tell mesa how to get a database connection for a query:
``` js
var database = mesa
.setConnection(function(cb) {
pg.connect('postgres://localhost/your-database', cb);
});
```
a call to `setConnection` is the (only) thing
tying/coupling the `database` mesa-object
to the node-postgres library and to the specific database.
## core ideas and configuration
calling `setConnection(callbackOrConnection)` has returned a new object.
the original mesa-object is not modified:
``` js
assert(database !== mesa);
```
**mesa embraces functional programming:
no method call on a mesa-object modifies that object.
mesa configuration methods are [pure](https://en.wikipedia.org/wiki/Pure_function):
they create a NEW mesa-object copy all OWN properties over to it,
set some property and return it.**
this has no effect:
``` js
mesa
.setConnection(function(cb) {
pg.connect('postgres://localhost/your-database', cb);
});
```
it creates a new object that is not used anywhere and eventually gets garbage collected.
let's configure some tables:
``` js
var movieTable = database.table('movie');
var personTable = database.table('person');
```
there are no special database-objects, table-objects or query-objects in mesa.
only mesa-objects that all have the same methods.
order of configuration method calls does not matter.
you can change anything at any time:
``` js
var personTableInOtherDatabase = personTable
.setConnection(function(cb) {
pg.connect('postgres://localhost/your-other-database', cb);
});
```
**it naturally follows that method calls on mesa-objects are chainable !**
``` js
var rRatedMoviesOfThe2000s = movieTable
// `where` accepts raw sql and optional parameter bindings
.where('year BETWEEN ? AND ?', 2000, 2009)
// repeated calls to where are 'anded' together
// `where` accepts objects that describe conditions
.where({rating: 'R'});
```
### criterion
the `.where()` and `.having()` methods take **exactly** the same
arguments as criterion...
we can always get the SQL and parameter bindings of a mesa-object:
``` js
rRatedMoviesOfThe2000s.sql();
// -> 'SELECT * FROM "movie" WHERE (year BETWEEN ? AND ?) AND (rating = ?)'
rRatedMoviesOfThe2000s.params();
// -> [2000, 2009, 'R']
```
### query
### mohair
mesa uses mohair to generate sql which it then sends to the database.
in addition to it's own methods every mesa-object has the entire interface
of a mohair-object.
for this reason the mohair methods are not documented in this readme.
consult the mohair documentation as well to get the full picture.
mesa supports all methods supported by mohair with some additions.
look into mohair's documentation to get the full picture of what's possible with mesa.
**mohair powers mesa's `.where`.**
### criterion
mesa's `.where` method is one such method that is implemented by mohair.
mohair uses criterion.
for this reason the criterion methods are not documented in this readme.
**criterion powers/documents mesa's `.where` and `.having`.**
we can refine:
``` js
var top10GrossingRRatedMoviesOfThe2000s = rRatedMoviesOfThe2000s
.order('box_office_gross_total DESC')
.limit(10);
```
**because every mesa-object gets a copy of all
a method added to a mesa-object
is available on all mesa-objects down the chain.**
this makes it very easy to extend the chainable interface...
``` js
movieTable.betweenYears = function(from, to) {
return this
.where('year BETWEEN ? AND ?', from to);
};
movieTable.page = function(page, perPage) {
perPage = perPage ? perPage : 10;
return this
.limit(perPage)
.offset(page * perPage);
};
var paginatedTopGrossingPG13RatedMoviesOfThe90s = movieTable
// we can freely chain and mix build-in and custom methods !
.order('box_office_gross_total DESC')
.page(2)
.where({rating: 'PG13'})
.betweenYears(1990, 1999);
```
**we see how pure functions and immutability lead to simplicity, reusability
and [composability](#composability) !**
## select queries
we can run a select query on a mesa object and return all results:
``` js
top10GrossingRRatedMoviesOfThe2000s
// run a select query and return all results
.find()
// running a query always returns a promise
.then(function(top10Movies) {
});
```
**running a query always returns a promise !**
we can run a select query on a mesa object and return only the first result:
``` js
top10GrossingRRatedMoviesOfThe2000s
// run a select query and return only the first result
// `first` automatically calls `.limit(1)` to be as efficient as possible
.first()
// running a query always returns a promise
.then(function(topMovie) {
});
```
we can also simply check whether a query returns any records:
``` js
movieTable
.where({name: 'Moon'})
.exists()
// running a query always returns a promise
.then(function(exists) {
});
```
## insert queries
we can run an insert query on a mesa object:
``` js
movieTable
// whitelist some properties to prevent mass assignment
.allow('name')
.insert({name: 'Moon'})
// running a query always returns a promise
// if insert is called with a single object only the first inserted object is returned
.then(function(insertedMovie) {
})
```
before running insert queries
if you have control over the properties of the inserted objects
and can ensure that no properties
can disable this by calling `.unsafe()`.
you can reenable it by calling `.unsafe(false)`.
you can insert multiple records by passing multiple arguments and/or arrays
to insert:
``` js
movieTable
// disable mass-assignment protection
.unsafe()
// running a query always returns a promise
.insert(
{name: ''},
[
{name: ''},
{name: ''}
],
{name: ''}
)
.then(function(insertedMovies) {
})
```
you see that mesa returns the inserted records by default
## update queries
This part is coming soon.
## delete queries
This part is coming soon.
## sql fragments
an sql fragment is any object with the following properties:
an sql([escape]) method which returns an sql string
takes an optional parameter escape which is a function to be used to escape column and table names in the resulting sql
a params() method which returns an array
at the heart of mesa is the query method
if you pass mesa (which is an sql fragment) into the query function...
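as a rough sketch (the shape below is inferred from the description above, not taken from mesa's source), a handwritten fragment could look like this:
``` ts
// a minimal handwritten sql fragment: an object with sql() and params()
var olderThan = function(age: number) {
  return {
    sql: function(escape?: (name: string) => string) {
      var column = escape ? escape('age') : 'age';
      return column + ' > ?';
    },
    params: function() {
      return [age];
    }
  };
};

olderThan(21).sql();    // -> 'age > ?'
olderThan(21).params(); // -> [21]
```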
## connections revisited
This part is coming soon.
``` js
```
`setConnection` either accepts
`wrapInConnection`
all of sql
down to the metal
## debugging
``` js
mesaWithDebug = mesa.debug(function( , detail, state, verboseState, instance)
```
only on refined versions
intermediary results
debugging per table, per query, ...
directly before a query debug will just for that specific query
just display sql
``` js
mesa = mesa.debug(function(topic, query, data)
if (topic === 'query' && event === 'before') {
console.log('QUERY', data.sql, data.params);
}
});
```
the topics are `connection`, `query`, `transaction`, `find`, `embed`
that function will be called with five arguments
the first argument is
the fifth argument is the instance
the fourth argument contains ALL additional local state that is `connection, arguments`
here is a quick overview:
look into the source to see exactly which
## queueing
often you want to do something to the records: for all tables, for a specific table, or for a specific query.
you can `configure` mesa instances.
you can add functions to the queues with the following ways
hooks either run on the array of all items or on each item
array queues are run before
functions in queues are run in the order they were added.
there are the following queues:
- `queueBeforeInsert` run before insert on array of items
- there is no `queueBeforeUpdate` because update always operates on a single item. use `queueBeforeEachUpdate`
- `queueBeforeEachInsert` run before insert on each item
- `queueBeforeEachUpdate` run before update on each item
- `queueBeforeEach` run before update or insert on each item
- `queueAfterSelect` run after find or first on array of items
- `queueAfterInsert` run after insert on array of items
- `queueAfterUpdate` run after update on array of items
- `queueAfterDelete` run after delete on array of items
- `queueAfter` run after find, first, insert, update and delete on array of items
- `queueAfterEachSelect` run after find or first on each item
- `queueAfterEachInsert` run after insert on each item
- `queueAfterEachUpdate` run after update on each item
- `queueAfterEachDelete` run after delete on each item
- `queueAfterEach` run after find, first, insert, update and delete on each item
### nice things you can do with queueing:
#### omit password property when a user is returned
``` js
var _ = require('lodash');
userTable
.queueAfterEachSelect(_.omit, 'password')
.where({id: 3})
.first(function(user) {
});
```
#### hash password before user is inserted or updated
``` js
var Promise = require('bluebird');
var bcrypt = Promise.promisifyAll(require('bcrypt'));
var hashPassword = function(record) {
if (record.password) {
return bcrypt.genSaltAsync(10).then(function(salt) {
return bcrypt.hashAsync(record.password, salt);
});
} else {
return Promise.resolve(null);
}
};
userTable = userTable.queueBeforeEach(hashPassword);
```
#### convert property names between camelCase and snake_case
[see example/active-record.coffee](example/active-record.coffee)
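a rough sketch of what such a conversion could look like with the queues described above (`camelToSnake`, `snakeToCamel` and `mapKeys` are illustrative helpers, not part of mesa; `userTable` is the table object from the earlier examples):
``` ts
// illustrative helpers, not part of mesa
var camelToSnake = function(key: string) {
  return key.replace(/([A-Z])/g, '_$1').toLowerCase();
};
var snakeToCamel = function(key: string) {
  return key.replace(/_([a-z])/g, function(match, char) { return char.toUpperCase(); });
};
var mapKeys = function(fn: (key: string) => string) {
  return function(record: {[key: string]: any}) {
    var result: {[key: string]: any} = {};
    Object.keys(record).forEach(function(key) { result[fn(key)] = record[key]; });
    return result;
  };
};

userTable = userTable
  // snake_case on the way into the database ...
  .queueBeforeEach(mapKeys(camelToSnake))
  // ... and camelCase on the way out
  .queueAfterEach(mapKeys(snakeToCamel));
```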
#### set columns like `created_at` and `updated_at` automatically
``` js
userTable = userTable
.queueBeforeEach(function(record) {
record.updated_at = new Date();
return record;
})
.queueBeforeInsert(function(record) {
record.created_at = new Date();
return record;
});
```
#### fetch associated data
[see includes](#includes)
#### protect from mass assignment
mesa comes with a very powerful mechanism to manipulate
records before they are sent to the database or after they were received
from the database and before returning them.
if you are familiar with the active record pattern
prefer a more object-oriented style
here is how you would use mesa to implement it
as the foundation
as the building blocks
if you want to use camelcased property names in your program
and underscored in your database you can automate the translation
```
```
add them to the mesa instance and have it work for all your tables
by setting the order you ensure that the other hooks see
camelcased properties !!!
## includes
**includes are a NEW feature and may not be as stable as the rest**
in a relational database, rows in different tables are linked via foreign keys.
includes make it easy to fetch those linked rows and add them to our data:
lets assume, for a moment, the following tables and relationships:
- `user` with columns `id`, `name`. has one `address`, has many `orders`
- `address` with columns `id`, `street`, `city`, `user_id`. belongs to `user` via foreign key `user_id` -> `user.id`
- `order` with columns `id`, `status`. belongs to `user`
``` js
userTable = database.table('user');
addressTable = database.table('address');
orderTable = database.table('order');
```
we can now find some users and include the orders in each of them:
### has many relationship
``` js
userTable
.include(orderTable)
.find(function(users) {
})
```
a lot is happening here. let's break it down:
include has no side-effects and does not fetch any data.
instead it [queues](#queueing) a function to be executed
on all results (if any) of `first`, `find`, `insert`, `delete` and `update`
queries further down the chain.
in this case that function will
will run a query on `orderTable` to fetch all
orders where `order.user_id` is in the list of all `id` values in `users`.
it will then for every user add as property `orders` the list of all
orders where `user.id === order.user_id`.
**by default include queues a fetch of a has-many relationship**
the above code snippet is equivalent to this:
``` js
userTable
.include({
left: 'id',
right: 'user_id',
forward: true,
first: false,
as: 'orders'
}, orderTable)
.find(function(users) {
})
```
the first argument to `include` is an optional link-object.
in case that link-object is missing, or any of its properties are missing (and only those fields),
mesa will autocomplete it from table names and primary keys set with `.primaryKey(key)`.
### belongs to relationship
``` js
orderTable
.include({forward: false, first: true}, userTable)
.find(function(users) {
})
```
### has many through
you can add as many additional link
you can modify, add conditions
you can nest
using an explicit link object:
**you get the idea**
includes are intentionally very flexible.
they work with any two tables where the values in
whose values match up.
if you are using primary keys other than `id`
fetch a one-to-one association (in a single additional query)
the implementation uses the hooks
its surprisingly simple
using the same connection as the
use one additional query to fetch all
and then associate them with the records
order and conditions and limits on the other tables have their full effects
## conditional
using mesa you'll often find yourself calling methods only
when certain conditions are met:
``` js
var dontFindDeleted = true;
var pagination = {page: 4, perPage: 10};
var tmp = userTable;
if (dontFindDeleted) {
tmp = userTable.where({is_deleted: false});
}
if (pagination) {
tmp = tmp
.limit(pagination.perPage)
.offset(pagination.page * pagination.perPage);
}
tmp.find(function(users) {
});
```
all those temporary objects are not very nice.
fortunately there is another way:
``` js
userTable
.when(dontFindDeleted, userTable.where, {is_deleted: false})
.when(pagination, function() {
return this
.limit(pagination.perPage)
.offset(pagination.page * pagination.perPage);
})
.find(function(users) {
});
```
## mesa by example
## contribution
**TL;DR: bugfixes, issues and discussion are always welcome.
ask me before implementing new features.**
i will happily merge pull requests that fix bugs with reasonable code.
i will only merge pull requests that modify/add functionality
if the changes align with my goals for this package
and only if the changes are well written, documented and tested.
**communicate:** write an issue to start a discussion
before writing code that may or may not get merged.
## [license: MIT](LICENSE)
|
Java
|
UTF-8
| 1,717 | 3.875 | 4 |
[] |
no_license
|
import javax.swing.*;
import java.awt.*;
import java.util.Scanner;
import static javax.swing.JFrame.EXIT_ON_CLOSE;
public class GoToCenter {
public static void mainDraw(Graphics graphics) {
// Create a line drawing function that takes 2 parameters:
// The x and y coordinates of the line's starting point
// and draws a line from that point to the center of the canvas.
// Draw 3 lines with that function. Use loop for that.
// This is a different method with user and only one lines
// Scanner scanner = new Scanner(System.in);
// System.out.print("Hello, we will draw today. Gimme a number: ");
// int param = scanner.nextInt();
// System.out.print("Gimme the second number: ");
// int param2 = scanner.nextInt();
// int param = 0;
// int param2 = 0;
for (int i = 1; i <= 3; i++) {
int param = i * 100 + (int )(Math.random() * 10);
int param2 = i + 10 + (int) (Math.random() * 10);
centerLine(graphics, param,param2);
}
}
public static void centerLine(Graphics center, int a, int b) {
center.setColor(Color.red);
center.drawLine(a, b, WIDTH /2, HEIGHT /2);
}
// Don't touch the code below
static int WIDTH = 320;
static int HEIGHT = 343;
public static void main(String[] args) {
JFrame jFrame = new JFrame("Drawing");
jFrame.setSize(new Dimension(WIDTH, HEIGHT));
jFrame.setDefaultCloseOperation(EXIT_ON_CLOSE);
jFrame.add(new ImagePanel());
jFrame.setLocationRelativeTo(null);
jFrame.setVisible(true);
}
static class ImagePanel extends JPanel {
@Override
protected void paintComponent(Graphics graphics) {
super.paintComponent(graphics);
mainDraw(graphics);
}
}
}
|
Python
|
UTF-8
| 2,035 | 3.171875 | 3 |
[
"MIT"
] |
permissive
|
from Rotor import Rotor
from Plugboard import Plugboard
from StaticRotor import StaticRotor
from Reflector import Reflector
class EnigmaController:
def __init__(self, rotors, keys, reflector):
self.plug = Plugboard()
self.static = StaticRotor()
self.left = Rotor(rotors[0], keys[0])
self.middle = Rotor(rotors[1], keys[1])
self.right = Rotor(rotors[2], keys[2])
self.reflector = Reflector(reflector)
self.rotors = rotors
self.initialKeys = keys
def cipher(self, plainLetter):
# Rotor stepping: handle the notch positions before enciphering the letter
if self.middle.isNotch():
self.middle.step()
self.left.step()
if self.right.isNotch():
if not self.middle.isNotch():
self.middle.step()
self.right.step()
# If you want to use the plugboard, uncomment the following lines
cipherPlug = self.plug.cipher(plainLetter)
cipherStatic = self.static.cipher(cipherPlug)
cipherRightRotor = self.right.cipherLeft(cipherStatic)
cipherMiddle = self.middle.cipherLeft(cipherRightRotor)
cipherLeft = self.left.cipherLeft(cipherMiddle)
cipherReflector = self.reflector.cipher(cipherLeft)
cipherLeft = self.left.cipherRight(cipherReflector)
cipherMiddle = self.middle.cipherRight(cipherLeft)
cipherRight = self.right.cipherRight(cipherMiddle)
return cipherRight
def getKey(self):
return [self.left.getKey(), self.middle.getKey(), self.right.getKey()]
def getRotors(self):
return [self.left.getRotor(), self.middle.getRotor(), self.right.getRotor()]
def setRotorsAndKeys(self, rotors,keys):
self.left = Rotor(rotors[0], keys[0])
self.middle = Rotor(rotors[1], keys[1])
self.right = Rotor(rotors[2], keys[2])
self.initialKeys = keys
def setKeys(self, keys):
self.setRotorsAndKeys(self.rotors, keys)
def resetKeys(self):
self.setKeys(self.initialKeys)
|
Python
|
UTF-8
| 625 | 3.65625 | 4 |
[] |
no_license
|
from collections import Counter

def firstRepeated(str):
    words = str.split(" ")
    print(words)
    dictt = Counter(words)
    print(dictt)
    for key in words:
        if(dictt[key]>1):
            print(key)
            break

if __name__ == "__main__":
    str = "Ravi had been saying that he had been there"
    firstRepeated(str)

#Core Logic
"""
def fun(str):
    words = str.split(" ")
    print(words)
    for i in range(len(words)):
        for j in range(i+1,len(words)):
            if words[i] == words[j]:
                print(words[i])
                return

if __name__ == "__main__":
    str = "Ravi had been saying that he had been there"
    fun(str)"""
|
Java
|
UTF-8
| 1,507 | 2.625 | 3 |
[] |
no_license
|
import org.junit.Test;
import static org.junit.Assert.*;
import org.testfx.framework.junit.ApplicationTest;
import resources.*;
public class RahulM4 extends ApplicationTest {
@Test
public void easyMonsterCheck() {
Player easyPlayer = new Player("easyCheck", Weapon.SPEAR, 750, "Easy");
Monster monsterOne = new MonsterOne(easyPlayer);
Monster monsterTwo = new MonsterTwo(easyPlayer);
Monster monsterThree = new MonsterThree(easyPlayer);
assertEquals(100, monsterOne.getHealth(), 0);
assertEquals(10, monsterOne.getDamageGivenAmount(), 0);
assertEquals(50, monsterTwo.getHealth(), 0);
assertEquals(25, monsterTwo.getDamageGivenAmount(), 0);
assertEquals(200, monsterThree.getHealth(), 0);
assertEquals(3, monsterThree.getDamageGivenAmount(), 0);
}
@Test
public void hardMonsterCheck() {
Player hardPlayer = new Player("hardCheck", Weapon.SPEAR, 250, "Hard");
Monster monsterOne = new MonsterOne(hardPlayer);
Monster monsterTwo = new MonsterTwo(hardPlayer);
Monster monsterThree = new MonsterThree(hardPlayer);
assertEquals(200, monsterOne.getHealth(), 0);
assertEquals(20, monsterOne.getDamageGivenAmount(), 0);
assertEquals(100, monsterTwo.getHealth(), 0);
assertEquals(45, monsterTwo.getDamageGivenAmount(), 0);
assertEquals(300, monsterThree.getHealth(), 0);
assertEquals(9, monsterThree.getDamageGivenAmount(), 0);
}
}
|
C++
|
UTF-8
| 5,547 | 3.5 | 4 |
[] |
no_license
|
//Suhan Ma Skip List and Rectangle
#include <iostream>
#include <string>
#include <cstring>
#include <ctime>
#include "rectangle.h"
#include "skip_list.h"
using namespace std;
int skip_list::random_level()
{
int level = 0;
while (rand()%2 == 0) level++;//flip a coin, decide weather to "go deeper"
if (level<MAX_LEVEL)
return level;
else
return MAX_LEVEL;//it is not good to get larger than MAX_LEVEL, and it is unnecessary
}
skip_list::skip_list()
{
srand((int)time(0));//random seed using current time
Rectangle r1;
r1.set_name("~~~~~");//nil, which has very large ascii value
// -1 -1 -1 -1
current_level=1;
head=new node(r1,MAX_LEVEL);
for(int i=0; i<MAX_LEVEL;i++)
head->forward[i]=head;//head points to itself at the very beginning, which means head itself works as an end point
}
skip_list::~skip_list(){
delete head;
}
//insert a new node
void skip_list::insert(Rectangle r1){
int new_level=random_level();//get a random_level at the beginning
node **track= new node *[MAX_LEVEL]; //track where to insert node
node *temp = head;
string name;
for(int i=current_level-1;i>=0;i--)
{
name=temp->forward[i]->r.get_name();
while (name<r1.get_name())
{
temp=temp->forward[i];
name=temp->forward[i]->r.get_name();
}
track[i]=temp;//track where to insert the new "node"
}
if(new_level>current_level)//is it necesssary to use more forward?
{
for (int i=current_level; i < new_level; i++)
track[i]=head;//update our track array, indicates all the new added forward are all point to "nil"
current_level=new_level;
}
node *insert_node = new node(r1,current_level);
for(int i=0;i<current_level;i++){
insert_node->forward[i]=track[i]->forward[i];
track[i]->forward[i]=insert_node;
}
delete [] track;//free the temporary predecessor-tracking array
cout<<endl<<"Insertion of "<<r1.get_name()<<" succeed!"<<endl;
}
void skip_list::remove(string k_name){
node *track[MAX_LEVEL];//predecessors of the removal point at every level
node *temp = head;
string name;
for(int i=current_level-1;i>=0;i--)
{
name=temp->forward[i]->r.get_name();
while(name<k_name)
{
temp=temp->forward[i];
name=temp->forward[i]->r.get_name();
}
track[i]=temp;//closest predecessor on this level
}
node *target=temp->forward[0];//candidate node on the bottom level
if(target!=head && target->r.get_name()==k_name)
{
for(int i=0;i<current_level;i++)
{
if(track[i]->forward[i]!=target)
break;//the node is not linked on this level or above
track[i]->forward[i]=target->forward[i];//jump off the element that will be deleted
}
delete target;//free the removed node
cout<<endl<<"Remove "<<k_name<<" succeed!"<<endl;
}
else
cout<<endl<<"Rectangle "<<k_name<<" not found, nothing removed!"<<endl;
}
//since we did not have a name as the key to help remove the rectangle, we search the lowest level and then remove the element
void skip_list::remove(int x, int y, int w, int h)
{
node *temp=head->forward[0];//start from the first rectangle data
string name;
Rectangle r2;
while(temp!=head)//not run to end yet
{
r2=temp->r;
if(r2.get_x()==x && r2.get_y()==y && r2.get_w()==w && r2.get_h()==h)
{
remove(r2.get_name());
break;
}
temp=temp->forward[0];
}
}
void skip_list::search(string k_name){
node *temp=head;
string name;
bool found=false;
int indicator=0;
for(int i=current_level-1;i>=0;i--)
{
name=temp->forward[i]->r.get_name();
while(name<k_name)
{
temp=temp->forward[i];
name=temp->forward[i]->r.get_name();
}
if (name == k_name)
{
found=true;
indicator=i;
break;
}
}
if (found == true)
{
cout<<endl<<"Found the rectangle of the given name "<<k_name<<endl;
temp->forward[indicator]->r.print_info();
cout<<endl;
}
else
cout<<endl<<"Rectangle "<<k_name<<" not found!"<<endl;
}
//Print all nodes
void skip_list::print(){
node *temp=head->forward[0];
while(temp->r.get_name()!="~~~~~")
{
temp->r.print_info();
temp=temp->forward[0];
}
}
//range search
void skip_list::rangesearch(int x, int y, int w, int h){
node *temp=head->forward[0]; //search for all rectangle in given range, it is better to run through whole list
int count=0;
string name;
cout<<endl<<"Perform Range Search "<<endl;
while(temp!=head)//not run to the nil
{
if(temp->r.recIntersect(x, y, w, h))//check the current rectangle, not the one after it
{
++count;
cout<<"Overlap No."<<count<<" info:"<<endl;
temp->r.print_info();
}
temp=temp->forward[0];
}
if(count==0)
cout<<"No overlap within the given range!"<<endl<<endl;
}
void skip_list::allintersections(){
int count=0;
	node *temp=head->forward[0];
	node *temp2=head->forward[0]->forward[0];//one node ahead of temp
	while(temp->r.get_name()!="~~~~~") //temp has not reached the sentinel yet
	{
		temp2=temp->forward[0];//start the inner scan one step ahead
		while(temp2->r.get_name()!="~~~~~") //temp2 has not reached the sentinel yet
{
if(temp->r.recIntersect(temp2->r.get_x(),temp2->r.get_y(),temp2->r.get_w(),temp2->r.get_h())==true)
{
cout<<endl<<"Overlap pair "<<++count<<":"<<endl;
cout<<"Rectangle 1: ";temp->r.print_info();
cout<<"Rectangle 2: ";temp2->r.print_info();
}
temp2=temp2->forward[0];
}
temp=temp->forward[0];
}
}
|
JavaScript
|
UTF-8
| 1,169 | 3.125 | 3 |
[] |
no_license
|
function translate(){
var title = document.getElementById("form-signin-heading");
var email = document.getElementById("inputEmail");
var password = document.getElementById("inputPassword");
    // document.getElementById is used to specify which node will be modified
var remember = document.getElementsByTagName("span")[0];
var button = document.getElementsByClassName("btn")[0];
    //these calls return a collection with all matching elements for the given tag name and class
title.innerHTML = "Por favor inicia sesion";
email.placeholder ="Correo Electronico";
password.placeholder = "Contraseña";
remember.innerHTML = "Recordar datos";
button.innerHTML = "Iniciar Sesion";
}
translate();
function mostrarInfo(){
var email=document.getElementById("inputEmail").value;
var password=document.getElementById("inputPassword").value;
var emMostrar=document.getElementById("correo");
    emMostrar.innerHTML= "<h3>Datos de formulario</h3>" + "El correo ingresado es: <br>"+ email; // use the "email" variable; "mail" was undefined
var passMostrar=document.getElementById("contraseña");
passMostrar.innerHTML="La contraseña ingresada es: </br>" + password;
}
//Nothing was being printed on the page because the undefined variable "mail" was used instead of "email" (fixed above)
|
Python
|
UTF-8
| 1,461 | 2.59375 | 3 |
[] |
no_license
|
""" Mail Sparkpost Driver """
from masonite.contracts.MailContract import MailContract
from masonite.drivers.BaseMailDriver import BaseMailDriver
from masonite.exceptions import DriverLibraryNotFound
class MailSparkpostDriver(BaseMailDriver, MailContract):
"""Sparkpost driver
"""
def _sandbox_mode(self):
if self.config.DEBUG is True:
return True
return False
def send(self, message=None):
"""Sends the message through the Sparkpost service.
Keyword Arguments:
message {string} -- The message to be sent to Sparkpost. (default: {None})
Returns:
requests.post -- Returns the response as a requests object.
"""
try:
from sparkpost import SparkPost
except ImportError:
raise DriverLibraryNotFound(
'Could not find the "sparkpost" library. Please pip install this library '
'by running "pip install sparkpost"')
if not message:
message = self.message_body
sp = SparkPost(api_key=self.config.DRIVERS['sparkpost']['api_key'])
response = sp.transmissions.send(
use_sandbox=self._sandbox_mode(),
recipients=[self.to_address],
html=message,
from_email='{0} <{1}>'.format(self.config.FROM['name'], self.config.FROM['address']),
subject=self.message_subject
)
return response
|
Python
|
UTF-8
| 61 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
def get_bit(byteval, idx):
    # True if bit `idx` (0-based, least-significant first) of `byteval` is set
    return (byteval & (1 << idx)) != 0
|
Java
|
UTF-8
| 1,428 | 3.75 | 4 |
[] |
no_license
|
package com.nsc.datastructures.list.circularlinkedlist;
import java.util.StringJoiner;
public class CircularLinkedList<T> {
private Node head;
private Node tail;
private int size;
public CircularLinkedList() {
head = tail = null;
size = 0;
}
public void add(T element) {
Node node = new Node(element);
if (head == null) {
head = tail = node;
tail.setNextNode(head);
} else {
node.setNextNode(head);
tail.setNextNode(node);
tail = node;
}
size++;
}
    public void printAll() {
        System.out.print("[");
        StringJoiner stringJoiner = new StringJoiner(", ");
        Node currentNode = head;
        if (currentNode != null) { // guard against an empty list
            do {
                // String.valueOf avoids a ClassCastException when T is not String
                stringJoiner.add(String.valueOf(currentNode.getData()));
                currentNode = currentNode.getNextNode();
            } while (currentNode != head);
        }
        System.out.print(stringJoiner.toString());
        System.out.println("]");
    }
private class Node {
private Node nextNode;
T data;
public Node(T data) {
this.data = data;
nextNode = null;
}
public Node getNextNode() {
return nextNode;
}
public void setNextNode(Node nextNode) {
this.nextNode = nextNode;
}
public T getData() {
return data;
}
}
}
|
Python
|
UTF-8
| 1,929 | 2.8125 | 3 |
[] |
no_license
|
import numpy as np
from scipy.optimize import leastsq
import matplotlib.pyplot as pl
import copy
import numpy
def fun(x, p):
a, b = p
return a*x + b
def fun_2(x, p):
a, b ,c = p
return a*x*x+b*x + c
def residuals(p, x, y):
return fun(x, p) - y
def residuals_2(p, x, y):
    a = p[0]
    if a < 0:
        # leastsq expects an array of residuals, so penalise a negative
        # quadratic coefficient with a uniformly large residual vector
        return np.full_like(y, 1e7, dtype=float)
    return fun_2(x, p) - y
def fitline(x1,y1):
x1 = np.array(x1, dtype=float)
y1 = np.array(y1, dtype=float)
r = leastsq(residuals, [1, 1], args=(x1, y1))
return r
def fitline_2(x1,y1):
x1 = copy.deepcopy(x1)
y1 = copy.deepcopy(y1)
x1 = np.array(x1, dtype=float)
y1 = np.array(y1, dtype=float)
r = leastsq(residuals_2, [1, 1, 1], args=(x1, y1))
return r
def getallData(r,start,end):
y = []
x = []
for x0 in range(start,end,1):
y0 = r[0][0]*x0+r[0][1]
x.append(x0)
y.append(y0)
return x,y
def getallData_2(r,start,end):
y = []
x = []
y_min = 100000
for x0 in range(start,end,1):
y0 = r[0][0]*x0*x0+r[0][1]*x0+r[0][2]
if y0<y_min:
y_min = y0
else:
y0 = y_min
x.append(x0)
y.append(y0)
return x,y
####################ARMA
def regression(x,y):
x1 = []
x2 = []
x3 = []
for line in x[0]:
x1.append(line)
for line in x[1]:
x2.append(line)
for line in x[2]:
x3.append(line)
x1 = np.array(x1, dtype=float)
x2 = np.array(x2, dtype=float)
x3 = np.array(x3, dtype=float)
y1 = np.array(y, dtype=float)
r = leastsq(residuals_ar, [1,1,1,1], args=(x1,x2,x3, y1))
return r
def residuals_ar(p, x1,x2,x3, y):
return fun_ar(x1,x2,x3, p) - y
def fun_ar(x1,x2,x3, p):
a = p[0:3]
b = p[3]
return a[0]*x1+a[1]*x2+a[2]*x3+ b
def getallData_re(r,x1,x2,x3):
    # the intercept is the fourth fitted parameter, r[0][3], not r[0][1]
    y0 = r[0][0]*x1+r[0][1]*x2+r[0][2]*x3+r[0][3]
    return y0
|
Python
|
UTF-8
| 634 | 4.1875 | 4 |
[] |
no_license
|
'''
Input: a List of integers
Returns: a List of integers
'''
def moving_zeroes(arr):
end = len(arr)-1
# find the last non-zero value
while arr[end] == 0 and end > 0:
end -= 1
cur = 0
# iterate through the array
while cur < end:
# if arr[cur] is 0, swap it with the end
if arr[cur] == 0:
arr[cur], arr[end] = arr[end], arr[cur]
end -= 1
cur += 1
return arr
if __name__ == '__main__':
# Use the main function here to test out your implementation
arr = [0, 3, 1, 0, -2]
print(f"The resulting of moving_zeroes is: {moving_zeroes(arr)}")
|
Java
|
UTF-8
| 95 | 1.5625 | 2 |
[] |
no_license
|
package dao;
import entidade.Task;
public interface TaskDao extends BaseDAO<Task, Long> {
}
|
Java
|
UTF-8
| 3,735 | 2.078125 | 2 |
[] |
no_license
|
package io.mns.mpfm.ui.fragments;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.databinding.DataBindingUtil;
import androidx.fragment.app.Fragment;
import androidx.lifecycle.ViewModelProviders;
import androidx.navigation.NavDirections;
import androidx.navigation.Navigation;
import io.mns.mpfm.R;
import io.mns.mpfm.databinding.FragmentHomeBinding;
import io.mns.mpfm.db.entities.Balance;
import io.mns.mpfm.db.entities.Transaction;
import io.mns.mpfm.ui.adapters.TransactionAdapter;
import io.mns.mpfm.ui.callbacks.TransactionClickCallback;
import io.mns.mpfm.viewmodels.HomeViewModel;
/**
* A simple {@link Fragment} subclass.
*/
public class HomeFragment extends Fragment implements TransactionClickCallback {
private FragmentHomeBinding binding;
private HomeViewModel viewModel;
private TransactionAdapter adapter;
@Override
public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
binding = DataBindingUtil.inflate(inflater, R.layout.fragment_home, container, false);
return binding.getRoot();
}
@Override
public void onActivityCreated(@Nullable Bundle savedInstanceState) {
super.onActivityCreated(savedInstanceState);
init();
}
private void init() {
setupViewModel();
setupListeners();
retrieveBalance();
setupTransactionList();
startDataObservation();
}
private void retrieveBalance() {
if (getContext() != null) {
Balance balance = viewModel.getBalance(getContext());
binding.balanceRemaining.setText(String.valueOf(balance.getRemaining()));
binding.balanceIncome.setText(String.valueOf(balance.getIncome()));
binding.balanceExpense.setText(String.valueOf(balance.getExpense()));
}
}
private void startDataObservation() {
viewModel.loadTransactions().observe(this, transactions ->
adapter.setData(transactions));
}
private void setupTransactionList() {
adapter = new TransactionAdapter(this);
binding.transactionList.setAdapter(adapter);
}
private void setupListeners() {
binding.newTransaction.setOnClickListener(v ->
Navigation.findNavController(v).navigate(R.id.home_to_add_transaction));
binding.income.setOnClickListener(v -> {
if (getView() != null) {
NavDirections action = HomeFragmentDirections.homeToFilter().setFilterType(0);
Navigation.findNavController(getView()).navigate(action);
}
});
binding.expense.setOnClickListener(v -> {
if (getView() != null) {
NavDirections action = HomeFragmentDirections.homeToFilter().setFilterType(1);
Navigation.findNavController(getView()).navigate(action);
}
});
}
private void setupViewModel() {
if (getActivity() != null) {
HomeViewModel.Factory factory = new HomeViewModel.Factory(getActivity().getApplication());
viewModel = ViewModelProviders.of(this, factory).get(HomeViewModel.class);
binding.setViewmodel(viewModel);
}
}
@Override
public void onClick(Transaction transaction) {
if (getView() != null) {
NavDirections action = HomeFragmentDirections.homeToAddTransaction().setTransactionId(transaction.getId());
Navigation.findNavController(getView()).navigate(action);
}
}
}
|
C
|
ISO-8859-1
| 308 | 3.875 | 4 |
[
"MIT"
] |
permissive
|
#include <stdio.h>
#include <stdlib.h>
/*
Print the numbers between 0 and 4 with a step of 0.25 between them, i.e. 0, 0.25, 0.5, 0.75 ... 4.
*/
int main (void)
{
float Cont;
for(Cont=0;Cont<=4;Cont=Cont+0.25)
{
printf("%.2f\t",Cont);
}
printf("\n");
system("pause");
}
|
C++
|
UTF-8
| 3,347 | 3.09375 | 3 |
[] |
no_license
|
//Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0 ? Find all unique triplets in the array which gives the sum of zero.
//
//Note : The solution set must not contain duplicate triplets.
//
// For example, given array S = [-1, 0, 1, 2, -1, -4],
//
// A solution set is :
//[
// [-1, 0, 1],
// [-1, -1, 2]
//]
#include "Header.h"
int quickSortcompr(const void * a, const void * b)
{
return (*(int*)a - *(int*)b);
}
vector<vector<int>> KSum(vector<int>& nums, int target, int begin, int K) {
int i = begin;
int j = nums.size() - 1;
vector<vector<int>> ret;
int prevI = INT_MAX;
if (K == 2) {
int prevJ = INT_MAX;
while (i < j) {
if (prevJ != INT_MAX && prevJ == nums[j]) {
j--;
continue;
}
if (prevI != INT_MAX && prevI == nums[i]) {
i++;
continue;
}
int sum = nums[i] + nums[j];
if (sum == target) {
vector<int> result;
result.push_back(nums[i]);
result.push_back(nums[j]);
ret.push_back(result);
				prevI = nums[i];//remember the values (not the indices) to skip duplicates
				prevJ = nums[j];
i++;
j--;
}
else if (sum < target) {
				prevI = nums[i];
i++;
}
else {
				prevJ = nums[j];
j--;
}
}
}
else {
for (i=begin;i<= nums.size() - K;i++) {
			if (prevI != INT_MAX && prevI == nums[i]) {
				continue;//the for loop already increments i
			}
			prevI = nums[i];
vector<vector<int>> values = KSum(nums, target - nums[i], i+1, K - 1);
for (int j = 0;j < values.size();j++) {
vector<int> result;
result.push_back(nums[i]);
for (int k = 0;k < values[j].size();k++) {
result.push_back(values[j][k]);
}
ret.push_back(result);
}
}
}
return ret;
}
vector<vector<int>> Solution::threeSum(vector<int>& nums) {
qsort(&(nums[0]), nums.size(), sizeof(int), quickSortcompr);
//vector<vector<int>> ret = KSum(nums, 0, 0, 3);
//return ret;
sort(nums.begin(), nums.end());
vector<vector<int>> ans;
if (nums.size() < 3) {
return ans;
}
for (int i = 0; i < nums.size() - 2; i++) {
if (i == 0 || nums[i] != nums[i - 1]) {
int target = 0 - nums[i];
int lo = i + 1;
int hi = nums.size() - 1;
while (lo < hi) {
if (nums[lo] + nums[hi] == target) {
vector<int> result;
result.push_back(nums[i]);
result.push_back(nums[lo]);
result.push_back(nums[hi]);
ans.push_back(result);
					while (lo < hi && nums[lo] == nums[lo + 1])
						lo++;
					while (lo < hi && nums[hi] == nums[hi - 1])
						hi--;
					lo++;//step past the pair just recorded so the loop can terminate
					hi--;
				}
else if (nums[lo] + nums[hi] > target) {
hi--;
}
else {
lo++;
}
}
}
}
return ans;
}
|
Python
|
UTF-8
| 627 | 2.890625 | 3 |
[] |
no_license
|
def process():
result = ""
with open("./srt.txt","r") as f :
for line in f.readlines():
if len(line.strip()) == 0:
line = "&" + line.replace("\n","")
result += line
result = result.split("&")
    final_result = []
    for single in result:
        singles = single.split("\n")
        # skip the first two lines of each block (subtitle index and timestamp)
        for i in range(2, len(singles)):
            if len(singles[i].strip()) != 0 :
                final_result.append(singles[i])
    with open("./result.txt",'w') as f:
        result = ",".join(final_result)
        f.write(result)
if __name__ == '__main__':
process()
|
Java
|
UTF-8
| 376 | 2.015625 | 2 |
[] |
no_license
|
package com.switchvov.nio.gateway.filter;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaders;
public class HeaderHttpResponseFilter implements HttpResponseFilter {
@Override
public void filter(FullHttpResponse response) {
HttpHeaders headers = response.headers();
headers.set("version", "2");
}
}
|
C++
|
UTF-8
| 1,488 | 3.3125 | 3 |
[] |
no_license
|
#include <iostream>
#include "Product.h"
using namespace std;
Product::Product()
{
this->name="potato";
this->price=25;
this->weight=500;
}
bool Product::operator> (const Product &qq) const
{
if (name>qq.name)
{return 1;}
return 0;
}
bool Product::operator< (const Product &qq) const
{
if (name<qq.name)
{return 1;}
return 0;
}
bool Product::operator>= (const Product &qq) const
{
if (name>=qq.name)
{return 1;}
return 0;
}
bool Product::operator<= (const Product &qq) const
{
if (name<=qq.name)
{return 1;}
return 0;
}
bool Product::operator==(const Product &qq)const
{
if(name == qq.name &&
price == qq.price &&
weight == qq.weight)
return 1;
return 0;
}
bool Product::operator!= (const Product & qq) const
{
if (*(this) == qq)
return 0;
return 1;
}
ostream& operator << (ostream &out, const Product &qq)
{
out << "Name: " << qq.name << "\n Price: " << qq.price << "\n Weight: " << qq.weight << endl;
return out;
}
istream& operator >> (istream &in, Product &qq)
{
in >> qq.name;
in >> qq.price;
in >> qq.weight;
return in;
}
Product&Product::operator=(Product &qq)
{
this-> name = qq.name;
this-> price = qq.price;
this-> weight = qq.weight;
return *this;
}
Product&Product::operator=(string &str)
{
this-> name = str;
return *this;
}
Product&Product::operator=(float &str)
{
this-> price = str;
return *this;
}
Product&Product::operator=(int &str)
{
this-> weight = str;
return *this;
}
|
C#
|
UTF-8
| 353 | 2.515625 | 3 |
[] |
no_license
|
using System;
// reference the namespace that the base class belongs to
using AssemblyBase;
namespace AssemblyDerived
{
class DerivedClass : BaseClass { }
class Program
{
static void Main(string[] args)
{
DerivedClass dc = new DerivedClass();
dc.PrintMyself();
Console.ReadKey();
}
}
}
|
Swift
|
UTF-8
| 1,637 | 3.953125 | 4 |
[
"MIT"
] |
permissive
|
//: [Previous Topic](@previous)
/*:
 This concept is heavily used in other languages, mainly scripting languages, but in my view, if you need something like this you probably have not found the best solution to your problem. So I will cover this new feature only briefly.
 Now you can accept calls to any kind of property, intercept the name of that property, and then return a value for it.
 */
//This annotation says that, at runtime, you accept receiving a property that is not declared and will decide what to do with it.
@dynamicMemberLookup
struct Mercearia {
    //Here you receive the property name (member) and return an Int value as the result
subscript(dynamicMember member: String) -> Int {
if member.elementsEqual("numeroDePaes") {
return 10
}
return 0
}
    //Here you receive the property name (member) and return a Float value as the result
subscript(dynamicMember member: String) -> Float {
if member.elementsEqual("quilogramasDePresunto") {
return 5.0
}
return 0
}
}
let mercearia = Mercearia()
let numeroDePaes: Int = mercearia.numeroDePaes
let presuntoKg: Float = mercearia.quilogramasDePresunto
let presuntoKgNaoEncontrado: Int = mercearia.quilogramasDePresunto
let naoEncontrado: Int = mercearia.blabla
//This line does not compile because Swift cannot decide which subscript method to call
//let essaLinhaNaoCompila = mercearia.qualquerCoisa
//: [Next topic](@next)
|
Python
|
UTF-8
| 1,201 | 3.078125 | 3 |
[] |
no_license
|
def ha(s):
return ' ' + '-' * s + ' '
def hz(s):
return ' '*(s+2)
def vb(s):
return '|'+ ' '*s + '|'
def vl(s):
return '|'+ ' '*s + ' '
def vr(s):
return ' '+ ' '*s + '|'
def paint(h,s,i):#h: row index, s: segment length, i: digit being drawn
    if h == 0: #first row
if i == 1 or i == 4:
return hz(s)
else:
return ha(s)
    elif h == (2*s+2)/2: #middle row
if i == 1 or i == 7 or i == 0:
return hz(s)
else:
return ha(s)
    elif h == 2*s+2: #last row
if i == 1 or i == 4 or i ==7:
return hz(s)
else:
return ha(s)
elif h < (2*s+2)/2:
if i == 4 or i == 8 or i ==9 or i ==0:
return vb(s)
elif i== 5 or i ==6:
return vl(s)
else:
return vr(s)
elif h > (2*s+2)/2:
if i == 6 or i == 8 or i == 0:
return vb(s)
elif i == 2:
return vl(s)
else:
return vr(s)
s,n = input().split()
s = int(s)
for h in range(2*s+3):
tmp = ''
for i in range(len(n)):
tmp+=paint(h,s,int(n[i]))
if i != len(n)-1:
tmp+=' '
print(tmp)
|
Python
|
UTF-8
| 1,204 | 2.921875 | 3 |
[] |
no_license
|
from collections import defaultdict, Counter
with open('input') as f:
lines = [x.strip() for x in f]
foods = {}
for i in range(len(lines)): # line in lines:
line = lines[i]
ingr, allerg = line.split(' (contains ')
ingr = ingr.split()
allerg = allerg[:-1].split(',')
allerg = [x.strip() for x in allerg]
foods[i] = {'ingr': set(ingr), 'allerg': set(allerg)}
allerg = defaultdict(list) # allergen -> foods
for k,v in foods.items():
for al in v['allerg']:
allerg[al].append(k)
candidates = {}
for k, v in allerg.items():
inter = set.intersection(*[foods[num]['ingr'] for num in v])
candidates[k] = inter
all_ingr = []
for k, v in foods.items():
all_ingr += list(v['ingr'])
cnt = Counter(all_ingr)
for i in set.union(*candidates.values()):
del cnt[i]
print('part1:', sum(cnt.values())) # part 1
cands = list(sorted(candidates.items(), key=lambda x: (len(x[1]), x[0])))
for i in range(len(cands)):
if i > 0:
to_discard = set.union(*[x[1] for x in cands[:i]])
for td in to_discard:
cands[i][1].discard(td)
cands = sorted(cands, key=lambda x: x[0])
ans = [x[1].pop() for x in cands]
print('part2:', ','.join(ans))
|
Python
|
UTF-8
| 4,667 | 3.03125 | 3 |
[] |
no_license
|
import pickle
import pandas as pd
from flask import Flask, jsonify, request
# User defined modules
import ML_Model.Test_Dataframes_pickles.dataframe_shape_functions as dfsf
import ML_Model.Test_Transformations_pickles.transformation_functions as tff
app = Flask(__name__)
model_path = "ML_Model/prediction_pickles/model.pkl"
@app.route("/predict", methods=['POST'])
def predict_model():
# read in the dictionary data from the curl request
test = request.get_json(force=True)
# convert to dataframe
test = pd.DataFrame.from_dict(test)
# formatting to numeric
test['x12'] = test['x12'].str.replace('$', '')
test['x12'] = test['x12'].str.replace(',', '')
test['x12'] = test['x12'].str.replace(')', '')
test['x12'] = test['x12'].str.replace('(', '-')
test['x12'] = test['x12'].astype(float)
test['x63'] = test['x63'].str.replace('%', '')
test['x63'] = test['x63'].astype(float)
# call the imputer function
test_imputed = tff.imputer_func(test, 'mean', ['x5', 'x31', 'x81', 'x82'])
# loading the train_columns from disk
train_col_path = "ML_Model/prediction_pickles/train_columns.pkl"
train_columns = pickle.load(open(train_col_path, 'rb'))
# loading the fitted_std_scaler from disk
fitted_std_scaler_path = "ML_Model/prediction_pickles/fitted_std_scaler.pkl"
fitted_std_scaler = pickle.load(open(fitted_std_scaler_path, 'rb'))
# stadardize the numeric variables with standardscaler()
test_imputed_std = pd.DataFrame(fitted_std_scaler.transform(test_imputed), columns=train_columns)
# Apply categorical_funct to x5 variable
testx5 = tff.categorical_funct(test['x5'], ['friday', 'saturday', 'sunday', 'monday',
'tuesday', 'wednesday', 'thursday'])
# Create dummies for x5 variable
dumb5 = tff.dummies_funct(testx5, 'x5')
test_imputed_std = dfsf.df_df_concat(test_imputed_std, dumb5)
# Apply categorical_funct to x31 variable
testx31 = tff.categorical_funct(test['x31'], ['america', 'germany', 'asia', 'japan'])
# Create dummies for x31 variable
dumb31 = tff.dummies_funct(testx31, 'x31')
test_imputed_std = dfsf.df_df_concat(test_imputed_std, dumb31)
# Apply categorical_funct to x81 variable
testx81 = tff.categorical_funct(test['x81'], ['April', 'July', 'December', 'October', 'February',
'September', 'March', 'November', 'June', 'May', 'August',
'January'])
# Create dummies for x81 variable
dumb81 = tff.dummies_funct(testx81, 'x81')
test_imputed_std = dfsf.df_df_concat(test_imputed_std, dumb81)
# Apply categorical_funct to x82 variable
testx82 = tff.categorical_funct(test['x82'], ['Female', 'Male'])
# Create dummies for x82 variable
dumb82 = tff.dummies_funct(testx82, 'x82')
test_imputed_std = dfsf.df_df_concat(test_imputed_std, dumb82)
# Passing data to model & loading the model from disk
model = pickle.load(open(model_path, 'rb'))
# loading the model variables from disk
variables_path = "ML_Model/prediction_pickles/variables.pkl"
variables = pickle.load(open(variables_path, 'rb'))
# Given,the input data, predict the results from the model
predictions = pd.DataFrame(model.predict(test_imputed_std[variables])).rename(columns={0: 'probs'})
predictions['prob_bin'] = pd.qcut(predictions['probs'], q=20)
# position of 75%
upperq = int(20 * 0.75)
API_Output = predictions
# Lower bound of 75th %
qt_75 = API_Output.prob_bin.cat.categories[upperq]
qt_75 = qt_75.left
# Create a column of all lower bounds and convert Caterory to floats
API_Output['left'] = API_Output['prob_bin'].apply(lambda x: x.left).astype('float')
# Re-label 75th% to 'event',below that label as 'no event'
API_Output['left'] = ['event' if x >= qt_75 else 'no event' for x in API_Output['left']]
# Model Results to return
API_Output.drop(columns=['prob_bin'], inplace=True)
API_Output.rename(columns={'probs': 'phat', 'left': 'business_outcome'}, inplace=True)
# Model Variables to return
sort_vars = sorted(variables)
sort_vars = pd.DataFrame(sort_vars).rename(columns={0: 'model_variables'})
# return the results as json to API endpoint
return jsonify(API_Output.to_dict(), sort_vars.to_dict())
# run on port 8050 with debug enabled so code changes are reloaded
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8050, debug=True)
|
C#
|
UTF-8
| 4,217 | 3.5 | 4 |
[] |
no_license
|
//namespace EX_6._Jagged_Array_Manipulator
using System;
using System.Linq;
namespace EX_6._Jagged_Array_Manipulator
{
class Program
{
static void Main(string[] args)
{
int n = int.Parse(Console.ReadLine());
double[][] jaggedArray = new double[n][];
            //First we have to fill the jagged array with the values passed in from the console.
for (int row = 0; row < n; row++)
{
jaggedArray[row] = Console.ReadLine()
.Split(" ")
.Select(double.Parse)
.ToArray();
}
            //Before adding or removing values, we have to analyse the matrix and, based on the initial values,
            //multiply or divide. That is why the jagged array has to hold values of type double rather than int,
            //because division produces a double number.
for (int i = 0; i < jaggedArray.GetLength(0); i++)
{
int currentRowLenght = jaggedArray[i].Length;
int nextRowLenght = int.MinValue;
if (i + 1 < jaggedArray.GetLength(0))
{
nextRowLenght = jaggedArray[i + 1].Length;
if (currentRowLenght == nextRowLenght)
{
for (int j = 0; j < jaggedArray[i].Length; j++)
{
jaggedArray[i][j] *= 2;
jaggedArray[i + 1][j] *= 2;
}
}
else
{
for (int j = 0; j < jaggedArray[i].Length; j++)
{
jaggedArray[i][j] /= 2;
}
for (int j = 0; j < jaggedArray[i + 1].Length; j++)
{
jaggedArray[i + 1][j] /= 2;
}
}
}
}
string input = Console.ReadLine();
while (input.ToLower() != "end")
{
string[] command = input
.Split(" ", StringSplitOptions.RemoveEmptyEntries)
.ToArray();
if (command[0].ToLower() == "add")
{
int row = int.Parse(command[1]);
int col = int.Parse(command[2]);
double value = double.Parse(command[3]);
                        //The validity check has to include col >= 0 (since it may be a negative number),
                        //and also row < jaggedArray.GetLength(0) rather than row <= jaggedArray.GetLength(0) (with length 5
                        //the last valid index for row is 4, not 5); the same goes for the col check.
if (row >= 0 && row < jaggedArray.GetLength(0) && col < jaggedArray[row].Length && col >= 0)
{
jaggedArray[row][col] += value;
}
}
else if (command[0].ToLower() == "subtract")
{
int row = int.Parse(command[1]);
int col = int.Parse(command[2]);
double value = double.Parse(command[3]);
if (row >= 0 && row < jaggedArray.GetLength(0) && col < jaggedArray[row].Length && col >= 0)
{
jaggedArray[row][col] -= value;
}
}
input = Console.ReadLine();
}
foreach (var row in jaggedArray)
{
Console.WriteLine(string.Join(" ", row));
}
}
}
}
|
Python
|
UTF-8
| 285 | 3.296875 | 3 |
[] |
no_license
|
# Test your code little by little.
# Don't test everything only at the end, because it gets harder to identify errors.
# Don't be intimidated by the error messages. They help you fix your code.
x=int(input("dividendo:"))
y=int(input("divisor:"))
z=x//y
a=x%y
print(x,y,z,a)
|
Java
|
UTF-8
| 2,336 | 2.9375 | 3 |
[] |
no_license
|
public class item {
protected String username;
protected int itemID;
protected String itemTitle;
protected String itemDescription;
protected String date;
protected double itemPrice;
protected String itemCategory;
public item() {
}
public item(int itemID) {
this.itemID = itemID;
}
//need itemID since it is primary key
public item(String username, int itemID, String itemTitle, String itemDescription, String date, double itemPrice, String itemCategory) {
this.username = username;
this.itemID = itemID;
this.itemTitle = itemTitle;
this.itemDescription = itemDescription;
this.date = date;
this.itemPrice = itemPrice;
this.itemCategory = itemCategory;
}
    //constructor required when inserting an item: the itemID is generated, and the date is taken from the time of posting
//so they are not required just yet.
public item(String username, String itemTitle, String itemDescription, double itemPrice, String itemCategory) {
this.username = username;
this.itemTitle = itemTitle;
this.itemDescription = itemDescription;
this.itemPrice = itemPrice;
this.itemCategory = itemCategory;
}
public item(String itemTitle, String itemDescription, double itemPrice, String itemCategory) {
this.itemTitle = itemTitle;
this.itemDescription = itemDescription;
this.itemPrice = itemPrice;
this.itemCategory = itemCategory;
}
public String getusername() {
return username;
}
public void setusername(String username) {
this.username = username;
}
public int getitemID() {
return itemID;
}
public void setitemID(int itemID) {
this.itemID = itemID;
}
public String getitemTitle() {
return itemTitle;
}
public void setitemTitle(String itemTitle) {
this.itemTitle = itemTitle;
}
public String getitemDescription() {
return itemDescription;
}
public void setItemDescription(String itemDescription) {
this.itemDescription = itemDescription;
}
public String getDate() {
return date;
}
public void setDate(String date) {
this.date = date;
}
public double getitemPrice() {
return itemPrice;
}
public void setitemPrice(double itemPrice) {
this.itemPrice = itemPrice;
}
public String getitemCategory() {
return itemCategory;
}
public void setitemCategory(String itemCategory) {
this.itemCategory = itemCategory;
}
}
|
C++
|
UTF-8
| 263 | 2.625 | 3 |
[] |
no_license
|
// move.h -- class definition
#ifndef MOVE_H_
#define MOVE_H_
class Move
{
private:
double x;
double y;
public:
Move(double a = 0, double b = 0);
void showmove() const;
Move & add(const Move & m) const;
void reset(double a = 0, double b = 0);
};
#endif
|
SQL
|
UTF-8
| 4,246 | 3.3125 | 3 |
[] |
no_license
|
create table if not exists `user`(
`id` int unsigned primary key auto_increment comment '主键ID',
`promoter_id` int not null default 0 comment '推广人id',
`note` varchar(128) not null default '' comment '推广备注',
`type` enum('0','2','4') not null default '0' comment '0:普通用户;2:商家;4:区域合伙人',
`user_name` varchar(16) not null default '' comment '用户名',
`ava_url` varchar(128) not null default '' comment '图像地址',
`phone` char(11) not null default '' comment '手机号',
`word_help` varchar(256) not null default '' comment '助记词',
`password` varchar(64) not null default '' comment '登录密码',
`pay_password` varchar(64) not null default '' comment '支付密码',
`invite_code` char(6) not null default '' comment '邀请码',
`region_id` int not null default 0 comment '区域id',
`region` varchar(128) not null default '' comment '区域',
`status` enum('0','1') not null default '0' comment '0:无效;1:有效',
`create_time` int not null default 0 comment '创建时间'
)engine=innodb default charset=utf8 comment '用户表';
create table if not exists `system_notice`(
`id` int unsigned primary key auto_increment comment '主键ID',
`title` varchar(128) not null default '' comment '标题',
`image_url` varchar(128) not null default '' comment '公告图片',
`content` varchar(1280) not null default '' comment '公告内容',
`read_num` mediumint unsigned not null default 0 comment '阅读量',
`create_time` int not null default 0 comment '创建时间'
)engine=innodb default charset=utf8;
create table if not exists `payment_way`(
`id` int unsigned primary key auto_increment comment '主键ID',
`user_id` int unsigned comment '用户ID',
`payment_type` enum('2','4','6') not null default '2' comment '2:支付宝,4:微信,6:银行卡',
`name` varchar(16) not null default '' comment '姓名',
`code` varchar(32) not null default '' comment '账号',
`pay_url` varchar(128) not null default '' comment '收款二维码',
`bank_name` varchar(16) not null default '' comment '开户银行',
`bank_info` varchar(64) not null default '' comment '支行信息',
`status` tinyint not null default 1 comment '1:有效,0:无效',
`create_time` int not null default 0 comment '创建时间'
)engine=innodb default charset=utf8;
create table if not exists `wallet`(
`id` int unsigned primary key auto_increment comment '主键ID',
`user_id` int unsigned comment '用户ID',
`type` tinyint not null default 1 comment '钱包类型,1:wocs钱包;2:消费钱包;3:补贴钱包;4:余额钱包;5:加速钱包',
`balance` decimal(16,4) not null default 0.0000 comment '可用余额',
`freeze` decimal(16,4) not null default 0.0000 comment '冻结数',
`create_time` int not null default 0 comment '创建时间'
)engine=innodb default charset=utf8;
create table if not exists `wallet_log`(
`id` int unsigned primary key auto_increment comment '主键ID',
`business_id` int unsigned comment '业务ID',
`business_type` tinyint not null default 1 comment '业务类型',
`user_id` int unsigned comment '用户ID',
`type` tinyint not null default 1 comment '钱包类型,1:wocs钱包;2:消费钱包;3:补贴钱包;4:余额钱包;5:加速钱包',
`balance` decimal(16,4) not null default 0.0000 comment '可用余额',
`freeze` decimal(16,4) not null default 0.0000 comment '冻结数',
`create_time` int not null default 0 comment '创建时间'
)engine=innodb default charset=utf8;
create table if not exists `shop_address`(
`id` int unsigned primary key auto_increment comment '主键ID',
`user_id` int unsigned comment '用户ID',
`name` varchar(16) not null default '' comment '联系人',
`phone` char(11) not null default '' comment '手机号',
`detail_address` varchar(256) not null default '' comment '详细地址',
`tag` enum('0','2','4') not null default '0' comment '0:家;2:公司;4:学校',
`create_time` int not null default 0 comment '创建时间'
)engine=innodb default charset=utf8;
-- The "regional partner" and "my team" parts are left blank for now; not sure yet how to design them.
|
Markdown
|
UTF-8
| 5,616 | 2.625 | 3 |
[] |
no_license
|
# Exercise 08
I'm sorry to say that there are always errors when I download and install the vpython module, and I have no idea how to solve the problem.
## Abstract
Use Python to study oscillatory motion and chaos further.
## Background
* Simple pendulum<br>

* Euler method<br>

* Euler-Cromer method<br>

* Damped pendulum<br>

* Driven, damped pendulum<br>

* Driven, damped, nonlinear pendulum<br>

* Feigenbaum <br>

## Program
### [Click here to see the code.](https://github.com/whucyb/computational_physics_N2014301020067/blob/master/Exercise_08/Exercise_08.py)
### Problem 3.18.
Calculate Poincaré section for the pendulum as it undergoes the period-doubling route to chaos. Plot  versus , with one point plotted for each drive cycle, as in Figure 3.9. Do this for  = 1.4, 1.44, 1.465, using the other parameters as given in connection with Figure 3.10. You should find that after removing the points corresponding to the initial transient the attractor in the period-1 regime will contain only a single point. Likewise, if the behavior is period n, the attractor will contain n discrete points.
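As a rough illustration of how such a Poincaré section can be computed, here is a minimal Python sketch (not the linked program above): it integrates the driven, damped, nonlinear pendulum with the Euler-Cromer method and keeps one (θ, ω) point per drive cycle. The parameter values (l = g = 9.8, q = 0.5, drive frequency 2/3) follow the usual textbook choice, and the initial condition θ = 0.2 is an arbitrary assumption.
```python
import numpy as np
import matplotlib.pyplot as plt

def poincare_section(F_D, dt=0.01, n_periods=1500, transient=300):
    g = l = 9.8
    q, Omega_D = 0.5, 2.0 / 3.0
    period = 2.0 * np.pi / Omega_D
    theta, omega, t = 0.2, 0.0, 0.0
    points = []
    for k in range(n_periods):
        # advance one full drive period with the Euler-Cromer method
        while t < (k + 1) * period:
            omega += (-(g / l) * np.sin(theta) - q * omega
                      + F_D * np.sin(Omega_D * t)) * dt
            theta += omega * dt
            # keep theta inside (-pi, pi]
            if theta > np.pi:
                theta -= 2.0 * np.pi
            elif theta <= -np.pi:
                theta += 2.0 * np.pi
            t += dt
        if k >= transient:  # drop the initial transient drive cycles
            points.append((theta, omega))
    return np.array(points)

pts = poincare_section(1.4)
plt.plot(pts[:, 0], pts[:, 1], '.', markersize=2)
plt.xlabel('theta (radians)')
plt.ylabel('omega (radians/s)')
plt.show()
```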
*  = 0.5

*  = 1.2

*  = 1.4

*  = 1.44

*  = 1.465

*  = 1.475

* From the figures above, we can see that in a nonchaotic system, if the behavior is period n, the attractor contains n discrete points.
### Problem 3.20.
Calculate the bifurcation diagrams for the pendulum in the vicinity of  = 1.35 to 1.5. Make a magnified plot of the diagram (as compared to Figure 3.11) and obtain an estimate of the Feigenbaum  parameter.
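A bifurcation diagram of this kind can be sketched by reusing the `poincare_section()` helper from the sketch above (again an illustration under the same assumed parameters, not the program used for the figures): sweep the drive amplitude, keep θ once per drive cycle after the transient, and plot θ against the drive amplitude. The step of 0.005 is deliberately coarse to keep a pure-Python loop reasonably fast. Once the successive period-doubling thresholds F1, F2, F3, ... are read off the diagram, the Feigenbaum delta can be estimated from the ratio (F_n - F_(n-1)) / (F_(n+1) - F_n), which should approach about 4.669.
```python
import numpy as np
import matplotlib.pyplot as plt

for F_D in np.arange(1.35, 1.50, 0.005):
    pts = poincare_section(F_D, dt=0.01, n_periods=500, transient=300)
    # one column of theta values per drive amplitude
    plt.plot([F_D] * len(pts), pts[:, 0], 'k.', markersize=1)
plt.xlabel('drive amplitude')
plt.ylabel('theta (radians)')
plt.show()
```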
*  = 0 to 2

*  = 1.35 to 1.5

*  = 1.475 to 1.485
<br>
Enlarge the two parts to see their structures.<br>


* From the figures above, we can see that there are fractal structures in the bifurcation diagrams.
* The program still needs to be improved to obtain an estimate of the Feigenbaum  parameter.
## Conclusion
The chaos in the nonlinear pendulum is closely connected to fractal geometry, which is quite surprising!
## Reference
* [Chapter 3 Oscillatory Motion and Chaos_Cai Hao_Wuhan University](https://www.evernote.com/shard/s140/sh/0724815b-79a9-4357-9e85-416c33cb1b69/e2b0667446e6f7d74181969ed0c7c357)
* 《Computational Physics》 (Second Edition)
|
C#
|
UTF-8
| 2,367 | 2.78125 | 3 |
[] |
no_license
|
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Akka.Actor;
using FluentAssertions;
using Xunit;
namespace CommandQueuePoc
{
public class AkkaTest
{
[Fact]
public async Task InOrderCommandHandlerTest()
{
MyCommandHandler.Commands = new List<string>();
var handler = new AkkaHandler();
var t1 = handler.SendAsync(new MyCommand("command1", 300));
var t2 = handler.SendAsync(new MyCommand("command2", 250));
var t3 = handler.SendAsync(new MyCommand("command3", 1));
var t4 = handler.SendAsync(new MyCommand("command4", 20));
await Task.WhenAll(t1, t2, t3, t4);
var commands = MyCommandHandler.Commands;
commands.Should().ContainInOrder("command1", "command2", "command3", "command4");
}
[Fact]
public void ThrowExceptionTest()
{
MyCommandHandler.Commands = new List<string>();
var handler = new AkkaHandler();
Func<Task> act = () => handler.SendAsync(new MyCommand("command5", 300));
act.ShouldThrow<InvalidOperationException>();
}
}
class AkkaHandler
{
IActorRef MyActor { get; }
ActorSystem ActorSystem { get; }
public AkkaHandler()
{
ActorSystem = ActorSystem.Create("app");
MyActor = ActorSystem.ActorOf<CommandHandlerAkkaActor>();
}
public async Task SendAsync<T>(T command)
{
var result = await MyActor.Ask<object>(command).ConfigureAwait(false);
if (result is Exception ex)
{
throw new InvalidOperationException("error in command execution", ex);
}
}
}
class CommandHandlerAkkaActor : ReceiveActor
{
public CommandHandlerAkkaActor()
{
ReceiveAny(ReceiveMessage);
}
private void ReceiveMessage(object message)
{
if (message is MyCommand r)
{
try
{
new MyCommandHandler().Handle(r);
Sender.Tell("ok");
}
catch (Exception ex)
{
Sender.Tell(ex);
}
}
}
}
}
|
PHP
|
UTF-8
| 289 | 2.546875 | 3 |
[] |
no_license
|
<?php
namespace Topxia\Service\Common;
class FieldChecker
{
public static function checkFieldName($name)
{
if (!ctype_alnum(str_replace('_', '', $name))) {
throw new \InvalidArgumentException('Field name is invalid.');
}
return true;
}
}
|
TypeScript
|
UTF-8
| 4,624 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
import { join } from "path";
import { readJsonSync } from "fs-extra";
type CmdDecl = {
command: string;
when?: string;
title?: string;
};
type DebuggerDecl = {
variables?: Record<string, string>;
};
describe("commands declared in package.json", () => {
const manifest = readJsonSync(join(__dirname, "../../package.json"));
const commands = manifest.contributes.commands;
const menus = manifest.contributes.menus;
const debuggers = manifest.contributes.debuggers;
const disabledInPalette: Set<string> = new Set<string>();
// These commands should appear in the command palette, and so
// should be prefixed with 'CodeQL: '.
const paletteCmds: Set<string> = new Set<string>();
// These commands arising on context menus in non-CodeQL controlled
// panels, (e.g. file browser) and so should be prefixed with 'CodeQL: '.
const contribContextMenuCmds: Set<string> = new Set<string>();
// These are commands used in CodeQL controlled panels, and so don't need any prefixing in their title.
const scopedCmds: Set<string> = new Set<string>();
const commandTitles: { [cmd: string]: string } = {};
commands.forEach((commandDecl: CmdDecl) => {
const { command, title } = commandDecl;
if (command.match(/^codeQL\./) || command.match(/^codeQLQueryResults\./)) {
paletteCmds.add(command);
expect(title).toBeDefined();
commandTitles[command] = title!;
} else if (
command.match(/^codeQLDatabases\./) ||
command.match(/^codeQLQueries\./) ||
command.match(/^codeQLVariantAnalysisRepositories\./) ||
command.match(/^codeQLQueryHistory\./) ||
command.match(/^codeQLAstViewer\./) ||
command.match(/^codeQLEvalLogViewer\./) ||
command.match(/^codeQLTests\./) ||
command.match(/^codeQLModelEditor\./)
) {
scopedCmds.add(command);
expect(title).toBeDefined();
commandTitles[command] = title!;
} else {
throw new Error(`Unexpected command name ${command}`);
}
});
menus["explorer/context"].forEach((commandDecl: CmdDecl) => {
const { command } = commandDecl;
paletteCmds.delete(command);
contribContextMenuCmds.add(command);
});
menus["editor/context"].forEach((commandDecl: CmdDecl) => {
const { command } = commandDecl;
paletteCmds.delete(command);
contribContextMenuCmds.add(command);
});
menus["editor/title"].forEach((commandDecl: CmdDecl) => {
const { command } = commandDecl;
paletteCmds.delete(command);
contribContextMenuCmds.add(command);
});
debuggers.forEach((debuggerDecl: DebuggerDecl) => {
if (debuggerDecl.variables !== undefined) {
for (const command of Object.values(debuggerDecl.variables)) {
// Commands used as debug configuration variables need not be enabled in the command palette.
paletteCmds.delete(command);
}
}
});
menus.commandPalette.forEach((commandDecl: CmdDecl) => {
if (commandDecl.when === "false")
disabledInPalette.add(commandDecl.command);
});
it("should have commands appropriately prefixed", () => {
paletteCmds.forEach((command) => {
// command ${command} should be prefixed with 'CodeQL: ', since it is accessible from the command palette
expect(commandTitles[command]).toMatch(/^CodeQL: /);
});
contribContextMenuCmds.forEach((command) => {
// command ${command} should be prefixed with 'CodeQL: ', since it is accessible from a context menu in a non-extension-controlled context
expect(commandTitles[command]).toMatch(/^CodeQL: /);
});
scopedCmds.forEach((command) => {
// command ${command} should not be prefixed with 'CodeQL: ', since it is accessible from an extension-controlled context
expect(commandTitles[command]).not.toMatch(/^CodeQL: /);
});
});
it("should have the right commands accessible from the command palette", () => {
paletteCmds.forEach((command) => {
// command ${command} should be enabled in the command palette
if (disabledInPalette.has(command) !== false) {
expect(command).toBe("enabled");
}
expect(disabledInPalette.has(command)).toBe(false);
});
    // Commands in contribContextMenuCmds may reasonably be enabled or
// disabled in the command palette; for example, codeQL.runQuery
// is available there, since we heuristically figure out which
// query to run, but codeQL.setCurrentDatabase is not.
scopedCmds.forEach((command) => {
// command ${command} should be disabled in the command palette
expect(disabledInPalette.has(command)).toBe(true);
});
});
});
|
Markdown
|
UTF-8
| 1,794 | 2.859375 | 3 |
[
"GPL-3.0-only",
"MIT"
] |
permissive
|
# packman
## Getting Started
#### Prerequisites
- Neovim(>= 4.0)
- Git
#### Installing
The whole plugin is just a lua file.
Run this command to install or update packman.
```sh
$ curl https://raw.githubusercontent.com/theJian/packman.lua/master/packman.lua -o $HOME/.config/nvim/lua/packman.lua
```
Configure packman by adding just one single line to your `init.vim`
```VimL
lua require 'packman'
```
That's it! Unlike other plugin managers, which typically read the plugin list from `init.vim`, packman takes a different approach. Keep reading!
#### Adding/Removing plugins
Packman exposes several methods that you can use to add or remove plugins. To access them you can use the `:lua` command.
For example, this command will install a plugin from a git remote url. **Don't forget the surrounding quotes!**
```
:lua packman.get "plugin_git_remote_url"
```
If plugin is hosted on github, you can simply use `username/plugin`
```
:lua packman.get "username/plugin"
```
To remove a installed plugin, pass the exact plugin name to `packman.remove`.
```
:lua packman.remove "plugin"
```
#### Updating plugin
```
:lua packman.update "plugin"
```
#### Synchronizing plugin list
Since the plugin list isn't part of the `init.vim` file, we can't keep plugins in sync just by syncing the configuration. Packman reads the plugin list from a separate file and can generate this file from the installed plugins.
```
:lua packman.dump()
```
By default a file named `packfile` will be generated in the same directory as the packman file. If you followed the installation instructions, it can be found under `$HOME/.config/nvim/lua`.
Then you can sync `packfile` and install plugins from it. The install method reads from the same filename that the dump method writes to.
```
:lua packman.install()
```
|
Python
|
UTF-8
| 176 | 3.625 | 4 |
[] |
no_license
|
a = input()
count_a = 0
count_b = 0
for i in range(0, len(a)) :
if a[i] == "(" :
count_a += 1
if a[i] == ")" :
count_b += 1
print(count_a, count_b)
|
Markdown
|
UTF-8
| 1,571 | 3.46875 | 3 |
[] |
no_license
|
# Phony targets
## 1. Phony target syntax:
When writing a phony target, first declare the phony target, then define its rule.
### 1.1 Declaring a phony target:
```makefile
.PHONY: clean   # this declares that clean is a phony target
```
### 1.2 Defining the phony target's rule:
```makefile
clean:          # this defines the rule for the phony target clean, i.e. the actions it executes
	rm *.c
```
## 2. What phony targets are for:
### 2.1 Avoiding conflicts between a target name and a file name
That is, once a target is declared phony, it is always executed.
### 2.2 Improving efficiency
When a target is declared as a phony target, make will not try to search for an implicit rule to create it while executing the rules.
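To make 2.2 concrete, here is a minimal sketch (the script name `run_tests.sh` is just a placeholder): because `test` is declared phony, make neither looks for a file named `test` nor searches its implicit rules for a way to build one; it simply runs the recipe every time.
```makefile
.PHONY: test
test:
	./run_tests.sh   # placeholder command; substitute your own test runner
```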
## 3. Phony target examples:
### 3.1 File-name conflicts
#### 3.1.1 A single conflicting file name
Suppose the directory containing the makefile has a file named clean, the same name as the target clean, and the makefile does not declare clean as a phony target.
```makefile
#.PHONY: clean
clean:
	rm temp
```

From the result of the run we can see that the target clean was not executed.
```makefile
.PHONY: clean
clean:
	rm temp
```

From the result of the run we can see that the phony target clean was executed and the file temp was deleted.
#### 3.1.2 Multiple conflicting file names
```makefile
.PHONY: all clean prog1 prog2 prog3
all: prog1 prog2 prog3
prog1:
	gcc prog1/main.c -o p1
prog2:
	gcc prog2/main.c -o p2
prog3:
	gcc prog3/main.c -o p3
clean:
	rm p1 p2 p3
```

If the phony declaration is changed to .PHONY: all clean, the targets prog1, prog2 and prog3 will not be executed.
|
Python
|
UTF-8
| 2,452 | 3.328125 | 3 |
[] |
no_license
|
import unittest
from app.calculator import Calculator
from unittest.mock import patch, Mock
class TddInPythonAttempt(unittest.TestCase):
def setUp(self):
self.calc = Calculator()
#Test validate_input function
def test_calculator_validate_input_both_ints_is_1(self):
result = self.calc.validate_input(1,2)
self.assertEqual(1, result)
def test_calculator_validate_input_both_floats_is_1(self):
result = self.calc.validate_input(1.1,2.1)
self.assertEqual(1, result)
def test_calculator_validate_input_int_and_float_is_1(self):
result = self.calc.validate_input(1,2.1)
self.assertEqual(1, result)
def test_calculator_validate_input_int_and_complex_is_0(self):
c = complex(1, 0)
result = self.calc.validate_input(1,c)
self.assertEqual(0, result)
def test_calculator_validate_input_int_and_string_is_0(self):
result = self.calc.validate_input(1, "four")
self.assertEqual(0, result)
def test_calculator_validate_input_negetive_ints_is_1(self):
result = self.calc.validate_input(-2, -4)
self.assertEqual(1, result)
#Test divide function
def test_calculator_divide_10_by_5_returns_2(self):
result = self.calc.divide(10,5)
self.assertEqual(2, result)
def test_calculator_divide_5_by_10_returns_half(self):
result = self.calc.divide(5,10)
self.assertEqual(0.5, result)
def test_calculator_divide_10_by_3_returns_3_33(self):
result = self.calc.divide(10,3)
self.assertEqual(3.33, result)
def test_calculator_divide_minus_10_by_3_returns_minus_3_33(self):
result = self.calc.divide(-10,3)
self.assertEqual(-3.33, result)
def test_calculator_divide_returns_error_message_if_both_args_not_numbers(self):
self.assertRaises(ValueError, self.calc.divide, 'two', 'three')
#Test add function
def test_calculator_add_method_returns_correct_result(self):
result = self.calc.add(2,2)
self.assertEqual(4, result)
@patch('app.calculator.Calculator.add', return_value=9)
def test_calculator_add_method_mock_result_valid_input_invalid_output(self,add):
result = add(2,2)
self.assertEqual(result, 9)
def mock_sum(a, b):
# mock sum function without the long running time.sleep
return a + b
@patch('app.calculator.Calculator.add', side_effect=mock_sum)
def test_add_with_side_effect(self, add):
self.assertEqual(add(2,3), 5)
self.assertEqual(add(7,3), 10)
|
Java
|
UTF-8
| 1,619 | 2.203125 | 2 |
[] |
no_license
|
package com.supra.service;
import java.util.List;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.supra.common.util.CommonUtil;
import com.supra.dto.EmployeeDTO;
import com.supra.dto.EmployeeDetailsDTO;
import com.supra.model.AuthUserEntity;
import com.supra.model.EmployeeDetailEntity;
import com.supra.respository.EmployeeRespository;
@Service
public class EmployeeServiceImpl implements EmployeeService {
@Autowired
EmployeeRespository employeeRespository;
@Override
public AuthUserEntity save(AuthUserEntity empEntity) throws Exception {
return employeeRespository.save(empEntity);
}
@Override
public List<EmployeeDetailsDTO> getAllEmpDetails() throws Exception {
List<AuthUserEntity> listUsers=employeeRespository.findAll();
List<EmployeeDetailsDTO> listEmpDTO=listUsers.stream().map(authUserEntity->{
EmployeeDetailsDTO empDetailDto=new EmployeeDetailsDTO();
BeanUtils.copyProperties(authUserEntity, empDetailDto);
EmployeeDetailEntity empDetEnt=authUserEntity.getEmpDetail();
if(empDetEnt!=null) {
BeanUtils.copyProperties(empDetEnt, empDetailDto);
if(empDetEnt.getDob()!=null) {
empDetailDto.setDob(CommonUtil.getFormattedDate(empDetEnt.getDob()));
}
if(empDetEnt.getNatEntity()!=null) {
empDetailDto.setNationality(empDetEnt.getNatEntity().getCountryName());
}
}
return empDetailDto;
}).collect(Collectors.toList());
return listEmpDTO;
}
}
|
Java
|
UTF-8
| 1,353 | 3.1875 | 3 |
[] |
no_license
|
package com.revature.vehicles;
import java.io.FileNotFoundException;
import java.io.IOException;
public class Driver {
public static void main(String[] args) throws FileNotFoundException, IOException {
Car ferari = new Car("Ferari", "Red", 95);
Car delorean = new Car("Delorean", "Silver", 88);
Car chevy = new Car("Chevrolet");
Car corolla = new Car("Corolla");
Car cadillac = new Car("Cadillac", "Green", 80);
Motorcycle chopper = new Motorcycle("Chopper", "black", 85);
ferari.checkTires();
ferari.drive();
chevy.drive();
System.out.println(ferari.getColor());
chopper.jump();
chopper.checkTires();
ferari.checkTraffic();
Motorcycle harley = new Motorcycle("Harley", "gray", 75);
Motorcycle dirtbike = new Motorcycle("Dirt Bike", "red", 95);
//collection methods
Gang ravens = new Gang();
ravens.addGangMember(chopper);
ravens.addGangMember(harley);
ravens.addGangMember(dirtbike);
ravens.ownTheHighway();
ravens.expelGangMember(dirtbike);
ravens.ownTheHighway();
//Drag race methods
System.out.println("\nA Drag Race is in Progress");
DragRace race = new DragRace();
race.startRace();
race.enterRace(ferari);
race.enterRace(chevy);
race.enterRace(cadillac);
race.startRace();
Records r = new Records();
r.recordResults("\n" + race.getWinner());
}
}
|
Java
|
UTF-8
| 367 | 2.640625 | 3 |
[] |
no_license
|
package com.sharknado.gestfish_hfc2018.model;
public enum TipoProducao {
SEMI_INTENSIVO("Semi-Intensivo"), INTENSIVO("Intensivo");
private String label;
TipoProducao(String label) {
this.label = label;
}
public String getLabel() {
return label;
}
@Override
public String toString() {
return label;
}
}
|
SQL
|
UTF-8
| 2,485 | 3.234375 | 3 |
[] |
no_license
|
REM
REM Quick tuning prober
REM
@_BEGIN
DEFINE p = SUM(pins)
DEFINE r = SUM(reloads)
DEFINE m = &r/&p*100
SELECT
&p pins,
&r reloads,
&m "MISS RATE",
DECODE(SIGN(&m - 1), 1, 'Increase SHARED_POOL_SIZE', 'OK') comments
FROM
v$librarycache
/
UNDEFINE p r m
DEFINE g = SUM(gets)
DEFINE m = SUM(getmisses)
DEFINE mi = &m/&g*100
SELECT
&g gets,
&m getmisses,
&mi "MISS RATE",
DECODE(SIGN(&mi - 10), 1, 'Increase SHARED_POOL_SIZE','OK') comments
FROM
v$rowcache
/
UNDEFINE g m mi
DEFINE p = SUM(DECODE(statistic#,39,value,0))
DEFINE l = SUM(DECODE(statistic#,37,value,38,value,0))
DEFINE h = (1-(&p/&l))*100
SELECT
&p physical,
&l logical,
&h "HIT RATE",
DECODE(SIGN(&h - 70),
-1 , 'Increase DB_BLOCK_BUFFER',
DECODE(SIGN(&h - 95),
1, 'Decrease DB_BLOCK_BUFFER',
'OK'
)
) comments
FROM
v$sysstat
/
UNDEFINE p l h
COLUMN disk FORMAT 99,999,999
DEFINE m = SUM(DECODE(statistic#,121,value,0))
DEFINE d = SUM(DECODE(statistic#,122,value,0))
DEFINE r = SUM(DECODE(statistic#,123,value,0))
SELECT
&m "MEMORY",
&d "DISK",
&r "ROWS",
DECODE(SIGN((&d) - (&m)), 1, 'Increase SORT_AREA_SIZE', 'OK') comments
FROM
v$sysstat
/
UNDEFINE m d r
DEFINE t = SUM(DECODE(statistic#,4,DECODE(value,0,1,value),0))
DEFINE l = SUM(DECODE(statistic#,0,DECODE(value,0,1,value),0))
DEFINE e = SUM(DECODE(statistic#,23,value,0))
SELECT
&e "ENQUEUE WAITS",
&e / &t "PER TRANSACTION",
&e / &l "PER LOGON" ,
DECODE(SIGN((&e / &t) - 1), 1, 'Increase ENQUEUE_RESOURCES', 'OK') comments
FROM
v$sysstat
/
UNDEFINE t l e
DEFINE t = SUM(DECODE(statistic#,4,DECODE(value,0,1,value),0))
DEFINE l = SUM(DECODE(statistic#,0,DECODE(value,0,1,value),0))
DEFINE e = SUM(DECODE(statistic#,58,value,0))
SELECT
&e "DBWR CHECKPOINTS",
&e / &t "PER TRANSACTION",
&e / &l "PER LOGON" ,
DECODE(SIGN((&e / &t) - 1), 1, 'Increase LOG_CHECKPOINT_INTERVAL', 'OK') comments
FROM
v$sysstat
/
UNDEFINE t l e
DEFINE t = SUM(DECODE(statistic#,4,DECODE(value,0,1,value),0))
DEFINE l = SUM(DECODE(statistic#,0,DECODE(value,0,1,value),0))
DEFINE e = SUM(DECODE(statistic#,83,value,0))
SELECT
&e "REDO LOG SPACE REQUESTS",
&e / &t "PER TRANSACTION",
&e / &l "PER LOGON" ,
DECODE(SIGN((&e / &t) - 1), 1, 'Increase LOG_BUFFER', 'OK') comments
FROM
v$sysstat
/
UNDEFINE t l e
@_END
|
Java
|
UTF-8
| 2,243 | 2.390625 | 2 |
[] |
no_license
|
package com.hebidu.example.test1.entity;
import javax.persistence.*;
@Entity
@Table(name = "bs_book_source_type", schema = "bookstore", catalog = "")
public class BsBookSourceTypeEntity {
private long id;
private String name;
private String homeUrl;
private long createTime;
private long updateTime;
@Id
@Column(name = "id")
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
@Basic
@Column(name = "name")
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Basic
@Column(name = "home_url")
public String getHomeUrl() {
return homeUrl;
}
public void setHomeUrl(String homeUrl) {
this.homeUrl = homeUrl;
}
@Basic
@Column(name = "create_time")
public long getCreateTime() {
return createTime;
}
public void setCreateTime(long createTime) {
this.createTime = createTime;
}
@Basic
@Column(name = "update_time")
public long getUpdateTime() {
return updateTime;
}
public void setUpdateTime(long updateTime) {
this.updateTime = updateTime;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
BsBookSourceTypeEntity that = (BsBookSourceTypeEntity) o;
if (id != that.id) return false;
if (createTime != that.createTime) return false;
if (updateTime != that.updateTime) return false;
if (name != null ? !name.equals(that.name) : that.name != null) return false;
if (homeUrl != null ? !homeUrl.equals(that.homeUrl) : that.homeUrl != null) return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (id ^ (id >>> 32));
result = 31 * result + (name != null ? name.hashCode() : 0);
result = 31 * result + (homeUrl != null ? homeUrl.hashCode() : 0);
result = 31 * result + (int) (createTime ^ (createTime >>> 32));
result = 31 * result + (int) (updateTime ^ (updateTime >>> 32));
return result;
}
}
|
Python
|
UTF-8
| 724 | 3.953125 | 4 |
[] |
no_license
|
from datetime import datetime
#def function_name(parameters):
# def sumCal(numberC,numberD):
#     result = numberC**numberD
#     return result
# print(sumCal(10,2))
def GetDatetime(formatDate):
    #formatDate is the date string before conversion, e.g. "109年8月"
    #split the string to get the year
    tmpYear = formatDate.split("年")[0]
    #convert the ROC (Minguo) year to the Western year
    nowYear = str(int(tmpYear)+1911)
    #split the string to get the month
    tmpMonth = formatDate.split("年")[1]
    nowMonth = tmpMonth.split("月")[0]
    #build a date string for the datetime.strptime() function
    nowDate= nowYear+"/"+nowMonth+"/1"
resultDate = datetime.strptime(nowDate,"%Y/%m/%d")
return resultDate
print(GetDatetime("109年8月"))
print(GetDatetime("109年7月"))
|
Markdown
|
UTF-8
| 2,447 | 2.578125 | 3 |
[] |
no_license
|
# dhis2-dummydatatracker
Tool box to generate and manage dummy data in Tracker
## Tools for dummy data generation in Tracker
Python 3.6+ is required.
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install required packages.
```bash
pip install -r requirements.txt
```
Create an auth.json file containing the credentials of the default server to use. The script relies on a user named 'robot' with the SuperUser role having an account on the server.
```json
{
"dhis": {
"baseurl": "https://who-dev.dhis2.org/tracker_dev",
"username": "robot",
"password": "TOPSECRET"
}
}
```
To be able to work with Google Spreadsheets, the script needs a token in the form of a credentials.json file. Please contact manuel@dhis2.org to get a token.
## Usage
1. Create a flat file in Google Spreadsheets for your program. If a matching flat file already exists, the Google Spreadsheet is updated instead.
positional mandatory arguments:
program_uid the uid of the program to use
optional arguments:
-h, --help show the help message and exit
-wtf ORGUNIT, --with_teis_from ORGUNIT
Pulls TEIs from specified org unit and adds them to flat file. Eg: --with_teis_from_ou=Q7RbNZcHrQ9
-rs stage_uid number_repeats, --repeat_stage stage_uid number_repeats
provide a stage uid which is REPEATABLE and specify how many times you are planning to enter it. Eg: --repeat_stage QXtjg5dh34A 3
-sw email, --share_with email
email address to share the generated spreadsheet with as OWNER. Eg: --share_with=peter@dhis2.org
```bash
python create_flat_file.py Lt6P15ps7f6 --with_teis_from_ou=GZ5Ty90HtW --share_with=johndoe@dhis2.org
python create_flat_file Lt6P15ps7f6 --repeat_stage Hj38Uhfo012 5 --repeat_stage 77Ujkfoi9kG 3 --share_with=person1@dhis2.org --share_with=person2@dhis2.org
```
2. Create the dummy TEIs from flat file.
positional mandatory arguments:
document_id the id of the spreadsheet to use
optional arguments:
-h, --help show the help message and exit
For https://docs.google.com/spreadsheets/d/1xOeOpz4lSTdtiAJTuLwx40GgC5n9fkH2gltiB-sHmwg do:
```bash
python create_TEIs.py 1xOeOpz4lSTdtiAJTuLwx40GgC5n9fkH2gltiB-sHmwg
```
3. Remove dummy data if needed. The script takes no arguments (it is currently a draft).
```bash
python delete_TEIs.py
```
4. Remove google spreadsheet created with create_flat_file:
```bash
python delete_sh.py 1xOeOpz4lSTdtiAJTuLwx40GgC5n9fkH2gltiB-sHmwg
```
|
Shell
|
UTF-8
| 1,279 | 3.21875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
GREEN='\033[0;32m'
LB='\033[1;34m' # light blue
NC='\033[0m' # No Color
./utils/dependency-chec-helm.sh
echo "############################################################################"
echo "Now deploying Rancher latest in namespace cattle-system"
echo "############################################################################"
echo "set kubeconfig"
export KUBECONFIG=`pwd`/k3s.yaml && echo KUBECONFIG=$KUBECONFIG
kubectl create namespace cattle-system
# OFFICIAL RANCHER HELM CHART DOES NOT SUPPORT KUBERNETES 1.20 AT THE MOMENT
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm install rancher rancher-latest/rancher \
--namespace cattle-system \
--set hostname=rancher.my.org
# Wait a few seconds for deployment to be created
sleep 5
kubectl -n cattle-system rollout status deploy/rancher
echo "Exposing Rancher deployment with loadbalancer service"
kubectl expose deployment rancher --type=LoadBalancer --name=rancher -n cattle-system
kubectl get svc rancher -n cattle-system
echo "############################################################################"
echo -e "[${GREEN}Success rancher deployment rolled out${NC}]"
echo "############################################################################"
|
Python
|
UTF-8
| 10,161 | 2.875 | 3 |
[] |
no_license
|
import numpy as np
import pandas as pd
from astrodbkit import astrodb
def matches_sortCSV(gaia_catalogue, search_radius = 0.000048481, db_path = '../BDNYCdb_practice/bdnycdev_copy.db', save_all = False, save_needsreview = False):
"""Sort Gaia data into separate dataframes row by row, one for 'matches', one for 'new objects', and one for objects with more than one match that need review. Returns dataframes for matches and new objects.
Required Parameters:
gaia_catalogue: pandas dataframe of data. Requires RA and DEC for matching against the database
Optional Parameters:
search_radius: __float__ Search radius for cross matching RA/DEC against database.
Default is 0.000048481 radians (10 arcseconds).
db_path: __string__ Local path to database.
Default is '../BDNYCdb_practice/bdnycdev_copy.db'.
save_all: __boolean__ Saves all dataframes as CSV files. Default is False
save_needsreview: __boolean__ Saves only the needs_review dataframe as a CSV file.
Default is False.
"""
    db = astrodb.Database(db_path)
# matches will store gaia data for objects in BDNYC database
matches = pd.DataFrame(columns=np.insert(gaia_catalogue.columns.values, 0, 'source_id', axis=0))
# new_objects will store gaia data for objects that do not exist in BDNYC database
new_objects = pd.DataFrame(columns=gaia_catalogue.columns.values)
# needs_review will store gaia data for objects that have too many matched in the database and need further review
needs_review = pd.DataFrame(columns=gaia_catalogue.columns.values)
# ===============================================
# sort each row of gaia data into matches/new_objects using celestial coordinates: right ascension (ra/RA) and declination (dec/DEC)
# ===============================================
for i in range(len(gaia_catalogue)):
        results = db.search((gaia_catalogue['RA'][i], gaia_catalogue['DEC'][i]), 'sources', radius=search_radius, fetch=True)
if len(results) == 1:
matches = matches.append(gaia_catalogue.loc[[i]])
            matches.loc[i, 'source_id'] = results['id'][0]
elif len(results)>1:
            # more than one match in the database: flag this row for manual review
needs_review = needs_review.append(gaia_catalogue.loc[[i]])
else:
new_objects = new_objects.append(gaia_catalogue.loc[[i]])
if save_all==True:
matches.to_csv('matches.csv')
new_objects.to_csv('new_objects.csv')
needs_review.to_csv('needs_review.csv')
print('matches, new_objects, and needs_review saved as CSV files.')
if save_needsreview==True:
needs_review.to_csv('needs_review.csv')
return matches, new_objects
def generateMatchtables(matches, db, addToDb=False):
    # db: an astrodb.Database handle; required when addToDb is True
##################################################
# matches tables
##################################################
# create new empty list to store data we want to add to database
matchParallax_data = list()
matchPropermotions_data = list()
matchPhotometry_data = list()
# append the column name (as it's written in the BDNYC database) to match on the appropriate column
matchParallax_data.append(['source_id','parallax', 'parallax_unc','publication_shortname', 'adopted','comments'])
matchPropermotions_data.append(['source_id','proper_motion_ra', 'proper_motion_ra_unc','proper_motion_dec', 'proper_motion_dec_unc','publication_shortname', 'comments'])
matchPhotometry_data.append(['source_id','band', 'magnitude','magnitude_unc', 'publication_shortname', 'comments'])
for i in range(len(matches)):
matchParallax_data.append([matches.iloc[[i]]['source_id'].values[0], matches.iloc[[i]]['PARALLAX'].values[0], matches.iloc[[i]]['PARALLAX_ERROR'].values[0], 'GaiaDR2', 1, 'added by SpectreCell'])
matchPropermotions_data.append([matches.iloc[[i]]['source_id'].values[0], matches.iloc[[i]]['PMRA'].values[0], matches.iloc[[i]]['PMRA_ERROR'].values[0], matches.iloc[[i]]['PMDEC'].values[0], matches.iloc[[i]]['PMDEC_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
matchPhotometry_data.append([matches.iloc[[i]]['source_id'].values[0], 'GaiaDR2_G', matches.iloc[[i]]['PHOT_G_MEAN_MAG'].values[0], matches.iloc[[i]]['PHOT_G_MEAN_MAG_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
matchPhotometry_data.append([matches.iloc[[i]]['source_id'].values[0], 'GaiaDR2_BP', matches.iloc[[i]]['PHOT_BP_MEAN_MAG'].values[0], matches.iloc[[i]]['PHOT_BP_MEAN_MAG_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
matchPhotometry_data.append([matches.iloc[[i]]['source_id'].values[0], 'GaiaDR2_RP', matches.iloc[[i]]['PHOT_RP_MEAN_MAG'].values[0], matches.iloc[[i]]['PHOT_RP_MEAN_MAG_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
if addToDb==True:
db.add_data(matchParallax_data, 'parallaxes')
db.add_data(matchPropermotions_data, 'proper_motions')
db.add_data(matchPhotometry_data, 'photometry')
return matchParallax_data, matchPropermotions_data, matchPhotometry_data
def generateNewObjTables(new_objects, db, addSourceTable=False):
##################################################
# new objects tables
##################################################
# create new empty list to store data we want to add to database
newobjects_data = list()
# append the column name (as it's written in the BDNYC database) to match on the appropriate column
newobjects_data.append(['ra','dec', 'designation','publication_shortname', 'shortname','names', 'comments'])
for i in range(len(new_objects)):
newobjects_data.append([new_objects.iloc[[i]]['RA'].values[0], new_objects.iloc[[i]]['DEC'].values[0], new_objects.iloc[[i]]['DISCOVERYNAME'].values[0], 'GaiaDR2', new_objects.iloc[[i]]['SHORTNAME'].str.replace('J', '').str.strip().values[0], new_objects.iloc[[i]]['SOURCE_ID'].values[0],'added by SpectreCell'])
if addSourceTable==True:
db.add_data(newobjects_data, 'sources')
# create new empty list to store data we want to add to database
newObjParallax_data = list()
newObjPropermotions_data = list()
newObjPhotometry_data = list()
# append the column name (as it's written in the BDNYC database) to match on the appropriate column
newObjParallax_data.append(['source_id','parallax', 'parallax_unc','publication_shortname', 'adopted','comments'])
newObjPropermotions_data.append(['source_id','proper_motion_ra', 'proper_motion_ra_unc','proper_motion_dec', 'proper_motion_dec_unc','publication_shortname', 'comments'])
newObjPhotometry_data.append(['source_id','band', 'magnitude','magnitude_unc', 'publication_shortname', 'comments'])
for i in range(len(new_objects)):
db_sourceid=db.search((new_objects['RA'].iloc[[i]], new_objects['DEC'].iloc[[i]]), 'sources', radius=0.00278, fetch=True)['id'][0]
newObjParallax_data.append([db_sourceid, new_objects.iloc[[i]]['PARALLAX'].values[0], new_objects.iloc[[i]]['PARALLAX_ERROR'].values[0], 'GaiaDR2', 1, 'added by SpectreCell'])
newObjPropermotions_data.append([db_sourceid, new_objects.iloc[[i]]['PMRA'].values[0], new_objects.iloc[[i]]['PMRA_ERROR'].values[0], new_objects.iloc[[i]]['PMDEC'].values[0], new_objects.iloc[[i]]['PMDEC_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GaiaDR2_G', new_objects.iloc[[i]]['PHOT_G_MEAN_MAG'].values[0], new_objects.iloc[[i]]['PHOT_G_MEAN_MAG_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GaiaDR2_BP', new_objects.iloc[[i]]['PHOT_BP_MEAN_MAG'].values[0], new_objects.iloc[[i]]['PHOT_BP_MEAN_MAG_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GaiaDR2_RP', new_objects.iloc[[i]]['PHOT_RP_MEAN_MAG'].values[0], new_objects.iloc[[i]]['PHOT_RP_MEAN_MAG_ERROR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, '2MASS_J', new_objects.iloc[[i]]['TMASSJ'].values[0], new_objects.iloc[[i]]['TMASSJERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, '2MASS_H', new_objects.iloc[[i]]['TMASSH'].values[0], new_objects.iloc[[i]]['TMASSHERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, '2MASS_K', new_objects.iloc[[i]]['TMASSK'].values[0], new_objects.iloc[[i]]['TMASSKERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'WISE_W1', new_objects.iloc[[i]]['WISEW1'].values[0], new_objects.iloc[[i]]['WISEW1ERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'WISE_W2', new_objects.iloc[[i]]['WISEW2'].values[0], new_objects.iloc[[i]]['WISEW2ERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'WISE_W3', new_objects.iloc[[i]]['WISEW3'].values[0], new_objects.iloc[[i]]['WISEW3ERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GUNN_G', new_objects.iloc[[i]]['GUNNG'].values[0], new_objects.iloc[[i]]['GUNNGERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GUNN_R', new_objects.iloc[[i]]['GUNNR'].values[0], new_objects.iloc[[i]]['GUNNRERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GUNN_I', new_objects.iloc[[i]]['GUNNI'].values[0], new_objects.iloc[[i]]['GUNNIERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GUNN_Z', new_objects.iloc[[i]]['GUNNZ'].values[0], new_objects.iloc[[i]]['GUNNZERR'].values[0],'GaiaDR2','added by SpectreCell'])
newObjPhotometry_data.append([db_sourceid, 'GUNN_Y', new_objects.iloc[[i]]['GUNNY'].values[0], new_objects.iloc[[i]]['GUNNYERR'].values[0],'GaiaDR2','added by SpectreCell'])
return newObjParallax_data, newObjPropermotions_data, newObjPhotometry_data
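
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (the CSV file name below is an assumption; the
# functions themselves are defined above):
#
#   gaia_catalogue = pd.read_csv('gaia_dr2_sample.csv')   # needs RA/DEC columns
#   matches, new_objects = matches_sortCSV(gaia_catalogue)
#   db = astrodb.Database('../BDNYCdb_practice/bdnycdev_copy.db')
#   generateMatchtables(matches, db, addToDb=False)
#   generateNewObjTables(new_objects, db, addSourceTable=False)
# ---------------------------------------------------------------------------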
|
C#
|
UTF-8
| 8,489 | 2.890625 | 3 |
[] |
no_license
|
// UBMModel
// Model for UnblockMe Solver
// 2014-07-22 PV
#pragma warning disable IDE0052 // Remove unread private members
#pragma warning disable IDE0051 // Remove unused private members
using System;
using System.Collections.Generic;
namespace CS523B_UnblockMe_Solver_Visual
{
public class UBMModel
{
// Describes the blocks of a specific game
// Puzzle 1602
public Block[] Pieces = {
new Block { IsHorizontal = true, RowCol = 0, Length = 3 },
new Block { IsHorizontal = true, RowCol = 1, Length = 2 },
new Block { IsHorizontal = true, RowCol = 2, Length = 2 },
new Block { IsHorizontal = true, RowCol = 4, Length = 2 },
new Block { IsHorizontal = true, RowCol = 5, Length = 3 },
new Block { IsHorizontal = false, RowCol = 0, Length = 3 },
new Block { IsHorizontal = false, RowCol = 2, Length = 2 },
new Block { IsHorizontal = false, RowCol = 3, Length = 3 },
new Block { IsHorizontal = false, RowCol = 4, Length = 3 },
};
public byte redPiece = 2;
public Config Configuration = new Config() { Length = 9, Pos = new byte[] { 0, 2, 2, 4, 0, 1, 3, 3, 0 } };
private readonly SortedSet<int> History = new SortedSet<int>();
private void UBModel()
{
/*
ShowConfig(Configuration);
if (Configuration.IsValid(Pieces))
Console.WriteLine("Config Ok");
else
Console.WriteLine("Invalid config!");
History.Add(Configuration.Signature());
Move(1, Configuration);
Console.WriteLine("{0} configurations analyzed, {1} moves for solution", nbConfig, solutionMoves);
Console.WriteLine();
Console.Write("(Pause)");
Console.ReadLine();
* */
}
private bool foundSolution = false;
private int nbConfig = 0;
private int solutionMoves = 0;
private bool Move(int depth, Config config)
{
nbConfig++;
// is it a winning combination?
if (config.Pos[redPiece] + Pieces[redPiece].Length == 6)
{
ShowConfig(config);
Console.WriteLine("Solution found, depth={0}", depth);
foundSolution = true;
return true;
}
// try to move each piece in both directions 1 step
for (int i = 0; i < config.Length; i++)
{
// Move left/up 1 position
Config newConfig = config.Clone();
newConfig.Pos[i]--;
if (!History.Contains(newConfig.Signature()) && newConfig.IsValid(Pieces))
{
History.Add(newConfig.Signature());
//ShowConfig(newConfig);
if (Move(depth + 1, newConfig))
{
ShowConfig(newConfig);
solutionMoves++;
return true;
}
}
if (foundSolution)
return false;
// Move right/down 1 position
newConfig = config.Clone();
newConfig.Pos[i]++;
if (!History.Contains(newConfig.Signature()) && newConfig.IsValid(Pieces))
{
History.Add(newConfig.Signature());
//ShowConfig(newConfig);
if (Move(depth + 1, newConfig))
{
ShowConfig(newConfig);
solutionMoves++;
return true;
}
if (foundSolution)
return false;
}
}
return false;
}
private readonly ConsoleColor[] Colors = { (ConsoleColor)1, (ConsoleColor)2, (ConsoleColor)3, (ConsoleColor)5, (ConsoleColor)6, (ConsoleColor)8, (ConsoleColor)9, (ConsoleColor)10, (ConsoleColor)11, (ConsoleColor)12, (ConsoleColor)13, (ConsoleColor)14, (ConsoleColor)15 };
private void ShowConfig(Config config)
{
Console.WriteLine();
for (int r = 0; r < 6; r++)
{
for (int c = 0; c < 6; c++)
{
byte i;
for (i = 0; i < config.Length; i++)
{
if (Pieces[i].IsHorizontal)
{
if (Pieces[i].RowCol == r && c >= config.Pos[i] && c <= config.Pos[i] + Pieces[i].Length - 1)
break;
}
else
{
if (Pieces[i].RowCol == c && r >= config.Pos[i] && r <= config.Pos[i] + Pieces[i].Length - 1)
break;
}
}
char ch;
if (i == config.Length)
{
Console.BackgroundColor = ConsoleColor.Black;
//ch = (char)183; // centered dot
}
else if (i == redPiece)
{
Console.BackgroundColor = ConsoleColor.Red;
//ch = (char)(49 + i);
//ch = ' ';
}
else
{
Console.BackgroundColor = Colors[i];
//ch = (char)(49 + i);
//ch = ' ';
}
ch = (char)183; // centered dot
Console.Write(ch);
Console.Write(ch);
}
Console.BackgroundColor = ConsoleColor.Black;
Console.WriteLine();
}
}
}
// Represents one wooden block in the game
public class Block
{
public bool IsHorizontal;
public byte RowCol;
public byte Length;
}
public struct Config
{
public byte Length;
public byte[] Pos;
public Config Clone()
{
Config c2 = new Config
{
Length = Length,
Pos = (byte[])Pos.Clone()
};
return c2;
}
// Equivalent of Pos, but packed in a 32-bit integer
public int Signature()
{
return Pos[0] + (Pos[1] << 3) + (Pos[2] << 6) + (Pos[3] << 9) + (Pos[4] << 12) + (Pos[5] << 15) + (Pos[6] << 18) + (Pos[7] << 21) + (Pos[8] << 24); // +(Pos[9] << 27);
}
// Check the validity of a configuration
public bool IsValid(Block[] Pieces)
{
for (int i = 0; i < Length; i++)
{
// Check that the piece is in the board
if (Pos[i] == 255 || Pos[i] + Pieces[i].Length > 6) return false;
// Check that it doesn't cover another piece
for (int j = 0; j < Length; j++)
if (j != i)
{
// Same orientation?
if (Pieces[i].IsHorizontal == Pieces[j].IsHorizontal)
{
// Only check for overlap if they are in the same row/col
                            if (Pieces[i].RowCol == Pieces[j].RowCol)
                            {
                                if (Pos[i] < Pos[j])
                                {
                                    if (Pos[i] + Pieces[i].Length - 1 >= Pos[j])
                                        return false;
                                }
                                else if (Pos[j] + Pieces[j].Length - 1 >= Pos[i])
                                {
                                    return false;
                                }
                            }
}
else
{
// Check for intersection: piece j rowcol is in the rowcol range of piece i, and piece i rowcol is in the range of piece j
if (Pieces[j].RowCol >= Pos[i] && Pieces[j].RowCol <= Pos[i] + Pieces[i].Length - 1)
if (Pieces[i].RowCol >= Pos[j] && Pieces[i].RowCol <= Pos[j] + Pieces[j].Length - 1)
return false;
}
}
}
return true;
}
}
}
|
C++
|
UTF-8
| 948 | 3.109375 | 3 |
[] |
no_license
|
// Two independent LeetCode solutions kept in one snippet; each class is meant
// to be compiled on its own (both are named Solution).
#include <string>
#include <unordered_set>
#include <vector>
#include <queue>
#include <functional>
using namespace std;

// LeetCode 3: length of the longest substring without repeating characters
// (two-pointer sliding window over a set of seen characters).
class Solution {
public:
int lengthOfLongestSubstring(string s) {
if (s.length() <= 1) { return s.length(); }
int res = -1, right = 0;
unordered_set<char> store;
for (int left = 0; left < s.length(); ++left) {
while (right < s.length() && !store.count(s[right])) {
store.insert(s[right]);
++right;
}
res = max(res, right - left);
store.erase(s[left]);
if (right >= s.length()) { break; }
}
return res;
}
};
// LeetCode 264: n-th ugly number, generated with a min-heap and a seen-set
// over multiples of 2, 3, and 5.
class Solution {
public:
int nthUglyNumber(int n) {
vector<int> factors = { 2, 3, 5 };
unordered_set<long> seen;
priority_queue<long, vector<long>, greater<long>> heap;
seen.insert(1L);
heap.push(1L);
int ugly = 0;
for (int i = 0; i < n; i++) {
long curr = heap.top();
heap.pop();
ugly = (int)curr;
for (int factor : factors) {
long next = curr * factor;
if (!seen.count(next)) {
seen.insert(next);
heap.push(next);
}
}
}
return ugly;
}
};
|
Java
|
UTF-8
| 1,197 | 2.21875 | 2 |
[
"MIT"
] |
permissive
|
package kaptainwutax.featureutils.decorator.ore.nether;
import kaptainwutax.biomeutils.biome.Biomes;
import kaptainwutax.featureutils.decorator.ore.HeightProvider;
import kaptainwutax.featureutils.decorator.ore.ScatterOreDecorator;
import kaptainwutax.mcutils.block.Blocks;
import kaptainwutax.mcutils.version.MCVersion;
import kaptainwutax.mcutils.version.VersionMap;
public class SmallDebrisOre extends ScatterOreDecorator<ScatterOreDecorator.Config, ScatterOreDecorator.Data<SmallDebrisOre>> {
public static final VersionMap<Config> CONFIGS = new VersionMap<Config>()
.add(MCVersion.v1_16, new Config(16, 7, 2, 1, HeightProvider.range(8, 16, 128), Blocks.ANCIENT_DEBRIS, BASE_STONE_NETHER)
.add(13, 7, Biomes.CRIMSON_FOREST)
.add(14, 7, Biomes.WARPED_FOREST))
.add(MCVersion.v1_17, new Config(16, 7, 2, 1, HeightProvider.uniformRange(8, 119), Blocks.ANCIENT_DEBRIS, BASE_STONE_NETHER)
.add(13, 7, Biomes.CRIMSON_FOREST)
.add(14, 7, Biomes.WARPED_FOREST));
public SmallDebrisOre(MCVersion version) {
super(CONFIGS.getAsOf(version), version);
}
@Override
public String getName() {
return name();
}
public static String name() {
return "small_debris_ore";
}
}
|
Markdown
|
UTF-8
| 1,343 | 2.84375 | 3 |
[] |
no_license
|
# Notes on Git
## Setup
### Set identity
~~~~
git config [--global] user.email "user@email.com"
git config [--global] user.name "Name"
~~~~
### Download repository
`git clone https://repository.git`
### View remote repositories
`git remote -v`
### Add upstream repository
`git remote add upstream https://repository.git`
## Updating
### Without review
* origin: `git pull`
* upstream: `git pull upstream master`
### With review
~~~~
git fetch origin master
git log ..origin/master
git merge origin/master
~~~~
## Committing
### Commit all changes to local
~~~~
git add .
git commit -m "Commit message"
~~~~
### Commit to remote repository
* origin: `git push origin master`
* upstream: `git push upstream master`
## Branch operations
### View available branches
* local: `git branch`
* remote: `git branch -r`
* upstream: `git remote show upstream`
### Delete branches
* local: `git branch -d branch-name`
* remote: `git branch -r -d branch-name`
### Create new branch
`git checkout -b branch-name`
## Reverting
### Discard uncommitted changes
`git reset --hard`
or
`git checkout -p`
### Revert to previous commit
`git reset --hard <commit>`
`<commit>` can be part of a commit hash, or `HEAD~1` for previous commit.
## Untrack repository files
~~~
git rm -r --cached <filename>
git add -u
~~~
Commit, pull, then push to remote.
|
C#
|
UTF-8
| 796 | 2.640625 | 3 |
[] |
no_license
|
using NUnit.Framework;
using Lab_17_NorthwindTests;
using LAB_014_LINQ;
namespace Lab_17_NwindTests
{
public class Tests
{
[SetUp]
public void Setup()
{
}
[Test]
public void Test1()
{
Assert.Pass();
}
[TestCase(null, -1)] // how many cust total?
[TestCase("London", 5)]
[TestCase("London", 1)]
public void TestNumberOfNorthwindCustomers(string city, int expected)
{
//arrange
//var testInstance = new LAB_014_LINQ.NorthwindCustomers();
//act
//var actual = testInstance.NumberOfNorthwindCustomers();
//expected
//Asert.AreEqual(expected, LAB_014_LINQ.Program.GetCustomers(city));
}
}
}
|
Java
|
UTF-8
| 1,897 | 2.265625 | 2 |
[] |
no_license
|
package uk.co.wehavecookies56.kk.common.container.inventory;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.util.EnumHand;
import net.minecraft.util.text.ITextComponent;
import net.minecraft.util.text.TextComponentString;
import uk.co.wehavecookies56.kk.common.core.helper.TextHelper;
import uk.co.wehavecookies56.kk.common.item.ItemSynthesisBagM;
import uk.co.wehavecookies56.kk.common.lib.Strings;
public class InventorySynthesisBagM extends AbstractInventory {
private String name = TextHelper.localize(Strings.SynthesisBagMInventory);
private static final String SAVE_KEY = "SynthesisBagMInventory";
public static final int INV_SIZE = 28;
private final ItemStack invStack;
public InventorySynthesisBagM (ItemStack stack) {
inventory = new ItemStack[INV_SIZE];
this.invStack = stack;
if (!invStack.hasTagCompound()) invStack.setTagCompound(new NBTTagCompound());
readFromNBT(invStack.getTagCompound());
}
@Override
public int getInventoryStackLimit () {
return 64;
}
@Override
public boolean hasCustomName () {
return name.length() > 0;
}
@Override
public void markDirty () {
for (int i = 0; i < getSizeInventory(); i++)
if (getStackInSlot(i) != null && getStackInSlot(i).stackSize == 0) inventory[i] = null;
writeToNBT(invStack.getTagCompound());
}
@Override
public boolean isUseableByPlayer (EntityPlayer player) {
return player.getHeldItem(EnumHand.MAIN_HAND) == invStack;
}
@Override
public boolean isItemValidForSlot (int index, ItemStack stack) {
return !(stack.getItem() instanceof ItemSynthesisBagM);
}
@Override
protected String getNbtKey () {
return SAVE_KEY;
}
@Override
public String getName () {
return name;
}
@Override
public ITextComponent getDisplayName () {
return new TextComponentString(name);
}
}
|
C++
|
UTF-8
| 1,091 | 2.859375 | 3 |
[] |
no_license
|
#include<iostream>
#include<omp.h>
#include<math.h>
using namespace std;
int main()
{
double start = omp_get_wtime(); // omp_get_wtime() returns seconds as a double
int N = 100000;
float arr[N];
for(int i =0;i<N;i++){
arr[i] = 5;
}
long int sum =0;
omp_set_num_threads(1);
#pragma omp parallel for reduction(+:sum)
for(int i =0;i<N;i++){
sum+= arr[i];
}
cout<<sum<<endl;
float min_s = arr[0];
#pragma omp parallel for reduction(min:min_s)
for(int i =0;i<N;i++){
if(arr[i] < min_s)
{
min_s = arr[i];
}
}
cout<<min_s<<endl;
float max_s = arr[0];
#pragma omp parallel for reduction(max:max_s)
for(int i =0;i<N;i++){
if(arr[i] > max_s)
{
max_s = arr[i];
}
}
cout<<max_s<<endl;
float mean = sum/N;
#pragma omp parallel for
for(int i =0;i<N;i++){
arr[i] -= mean;
}
#pragma omp parallel for
for(int i =0;i<N;i++){
arr[i] *= arr[i];
}
float sums =0;
#pragma omp parallel for reduction(+:sums)
for(int i =0;i<N;i++){
sums+= arr[i];
}
float var = sums/N;
float stddev = sqrt(var);
cout<<var<<endl;
cout<<stddev<<endl;
cout<<"Time taken : "<<omp_get_wtime() - start<<" s";
return 0;
}
|
PHP
|
UTF-8
| 5,750 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
<?php
namespace App\Repositories;
use App\Models\BankModel;
use App\Models\ParcelModel;
use App\Models\User;
use Illuminate\Support\Facades\DB;
use Illuminate\Support\Facades\Schema;
class HomeActionsRepo
{
public static function splitInputs(array $arr)
{
$bankSchema = Schema::getColumnListing("bank");
$parcelSchema = Schema::getColumnListing("parcel");
$effected = [];
foreach ($arr as $key => $value) {
in_array($key, $bankSchema) && ($effected["bank"][$key] = $value);
in_array($key, $parcelSchema) && ($effected["parcel"][$key] = $value);
}
return $effected;
}
public function store(array $effected)
{
try {
DB::beginTransaction();
$dataPacket = new BankModel($effected['bank']);
$dataPacket["date"] = date('Y.m.d');
$dataPacket->save();
            $insertedId = $dataPacket->parcel_id;
$dataPacket = new ParcelModel($effected['parcel']);
            $dataPacket->id = $insertedId;
$dataPacket->save();
$user = request()->user();
if (!$user->isSender()) {
$user->roles()->attach(2);
}
DB::commit();
} catch (\PDOException $e) {
DB::rollBack();
return $e;
}
        return $insertedId;
}
public function show_one(int $id)
{
$order = BankModel::with("parcels:id,price,phone")->where("parcel_id", $id)->first();
if ($order) {
return $order->toArray();
}
return false;
}
public function update($id, $effected)
{
try {
DB::beginTransaction();
count($effected["bank"]) && BankModel::where("parcel_id", $id)->update($effected["bank"]);
count($effected["parcel"]) && ParcelModel::where("id", $id)->update($effected["parcel"]);
DB::commit();
} catch (\PDOException $e) {
DB::rollBack();
return $e;
}
return 1;
}
public function destroy($id)
{
try {
DB::beginTransaction();
BankModel::destroy($id);
ParcelModel::destroy($id);
DB::commit();
} catch (\PDOException $e) {
DB::rollBack();
return $e;
}
return 1;
}
public function get_for_edit(int $id)
{
$order = BankModel::with("parcels:id,price,phone")
->where("parcel_id", $id)
->first();
if ($order) {
return $order;
}
return false;
}
public function get_default_values_to_form()
{
$array = [];
$array['price'] = DB::table('parcel')->selectRaw("MAX(price) as price_to ,MIN(price) as price_from")->first();
$array['date'] = DB::table('bank')->selectRaw("MAX(date) as date_to ,MIN(date) as date_from")->first();
$array['endpoints'] = DB::table('bank')->selectRaw("DISTINCT endpoint")->get();
$bankSchema = Schema::getColumnListing("bank");
$parcelSchema = Schema::getColumnListing("parcel");
$array['columns'] = array_diff(array_merge($bankSchema, $parcelSchema), ["created_at", "updated_at"]);
return $array;
}
public function get_all($isAdmin = false, $name = null)
{
$orders = (new BankModel)->join("parcel", "bank.parcel_id", "=", "parcel.id");
if ($isAdmin) {
$orders = $orders->get(["parcel_id", "sender_name", "recipient", "product", "endpoint", "date", "price", "phone"]);
} else {
$orders = $orders
->where('sender_name', $name)
->get(["parcel_id", "sender_name", "recipient", "product", "endpoint", "date", "price", "phone"]);
}
return $orders;
}
public function search_by_inputs(array $arr, bool $isAdmin)
{
if ($isAdmin) {
$builder = (new BankModel())
->join("parcel", "bank.parcel_id", "=", "parcel.id");
} else {
$sender_name = request()->user()->name;
$builder = (new BankModel())
->join("parcel", "bank.parcel_id", "=", "parcel.id")
->where('sender_name', $sender_name);
}
foreach ($arr as $key => $value) {
if (($key != 'price_from'
&& $key != 'price_to'
&& $key != 'date_to'
&& $key != 'date_from'
&& $key != 'order_by'
&& $key != 'order_type')
&& $value != null
&& $value != 'all') {
$builder = $builder->where($key, $value);
}
}
$orders = $builder
->whereBetween('date', [$arr['date_from'], $arr['date_to']])
->whereBetween('price', [$arr['price_from'], $arr['price_to']])
->orderBy($arr['order_by'], $arr['order_type'])
->get();
return $orders;
}
public $rules_for_index = [
"sender_name" => ["nullable", "string"],
"recipient" => ["nullable", "string"],
"product" => ["nullable", "string"],
"price_from" => ["nullable", "regex:/^\d*(\.\d{2})?$/", "required_with:price_to"],
"price_to" => ["nullable", "regex:/^\d*(\.\d{2})?$/", "required_with:price_from"],
"date_from" => ["date_format:Y-m-d", "required_with:date_to"],
"date_to" => ["date_format:Y-m-d", "required_with:date_from"],
"endpoint" => ["required", "string"],
"order_by" => ["required", "string"],
"order_type" => ["required", "string"],
];
}
|
Java
|
UTF-8
| 452 | 2.15625 | 2 |
[] |
no_license
|
package com.test01.anno;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import com.test01.anno.MyNickName;
public class MTest {
public static void main(String[] args) {
ApplicationContext factory = new ClassPathXmlApplicationContext("com/test01/anno/beans.xml");
MyNickName nick = (MyNickName) factory.getBean("myNickName");
System.out.println(nick);
}
}
|
Markdown
|
UTF-8
| 813 | 2.84375 | 3 |
[] |
no_license
|
# Topics
* Command Line
* Git
* For loops
* If/else/elif statements
* Lists
* Functions
* Dictionaries
* Syntax
* Debugging
* Flowcharts
# Examples of question types you may find
* I might ask you what to type into the command line to navigate from one folder to another
* Core git concepts, for instance matching a term up with its definition
* Fill in what is needed to complete a piece of code
* Choose the correct version of a piece of code
* Refer to the requested piece of information in a list or dictionary, e.g. refer to the second item in a list, or to the value associated with a specified key in a dictionary
* Identify what a function will do
* Know what it means for a function to return a value
* Be able to create a simple flowchart
* Be able to understand a flowchart and explain what it means
|
Java
|
UTF-8
| 2,620 | 2.484375 | 2 |
[] |
no_license
|
package Control;
import Model.Categoria;
import Model.Gerenciadores.GerenciaCategoria;
import Model.Gerenciadores.GerenciaProduto;
import Model.PDFProduto;
import javafx.event.ActionEvent;
import javafx.fxml.FXMLLoader;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.control.*;
import javafx.scene.control.cell.PropertyValueFactory;
import javafx.scene.input.KeyEvent;
import javafx.stage.FileChooser;
import javafx.stage.Stage;
import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
public class RPController {
public ComboBox categorias;
public Button btFiltrar;
public TextField tfProduto;
public Button btVoltar;
public TableView tabelinha;
public TableColumn nome;
public TableColumn descricao;
public TableColumn valor;
public Button btPDF;
Stage stage = null;
Parent myNewScene = null;
public void initialize(){
try {
nome.setCellValueFactory(new PropertyValueFactory<>("Nome"));
descricao.setCellValueFactory(new PropertyValueFactory<>("descricao"));
valor.setCellValueFactory(new PropertyValueFactory<>("Valor"));
categorias.setItems(GerenciaCategoria.getInstance().listaCategorias());
tabelinha.setItems(GerenciaProduto.getInstance().listaProdutos());
} catch (SQLException e) {
e.printStackTrace();
}
}
public void Filtrar(ActionEvent actionEvent) throws SQLException {
Categoria categoria = (Categoria) categorias.getSelectionModel().getSelectedItem();
tabelinha.setItems(GerenciaProduto.getInstance().buscaCategoria(categoria));
categorias.getSelectionModel().clearSelection();
}
public void TextFilter(KeyEvent keyEvent) throws SQLException {
tabelinha.setItems(GerenciaProduto.getInstance().listaProdutos(tfProduto.getText()));
}
public void Voltar(ActionEvent actionEvent) throws IOException {
stage = (Stage) btVoltar.getScene().getWindow();
myNewScene = FXMLLoader.load(getClass().getResource("../View/ViewAdm.fxml"));
Scene scene = new Scene(myNewScene);
stage.setScene(scene);
stage.setTitle("ARTE'S DRI");
stage.show();
}
public void gerarPDF(ActionEvent av) {
PDFProduto geradorPDF = new PDFProduto();
Button bt = (Button) av.getSource();
FileChooser fc = new FileChooser();
File f = fc.showSaveDialog(null);
if(f != null){
String arq = f.getAbsolutePath();
geradorPDF.criaPdf_3(arq);
}
}
}
|
C++
|
SHIFT_JIS
| 1,518 | 2.671875 | 3 |
[] |
no_license
|
#pragma once
#include "Common.h"
class Title : public MyApp::Scene
{
private:
public:
const Font font = Font(40);
Title(const InitData& init) : IScene(init)
{
        // Runs once, when this scene is created
}
    // Called every frame by updateAndDraw()
void update() override
{
if (KeySpace.up())
{
            // Switch to the Game scene
changeScene(State::Game);
}
}
    // Called every frame, after update()
void draw() const override
{
TextureAsset(U"Title").scaled(3.5).drawAt(300, 300);
        // Arrow key hints: Line(start, end).drawArrow(line thickness, arrowhead size, color)
Line(150, 460, 150, 502)
.drawArrow(10, Vec2(20, 20), Palette::White);
Line(174, 423, 218, 423)
.drawArrow(10, Vec2(20, 20), Palette::White);
Line(126, 423, 82, 423)
.drawArrow(10, Vec2(20, 20), Palette::White);
Line(150, 400, 150, 358)
.drawArrow(10, Vec2(20, 20), Palette::White);
font(U"EEEړL[").draw(249, 395);
font(U"ZL[EEEe").draw(370, 520);
font(U"SpaceL[ŃX^[g").draw(220, 300);
font(U"er").drawAt(400,100);
}
};
|
Java
|
UTF-8
| 2,660 | 1.859375 | 2 |
[
"MIT"
] |
permissive
|
package io.fundrequest.core.config;
import feign.Client;
import feign.Contract;
import feign.Feign;
import feign.auth.BasicAuthRequestInterceptor;
import feign.codec.Decoder;
import feign.codec.Encoder;
import feign.httpclient.ApacheHttpClient;
import io.fundrequest.core.identity.IdentityAPIClient;
import io.fundrequest.core.identity.IdentityAPIClientDummy;
import org.apache.http.client.HttpClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cloud.netflix.feign.FeignClientsConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
@Configuration
@Import( {FeignClientsConfiguration.class})
public class IdentityApiConfig {
@Bean
@ConditionalOnProperty(value = "io.fundrequest.mock.identity-api", havingValue = "false", matchIfMissing = true)
public Client identityApiFeignClient(@Autowired(required = false) final HttpClient httpClient) {
if (httpClient != null) {
return new ApacheHttpClient(httpClient);
}
return new ApacheHttpClient();
}
@Bean
@ConditionalOnProperty(value = "io.fundrequest.mock.identity-api", havingValue = "false", matchIfMissing = true)
public IdentityAPIClient identityAPIClient(final Decoder decoder,
final Encoder encoder,
final Client identityApiFeignClient,
final Contract contract,
@Value("${io.fundrequest.identity.api.url}") final String url,
@Value("${io.fundrequest.identity.api.user.name}") final String username,
@Value("${io.fundrequest.identity.api.user.password}") final String password) {
return Feign.builder()
.client(identityApiFeignClient)
.encoder(encoder)
.decoder(decoder)
.contract(contract)
.requestInterceptor(new BasicAuthRequestInterceptor(username, password))
.target(IdentityAPIClient.class, url);
}
@Bean
@ConditionalOnProperty(value = "io.fundrequest.mock.identity-api", havingValue = "true")
public IdentityAPIClient mockIdentityAPIClient() {
return new IdentityAPIClientDummy();
}
}
|
Java
|
UTF-8
| 466 | 1.859375 | 2 |
[
"Apache-2.0"
] |
permissive
|
package com.palmelf.eoffice.service.arch.impl;
import com.palmelf.core.service.impl.BaseServiceImpl;
import com.palmelf.eoffice.dao.arch.ArchFondDao;
import com.palmelf.eoffice.model.arch.ArchFond;
import com.palmelf.eoffice.service.arch.ArchFondService;
public class ArchFondServiceImpl extends BaseServiceImpl<ArchFond> implements
ArchFondService {
private ArchFondDao dao;
public ArchFondServiceImpl(ArchFondDao dao) {
super(dao);
this.dao = dao;
}
}
|
JavaScript
|
UTF-8
| 1,427 | 3.109375 | 3 |
[] |
no_license
|
const enviar = document.getElementById("btnEnviar");
const nombre = document.getElementById("txtNombre");
const mensaje = document.getElementById("txtComentario");
enviar.addEventListener('click', function() {
//utilizando el PI Fetch
fetch('http://localhost/app_web/servidor_V4/php/script_form_mensajes.php', {
method: 'POST',
headers: {
"Content-type": "application/json; charset=utf-8"
},
body: JSON.stringify({
_nombre: nombre.value,
_comentario: mensaje.value
})
})
.then(function(respuesta) {
return respuesta.json();
})
.then(function(json) {
console.log(json);
document.getElementById('respuesta').innerHTML = "";
let respuesta = `<tr>
<th>NOMBRE</th>
<th>COMENTARIO</th>
</tr>`;
            // process the JSON object
json.forEach(function(info) {
respuesta += `<tr>
<td>${info.C_nombre}</td>
<td>${info.C_comentario}</td>
</tr>`;
});
document.getElementById('respuesta').innerHTML = respuesta;
})
.catch(function(error) {
console.error("ERROR: ", error);
});
});
|
Java
|
UTF-8
| 4,340 | 2.203125 | 2 |
[] |
no_license
|
package edu.gatech.cs2340.nonprofitdonationtracker.controllers;
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.text.Editable;
import android.view.View;
import android.widget.ArrayAdapter;
import android.widget.EditText;
import android.widget.Spinner;
import android.widget.TextView;
import edu.gatech.cs2340.nonprofitdonationtracker.R;
import edu.gatech.cs2340.nonprofitdonationtracker.models.Category;
import edu.gatech.cs2340.nonprofitdonationtracker.models.LocCategory;
import edu.gatech.cs2340.nonprofitdonationtracker.models.Location;
import edu.gatech.cs2340.nonprofitdonationtracker.models.LocationType;
public class AddLocationActivity extends AppCompatActivity {
private int key;
private EditText name;
private EditText latitude;
private EditText longitude;
private EditText street;
private EditText city;
private EditText state;
private EditText zip;
private EditText phonenum;
private EditText website;
private Spinner catSpinner;
//location has key, name, lat, long, street, city, state
// zipcode, type, phonenum, website and
//DonationList (init. null)
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_add_location);
Intent intent = getIntent();
Bundle extras = intent.getExtras();
assert extras != null;
key = 0;
while (Location.getLocationWithKey(key) != null) {
key++;
}
name = findViewById(R.id.Name);
latitude = findViewById(R.id.Latitude);
longitude = findViewById(R.id.Longitude);
street = findViewById(R.id.Street);
city = findViewById(R.id.City);
state = findViewById(R.id.State);
zip = findViewById(R.id.Zip);
phonenum = findViewById(R.id.Phone);
website = findViewById(R.id.Website);
catSpinner = findViewById(R.id.catSpinner);
ArrayAdapter<LocCategory> adapter = new ArrayAdapter<>(
this, android.R.layout.simple_spinner_item, LocCategory.values());
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
catSpinner.setAdapter(adapter);
}
public void onCancelPressed(View view) {
Intent extrasIntent = getIntent();
Bundle extras = extrasIntent.getExtras();
assert extras != null;
String userType = extras.getString("user_type");
Intent intent = new Intent(this, HomePageActivity.class);
intent.putExtra("user_type", userType);
startActivity(intent);
}
public void onAddLocationPressed(View view) {
Editable nameField = this.name.getText();
String nameString = nameField.toString();
Editable latField = this.latitude.getText();
Double latDouble = Double.parseDouble(latField.toString());
Editable longField = this.longitude.getText();
Double longDouble = Double.parseDouble(longField.toString());
Editable streetField = this.street.getText();
String streetString = streetField.toString();
Editable cityField = this.city.getText();
String cityString = cityField.toString();
Editable stateField = this.state.getText();
String stateString = stateField.toString();
Editable zipField = this.zip.getText();
Integer zipInt = Integer.parseInt(zipField.toString());
Editable phoneField = this.phonenum.getText();
Long phoneLong = Long.parseLong(phoneField.toString());
Editable webField = this.website.getText();
String webString = webField.toString();
Location l = new Location(key, nameString, latDouble, longDouble,
streetString, cityString, stateString, zipInt, (LocationType) catSpinner.getSelectedItem(),
phoneLong, webString);
Location.addLocation(l);
Intent passedIntent = getIntent();
Bundle extras = passedIntent.getExtras();
assert extras != null;
String userType = extras.getString("user_type");
Intent intent = new Intent(this, HomePageActivity.class);
intent.putExtra("user_type", userType);
startActivity(intent);
}
}
|
Python
|
UTF-8
| 531 | 3.125 | 3 |
[] |
no_license
|
def minSetSize(arr):
    # Greedy: remove the most frequent values first; return how many distinct
    # values are needed so that at least half of the array is removed.
    from collections import Counter
    number_counter = Counter(arr)
num_set = set(arr)
num_set = sorted(num_set, key=lambda x: number_counter[x], reverse=True)
print(num_set)
total_count = len(arr)
count = 0
set_count = 0
for n in num_set:
set_count += 1
count += number_counter[n]
if total_count - count <= total_count // 2:
break
return set_count
array = [7,7,7,7,7,7,7]
result = minSetSize(array)
print(result)
|
Python
|
UTF-8
| 938 | 3.40625 | 3 |
[] |
no_license
|
'''
def move(ac):
global p
if ac == "LEFT":
p -= 1
acs.append(ac)
elif ac == "RIGHT":
p += 1
acs.append(ac)
p = 0
acs = []
res = []
n = int(raw_input())
for i in range(n):
n = int(raw_input())
for i in range(n):
acao = raw_input()
if acao[0] == "S":
move(acs[int(acao.split()[2])-1])
else:
move(acao)
print acs
res.append(p)
p = 0
acs = []
for e in res:
print e
'''
def move(ac):
global p
if ac == "LEFT":
p -= 1
acs.append(ac)
elif ac == "RIGHT":
p += 1
acs.append(ac)
else:
acA = acs[int(ac[-1])-1]
move(acA)
p = 0
acs = []
res = []
n = int(raw_input())
for i in range(n):
n = int(raw_input())
for i in range(n):
move(raw_input())
res.append(p)
p = 0
acs = []
for e in res:
print e
|
C++
|
UTF-8
| 2,892 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
// Author: btjanaka (Bryon Tjanaka)
// Problem: (UVa) 380
#include <bits/stdc++.h>
#define FOR(i, a, b) for (int i = a; i < b; ++i)
#define FORe(i, a, b) for (int i = a; i <= b; ++i)
#define PAI(arr, len) /*Print array of integers*/ \
{ \
for (int _i = 0; _i < len; ++_i) { \
if (_i != len - 1) { \
printf("%d ", arr[_i]); \
} else { \
printf("%d", arr[_i]); \
} \
} \
putchar('\n'); \
}
#define PBS(n, len) /*Print a bitset*/ \
{ \
for (int _i = 0; _i < len; ++_i) { \
putchar(n % 2 + '0'); \
n /= 2; \
} \
putchar('\n'); \
}
#define GET(x) scanf("%d", &x)
#define PLN putchar('\n')
#define INF 2147483647
typedef long long ll;
using namespace std;
int main() {
int n;
GET(n);
printf("CALL FORWARDING OUTPUT\n");
// Store call forwarding as a map of source to a vector of arrays.
// Each array is of size 3 and tells a time, duration, and target.
map<int, vector<int*>> forwards;
FORe(i, 1, n) {
// Leaving out free-ing of allocated memory - memory leaks - oops
forwards.clear();
// Record forwarding times
int source;
while (scanf("%d", &source) && source != 0) {
// time - duration - target
int* entry = (int*)calloc(sizeof(int), 3);
scanf("%d %d %d", entry, entry + 1, entry + 2);
if (forwards.find(source) == forwards.end()) {
forwards.insert(std::pair<int, vector<int*>>(source, {}));
}
forwards[source].push_back(entry);
}
printf("SYSTEM %d\n", i);
// Process all times
int time;
int extension;
while (scanf("%d", &time) && time != 9000) {
scanf("%d", &extension);
// Iterate until there are no more redirects
int ext_itr = extension;
while (true) {
// Exit if cannot find another forwarding
if (forwards.find(ext_itr) == forwards.end()) break;
// Try to find a forwarding
bool found = false;
for (auto fwd : forwards[ext_itr]) {
if (fwd[0] <= time && time <= (fwd[0] + fwd[1])) {
found = true;
ext_itr = fwd[2];
// 9999 if reached original
if (ext_itr == extension) {
found = false;
ext_itr = 9999;
}
break;
}
}
// Break if could not find
if (!found) break;
}
printf("AT %04d CALL TO %04d RINGS %04d\n", time, extension, ext_itr);
}
}
printf("END OF OUTPUT\n");
return 0;
}
|
Python
|
UTF-8
| 476 | 3.3125 | 3 |
[] |
no_license
|
class Room:
def __init__(self, id, num_features, size):
self.id = id
self.size = size
self.features = [0] * num_features
def __eq__(self, value):
return self.id == value.id
def __str__(self):
return "{} (size={}) : {}".format(self.id, self.size, self.features)
def addFeature(self, feature_id):
self.features[feature_id] = 1
def hasFeature(self, feature_id):
return self.features[feature_id]
|
Markdown
|
UTF-8
| 422 | 4.03125 | 4 |
[] |
no_license
|
# superDigit
This code is written in JavaScript. It concatenates a number n times and repeatedly sums the digits of the result until only one digit remains. This is the super digit that will be printed.
For example:
For n=3 and the number numero=148, the concatenated number will be P=148148148.
Then, the function will sum all the digits as follows:
superDigit(148148148) = 39
superDigit(39) = 12
superDigit(12) =3
Finally, it will print 3.
|
Python
|
UTF-8
| 749 | 2.609375 | 3 |
[] |
no_license
|
def run(m, data, ut):
class tests(ut.TestCase):
def test_1_test_lin_data(self):
shader = m().setLimits(0, 10, 10, scale_type='lin').initialize()
shader.apply([1])
shader.apply([2])
shader.apply([3])
shader.apply([4])
shader.apply(5)
shader.apply(6)
self.assertEqual(shader.getAgg(),\
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
def test_2_apply_linear_data(self):
shader = m().setLimits(0, 4, 4).setLimits(0, 4, 4).init()
shader.apply([0, 0])
shader.apply([1, 1])
shader.apply([2, 2])
shader.apply([3, 3])
self.assertEqual(shader.getAgg(), [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
return tests
|
Java
|
UTF-8
| 399 | 2.3125 | 2 |
[] |
no_license
|
package figures;
import com.jogamp.opengl.GLEventListener;
import java.awt.*;
public interface Figure extends GLEventListener {
Figure withRotate(int rotateSpeed, boolean axisX, boolean axisY, boolean axisZ);
Figure withSize(float sizeX, float sizeY, float sizeZ);
Figure withColor(Color color);
Figure withVertices(int verticesCount);
boolean isChangeableVertices();
}
|
Python
|
UTF-8
| 392 | 3.484375 | 3 |
[] |
no_license
|
# Find the first index of x in arr (or -1 if absent) using recursion on the start index si
from sys import setrecursionlimit
setrecursionlimit(20000)
def checkIndex(arr, x, si):
#base case
l = len(arr)
if si == l:
return -1
if arr[si] == x:
return si
#IH
smallerList = checkIndex(arr, x, si + 1)
return smallerList
#I
print(checkIndex([1,2,3,4,5], 3, 0))
|
Python
|
UTF-8
| 2,580 | 2.828125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# cooperated with
# 3619d41d-b80b-11e7-a937-00505601122b
import argparse
import sys
import numpy as np
import sklearn.datasets
import sklearn.linear_model
import sklearn.metrics
import sklearn.model_selection
import sklearn.pipeline
import sklearn.preprocessing
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", default=42, type=int, help="Random seed")
parser.add_argument("--test_ratio", default=0.5, type=float, help="Test set size ratio")
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
# Load digit dataset
dataset = sklearn.datasets.load_digits()
print(dataset.DESCR, file=sys.stderr)
# Split the data randomly to train and test using `sklearn.model_selection.train_test_split`,
# with `test_size=args.test_ratio` and `random_state=args.seed`.
train_data, test_data, train_target, test_target = sklearn.model_selection.train_test_split(
dataset.data, dataset.target, test_size=args.test_ratio, random_state=args.seed)
# TODO: Create a pipeline, which
# 1. performs sklearn.preprocessing.MinMaxScaler()
# 2. performs sklearn.preprocessing.PolynomialFeatures()
# 3. performs sklearn.linear_model.LogisticRegression(multi_class="multinomial", random_state=args.seed)
#
# Then, using sklearn.model_selection.StratifiedKFold(5), evaluate crossvalidated
# train performance of all combinations of the the following parameters:
# - polynomial degree: 1, 2
# - LogisticRegression regularization C: 0.01, 1, 100
# - LogisticRegression solver: lbfgs, sag
#
# For the best combination of parameters, compute the test set accuracy.
#
# The easiest way is to use `sklearn.model_selection.GridSearchCV`.
pipeline = sklearn.pipeline.Pipeline([("minMax", sklearn.preprocessing.MinMaxScaler()),
("poly", sklearn.preprocessing.PolynomialFeatures()),
("Regresor", sklearn.linear_model.LogisticRegression(multi_class="multinomial", random_state=args.seed))])
kfold = sklearn.model_selection.StratifiedKFold(5)
params = dict(poly__degree=[1,2],Regresor__C=[0.01,1,100], Regresor__solver=["lbfgs", "sag"])
grid = sklearn.model_selection.GridSearchCV(estimator=pipeline, param_grid=params, scoring='accuracy', cv=kfold)
result = grid.fit(train_data, train_target)
best_model = result.best_estimator_
predic = best_model.predict(test_data)
accuracy = sklearn.metrics.accuracy_score(test_target, predic)
print("{:.2f}".format(100 * accuracy))
|
C++
|
UTF-8
| 1,001 | 3.65625 | 4 |
[] |
no_license
|
/*
Given a string S, find and return all the possible permutations of the input string.
Note 1 : The order of permutations is not important.
Note 2 : If original string contains duplicate characters, permutations will also be duplicates.
Input Format :
String S
Output Format :
All permutations (in different lines)
Sample Input :
abc
Sample Output :
abc
acb
bac
bca
cab
cba
*/
#include<bits/stdc++.h>
using namespace std;
int return_permutations(string s, string output[]) {
if (!s.size()) {
output[0] = "";
return 1;
}
string temp[10000];
int size = 0;
for (int i = 0; i < s.size(); i++) {
int temp_size = return_permutations(s.substr(0, i) + s.substr(i + 1), temp);
for (int j = 0; j < temp_size; j++) {
output[j + size] = s[i] + temp[j];
}
size += temp_size;
}
return size;
}
int main() {
string s;
cin >> s;
string output[10000];
int size = return_permutations(s, output);
for (int i = 0; i < size; i++) {
cout << output[i] << endl;
}
return 0;
}
|
Markdown
|
UTF-8
| 7,447 | 3.078125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Semidefinite Programming {#SemidefiniteProgramming}
If you want a living version of this chapter just run the notebook
`NC/DEMOS/4_SemidefiniteProgramming.nb`.
There are two different packages for solving semidefinite programs:
* [`SDP`](#PackageSDP) provides a template algorithm that can be
customized to solve semidefinite programs with special
structure. Users can provide their own functions to evaluate the
primal and dual constraints and the associated Newton system. A
built in solver along conventional lines, working on vector
variables, is provided by default. It does not require NCAlgebra to
run.
* [`NCSDP`](#PackageNCSDP) coordinates with NCAlgebra to handle matrix
variables, allowing constraints, etc, to be entered directly as
noncommutative expressions.
## Semidefinite Programs in Matrix Variables
The package [NCSDP](#PackageNCSDP) allows the symbolic manipulation
and numeric solution of semidefinite programs.
After loading NCAlgebra, the package NCSDP must be loaded using:
<< NCSDP`
Semidefinite programs consist of symbolic noncommutative expressions
representing inequalities and a list of rules for data
replacement. For example the semidefinite program:
$$
\begin{aligned}
\min_Y \quad & <I,Y> \\
\text{s.t.} \quad & A Y + Y A^T + I \preceq 0 \\
& Y \succeq 0
\end{aligned}
$$
can be solved by defining the noncommutative expressions
SNC[a, y];
obj = {-1};
ineqs = {a ** y + y ** tp[a] + 1, -y};
The inequalities are stored in the list `ineqs` in the form of
noncommutative linear polynomials in the variable `y`, and the
objective function contains the symbolic coefficients of the inner
product, in this case `-1`. The reason for the negative signs in the
objective as well as in the second inequality is that semidefinite
programs are expected to be cast in the following *canonical form*:
$$
\begin{aligned}
\max_y \quad & <b,y> \\
\text{s.t.} \quad & f(y) \preceq 0
\end{aligned}
$$
or, equivalently:
$$
\begin{aligned}
\max_y \quad & <b,y> \\
\text{s.t.} \quad & f(y) + s = 0, \quad s \succeq 0
\end{aligned}
$$
Semidefinite programs can be visualized using
[`NCSDPForm`](#NCSDPForm) as in:
vars = {y};
NCSDPForm[ineqs, vars, obj]
The above commands produce a formatted output similar to the ones
shown above.
In order to obtaining a numerical solution for an instance of the
above semidefinite program one must provide a list of rules for data
substitution. For example:
A = {{0, 1}, {-1, -2}};
data = {a -> A};
Equipped with the above list of rules representing a problem instance
one can load [`SDPSylvester`](#PackageSDPSylvester) and use `NCSDP` to create
a problem instance as follows:
{abc, rules} = NCSDP[ineqs, vars, obj, data];
The resulting `abc` and `rules` objects are used for calculating the
numerical solution using [`SDPSolve`](#SDPSolve). The command:
<< SDPSylvester`
{Y, X, S, flags} = SDPSolve[abc, rules];
produces an output like the following:
Problem data:
* Dimensions (total):
- Variables = 4
- Inequalities = 2
* Dimensions (detail):
- Variables = {{2,2}}
- Inequalities = {2,2}
Method:
* Method = PredictorCorrector
* Search direction = NT
Precision:
* Gap tolerance = 1.*10^(-9)
* Feasibility tolerance = 1.*10^(-6)
* Rationalize iterates = False
Other options:
* Debug level = 0
K <B, Y> mu theta/tau alpha |X S|2 |X S|oo |A* X-B| |A Y+S-C|
-------------------------------------------------------------------------------------------
1 1.638e+00 1.846e-01 2.371e-01 8.299e-01 1.135e+00 9.968e-01 9.868e-16 2.662e-16
2 1.950e+00 1.971e-02 2.014e-02 8.990e-01 1.512e+00 9.138e-01 2.218e-15 2.937e-16
3 1.995e+00 1.976e-03 1.980e-03 8.998e-01 1.487e+00 9.091e-01 1.926e-15 3.119e-16
4 2.000e+00 9.826e-07 9.826e-07 9.995e-01 1.485e+00 9.047e-01 8.581e-15 2.312e-16
5 2.000e+00 4.913e-10 4.913e-10 9.995e-01 1.485e+00 9.047e-01 1.174e-14 4.786e-16
-------------------------------------------------------------------------------------------
* Primal solution is not strictly feasible but is within tolerance
(0 <= max eig(A* Y - C) = 8.06666*10^-10 < 1.*10^-6 )
* Dual solution is within tolerance
(|| A X - B || = 1.96528*10^-9 < 1.*10^-6)
* Feasibility radius = 0.999998
(should be less than 1 when feasible)
The output variables `Y` and `S` are the *primal* solutions and `X` is
the *dual* solution.
A symbolic dual problem can be calculated easily using
[`NCSDPDual`](#NCSDPDual):
{dIneqs, dVars, dObj} = NCSDPDual[ineqs, vars, obj];
The dual program for the example problem above is:
$$
\begin{aligned}
\max_x \quad & <c,x> \\
\text{s.t.} \quad & f^*(x) + b = 0, \quad x \succeq 0
\end{aligned}
$$
In the case of the above problem the dual program is
$$
\begin{aligned}
\max_{X_1, X_2} \quad & <I,X_1> \\
\text{s.t.} \quad & A^T X_1 + X_1 A -X_2 - I = 0 \\
& X_1 \succeq 0, \\
& X_2 \succeq 0
\end{aligned}
$$
which can be visualized using [`NCSDPDualForm`](#NCSDPDualForm) using:
NCSDPDualForm[dIneqs, dVars, dObj]
## Semidefinite Programs in Vector Variables
The package [SDP](#PackageSDP) provides a crude and not very efficient
way to define and solve semidefinite programs in standard, vectorized
form. You do not need to load `NCAlgebra` if you just want to
use the semidefinite program solver. But you still need to load `NC`
as in:
<< NC`
<< SDP`
Semidefinite programs are optimization problems of the form:
$$
\begin{aligned}
\max_{y, S} \quad & b^T y \\
\text{s.t.} \quad & A y + S = c \\
& S \succeq 0
\end{aligned}
$$
where $S$ is a symmetric positive semidefinite matrix and $y$ is a
vector of decision variables.
A user can input the problem data, the triplet $(A, b, c)$, or use the
following convenient methods for producing data in the proper format.
For example, problems can be stated as:
$$
\begin{aligned}
\min_y \quad & f(y), \\
\text{s.t.} \quad & G(y) \succeq 0
\end{aligned}
$$
where $f(y)$ and $G(y)$ are affine functions of the vector
of variables $y$.
Here is a simple example:
y = {y0, y1, y2};
f = y2;
G = {y0 - 2, {{y1, y0}, {y0, 1}}, {{y2, y1}, {y1, 1}}};
The list of constraints in `G` is to be interpreted as:
$$
\begin{aligned}
y_0 - 2 \geq 0, \\
\begin{bmatrix} y_1 & y_0 \\ y_0 & 1 \end{bmatrix} \succeq 0, \\
\begin{bmatrix} y_2 & y_1 \\ y_1 & 1 \end{bmatrix} \succeq 0.
\end{aligned}
$$
The function [`SDPMatrices`](#SDPMatrices) converts the above symbolic
problem into numerical data that can be used to solve an SDP.
abc = SDPMatrices[f, G, y]
All required data, that is $A$, $b$, and $c$, is stored in the
variable `abc` as Mathematica's sparse matrices. Their contents can be
revealed using the Mathematica command `Normal`.
Normal[abc]
The resulting SDP is solved using [`SDPSolve`](#SDPSolve):
{Y, X, S, flags} = SDPSolve[abc];
The variables `Y` and `S` are the *primal* solutions and `X` is the
*dual* solution. Detailed information on the computed solution is
found in the variable `flags`.
The package `SDP` is built so as to be easily overloaded with more
efficient or more structured functions. See for example
[SDPFlat](#PackageSDPFlat) and [SDPSylvester](#PackageSDPSylvester).
|
PHP
|
UTF-8
| 328 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
<?php
namespace Fabic\Nql\Exceptions;
class ParserException extends NqlException
{
/**
* Creates a new ParserException describing a Syntax error.
*
* @param string $message Exception message
*
* @return self
*/
public static function syntaxError($message)
{
return new self('[Syntax Error] ' . $message);
}
}
|
C++
|
MacCentralEurope
| 1,082 | 4.15625 | 4 |
[] |
no_license
|
/*
7. Write a program that reads input a word at a time until a lone q is entered. The
program should then report the number of words that began with vowels, the number
that began with consonants, and the number that fit neither of those categories.
One approach is to use isalpha() to discriminate between words beginning with
letters and those that don't, and then use an if or switch statement to further identify
those passing the isalpha() test that begin with vowels.
*/
#include <iostream>
#include <string>
using namespace std;
int main() {
string word;
int vowels = 0, cons = 0, other = 0;
cout << "Enter words (q to quit):" << endl;
cin >> word;
while (word != "q") {
if (isalpha(word[0])) {
if (word[0] == 'a' || word[0] == 'e' ||
word[0] == 'i' || word[0] == 'o' ||
word[0] == 'u') {
++vowels;
}
else {
++cons;
}
}
else {
++other;
}
cin >> word;
}
// print results
cout << endl << vowels << " words begin with vowels.\n"
<< cons << " words begin with consonants.\n"
<< other << " others.\n ~FIN~\n";
return 0;
}
| Python | UTF-8 | 4,144 | 2.59375 | 3 | [] | no_license |
#!/usr/bin/env python3
import sys
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN, AgglomerativeClustering
import ROOT as r
Pixel = namedtuple('Pixel', ['x', 'y', 'raw', 'cal'])
def cluster(pixels, algorithm):
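    # Groups the pixels of one trigger/frame by (x, y) proximity using the given
    # clustering algorithm; returns a list of clusters, each a list of Pixel tuples.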
data = np.array([[x, y, raw, cal] for x, y, raw, cal in zip(pixels.x, pixels.y, pixels.raw, pixels.cal)])
# fit xy
algorithm.fit(np.array(data)[:,:2])
ordered_indices = np.argsort(algorithm.labels_)
ordered_labels = algorithm.labels_[ordered_indices]
diff = np.diff(ordered_labels)
split_idx = (np.argwhere(diff != 0) + 1).flatten()
groups = np.array_split(ordered_indices, split_idx)
return [list(map(Pixel._make, data[gp])) for gp in groups]
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description='Construct clusters from pixels above threshold')
parser.add_argument('pfiles', nargs = '+', help = "ROOT files containing pixel data to cluster")
parser.add_argument('--thresh', default=0, type=int, help='Threshold on calibrated trigger value')
parser.add_argument('--dist', default=1, type=float, help = "set distance threshold")
parser.add_argument('-a', '--agglom', action='store_true', help="Use sklearn Agglomerative Clustering algorithm (default DBSCAN)")
parser.add_argument('-p', '--plot', action='store_true', help='Plot distance distribution')
parser.add_argument('--out', help = 'Output file name')
args = parser.parse_args()
t0 = r.TChain('triggers')
for pfile in args.pfiles:
t0.Add(pfile)
if args.thresh:
t0_pre = t0
r.gROOT.cd()
t0 = t0_pre.CopyTree('cal > {}'.format(args.thresh))
n = t0.GetEntries()
if args.agglom:
algorithm = AgglomerativeClustering(n_clusters=None, compute_full_tree=True, distance_threshold=args.dist)
else:
algorithm = DBSCAN(eps=args.dist, min_samples=1)
if args.plot:
bins = np.arange(0, 4001, 40)
hist = np.zeros(bins.size-1)
for i,trig in enumerate(t0):
            if len(trig.x) == 2:
                # only one pair of pixels: compute its distance directly for efficiency
                dx = trig.x[0] - trig.x[1]
                dy = trig.y[0] - trig.y[1]
                ds = np.sqrt(dx**2 + dy**2)
                ds_bin = (int(ds) - 1) // 10
                if ds_bin < bins.size - 1:
                    hist[ds_bin] += 1
            elif len(trig.x) > 2:
                print('{} / {}'.format(i, n), end='\r')
                x = np.array([x for x in trig.x])
                y = np.array([y for y in trig.y])
                dx = x - x.reshape(-1,1)
                dy = y - y.reshape(-1,1)
                ds = np.sqrt(dx**2 + dy**2)
                ds_bin = (int(ds.max()) - 1) // 10
                if ds_bin < bins.size - 1:
                    hist[ds_bin] += 1
plt.hist(bins[:-1]+1, bins=bins, weights=hist, log=True)
plt.xlabel('Euclidean distance')
plt.title('Distance between above-threshold pixels in same frame')
plt.show()
if args.out:
outfile = r.TFile(args.out, 'recreate')
t = t0.CloneTree(0)
frame_n = np.zeros(1, dtype=int)
frame_occ = np.zeros(1, dtype=int)
t.Branch('frame_n', frame_n, 'frame_n/i')
t.Branch('frame_occ', frame_occ, 'frame_occ/i')
for i,trig in enumerate(t0):
print('{} / {}'.format(i+1, t0.GetEntries()), end='\r')
clusters = cluster(trig, algorithm)
for cl in clusters:
t.x.clear()
t.y.clear()
t.raw.clear()
t.cal.clear()
for pix in cl:
t.x.push_back(int(pix.x))
t.y.push_back(int(pix.y))
t.raw.push_back(int(pix.raw))
t.cal.push_back(int(pix.cal))
frame_n[0] = i
frame_occ[0] = len(clusters)
t.Fill()
outfile.Write()
outfile.Close()
| PHP | UTF-8 | 518 | 2.5625 | 3 | ["MIT", "LicenseRef-scancode-unknown-license-reference"] | permissive |
<?php
/*
* This file is part of the Indigo Ruler package.
*
* (c) Indigo Development Team
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Indigo\Ruler;
/**
* Defines a single rule
*
* @author Márk Sági-Kazár <mark.sagikazar@gmail.com>
*/
interface Rule
{
/**
* Checks whether the rule can be applied for the Context
*
* @param mixed $context
*
* @return boolean
*/
public function check($context);
}
| Python | UTF-8 | 6,630 | 2.515625 | 3 | ["MIT"] | permissive |
"""Section Service."""
import datetime
import os
import sys
import time
from collections import defaultdict
from multiprocessing.pool import ThreadPool
from django.core.cache import cache
from berkeleytime.utils import AtomicInteger, BColors
from catalog.mapper import section_mapper
from catalog.models import Course, Section
from catalog.resource import sis_class_resource
from catalog.service import course_service
from enrollment.mapper import enrollment_mapper
from enrollment.service import enrollment_service
NUM_THREADS = min(2 * os.cpu_count(), 16)
class SectionService:
"""Application logic for section information."""
def update(self, semester, year, abbreviation=None, course_number=None):
"""Update all sections in a semester.
If given abbreviation + course_number, update only that course's sections.
"""
print({
'message': 'Updating sections.',
'semester': semester,
'year': year,
'abbreviation': abbreviation,
'course_number': course_number,
})
# Get list of courses for which to update sections, excluding those updated already today
if abbreviation and course_number:
courses = Course.objects.filter(abbreviation=abbreviation, course_number=course_number)
else:
midnight = datetime.datetime.combine(datetime.datetime.now().date(),datetime.time(0))
courses = Course.objects.exclude(section__last_updated__gte=midnight).distinct()
# Asynchronously perform an update for each course's sections
i = AtomicInteger()
def update_wrapper(course):
i.inc()
self._update_class(
course=course,
semester=semester,
year=year,
)
p = ThreadPool(NUM_THREADS)
result = p.map_async(update_wrapper, courses)
# Log progress of updates
print(BColors.OKGREEN + f'Starting job with {NUM_THREADS} workers.' + BColors.ENDC)
while not result.ready():
print(BColors.OKGREEN + f'Updating course {i.value()} of {len(courses)}.' + BColors.ENDC)
time.sleep(5)
def _update_class(self, course, semester, year):
"""Update all sections for a course in a semester.
Though not rigorously defined, a 'class' here is the collection
of sections for a course offered in a single semester.
"""
if cache_result := cache.get(f'no classes {course.id}'):
print(f'no classes found for course {course.id} at {cache_result}')
return
# Get response from SIS class resource
response = sis_class_resource.get(
semester=semester,
year=year,
course_id=course.id,
abbreviation=course.abbreviation,
course_number=course.course_number,
)
if len(response) == 0:
cache.add(f'no classes {course.id}', datetime.datetime.now(), timeout=7 * 24 * 60 * 60)
print(f'no classes found for course {course.id}')
return
updated_section_ids = set()
primary_sect_id_to_sections = defaultdict(list)
# Map response to Section and Enrollment objects and persist to database
section_extras = {
'course_id': int(course.id),
'abbreviation': course.abbreviation,
'course_number': course.course_number,
'semester': semester,
'year': year,
}
for sect in response:
if not sect:
continue
section_dict = section_mapper.map(sect, extras=section_extras)
section, created = self.update_or_create_from_dict(section_dict)
if not section:
continue
updated_section_ids.add(section.id)
if section_dict['primary_section']:
primary_sect_id_to_sections[section_dict['primary_section']].append(section)
# Update enrollment
if semester != 'summer' and section.is_primary and not section.disabled:
enrollment_dict = enrollment_mapper.map(sect, extras={'section_id': section.id})
enrollment_service.update_or_create_from_dict(enrollment_dict)
# Add associations between primary and non-primary sections
for related_sections in primary_sect_id_to_sections.values():
primary_section = [s for s in related_sections if s.is_primary][0]
other_sections = [s for s in related_sections if not s.is_primary]
primary_section.associated_sections.add(*other_sections)
for section in related_sections:
section.save()
if len(updated_section_ids) > 0:
print({
'message': 'Updated sections for course',
'course': course,
'sections updated': len(updated_section_ids),
})
# Disable existing section if data not found in response
sections_to_disable = Section.objects.filter(
course_id=course.id,
semester=semester,
year=year,
).exclude(id__in=updated_section_ids)
for section in sections_to_disable:
if not section.disabled:
section.disabled = True
section.save()
print({
'message': 'Disabling section not in API response.',
'section': section,
})
# Update derived enrollment fields in course object
course_service._update_derived_enrollment_fields(course)
def update_or_create_from_dict(self, section_dict):
try:
section_obj, created = Section.objects.update_or_create(
course_id=section_dict['course_id'],
semester=section_dict['semester'],
year=section_dict['year'],
section_number=section_dict['section_number'],
kind=section_dict['kind'],
defaults={
key: section_dict[key] for key in section_dict if key != 'primary_section'
},
)
print({
'message': 'Created/updated section object',
'section': section_obj,
'created': created,
})
return section_obj, created
except Exception as e:
print('Exception encountered while updating/creating section', section_dict, e, file=sys.stderr)
return None, False
section_service = SectionService()
| Python | UTF-8 | 15,164 | 2.71875 | 3 | ["MIT"] | permissive |
import sys
import getopt
import os.path
import itertools
from scipy.stats import chisquare
from typing import Tuple
from Bio import Phylo
from Bio.Phylo.BaseTree import TreeMixin
from .base import Tree
from ...helpers.files import read_single_column_file_to_list
class PolytomyTest(Tree):
def __init__(self, args) -> None:
super().__init__(**self.process_args(args))
def run(self):
# read in groups
groups_arr = self.read_in_groups(self.groups)
# determine groups of groups
groups_of_groups, outgroup_taxa = self.determine_groups_of_groups(groups_arr)
# read trees into list
trees_file_path = read_single_column_file_to_list(self.trees)
# go through all triplets of all trees and
# examine sister relationships among all triplets
summary = self.loop_through_trees_and_examine_sister_support_among_triplets(
trees_file_path,
groups_of_groups,
outgroup_taxa
)
# count triplet and gene support frequencies for different sister relationships
triplet_group_counts, gene_support_freq = self.get_triplet_and_gene_support_freq_counts(summary)
# conduct chisquare tests
triplet_res, gene_support_freq_res = self.chisquare_tests(triplet_group_counts, gene_support_freq)
# print results
self.print_gene_support_freq_res(gene_support_freq_res, gene_support_freq, trees_file_path)
# self.print_triplet_based_res(triplet_res, triplet_group_counts)
def process_args(self, args):
return dict(trees=args.trees, groups=args.groups)
def read_in_groups(self, groups) -> list:
groups_arr = []
try:
for line in open(self.groups):
line = line.strip()
if not line.startswith("#"):
try:
line = line.split("\t")
temp = []
temp.append(line[0])
temp.append(line[1].split(";"))
temp.append(line[2].split(";"))
temp.append(line[3].split(";"))
temp.append(line[4].split(";"))
groups_arr.append(temp)
except IndexError:
try:
print(f"{self.groups} contains an indexing error.")
print("Please format the groups file (-g) as a five-column tab-delimited file with column 1 being the name of the test")
print("col2: the tip names of one group (; separated)")
print("col3: the tip names of a second group (; separated)")
print("col4: the tip names of a third group (; separated)")
print("col5: the tip names of the outgroup taxa (; separated)")
sys.exit()
except BrokenPipeError:
pass
except FileNotFoundError:
try:
print(f"{self.groups} corresponds to no such file.")
print("Please check filename and pathing again.")
sys.exit()
except BrokenPipeError:
pass
return groups_arr
def loop_through_trees_and_examine_sister_support_among_triplets(
self,
trees_file_path: str,
groups_of_groups: dict,
outgroup_taxa: list
) -> dict:
"""
go through all trees and all triplets of all trees. For each triplet,
determine which two taxa are sister to one another
"""
summary = {}
# loop through trees
try:
#cnt = 0
for tree_file in trees_file_path:
#cnt+=1
#print(f"processing tree {cnt} of {len(trees_file_path)}")
tree = Phylo.read(tree_file, 'newick')
# get tip names
tips = self.get_tip_names_from_tree(tree)
# examine all triplets and their support for
# any sister pairing
summary = self.examine_all_triplets_and_sister_pairing(
tips,
tree_file,
summary,
groups_of_groups,
outgroup_taxa
)
except FileNotFoundError:
try:
print(f"{tree_file} corresponds to no such file.")
print("Please check file name and pathing")
sys.exit()
except BrokenPipeError:
pass
return summary
def determine_groups_of_groups(
self,
groups_arr: list
) -> dict:
groups_of_groups = {}
for group in groups_arr:
temp = []
for i in range(1, 4):
temp.append([taxon_name for taxon_name in group[i]])
groups_of_groups[group[0]] = (temp)
outgroup_taxa = [taxon_name for taxon_name in group[4]]
return groups_of_groups, outgroup_taxa
def examine_all_triplets_and_sister_pairing(
self,
tips: list,
tree_file: str,
summary: dict,
groups_of_groups: dict,
outgroup_taxa: list
) -> dict:
"""
evaluate all triplets for sister relationships. Polytomies
in input trees are accounted for
"""
# get all combinations of three tips
identifier = list(groups_of_groups.keys())[0]
triplet_tips = (list(itertools.product(*groups_of_groups[identifier])))
for triplet in triplet_tips:
# obtain tree of the triplet
tree = self.get_triplet_tree(tips, triplet, tree_file, outgroup_taxa)
cnt=0
try:
for leaf in tree.get_terminals():
cnt+=1
except AttributeError:
continue
if tree and cnt==3:
for _, groups in groups_of_groups.items():
                    # see if there are any intersections between
                    # the triplet and the group
num_groups_represented = self.count_number_of_groups_in_triplet(triplet, groups)
# if one taxa is represented from each group,
# use the triplet
tip_names = []
if num_groups_represented == 3:
# get names in triplet and set tree branch lengths to 1
tip_names = self.get_tip_names_from_tree(tree)
self.set_branch_lengths_in_tree_to_one(tree)
# determine sisters and add to sister pair counter
summary = self.determine_sisters_and_add_to_counter(
tip_names,
tree,
tree_file,
groups,
summary
)
else:
continue
return summary
def count_number_of_groups_in_triplet(self, triplet: list, groups: tuple) -> int:
"""
determine how many groups are represented in a triplet
"""
num_groups_represented = 0
for group in groups:
temp = [value for value in list(triplet) if value in group]
if len(temp) >= 1:
num_groups_represented +=1
return num_groups_represented
def set_branch_lengths_in_tree_to_one(self, tree: Tree) -> None:
for term in tree.get_terminals():
term.branch_length = 1
for internode in tree.get_nonterminals():
internode.branch_length = 1
def check_if_triplet_is_a_polytomy(self, tree: Tree) -> Tree:
"""
count the number of internal branches. If 1, then the triplet is a polytomy
"""
num_int=0
# check if the triplet is a polytomy
for internal_branch in tree.get_nonterminals():
num_int+=1
if num_int == 1:
return True
else:
return False
def sister_relationship_counter(
self,
tree_file: str,
summary: dict,
sisters: str
) -> dict:
"""
counter for how many times a particular sister relationship is observed
"""
# if tree is not in summary, create a key for it
if tree_file not in summary.keys():
summary[str(tree_file)]={}
# if the sister relationship is not in the tree file dict, create a key for it
if sisters not in summary[str(tree_file)].keys():
summary[str(tree_file)][sisters] = 1
else:
summary[str(tree_file)][sisters] += 1
return summary
def get_triplet_tree(
self,
tips: list,
triplet: tuple,
tree_file: str,
outgroup_taxa: list
) -> Tree:
"""
get a tree object of only the triplet of interest
"""
# determine tips that are not in the triplet of interest
tips_to_prune = list(set(tips)- set(list(triplet)))
# determine tips that in the outgroup
outgroup_present = [value for value in tips if value in outgroup_taxa]
tree = Phylo.read(tree_file, 'newick')
# root tree on outgroup taxa
try:
tree.root_with_outgroup(outgroup_present)
# prune to a triplet
tree = self.prune_tree_using_taxa_list(tree, tips_to_prune)
return tree
except ValueError:
tree = False
return tree
def determine_sisters_from_triplet(
self,
groups: list,
pair: tuple
) -> str:
"""
determine sister taxa from a triplet
"""
sisters = sorted([[i for i, lst in enumerate(groups) if pair[0] in lst][0], [i for i, lst in enumerate(groups) if pair[1] in lst][0]])
sisters = [str(i) for i in sisters]
sisters = str("-".join(sisters))
return sisters
def determine_sisters_and_add_to_counter(
self,
tip_names: list,
tree: Tree,
tree_file: str,
groups: list,
summary: dict
) -> dict:
"""
determine which pair of taxa are sister to one another
and add 1 to the counter for the sister pair
"""
# get pairs from tip names
pairs = list(itertools.combinations(tip_names, 2))
for pair in pairs:
is_polytomy = self.check_if_triplet_is_a_polytomy(tree)
# if distance between pair is 2 and the triplet is
# not a polytomy (i.e., having only 1 internal branch)
# then report the sisters in the triplet
if tree.distance(pair[0], pair[1]) == 2 and not is_polytomy:
# determine which two tips are sisters
sisters = self.determine_sisters_from_triplet(groups, pair)
# add to summary dictionary of how many times that sister
# relationship is observed
summary = self.sister_relationship_counter(tree_file, summary, sisters)
return summary
def get_triplet_and_gene_support_freq_counts(
self,
summary: dict
) -> Tuple[dict, dict]:
"""
count how many triplets and genes support the various sister relationships
"""
# Count the total number of sister pairings
# for the three possible pairs for triplets
triplet_group_counts = {
'g0g1_count' : 0,
'g0g2_count' : 0,
'g1g2_count' : 0
}
# Also, keep track of which and how many genes
# support each sister pairing
gene_support_freq = {
'0-1' : 0,
'1-2' : 0,
'0-2' : 0
}
for tree in summary:
# create empty key value pairs in case sister
# pairing was never observed
if '0-1' not in summary[tree].keys():
summary[tree]['0-1'] = 0
if '0-2' not in summary[tree].keys():
summary[tree]['0-2'] = 0
if '1-2' not in summary[tree].keys():
summary[tree]['1-2'] = 0
# create a running value of triplets that support each sister pair
triplet_group_counts['g0g1_count'] += summary[tree]['0-1']
triplet_group_counts['g0g2_count'] += summary[tree]['0-2']
triplet_group_counts['g1g2_count'] += summary[tree]['1-2']
# determine which sister pairing is best supported in a single gene
# and add one to the corresponding gene support frequency count
gene_support_freq[max(summary[tree], key=summary[tree].get)] += 1
return triplet_group_counts, gene_support_freq
def chisquare_tests(
self,
triplet_group_counts: dict,
gene_support_freq: dict
):
triplet_res = chisquare(
[
triplet_group_counts['g0g1_count'],
triplet_group_counts['g0g2_count'],
triplet_group_counts['g1g2_count']
]
)
gene_support_freq_res = chisquare(
[
gene_support_freq['0-1'],
gene_support_freq['0-2'],gene_support_freq['1-2']
]
)
return triplet_res, gene_support_freq_res
# def print_triplet_based_res(
# self,
# triplet_res,
# triplet_group_counts: dict
# ) -> None:
# """
# print results to stdout for user
# """
# try:
# print(f"\nTriplet Results")
# print(f"===============")
# print(f"chi-squared: {round(triplet_res.statistic, 4)}")
# print(f"p-value: {round(triplet_res.pvalue, 6)}")
# print(f"total triplets: {sum(triplet_group_counts.values())}")
# print(f"0-1: {triplet_group_counts['g0g1_count']}")
# print(f"0-2: {triplet_group_counts['g0g2_count']}")
# print(f"1-2: {triplet_group_counts['g1g2_count']}")
# except BrokenPipeError:
# pass
def print_gene_support_freq_res(
self,
gene_support_freq_res,
gene_support_freq: dict,
trees_file_path: list
) -> None:
"""
print results to stdout for user
"""
try:
print(f"Gene Support Frequency Results")
print(f"==============================")
print(f"chi-squared: {round(gene_support_freq_res.statistic, 4)}")
print(f"p-value: {round(gene_support_freq_res.pvalue, 6)}")
print(f"total genes: {(gene_support_freq['0-1'] + gene_support_freq['0-2'] + gene_support_freq['1-2'])}")
print(f"0-1: {gene_support_freq['0-1']}")
print(f"0-2: {gene_support_freq['0-2']}")
print(f"1-2: {gene_support_freq['1-2']}")
except BrokenPipeError:
pass
| C++ | UTF-8 | 327 | 3.390625 | 3 | [] | no_license |
#include <iostream>
#include <string.h>
using namespace std;
class A
{
public:
static void setA( int i );
static int getA();
private:
static int a;
};
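// A static data member is shared by every instance of A and must be
// defined exactly once outside the class, as done on the next line.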
int A::a = 0;
void A::setA( int i )
{
a = i;
}
int A::getA()
{
return a;
}
int main()
{
A::setA( 1 );
cout << "A::a = " << A::getA() << endl;
}
| C# | UTF-8 | 859 | 2.859375 | 3 | [] | no_license |
using System;
namespace Cubo.Core.Domain
{
public class Item : Entity
{
public Guid BucketId { get; protected set; }
public string Key { get; protected set; }
public string Value { get; protected set; }
protected Item()
{
}
public Item(Guid bucketId, string key, string value)
{
if(string.IsNullOrWhiteSpace(key))
{
throw new CuboException("empty_item_key",
"Item can not have an empty key.");
}
if(string.IsNullOrWhiteSpace(value))
{
throw new CuboException("empty_item_value",
"Item can not have an empty value.");
}
BucketId = bucketId;
Key = key.ToLowerInvariant();
Value = value;
}
}
}
| JavaScript | UTF-8 | 298 | 2.71875 | 3 | [] | no_license |
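// Returns a deep copy of the array with the elements involved in the current
// step flagged for highlighting: a "compare" operation sets isInComparison,
// anything else sets isInSwap.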
export default function getSortStep(originalArr, operation, elementsInOperation) {
const arr = JSON.parse(JSON.stringify(originalArr));
elementsInOperation.forEach(id => {
if (operation === "compare") arr[id].isInComparison = true;
else arr[id].isInSwap = true;
});
return arr;
}
| Markdown | UTF-8 | 3,577 | 2.875 | 3 | [] | no_license |
Justin Nguyen - JNguyen96
Alexander Kang - alexk2060
CS56 W16
a) This is a chance based game where the user faces off against the computer or another player, rolling dice in order to build an insect or person.
b) As a player, I can click "Roll" so that the "dice" will roll
As a player, I can see which parts I still need so that I can tell who is in the lead
As a player, I can choose between 'ant', 'beetle', and 'person' so that I can have variety in the game
As a player, I can click "Exit" so that I can leave the game at any time
As the GUI, I can display graphics so that the user can interact with the game
c) The software runs. It opens a new window which displays four buttons, which are options of what the user can do. These buttons are used to start the game. After a button is clicked, a new window opens for each prompt.
d) As a user, I want a scoreboard so that I can keep track of the number of wins each player has
As a user, I want graphics of the object being built so that I can have a better picture of the progress of the game
As a user, I want a graphic of the dice so that the game feels more interactive
As a user, I want an end game menu so that I can have some options once one game ends
e) Right now, the README.md only contains a short description of what the game does. Things that we can add are instructions for the game, descriptions of the different mode options, screenshots of graphics, and a description of the file structure and history.
f) The targets javadoc, test, and download do not have descriptions. We do not see any old legacy JWS stuff that needs to be removed.
g) The issues currently open for the repo tally to an estimated 1100 points. The issues are pretty clear in terms of their expectations. One issue, 'Add New Levels/Animal', seems to be already completed.
h) Since our code does not have any test cases, one issue we can add is writing test cases. All other visible issues are covered. https://github.com/UCSB-CS56-Projects/cs56-games-beetle/issues/12
i) The code is organized into various classes. Each Level/Animal has its own GUI along with a StartGUI. There is a class for running the game and for drawing graphics. It is unclear what the class PPlayer does, and there is no documentation for it. However, for most of the classes it was clear what the purpose of each class and its methods are. It is obvious how most of the classes relate to one another. The code is fairly easy to read and understand; there is no confusing or overly complicated code. I would convey to a programmer the startGUI class and the path that it takes from the start to the gameplay screen (Start -> single/multiplayer -> player name -> game screen). I would also show them a Player class and what its methods do.
j) There are no JUnit tests that we see. 'ant test' does not run anything. Therefore, there is great opportunity to expand test coverage. We would go about it by adding tests to check various parts of the game, such as whether there is a limit on how many games can be played in a row and whether the variables for the body parts are holding the right amounts.
Code related stuff:
1. Every class seems to be adequately explained by either normal or javadoc comments.
2. There do not seem to be any unnecessary/obsolete/confusing portions of the build.xml file.
3. All of the package names correspond to the preferred naming convention for legacy code projects.
4. All attributes and methods are set to public/private/"package private" accordingly.
5. There are no objects that have too much functionality bundled into them.
| C# | UTF-8 | 1,642 | 3.109375 | 3 | [] | no_license |
using System;
using System.IO;
using System.Linq;
using System.Collections.Generic;
namespace CSharp_Shell
{
public class CDAction : CommonAction
{
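        // Tries to change the current working directory to the argument at the
        // front of Args. Handles "..", absolute paths, and paths relative to the
        // current directory; returns false when no argument is given (responseCode 1)
        // or when the target directory does not exist.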
override public bool TryDo(out int responseCode)
{
responseCode = 0;
string arg;
if(Args != null && Args.Count > 0)
{
arg = Args.Peek();
}
else
{
responseCode = 1;
return false;
}
if(arg == "..")
{
Directory.SetCurrentDirectory(Directory.GetParent(Directory.GetCurrentDirectory()).FullName);
}
else if(Directory.Exists(arg))
{
ChangeDirTo(arg);
}
else
{
string dir = ConcatDirectories(Directory.GetCurrentDirectory(), arg);
if(Directory.Exists(dir))
{
ChangeDirTo(dir);
}
else
{
CLIDisplay.DisplayText("There is no such directory");
return false;
}
}
return true;
}
private void ChangeDirTo(string dir)
{
Directory.SetCurrentDirectory(dir);
}
private string ConcatDirectories(string dir1, string dir2)
{
if(dir1 == null || dir2 == null)
{
return "";
}
string dir;
if(dir1.Last() == '/' || dir2.First() == '/')
{
if(dir1.Last() == '/' && dir2.First() == '/')
{
dir = dir1 + dir2.Remove(0,1);
}
else
{
dir = dir1 + dir2;
}
}
else
{
dir = dir1 + '/' + dir2;
}
return dir;
}
}
}
| C# | UTF-8 | 1,310 | 2.703125 | 3 | ["MIT"] | permissive |
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
using System.IO;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Newtonsoft.Json;
namespace AspNetCore.Extensions
{
public static class HttpExtensions
{
private static readonly JsonSerializer Serializer = new JsonSerializer();
public static Task WriteJson<T>(this HttpResponse response, T obj)
{
response.ContentType = "application/json";
return response.WriteAsync(JsonConvert.SerializeObject(obj));
}
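        // Deserializes the request body as T and validates it with data annotations;
        // on validation failure, writes a 400 response with the results and returns default(T).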
public static async Task<T> ReadFromJson<T>(this HttpContext httpContext)
{
using (var streamReader = new StreamReader(httpContext.Request.Body))
using (var jsonTextReader = new JsonTextReader(streamReader))
{
var obj = Serializer.Deserialize<T>(jsonTextReader);
var results = new List<ValidationResult>();
if (Validator.TryValidateObject(obj, new ValidationContext(obj), results))
{
return obj;
}
httpContext.Response.StatusCode = 400;
await httpContext.Response.WriteJson(results);
return default(T);
}
}
}
}
|